Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: bch2_btree_id_str()

Since we can run with unknown btree IDs, we can't directly index btree
IDs into fixed size arrays: add bch2_btree_id_str(), which bounds-checks
the ID against BTREE_ID_NR and returns "(unknown)" for IDs it doesn't
recognize, and use it everywhere the old bch2_btree_ids[] array was
indexed directly.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+74 -68
+3 -3
fs/bcachefs/alloc_background.c
··· 727 727 "incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n" 728 728 " for %s", 729 729 set ? "setting" : "clearing", 730 - bch2_btree_ids[btree], 730 + bch2_btree_id_str(btree), 731 731 iter.pos.inode, 732 732 iter.pos.offset, 733 733 bch2_bkey_types[old.k->type], ··· 1245 1245 1246 1246 if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c, 1247 1247 "entry in %s btree for nonexistant dev:bucket %llu:%llu", 1248 - bch2_btree_ids[iter->btree_id], pos.inode, pos.offset)) 1248 + bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset)) 1249 1249 goto delete; 1250 1250 1251 1251 a = bch2_alloc_to_v4(alloc_k, &a_convert); ··· 1255 1255 genbits != alloc_freespace_genbits(*a)), c, 1256 1256 "%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)", 1257 1257 (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf), 1258 - bch2_btree_ids[iter->btree_id], 1258 + bch2_btree_id_str(iter->btree_id), 1259 1259 iter->pos.inode, 1260 1260 iter->pos.offset, 1261 1261 a->data_type == state,
+2 -2
fs/bcachefs/backpointers.c
··· 55 55 void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp) 56 56 { 57 57 prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=", 58 - bch2_btree_ids[bp->btree_id], 58 + bch2_btree_id_str(bp->btree_id), 59 59 bp->level, 60 60 (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT), 61 61 (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT), ··· 453 453 return ret; 454 454 missing: 455 455 prt_printf(&buf, "missing backpointer for btree=%s l=%u ", 456 - bch2_btree_ids[bp.btree_id], bp.level); 456 + bch2_btree_id_str(bp.btree_id), bp.level); 457 457 bch2_bkey_val_to_text(&buf, c, orig_k); 458 458 prt_printf(&buf, "\nbp pos "); 459 459 bch2_bpos_to_text(&buf, bp_iter.pos);
+2 -1
fs/bcachefs/bbpos.h
··· 3 3 #define _BCACHEFS_BBPOS_H 4 4 5 5 #include "bkey_methods.h" 6 + #include "btree_cache.h" 6 7 7 8 struct bbpos { 8 9 enum btree_id btree; ··· 41 40 42 41 static inline void bch2_bbpos_to_text(struct printbuf *out, struct bbpos pos) 43 42 { 44 - prt_str(out, bch2_btree_ids[pos.btree]); 43 + prt_str(out, bch2_btree_id_str(pos.btree)); 45 44 prt_char(out, ':'); 46 45 bch2_bpos_to_text(out, pos.pos); 47 46 }
+2 -1
fs/bcachefs/bkey_methods.c
··· 3 3 #include "bcachefs.h" 4 4 #include "backpointers.h" 5 5 #include "bkey_methods.h" 6 + #include "btree_cache.h" 6 7 #include "btree_types.h" 7 8 #include "alloc_background.h" 8 9 #include "dirent.h" ··· 165 164 if (flags & BKEY_INVALID_COMMIT && 166 165 !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type))) { 167 166 prt_printf(err, "invalid key type for btree %s (%s)", 168 - bch2_btree_ids[type], bch2_bkey_types[k.k->type]); 167 + bch2_btree_id_str(type), bch2_bkey_types[k.k->type]); 169 168 return -BCH_ERR_invalid_bkey; 170 169 } 171 170
+17 -4
fs/bcachefs/btree_cache.c
··· 783 783 "btree node header doesn't match ptr\n" 784 784 "btree %s level %u\n" 785 785 "ptr: ", 786 - bch2_btree_ids[b->c.btree_id], b->c.level); 786 + bch2_btree_id_str(b->c.btree_id), b->c.level); 787 787 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); 788 788 789 789 prt_printf(&buf, "\nheader: btree %s level %llu\n" 790 790 "min ", 791 - bch2_btree_ids[BTREE_NODE_ID(b->data)], 791 + bch2_btree_id_str(BTREE_NODE_ID(b->data)), 792 792 BTREE_NODE_LEVEL(b->data)); 793 793 bch2_bpos_to_text(&buf, b->data->min_key); 794 794 ··· 1151 1151 six_unlock_intent(&b->c.lock); 1152 1152 } 1153 1153 1154 - void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, 1155 - const struct btree *b) 1154 + const char *bch2_btree_id_str(enum btree_id btree) 1155 + { 1156 + return btree < BTREE_ID_NR ? __bch2_btree_ids[btree] : "(unknown)"; 1157 + } 1158 + 1159 + void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) 1160 + { 1161 + prt_printf(out, "%s level %u/%u\n ", 1162 + bch2_btree_id_str(b->c.btree_id), 1163 + b->c.level, 1164 + bch2_btree_id_root(c, b->c.btree_id)->level); 1165 + bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key)); 1166 + } 1167 + 1168 + void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) 1156 1169 { 1157 1170 struct bset_stats stats; 1158 1171
+3 -2
fs/bcachefs/btree_cache.h
··· 123 123 return bch2_btree_id_root(c, b->c.btree_id)->b; 124 124 } 125 125 126 - void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *, 127 - const struct btree *); 126 + const char *bch2_btree_id_str(enum btree_id); 127 + void bch2_btree_pos_to_text(struct printbuf *, struct bch_fs *, const struct btree *); 128 + void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *, const struct btree *); 128 129 void bch2_btree_cache_to_text(struct printbuf *, const struct bch_fs *); 129 130 130 131 #endif /* _BCACHEFS_BTREE_CACHE_H */
+10 -10
fs/bcachefs/btree_gc.c
··· 101 101 "btree node with incorrect min_key at btree %s level %u:\n" 102 102 " prev %s\n" 103 103 " cur %s", 104 - bch2_btree_ids[b->c.btree_id], b->c.level, 104 + bch2_btree_id_str(b->c.btree_id), b->c.level, 105 105 buf1.buf, buf2.buf) && 106 106 should_restart_for_topology_repair(c)) { 107 107 bch_info(c, "Halting mark and sweep to start topology repair pass"); ··· 129 129 "btree node with incorrect max_key at btree %s level %u:\n" 130 130 " %s\n" 131 131 " expected %s", 132 - bch2_btree_ids[b->c.btree_id], b->c.level, 132 + bch2_btree_id_str(b->c.btree_id), b->c.level, 133 133 buf1.buf, buf2.buf) && 134 134 should_restart_for_topology_repair(c)) { 135 135 bch_info(c, "Halting mark and sweep to start topology repair pass"); ··· 290 290 "btree node overwritten by next node at btree %s level %u:\n" 291 291 " node %s\n" 292 292 " next %s", 293 - bch2_btree_ids[b->c.btree_id], b->c.level, 293 + bch2_btree_id_str(b->c.btree_id), b->c.level, 294 294 buf1.buf, buf2.buf)) { 295 295 ret = DROP_PREV_NODE; 296 296 goto out; ··· 301 301 "btree node with incorrect max_key at btree %s level %u:\n" 302 302 " node %s\n" 303 303 " next %s", 304 - bch2_btree_ids[b->c.btree_id], b->c.level, 304 + bch2_btree_id_str(b->c.btree_id), b->c.level, 305 305 buf1.buf, buf2.buf)) 306 306 ret = set_node_max(c, prev, 307 307 bpos_predecessor(cur->data->min_key)); ··· 313 313 "btree node overwritten by prev node at btree %s level %u:\n" 314 314 " prev %s\n" 315 315 " node %s", 316 - bch2_btree_ids[b->c.btree_id], b->c.level, 316 + bch2_btree_id_str(b->c.btree_id), b->c.level, 317 317 buf1.buf, buf2.buf)) { 318 318 ret = DROP_THIS_NODE; 319 319 goto out; ··· 323 323 "btree node with incorrect min_key at btree %s level %u:\n" 324 324 " prev %s\n" 325 325 " node %s", 326 - bch2_btree_ids[b->c.btree_id], b->c.level, 326 + bch2_btree_id_str(b->c.btree_id), b->c.level, 327 327 buf1.buf, buf2.buf)) 328 328 ret = set_node_min(c, cur, expected_start); 329 329 } ··· 347 347 "btree node with incorrect 
max_key at btree %s level %u:\n" 348 348 " %s\n" 349 349 " expected %s", 350 - bch2_btree_ids[b->c.btree_id], b->c.level, 350 + bch2_btree_id_str(b->c.btree_id), b->c.level, 351 351 buf1.buf, buf2.buf)) { 352 352 ret = set_node_max(c, child, b->key.k.p); 353 353 if (ret) ··· 398 398 if (mustfix_fsck_err_on(ret == -EIO, c, 399 399 "Topology repair: unreadable btree node at btree %s level %u:\n" 400 400 " %s", 401 - bch2_btree_ids[b->c.btree_id], 401 + bch2_btree_id_str(b->c.btree_id), 402 402 b->c.level - 1, 403 403 buf.buf)) { 404 404 bch2_btree_node_evict(trans, cur_k.k); ··· 506 506 if (mustfix_fsck_err_on(!have_child, c, 507 507 "empty interior btree node at btree %s level %u\n" 508 508 " %s", 509 - bch2_btree_ids[b->c.btree_id], 509 + bch2_btree_id_str(b->c.btree_id), 510 510 b->c.level, buf.buf)) 511 511 ret = DROP_THIS_NODE; 512 512 err: ··· 970 970 FSCK_NO_RATELIMIT, 971 971 "Unreadable btree node at btree %s level %u:\n" 972 972 " %s", 973 - bch2_btree_ids[b->c.btree_id], 973 + bch2_btree_id_str(b->c.btree_id), 974 974 b->c.level - 1, 975 975 (printbuf_reset(&buf), 976 976 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
+4 -14
fs/bcachefs/btree_io.c
··· 510 510 bch2_trans_node_reinit_iter(trans, b); 511 511 } 512 512 513 - static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c, 514 - struct btree *b) 515 - { 516 - prt_printf(out, "%s level %u/%u\n ", 517 - bch2_btree_ids[b->c.btree_id], 518 - b->c.level, 519 - bch2_btree_id_root(c, b->c.btree_id)->level); 520 - bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key)); 521 - } 522 - 523 513 static void btree_err_msg(struct printbuf *out, struct bch_fs *c, 524 514 struct bch_dev *ca, 525 515 struct btree *b, struct bset *i, ··· 522 532 if (ca) 523 533 prt_printf(out, "on %s ", ca->name); 524 534 prt_printf(out, "at btree "); 525 - btree_pos_to_text(out, c, b); 535 + bch2_btree_pos_to_text(out, c, b); 526 536 527 537 prt_printf(out, "\n node offset %u", b->written); 528 538 if (i) ··· 1167 1177 } 1168 1178 start: 1169 1179 printbuf_reset(&buf); 1170 - btree_pos_to_text(&buf, c, b); 1180 + bch2_btree_pos_to_text(&buf, c, b); 1171 1181 bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s", 1172 1182 bch2_blk_status_to_str(bio->bi_status), buf.buf); 1173 1183 if (rb->have_ioref) ··· 1203 1213 printbuf_reset(&buf); 1204 1214 bch2_bpos_to_text(&buf, b->key.k.p); 1205 1215 bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error", 1206 - __func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf); 1216 + __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf); 1207 1217 1208 1218 bch2_btree_node_rewrite_async(c, b); 1209 1219 } ··· 1514 1524 struct printbuf buf = PRINTBUF; 1515 1525 1516 1526 prt_str(&buf, "btree node read error: no device to read from\n at "); 1517 - btree_pos_to_text(&buf, c, b); 1527 + bch2_btree_pos_to_text(&buf, c, b); 1518 1528 bch_err(c, "%s", buf.buf); 1519 1529 1520 1530 if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
+7 -7
fs/bcachefs/btree_iter.c
··· 362 362 bch2_bpos_to_text(&buf, pos); 363 363 364 364 panic("not locked: %s %s%s\n", 365 - bch2_btree_ids[id], buf.buf, 365 + bch2_btree_id_str(id), buf.buf, 366 366 key_cache ? " cached" : ""); 367 367 } 368 368 ··· 1371 1371 struct bkey_s_c old = { &i->old_k, i->old_v }; 1372 1372 1373 1373 prt_printf(buf, "update: btree=%s cached=%u %pS", 1374 - bch2_btree_ids[i->btree_id], 1374 + bch2_btree_id_str(i->btree_id), 1375 1375 i->cached, 1376 1376 (void *) i->ip_allocated); 1377 1377 prt_newline(buf); ··· 1387 1387 1388 1388 trans_for_each_wb_update(trans, wb) { 1389 1389 prt_printf(buf, "update: btree=%s wb=1 %pS", 1390 - bch2_btree_ids[wb->btree], 1390 + bch2_btree_id_str(wb->btree), 1391 1391 (void *) i->ip_allocated); 1392 1392 prt_newline(buf); 1393 1393 ··· 1416 1416 path->idx, path->ref, path->intent_ref, 1417 1417 path->preserve ? 'P' : ' ', 1418 1418 path->should_be_locked ? 'S' : ' ', 1419 - bch2_btree_ids[path->btree_id], 1419 + bch2_btree_id_str(path->btree_id), 1420 1420 path->level); 1421 1421 bch2_bpos_to_text(out, path->pos); 1422 1422 ··· 3025 3025 trans_for_each_path(trans, path) 3026 3026 if (path->ref) 3027 3027 printk(KERN_ERR " btree %s %pS\n", 3028 - bch2_btree_ids[path->btree_id], 3028 + bch2_btree_id_str(path->btree_id), 3029 3029 (void *) path->ip_allocated); 3030 3030 /* Be noisy about this: */ 3031 3031 bch2_fatal_error(c); ··· 3100 3100 3101 3101 prt_tab(out); 3102 3102 prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b', 3103 - b->level, bch2_btree_ids[b->btree_id]); 3103 + b->level, bch2_btree_id_str(b->btree_id)); 3104 3104 bch2_bpos_to_text(out, btree_node_pos(b)); 3105 3105 3106 3106 prt_tab(out); ··· 3130 3130 path->idx, 3131 3131 path->cached ? 'c' : 'b', 3132 3132 path->level, 3133 - bch2_btree_ids[path->btree_id]); 3133 + bch2_btree_id_str(path->btree_id)); 3134 3134 bch2_bpos_to_text(out, path->pos); 3135 3135 prt_newline(out); 3136 3136
+2 -2
fs/bcachefs/btree_key_cache.c
··· 324 324 ck = bkey_cached_reuse(bc); 325 325 if (unlikely(!ck)) { 326 326 bch_err(c, "error allocating memory for key cache item, btree %s", 327 - bch2_btree_ids[path->btree_id]); 327 + bch2_btree_id_str(path->btree_id)); 328 328 return ERR_PTR(-BCH_ERR_ENOMEM_btree_key_cache_create); 329 329 } 330 330 ··· 407 407 new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL); 408 408 if (!new_k) { 409 409 bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u", 410 - bch2_btree_ids[ck->key.btree_id], new_u64s); 410 + bch2_btree_id_str(ck->key.btree_id), new_u64s); 411 411 ret = -BCH_ERR_ENOMEM_btree_key_cache_fill; 412 412 goto err; 413 413 }
+1 -1
fs/bcachefs/btree_trans_commit.c
··· 349 349 new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS); 350 350 if (!new_k) { 351 351 bch_err(c, "error allocating memory for key cache key, btree %s u64s %u", 352 - bch2_btree_ids[path->btree_id], new_u64s); 352 + bch2_btree_id_str(path->btree_id), new_u64s); 353 353 return -BCH_ERR_ENOMEM_btree_key_cache_insert; 354 354 } 355 355
+4 -4
fs/bcachefs/debug.c
··· 517 517 518 518 prt_printf(out, "%px btree=%s l=%u ", 519 519 b, 520 - bch2_btree_ids[b->c.btree_id], 520 + bch2_btree_id_str(b->c.btree_id), 521 521 b->c.level); 522 522 prt_newline(out); 523 523 ··· 919 919 bd < c->btree_debug + ARRAY_SIZE(c->btree_debug); 920 920 bd++) { 921 921 bd->id = bd - c->btree_debug; 922 - debugfs_create_file(bch2_btree_ids[bd->id], 922 + debugfs_create_file(bch2_btree_id_str(bd->id), 923 923 0400, c->btree_debug_dir, bd, 924 924 &btree_debug_ops); 925 925 926 926 snprintf(name, sizeof(name), "%s-formats", 927 - bch2_btree_ids[bd->id]); 927 + bch2_btree_id_str(bd->id)); 928 928 929 929 debugfs_create_file(name, 0400, c->btree_debug_dir, bd, 930 930 &btree_format_debug_ops); 931 931 932 932 snprintf(name, sizeof(name), "%s-bfloat-failed", 933 - bch2_btree_ids[bd->id]); 933 + bch2_btree_id_str(bd->id)); 934 934 935 935 debugfs_create_file(name, 0400, c->btree_debug_dir, bd, 936 936 &bfloat_failed_debug_ops);
+3 -2
fs/bcachefs/fsck.c
··· 2 2 3 3 #include "bcachefs.h" 4 4 #include "bkey_buf.h" 5 + #include "btree_cache.h" 5 6 #include "btree_update.h" 6 7 #include "buckets.h" 7 8 #include "darray.h" ··· 445 444 if (i->equiv == n.equiv) { 446 445 bch_err(c, "snapshot deletion did not finish:\n" 447 446 " duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n", 448 - bch2_btree_ids[btree_id], 447 + bch2_btree_id_str(btree_id), 449 448 pos.inode, pos.offset, 450 449 i->id, n.id, n.equiv); 451 450 set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags); ··· 810 809 return ret; 811 810 bad_hash: 812 811 if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s", 813 - bch2_btree_ids[desc.btree_id], hash_k.k->p.inode, hash_k.k->p.offset, hash, 812 + bch2_btree_id_str(desc.btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash, 814 813 (printbuf_reset(&buf), 815 814 bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) { 816 815 ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
+1 -1
fs/bcachefs/journal_io.c
··· 369 369 prt_newline(out); 370 370 prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]); 371 371 } 372 - prt_printf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level); 372 + prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level); 373 373 bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k)); 374 374 first = false; 375 375 }
+1 -1
fs/bcachefs/move.c
··· 1110 1110 1111 1111 prt_printf(out, " data type %s btree_id %s position: ", 1112 1112 bch2_data_types[stats->data_type], 1113 - bch2_btree_ids[stats->btree_id]); 1113 + bch2_btree_id_str(stats->btree_id)); 1114 1114 bch2_bpos_to_text(out, stats->pos); 1115 1115 prt_newline(out); 1116 1116 printbuf_indent_add(out, 2);
+1 -2
fs/bcachefs/opts.c
··· 42 42 NULL 43 43 }; 44 44 45 - const char * const bch2_btree_ids[] = { 45 + const char * const __bch2_btree_ids[] = { 46 46 BCH_BTREE_IDS() 47 - "interior btree node", 48 47 NULL 49 48 }; 50 49
+1 -1
fs/bcachefs/opts.h
··· 16 16 extern const char * const bch2_version_upgrade_opts[]; 17 17 extern const char * const bch2_sb_features[]; 18 18 extern const char * const bch2_sb_compat[]; 19 - extern const char * const bch2_btree_ids[]; 19 + extern const char * const __bch2_btree_ids[]; 20 20 extern const char * const bch2_csum_types[]; 21 21 extern const char * const bch2_csum_opts[]; 22 22 extern const char * const bch2_compression_types[];
+3 -3
fs/bcachefs/recovery.c
··· 182 182 bch2_journal_replay_key(trans, k)); 183 183 if (ret) { 184 184 bch_err(c, "journal replay: error while replaying key at btree %s level %u: %s", 185 - bch2_btree_ids[k->btree_id], k->level, bch2_err_str(ret)); 185 + bch2_btree_id_str(k->btree_id), k->level, bch2_err_str(ret)); 186 186 goto err; 187 187 } 188 188 } ··· 367 367 __fsck_err(c, btree_id_is_alloc(i) 368 368 ? FSCK_CAN_IGNORE : 0, 369 369 "invalid btree root %s", 370 - bch2_btree_ids[i]); 370 + bch2_btree_id_str(i)); 371 371 if (i == BTREE_ID_alloc) 372 372 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 373 373 } ··· 376 376 if (ret) { 377 377 fsck_err(c, 378 378 "error reading btree root %s", 379 - bch2_btree_ids[i]); 379 + bch2_btree_id_str(i)); 380 380 if (btree_id_is_alloc(i)) 381 381 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 382 382 ret = 0;
+1 -1
fs/bcachefs/sysfs.c
··· 341 341 342 342 static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c) 343 343 { 344 - prt_printf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]); 344 + prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree)); 345 345 bch2_bpos_to_text(out, c->gc_gens_pos); 346 346 prt_printf(out, "\n"); 347 347 }
+6 -6
fs/bcachefs/trace.h
··· 68 68 TP_printk("%d,%d %u %s %llu:%llu:%u", 69 69 MAJOR(__entry->dev), MINOR(__entry->dev), 70 70 __entry->level, 71 - bch2_btree_ids[__entry->btree_id], 71 + bch2_btree_id_str(__entry->btree_id), 72 72 __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot) 73 73 ); 74 74 ··· 461 461 TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u", 462 462 __entry->trans_fn, 463 463 (void *) __entry->caller_ip, 464 - bch2_btree_ids[__entry->btree_id], 464 + bch2_btree_id_str(__entry->btree_id), 465 465 __entry->pos_inode, 466 466 __entry->pos_offset, 467 467 __entry->pos_snapshot, ··· 522 522 TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u", 523 523 __entry->trans_fn, 524 524 (void *) __entry->caller_ip, 525 - bch2_btree_ids[__entry->btree_id], 525 + bch2_btree_id_str(__entry->btree_id), 526 526 __entry->pos_inode, 527 527 __entry->pos_offset, 528 528 __entry->pos_snapshot, ··· 1012 1012 TP_printk("%s %pS btree %s pos %llu:%llu:%u", 1013 1013 __entry->trans_fn, 1014 1014 (void *) __entry->caller_ip, 1015 - bch2_btree_ids[__entry->btree_id], 1015 + bch2_btree_id_str(__entry->btree_id), 1016 1016 __entry->pos_inode, 1017 1017 __entry->pos_offset, 1018 1018 __entry->pos_snapshot) ··· 1061 1061 TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u", 1062 1062 __entry->trans_fn, 1063 1063 (void *) __entry->caller_ip, 1064 - bch2_btree_ids[__entry->btree_id], 1064 + bch2_btree_id_str(__entry->btree_id), 1065 1065 __entry->pos_inode, 1066 1066 __entry->pos_offset, 1067 1067 __entry->pos_snapshot, ··· 1219 1219 TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u", 1220 1220 __entry->trans_fn, 1221 1221 (void *) __entry->caller_ip, 1222 - bch2_btree_ids[__entry->btree_id], 1222 + bch2_btree_id_str(__entry->btree_id), 1223 1223 __entry->pos_inode, 1224 1224 __entry->pos_offset, 1225 1225 __entry->pos_snapshot,