Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

btrfs: make a few more ASSERTs verbose

We have support for an optional string to be printed in ASSERT() (added in
19468a623a9109 ("btrfs: enhance ASSERT() to take optional format
string")), but it's not yet used everywhere it could be, so add it to a few more files.

Signed-off-by: David Sterba <dsterba@suse.com>

+119 -64
+10 -7
fs/btrfs/scrub.c
··· 966 966 const unsigned long old_error_bitmap = scrub_bitmap_read_error(stripe); 967 967 int i; 968 968 969 - ASSERT(stripe->mirror_num >= 1); 970 - ASSERT(atomic_read(&stripe->pending_io) == 0); 969 + ASSERT(stripe->mirror_num >= 1, "stripe->mirror_num=%d", stripe->mirror_num); 970 + ASSERT(atomic_read(&stripe->pending_io) == 0, 971 + "atomic_read(&stripe->pending_io)=%d", atomic_read(&stripe->pending_io)); 971 972 972 973 for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) { 973 974 /* The current sector cannot be merged, submit the bio. */ ··· 1031 1030 int ret; 1032 1031 1033 1032 /* For scrub, our mirror_num should always start at 1. */ 1034 - ASSERT(stripe->mirror_num >= 1); 1033 + ASSERT(stripe->mirror_num >= 1, "stripe->mirror_num=%d", stripe->mirror_num); 1035 1034 ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, 1036 1035 stripe->logical, &mapped_len, &bioc, 1037 1036 NULL, NULL); ··· 1171 1170 int mirror; 1172 1171 int i; 1173 1172 1174 - ASSERT(stripe->mirror_num > 0); 1173 + ASSERT(stripe->mirror_num >= 1, "stripe->mirror_num=%d", stripe->mirror_num); 1175 1174 1176 1175 wait_scrub_stripe_io(stripe); 1177 1176 scrub_verify_one_stripe(stripe, scrub_bitmap_read_has_extent(stripe)); ··· 1487 1486 1488 1487 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1489 1488 ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY || 1490 - key.type == BTRFS_METADATA_ITEM_KEY); 1489 + key.type == BTRFS_METADATA_ITEM_KEY, "key.type=%u", key.type); 1491 1490 if (key.type == BTRFS_METADATA_ITEM_KEY) 1492 1491 len = fs_info->nodesize; 1493 1492 else ··· 1592 1591 1593 1592 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1594 1593 ASSERT(key.type == BTRFS_METADATA_ITEM_KEY || 1595 - key.type == BTRFS_EXTENT_ITEM_KEY); 1594 + key.type == BTRFS_EXTENT_ITEM_KEY, "key.type=%u", key.type); 1596 1595 *extent_start_ret = key.objectid; 1597 1596 if (key.type == BTRFS_METADATA_ITEM_KEY) 1598 1597 *size_ret = path->nodes[0]->fs_info->nodesize; ··· 1690 
1689 scrub_stripe_reset_bitmaps(stripe); 1691 1690 1692 1691 /* The range must be inside the bg. */ 1693 - ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length); 1692 + ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length, 1693 + "bg->start=%llu logical_start=%llu logical_end=%llu end=%llu", 1694 + bg->start, logical_start, logical_end, bg->start + bg->length); 1694 1695 1695 1696 ret = find_first_extent_item(extent_root, extent_path, logical_start, 1696 1697 logical_len);
+18 -12
fs/btrfs/space-info.c
··· 211 211 if (btrfs_is_zoned(fs_info)) 212 212 return fs_info->zone_size; 213 213 214 - ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK); 214 + ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK, "flags=%llu", flags); 215 215 216 216 if (flags & BTRFS_BLOCK_GROUP_DATA) 217 217 return BTRFS_MAX_DATA_CHUNK_SIZE; ··· 262 262 struct btrfs_space_info *sub_group; 263 263 int ret; 264 264 265 - ASSERT(parent->subgroup_id == BTRFS_SUB_GROUP_PRIMARY); 266 - ASSERT(id != BTRFS_SUB_GROUP_PRIMARY); 265 + ASSERT(parent->subgroup_id == BTRFS_SUB_GROUP_PRIMARY, 266 + "parent->subgroup_id=%d", parent->subgroup_id); 267 + ASSERT(id != BTRFS_SUB_GROUP_PRIMARY, "id=%d", id); 267 268 268 269 sub_group = kzalloc(sizeof(*sub_group), GFP_NOFS); 269 270 if (!sub_group) ··· 532 531 533 532 if (!list_empty(&ticket->list)) { 534 533 list_del_init(&ticket->list); 535 - ASSERT(space_info->reclaim_size >= ticket->bytes); 534 + ASSERT(space_info->reclaim_size >= ticket->bytes, 535 + "space_info->reclaim_size=%llu ticket->bytes=%llu", 536 + space_info->reclaim_size, ticket->bytes); 536 537 space_info->reclaim_size -= ticket->bytes; 537 538 } 538 539 ··· 1674 1671 priority_reclaim_data_space(space_info, ticket); 1675 1672 break; 1676 1673 default: 1677 - ASSERT(0); 1674 + ASSERT(0, "flush=%d", flush); 1678 1675 break; 1679 1676 } 1680 1677 ··· 1686 1683 * releasing reserved space (if an error happens the expectation is that 1687 1684 * space wasn't reserved at all). 
1688 1685 */ 1689 - ASSERT(!(ticket->bytes == 0 && ticket->error)); 1686 + ASSERT(!(ticket->bytes == 0 && ticket->error), 1687 + "ticket->bytes=%llu ticket->error=%d", ticket->bytes, ticket->error); 1690 1688 trace_btrfs_reserve_ticket(space_info->fs_info, space_info->flags, 1691 1689 orig_bytes, start_ns, flush, ticket->error); 1692 1690 return ret; ··· 1762 1758 int ret = -ENOSPC; 1763 1759 bool pending_tickets; 1764 1760 1765 - ASSERT(orig_bytes); 1761 + ASSERT(orig_bytes, "orig_bytes=%llu", orig_bytes); 1766 1762 /* 1767 1763 * If have a transaction handle (current->journal_info != NULL), then 1768 1764 * the flush method can not be neither BTRFS_RESERVE_FLUSH_ALL* nor ··· 1771 1767 */ 1772 1768 if (current->journal_info) { 1773 1769 /* One assert per line for easier debugging. */ 1774 - ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL); 1775 - ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL); 1776 - ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT); 1770 + ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL, "flush=%d", flush); 1771 + ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL, "flush=%d", flush); 1772 + ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT, "flush=%d", flush); 1777 1773 } 1778 1774 1779 1775 if (flush == BTRFS_RESERVE_FLUSH_DATA) ··· 1934 1930 1935 1931 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA || 1936 1932 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE || 1937 - flush == BTRFS_RESERVE_NO_FLUSH); 1938 - ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); 1933 + flush == BTRFS_RESERVE_NO_FLUSH, "flush=%d", flush); 1934 + ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA, 1935 + "current->journal_info=0x%lx flush=%d", 1936 + (unsigned long)current->journal_info, flush); 1939 1937 1940 1938 ret = reserve_bytes(space_info, bytes, flush); 1941 1939 if (ret == -ENOSPC) {
+7 -3
fs/btrfs/subpage.c
··· 180 180 /* Basic checks */ 181 181 ASSERT(folio_test_private(folio) && folio_get_private(folio)); 182 182 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && 183 - IS_ALIGNED(len, fs_info->sectorsize)); 183 + IS_ALIGNED(len, fs_info->sectorsize), "start=%llu len=%u", start, len); 184 184 /* 185 185 * The range check only works for mapped page, we can still have 186 186 * unmapped page like dummy extent buffer pages. ··· 249 249 clear_bit(bit, bfs->bitmaps); 250 250 cleared++; 251 251 } 252 - ASSERT(atomic_read(&bfs->nr_locked) >= cleared); 252 + ASSERT(atomic_read(&bfs->nr_locked) >= cleared, 253 + "atomic_read(&bfs->nr_locked)=%d cleared=%d", 254 + atomic_read(&bfs->nr_locked), cleared); 253 255 last = atomic_sub_and_test(cleared, &bfs->nr_locked); 254 256 spin_unlock_irqrestore(&bfs->lock, flags); 255 257 return last; ··· 330 328 if (test_and_clear_bit(bit + start_bit, bfs->bitmaps)) 331 329 cleared++; 332 330 } 333 - ASSERT(atomic_read(&bfs->nr_locked) >= cleared); 331 + ASSERT(atomic_read(&bfs->nr_locked) >= cleared, 332 + "atomic_read(&bfs->nr_locked)=%d cleared=%d", 333 + atomic_read(&bfs->nr_locked), cleared); 334 334 last = atomic_sub_and_test(cleared, &bfs->nr_locked); 335 335 spin_unlock_irqrestore(&bfs->lock, flags); 336 336 if (last)
+29 -12
fs/btrfs/transaction.c
··· 186 186 * At this point no one can be using this transaction to modify any tree 187 187 * and no one can start another transaction to modify any tree either. 188 188 */ 189 - ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING); 189 + ASSERT(cur_trans->state == TRANS_STATE_COMMIT_DOING, 190 + "cur_trans->state=%d", cur_trans->state); 190 191 191 192 down_write(&fs_info->commit_root_sem); 192 193 ··· 1026 1025 struct btrfs_fs_info *fs_info = trans->fs_info; 1027 1026 1028 1027 if (!trans->block_rsv) { 1029 - ASSERT(!trans->bytes_reserved); 1030 - ASSERT(!trans->delayed_refs_bytes_reserved); 1028 + ASSERT(trans->bytes_reserved == 0, 1029 + "trans->bytes_reserved=%llu", trans->bytes_reserved); 1030 + ASSERT(trans->delayed_refs_bytes_reserved == 0, 1031 + "trans->delayed_refs_bytes_reserved=%llu", 1032 + trans->delayed_refs_bytes_reserved); 1031 1033 return; 1032 1034 } 1033 1035 1034 1036 if (!trans->bytes_reserved) { 1035 - ASSERT(!trans->delayed_refs_bytes_reserved); 1037 + ASSERT(trans->delayed_refs_bytes_reserved == 0, 1038 + "trans->delayed_refs_bytes_reserved=%llu", 1039 + trans->delayed_refs_bytes_reserved); 1036 1040 return; 1037 1041 } 1038 1042 ··· 1236 1230 bool errors = false; 1237 1231 int ret; 1238 1232 1239 - ASSERT(btrfs_root_id(log_root) == BTRFS_TREE_LOG_OBJECTID); 1233 + ASSERT(btrfs_root_id(log_root) == BTRFS_TREE_LOG_OBJECTID, 1234 + "root_id(log_root)=%llu", btrfs_root_id(log_root)); 1240 1235 1241 1236 ret = __btrfs_wait_marked_extents(fs_info, dirty_pages); 1242 1237 if ((mark & EXTENT_DIRTY_LOG1) && ··· 1342 1335 * At this point no one can be using this transaction to modify any tree 1343 1336 * and no one can start another transaction to modify any tree either. 
1344 1337 */ 1345 - ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING); 1338 + ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING, 1339 + "trans->transaction->state=%d", trans->transaction->state); 1346 1340 1347 1341 eb = btrfs_lock_root_node(fs_info->tree_root); 1348 1342 ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, ··· 1477 1469 * At this point no one can be using this transaction to modify any tree 1478 1470 * and no one can start another transaction to modify any tree either. 1479 1471 */ 1480 - ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING); 1472 + ASSERT(trans->transaction->state == TRANS_STATE_COMMIT_DOING, 1473 + "trans->transaction->state=%d", trans->transaction->state); 1481 1474 1482 1475 spin_lock(&fs_info->fs_roots_radix_lock); 1483 1476 while (1) { ··· 1496 1487 * At this point we can neither have tasks logging inodes 1497 1488 * from a root nor trying to commit a log tree. 1498 1489 */ 1499 - ASSERT(atomic_read(&root->log_writers) == 0); 1500 - ASSERT(atomic_read(&root->log_commit[0]) == 0); 1501 - ASSERT(atomic_read(&root->log_commit[1]) == 0); 1490 + ASSERT(atomic_read(&root->log_writers) == 0, 1491 + "atomic_read(&root->log_writers)=%d", 1492 + atomic_read(&root->log_writers)); 1493 + ASSERT(atomic_read(&root->log_commit[0]) == 0, 1494 + "atomic_read(&root->log_commit[0])=%d", 1495 + atomic_read(&root->log_commit[0])); 1496 + ASSERT(atomic_read(&root->log_commit[1]) == 0, 1497 + "atomic_read(&root->log_commit[1])=%d", 1498 + atomic_read(&root->log_commit[1])); 1502 1499 1503 1500 radix_tree_tag_clear(&fs_info->fs_roots_radix, 1504 1501 (unsigned long)btrfs_root_id(root), ··· 2173 2158 return; 2174 2159 2175 2160 lockdep_assert_held(&trans->fs_info->trans_lock); 2176 - ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP); 2161 + ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_PREP, 2162 + "cur_trans->state=%d", cur_trans->state); 2177 2163 2178 2164 list_add(&trans->pending_snapshot->list, 
&cur_trans->pending_snapshots); 2179 2165 } ··· 2201 2185 struct btrfs_transaction *prev_trans = NULL; 2202 2186 int ret; 2203 2187 2204 - ASSERT(refcount_read(&trans->use_count) == 1); 2188 + ASSERT(refcount_read(&trans->use_count) == 1, 2189 + "refcount_read(&trans->use_count)=%d", refcount_read(&trans->use_count)); 2205 2190 btrfs_trans_state_lockdep_acquire(fs_info, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); 2206 2191 2207 2192 clear_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags);
+1 -1
fs/btrfs/tree-checker.c
··· 186 186 key->type == BTRFS_INODE_EXTREF_KEY || 187 187 key->type == BTRFS_DIR_INDEX_KEY || 188 188 key->type == BTRFS_DIR_ITEM_KEY || 189 - key->type == BTRFS_EXTENT_DATA_KEY); 189 + key->type == BTRFS_EXTENT_DATA_KEY, "key->type=%u", key->type); 190 190 191 191 /* 192 192 * Only subvolume trees along with their reloc trees need this check.
+31 -15
fs/btrfs/tree-log.c
··· 263 263 struct btrfs_inode *inode; 264 264 265 265 /* Only meant to be called for subvolume roots and not for log roots. */ 266 - ASSERT(btrfs_is_fstree(btrfs_root_id(root))); 266 + ASSERT(btrfs_is_fstree(btrfs_root_id(root)), "root_id=%llu", btrfs_root_id(root)); 267 267 268 268 /* 269 269 * We're holding a transaction handle whether we are logging or ··· 502 502 * the leaf before writing into the log tree. See the comments at 503 503 * copy_items() for more details. 504 504 */ 505 - ASSERT(btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID); 505 + ASSERT(btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID, "root_id=%llu", btrfs_root_id(root)); 506 506 507 507 item_size = btrfs_item_size(wc->log_leaf, wc->log_slot); 508 508 src_ptr = btrfs_item_ptr_offset(wc->log_leaf, wc->log_slot); ··· 2282 2282 struct btrfs_dir_item *di; 2283 2283 2284 2284 /* We only log dir index keys, which only contain a single dir item. */ 2285 - ASSERT(wc->log_key.type == BTRFS_DIR_INDEX_KEY); 2285 + ASSERT(wc->log_key.type == BTRFS_DIR_INDEX_KEY, 2286 + "wc->log_key.type=%u", wc->log_key.type); 2286 2287 2287 2288 di = btrfs_item_ptr(wc->log_leaf, wc->log_slot, struct btrfs_dir_item); 2288 2289 ret = replay_one_name(wc, di); ··· 2435 2434 * we need to do is process the dir index keys, we (and our caller) can 2436 2435 * safely ignore dir item keys (key type BTRFS_DIR_ITEM_KEY). 
2437 2436 */ 2438 - ASSERT(dir_key->type == BTRFS_DIR_INDEX_KEY); 2437 + ASSERT(dir_key->type == BTRFS_DIR_INDEX_KEY, "dir_key->type=%u", dir_key->type); 2439 2438 2440 2439 eb = wc->subvol_path->nodes[0]; 2441 2440 slot = wc->subvol_path->slots[0]; ··· 3340 3339 mutex_unlock(&root->log_mutex); 3341 3340 return ctx->log_ret; 3342 3341 } 3343 - ASSERT(log_transid == root->log_transid); 3342 + ASSERT(log_transid == root->log_transid, 3343 + "log_transid=%d root->log_transid=%d", log_transid, root->log_transid); 3344 3344 atomic_set(&root->log_commit[index1], 1); 3345 3345 3346 3346 /* wait for previous tree log sync to complete */ ··· 3481 3479 ret = root_log_ctx.log_ret; 3482 3480 goto out; 3483 3481 } 3484 - ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); 3482 + ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid, 3483 + "root_log_ctx.log_transid=%d log_root_tree->log_transid=%d", 3484 + root_log_ctx.log_transid, log_root_tree->log_transid); 3485 3485 atomic_set(&log_root_tree->log_commit[index2], 1); 3486 3486 3487 3487 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { ··· 3587 3583 * someone else already started it. We use <= and not < because the 3588 3584 * first log transaction has an ID of 0. 
3589 3585 */ 3590 - ASSERT(btrfs_get_root_last_log_commit(root) <= log_transid); 3586 + ASSERT(btrfs_get_root_last_log_commit(root) <= log_transid, 3587 + "last_log_commit(root)=%d log_transid=%d", 3588 + btrfs_get_root_last_log_commit(root), log_transid); 3591 3589 btrfs_set_root_last_log_commit(root, log_transid); 3592 3590 3593 3591 out_wake_log_root: ··· 4033 4027 int ret; 4034 4028 int i; 4035 4029 4036 - ASSERT(count > 0); 4030 + ASSERT(count > 0, "count=%d", count); 4037 4031 batch.nr = count; 4038 4032 4039 4033 if (count == 1) { ··· 4086 4080 btrfs_release_path(dst_path); 4087 4081 4088 4082 last_index = batch.keys[count - 1].offset; 4089 - ASSERT(last_index > inode->last_dir_index_offset); 4083 + ASSERT(last_index > inode->last_dir_index_offset, 4084 + "last_index=%llu inode->last_dir_index_offset=%llu", 4085 + last_index, inode->last_dir_index_offset); 4090 4086 4091 4087 /* 4092 4088 * If for some unexpected reason the last item's index is not greater ··· 4412 4404 * change in the current transaction), then we don't need to log 4413 4405 * a range, last_old_dentry_offset is == to last_offset. 
4414 4406 */ 4415 - ASSERT(last_old_dentry_offset <= last_offset); 4407 + ASSERT(last_old_dentry_offset <= last_offset, 4408 + "last_old_dentry_offset=%llu last_offset=%llu", 4409 + last_old_dentry_offset, last_offset); 4416 4410 if (last_old_dentry_offset < last_offset) 4417 4411 ret = insert_dir_log_key(trans, log, path, ino, 4418 4412 last_old_dentry_offset + 1, ··· 6538 6528 curr = list_next_entry(curr, log_list); 6539 6529 } 6540 6530 6541 - ASSERT(batch.nr >= 1); 6531 + ASSERT(batch.nr >= 1, "batch.nr=%d", batch.nr); 6542 6532 ret = insert_delayed_items_batch(trans, log, path, &batch, first); 6543 6533 6544 6534 curr = list_last_entry(delayed_ins_list, struct btrfs_delayed_item, ··· 6582 6572 } 6583 6573 6584 6574 last_dir_index = curr->index; 6585 - ASSERT(last_dir_index >= first_dir_index); 6575 + ASSERT(last_dir_index >= first_dir_index, 6576 + "last_dir_index=%llu first_dir_index=%llu", 6577 + last_dir_index, first_dir_index); 6586 6578 6587 6579 ret = insert_dir_log_key(trans, inode->root->log_root, path, 6588 6580 ino, first_dir_index, last_dir_index); ··· 6678 6666 goto next_batch; 6679 6667 6680 6668 last_dir_index = last->index; 6681 - ASSERT(last_dir_index >= first_dir_index); 6669 + ASSERT(last_dir_index >= first_dir_index, 6670 + "last_dir_index=%llu first_dir_index=%llu", 6671 + last_dir_index, first_dir_index); 6682 6672 /* 6683 6673 * If this range starts right after where the previous one ends, 6684 6674 * then we want to reuse the previous range item and change its ··· 6747 6733 */ 6748 6734 lockdep_assert_not_held(&inode->log_mutex); 6749 6735 6750 - ASSERT(!ctx->logging_new_delayed_dentries); 6736 + ASSERT(!ctx->logging_new_delayed_dentries, 6737 + "ctx->logging_new_delayed_dentries=%d", ctx->logging_new_delayed_dentries); 6751 6738 ctx->logging_new_delayed_dentries = true; 6752 6739 6753 6740 list_for_each_entry(item, delayed_ins_list, log_list) { ··· 7965 7950 struct btrfs_path *path; 7966 7951 struct fscrypt_name fname; 7967 7952 7968 - 
ASSERT(old_dir_index >= BTRFS_DIR_START_INDEX); 7953 + ASSERT(old_dir_index >= BTRFS_DIR_START_INDEX, 7954 + "old_dir_index=%llu", old_dir_index); 7969 7955 7970 7956 ret = fscrypt_setup_filename(&old_dir->vfs_inode, 7971 7957 &old_dentry->d_name, 0, &fname);
+23 -14
fs/btrfs/zoned.c
··· 93 93 sector_t sector; 94 94 95 95 for (int i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) { 96 - ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL); 96 + ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL, 97 + "zones[%d].type=%d", i, zones[i].type); 97 98 empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY); 98 99 full[i] = sb_zone_is_full(&zones[i]); 99 100 } ··· 167 166 { 168 167 u64 zone = U64_MAX; 169 168 170 - ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX); 169 + ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX, "mirror=%d", mirror); 171 170 switch (mirror) { 172 171 case 0: zone = 0; break; 173 172 case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break; 174 173 case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break; 175 174 } 176 175 177 - ASSERT(zone <= U32_MAX); 176 + ASSERT(zone <= U32_MAX, "zone=%llu", zone); 178 177 179 178 return (u32)zone; 180 179 } ··· 241 240 unsigned int i; 242 241 u32 zno; 243 242 244 - ASSERT(IS_ALIGNED(pos, zinfo->zone_size)); 243 + ASSERT(IS_ALIGNED(pos, zinfo->zone_size), 244 + "pos=%llu zinfo->zone_size=%llu", pos, zinfo->zone_size); 245 245 zno = pos >> zinfo->zone_size_shift; 246 246 /* 247 247 * We cannot report zones beyond the zone end. 
So, it is OK to ··· 1057 1055 bool have_sb; 1058 1056 int i; 1059 1057 1060 - ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size)); 1061 - ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size)); 1058 + ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size), 1059 + "hole_start=%llu zinfo->zone_size=%llu", hole_start, zinfo->zone_size); 1060 + ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size), 1061 + "num_bytes=%llu zinfo->zone_size=%llu", num_bytes, zinfo->zone_size); 1062 1062 1063 1063 while (pos < hole_end) { 1064 1064 begin = pos >> shift; ··· 1176 1172 u64 pos; 1177 1173 int ret; 1178 1174 1179 - ASSERT(IS_ALIGNED(start, zinfo->zone_size)); 1180 - ASSERT(IS_ALIGNED(size, zinfo->zone_size)); 1175 + ASSERT(IS_ALIGNED(start, zinfo->zone_size), 1176 + "start=%llu, zinfo->zone_size=%llu", start, zinfo->zone_size); 1177 + ASSERT(IS_ALIGNED(size, zinfo->zone_size), 1178 + "size=%llu, zinfo->zone_size=%llu", size, zinfo->zone_size); 1181 1179 1182 1180 if (begin + nbits > zinfo->nr_zones) 1183 1181 return -ERANGE; ··· 1872 1866 em = btrfs_search_extent_mapping(em_tree, ordered->file_offset, 1873 1867 ordered->num_bytes); 1874 1868 /* The em should be a new COW extent, thus it should not have an offset. 
*/ 1875 - ASSERT(em->offset == 0); 1869 + ASSERT(em->offset == 0, "em->offset=%llu", em->offset); 1876 1870 em->disk_bytenr = logical; 1877 1871 btrfs_free_extent_map(em); 1878 1872 write_unlock(&em_tree->lock); ··· 2583 2577 struct btrfs_space_info *reloc_sinfo = data_sinfo->sub_group[0]; 2584 2578 int factor; 2585 2579 2586 - ASSERT(reloc_sinfo->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC); 2580 + ASSERT(reloc_sinfo->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC, 2581 + "reloc_sinfo->subgroup_id=%d", reloc_sinfo->subgroup_id); 2587 2582 factor = btrfs_bg_type_to_factor(bg->flags); 2588 2583 2589 2584 down_write(&space_info->groups_sem); ··· 2598 2591 space_info->disk_total -= bg->length * factor; 2599 2592 space_info->disk_total -= bg->zone_unusable; 2600 2593 /* There is no allocation ever happened. */ 2601 - ASSERT(bg->used == 0); 2594 + ASSERT(bg->used == 0, "bg->used=%llu", bg->used); 2602 2595 /* No super block in a block group on the zoned setup. */ 2603 - ASSERT(bg->bytes_super == 0); 2596 + ASSERT(bg->bytes_super == 0, "bg->bytes_super=%llu", bg->bytes_super); 2604 2597 spin_unlock(&space_info->lock); 2605 2598 2606 2599 bg->space_info = reloc_sinfo; ··· 2626 2619 2627 2620 /* Allocate new BG in the data relocation space_info. */ 2628 2621 space_info = data_sinfo->sub_group[0]; 2629 - ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC); 2622 + ASSERT(space_info->subgroup_id == BTRFS_SUB_GROUP_DATA_RELOC, 2623 + "space_info->subgroup_id=%d", space_info->subgroup_id); 2630 2624 ret = btrfs_chunk_alloc(trans, space_info, alloc_flags, CHUNK_ALLOC_FORCE); 2631 2625 btrfs_end_transaction(trans); 2632 2626 if (ret == 1) { ··· 2968 2960 * This holds because we currently reset fully used then freed 2969 2961 * block group. 
2970 2962 */ 2971 - ASSERT(reclaimed == bg->zone_capacity); 2963 + ASSERT(reclaimed == bg->zone_capacity, 2964 + "reclaimed=%llu bg->zone_capacity=%llu", reclaimed, bg->zone_capacity); 2972 2965 bg->free_space_ctl->free_space += reclaimed; 2973 2966 space_info->bytes_zone_unusable -= reclaimed; 2974 2967 spin_unlock(&bg->lock);