Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm vdo permassert: audit all of ASSERT to test for VDO_SUCCESS

Also rename ASSERT to VDO_ASSERT and ASSERT_LOG_ONLY to
VDO_ASSERT_LOG_ONLY.

But re-introduce ASSERT and ASSERT_LOG_ONLY as placeholders
for the benefit of dm-vdo/indexer.

Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Matthew Sakai <msakai@redhat.com>

+563 -560
+4 -4
drivers/md/dm-vdo/action-manager.c
··· 177 177 zone_count_t zone; 178 178 struct action_manager *manager = as_action_manager(completion); 179 179 180 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)), 181 - "%s() called on acting zones's thread", __func__); 180 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)), 181 + "%s() called on acting zones's thread", __func__); 182 182 183 183 zone = manager->acting_zone++; 184 184 if (manager->acting_zone == manager->zones) { ··· 357 357 { 358 358 struct action *current_action; 359 359 360 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id), 361 - "action initiated from correct thread"); 360 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id), 361 + "action initiated from correct thread"); 362 362 if (!manager->current_action->in_use) { 363 363 current_action = manager->current_action; 364 364 } else if (!manager->current_action->next->in_use) {
+59 -59
drivers/md/dm-vdo/block-map.c
··· 246 246 { 247 247 thread_id_t thread_id = vdo_get_callback_thread_id(); 248 248 249 - ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id), 250 - "%s() must only be called on cache thread %d, not thread %d", 251 - function_name, cache->zone->thread_id, thread_id); 249 + VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id), 250 + "%s() must only be called on cache thread %d, not thread %d", 251 + function_name, cache->zone->thread_id, thread_id); 252 252 } 253 253 254 254 /** assert_io_allowed() - Assert that a page cache may issue I/O. */ 255 255 static inline void assert_io_allowed(struct vdo_page_cache *cache) 256 256 { 257 - ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state), 258 - "VDO page cache may issue I/O"); 257 + VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state), 258 + "VDO page cache may issue I/O"); 259 259 } 260 260 261 261 /** report_cache_pressure() - Log and, if enabled, report cache pressure. */ ··· 287 287 288 288 BUILD_BUG_ON(ARRAY_SIZE(state_names) != PAGE_STATE_COUNT); 289 289 290 - result = ASSERT(state < ARRAY_SIZE(state_names), 291 - "Unknown page_state value %d", state); 292 - if (result != UDS_SUCCESS) 290 + result = VDO_ASSERT(state < ARRAY_SIZE(state_names), 291 + "Unknown page_state value %d", state); 292 + if (result != VDO_SUCCESS) 293 293 return "[UNKNOWN PAGE STATE]"; 294 294 295 295 return state_names[state]; ··· 378 378 struct vdo_page_cache *cache = info->cache; 379 379 380 380 /* Either the new or the old page number must be NO_PAGE. 
*/ 381 - int result = ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE), 382 - "Must free a page before reusing it."); 381 + int result = VDO_ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE), 382 + "Must free a page before reusing it."); 383 383 if (result != VDO_SUCCESS) 384 384 return result; 385 385 ··· 401 401 { 402 402 int result; 403 403 404 - result = ASSERT(info->busy == 0, "VDO Page must not be busy"); 405 - if (result != UDS_SUCCESS) 404 + result = VDO_ASSERT(info->busy == 0, "VDO Page must not be busy"); 405 + if (result != VDO_SUCCESS) 406 406 return result; 407 407 408 - result = ASSERT(!vdo_waitq_has_waiters(&info->waiting), 409 - "VDO Page must not have waiters"); 410 - if (result != UDS_SUCCESS) 408 + result = VDO_ASSERT(!vdo_waitq_has_waiters(&info->waiting), 409 + "VDO Page must not have waiters"); 410 + if (result != VDO_SUCCESS) 411 411 return result; 412 412 413 413 result = set_info_pbn(info, NO_PAGE); ··· 592 592 { 593 593 int result; 594 594 595 - result = ASSERT(completion->ready, "VDO Page completion not ready"); 596 - if (result != UDS_SUCCESS) 595 + result = VDO_ASSERT(completion->ready, "VDO Page completion not ready"); 596 + if (result != VDO_SUCCESS) 597 597 return result; 598 598 599 - result = ASSERT(completion->info != NULL, 600 - "VDO Page Completion must be complete"); 601 - if (result != UDS_SUCCESS) 599 + result = VDO_ASSERT(completion->info != NULL, 600 + "VDO Page Completion must be complete"); 601 + if (result != VDO_SUCCESS) 602 602 return result; 603 603 604 - result = ASSERT(completion->info->pbn == completion->pbn, 605 - "VDO Page Completion pbn must be consistent"); 606 - if (result != UDS_SUCCESS) 604 + result = VDO_ASSERT(completion->info->pbn == completion->pbn, 605 + "VDO Page Completion pbn must be consistent"); 606 + if (result != VDO_SUCCESS) 607 607 return result; 608 608 609 - result = ASSERT(is_valid(completion->info), 610 - "VDO Page Completion page must be valid"); 611 - if (result != UDS_SUCCESS) 609 + 
result = VDO_ASSERT(is_valid(completion->info), 610 + "VDO Page Completion page must be valid"); 611 + if (result != VDO_SUCCESS) 612 612 return result; 613 613 614 614 if (writable) { 615 - result = ASSERT(completion->writable, 616 - "VDO Page Completion must be writable"); 617 - if (result != UDS_SUCCESS) 615 + result = VDO_ASSERT(completion->writable, 616 + "VDO Page Completion must be writable"); 617 + if (result != VDO_SUCCESS) 618 618 return result; 619 619 } 620 620 ··· 776 776 if (result != VDO_SUCCESS) 777 777 return result; 778 778 779 - result = ASSERT((info->busy == 0), "Page is not busy before loading."); 779 + result = VDO_ASSERT((info->busy == 0), "Page is not busy before loading."); 780 780 if (result != VDO_SUCCESS) 781 781 return result; 782 782 ··· 949 949 return; 950 950 } 951 951 952 - ASSERT_LOG_ONLY(!is_in_flight(info), 953 - "page selected for discard is not in flight"); 952 + VDO_ASSERT_LOG_ONLY(!is_in_flight(info), 953 + "page selected for discard is not in flight"); 954 954 955 955 cache->discard_count++; 956 956 info->write_status = WRITE_STATUS_DISCARD; ··· 1153 1153 discard_info = page_completion->info; 1154 1154 } 1155 1155 1156 - ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL), 1157 - "Page being released after leaving all queues"); 1156 + VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL), 1157 + "Page being released after leaving all queues"); 1158 1158 1159 1159 page_completion->info = NULL; 1160 1160 cache = page_completion->cache; ··· 1217 1217 struct page_info *info; 1218 1218 1219 1219 assert_on_cache_thread(cache, __func__); 1220 - ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL), 1221 - "New page completion was not already on a wait queue"); 1220 + VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL), 1221 + "New page completion was not already on a wait queue"); 1222 1222 1223 1223 *page_completion = (struct vdo_page_completion) { 1224 1224 .pbn = pbn, ··· 1265 1265 } 
1266 1266 1267 1267 /* Something horrible has gone wrong. */ 1268 - ASSERT_LOG_ONLY(false, "Info found in a usable state."); 1268 + VDO_ASSERT_LOG_ONLY(false, "Info found in a usable state."); 1269 1269 } 1270 1270 1271 1271 /* The page must be fetched. */ ··· 1334 1334 1335 1335 /* Make sure we don't throw away any dirty pages. */ 1336 1336 for (info = cache->infos; info < cache->infos + cache->page_count; info++) { 1337 - int result = ASSERT(!is_dirty(info), "cache must have no dirty pages"); 1337 + int result = VDO_ASSERT(!is_dirty(info), "cache must have no dirty pages"); 1338 1338 1339 1339 if (result != VDO_SUCCESS) 1340 1340 return result; ··· 1440 1440 { 1441 1441 int result; 1442 1442 1443 - result = ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) && 1444 - in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)), 1445 - "generation(s) %u, %u are out of range [%u, %u]", 1446 - a, b, zone->oldest_generation, zone->generation); 1443 + result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) && 1444 + in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)), 1445 + "generation(s) %u, %u are out of range [%u, %u]", 1446 + a, b, zone->oldest_generation, zone->generation); 1447 1447 if (result != VDO_SUCCESS) { 1448 1448 enter_zone_read_only_mode(zone, result); 1449 1449 return true; ··· 1456 1456 { 1457 1457 int result; 1458 1458 1459 - result = ASSERT((zone->dirty_page_counts[generation] > 0), 1460 - "dirty page count underflow for generation %u", generation); 1459 + result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0), 1460 + "dirty page count underflow for generation %u", generation); 1461 1461 if (result != VDO_SUCCESS) { 1462 1462 enter_zone_read_only_mode(zone, result); 1463 1463 return; ··· 1482 1482 1483 1483 page->generation = new_generation; 1484 1484 new_count = ++zone->dirty_page_counts[new_generation]; 1485 - result = ASSERT((new_count != 0), "dirty 
page count overflow for generation %u", 1486 - new_generation); 1485 + result = VDO_ASSERT((new_count != 0), "dirty page count overflow for generation %u", 1486 + new_generation); 1487 1487 if (result != VDO_SUCCESS) { 1488 1488 enter_zone_read_only_mode(zone, result); 1489 1489 return; ··· 1698 1698 struct tree_lock *lock_holder; 1699 1699 struct tree_lock *lock = &data_vio->tree_lock; 1700 1700 1701 - ASSERT_LOG_ONLY(lock->locked, 1702 - "release of unlocked block map page %s for key %llu in tree %u", 1703 - what, (unsigned long long) lock->key, lock->root_index); 1701 + VDO_ASSERT_LOG_ONLY(lock->locked, 1702 + "release of unlocked block map page %s for key %llu in tree %u", 1703 + what, (unsigned long long) lock->key, lock->root_index); 1704 1704 1705 1705 zone = data_vio->logical.zone->block_map_zone; 1706 1706 lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key); 1707 - ASSERT_LOG_ONLY((lock_holder == lock), 1708 - "block map page %s mismatch for key %llu in tree %u", 1709 - what, (unsigned long long) lock->key, lock->root_index); 1707 + VDO_ASSERT_LOG_ONLY((lock_holder == lock), 1708 + "block map page %s mismatch for key %llu in tree %u", 1709 + what, (unsigned long long) lock->key, lock->root_index); 1710 1710 lock->locked = false; 1711 1711 } 1712 1712 ··· 2008 2008 2009 2009 list_del_init(&page->entry); 2010 2010 2011 - result = ASSERT(!vdo_waiter_is_waiting(&page->waiter), 2012 - "Newly expired page not already waiting to write"); 2011 + result = VDO_ASSERT(!vdo_waiter_is_waiting(&page->waiter), 2012 + "Newly expired page not already waiting to write"); 2013 2013 if (result != VDO_SUCCESS) { 2014 2014 enter_zone_read_only_mode(zone, result); 2015 2015 continue; ··· 2867 2867 BUILD_BUG_ON(VDO_BLOCK_MAP_ENTRIES_PER_PAGE != 2868 2868 ((VDO_BLOCK_SIZE - sizeof(struct block_map_page)) / 2869 2869 sizeof(struct block_map_entry))); 2870 - result = ASSERT(cache_size > 0, "block map cache size is specified"); 2871 - if (result != UDS_SUCCESS) 2870 + 
result = VDO_ASSERT(cache_size > 0, "block map cache size is specified"); 2871 + if (result != VDO_SUCCESS) 2872 2872 return result; 2873 2873 2874 2874 result = vdo_allocate_extended(struct block_map, ··· 2937 2937 for (z = 0; z < map->zone_count; z++) { 2938 2938 struct dirty_lists *dirty_lists = map->zones[z].dirty_lists; 2939 2939 2940 - ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set"); 2940 + VDO_ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set"); 2941 2941 dirty_lists->oldest_period = map->current_era_point; 2942 2942 dirty_lists->next_period = map->current_era_point + 1; 2943 2943 dirty_lists->offset = map->current_era_point % dirty_lists->maximum_age; ··· 2971 2971 { 2972 2972 struct block_map_zone *zone = container_of(state, struct block_map_zone, state); 2973 2973 2974 - ASSERT_LOG_ONLY((zone->active_lookups == 0), 2975 - "%s() called with no active lookups", __func__); 2974 + VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0), 2975 + "%s() called with no active lookups", __func__); 2976 2976 2977 2977 if (!vdo_is_state_suspending(state)) { 2978 2978 while (zone->dirty_lists->oldest_period < zone->dirty_lists->next_period)
+5 -5
drivers/md/dm-vdo/completion.c
··· 60 60 61 61 static inline void assert_incomplete(struct vdo_completion *completion) 62 62 { 63 - ASSERT_LOG_ONLY(!completion->complete, "completion is not complete"); 63 + VDO_ASSERT_LOG_ONLY(!completion->complete, "completion is not complete"); 64 64 } 65 65 66 66 /** ··· 111 111 struct vdo *vdo = completion->vdo; 112 112 thread_id_t thread_id = completion->callback_thread_id; 113 113 114 - if (ASSERT(thread_id < vdo->thread_config.thread_count, 115 - "thread_id %u (completion type %d) is less than thread count %u", 116 - thread_id, completion->type, 117 - vdo->thread_config.thread_count) != UDS_SUCCESS) 114 + if (VDO_ASSERT(thread_id < vdo->thread_config.thread_count, 115 + "thread_id %u (completion type %d) is less than thread count %u", 116 + thread_id, completion->type, 117 + vdo->thread_config.thread_count) != VDO_SUCCESS) 118 118 BUG(); 119 119 120 120 completion->requeue = false;
+3 -3
drivers/md/dm-vdo/completion.h
··· 85 85 static inline int vdo_assert_completion_type(struct vdo_completion *completion, 86 86 enum vdo_completion_type expected) 87 87 { 88 - return ASSERT(expected == completion->type, 89 - "completion type should be %u, not %u", expected, 90 - completion->type); 88 + return VDO_ASSERT(expected == completion->type, 89 + "completion type should be %u, not %u", expected, 90 + completion->type); 91 91 } 92 92 93 93 static inline void vdo_set_completion_callback(struct vdo_completion *completion,
+54 -54
drivers/md/dm-vdo/data-vio.c
··· 232 232 if (pool->limiter.busy > 0) 233 233 return false; 234 234 235 - ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0), 236 - "no outstanding discard permits"); 235 + VDO_ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0), 236 + "no outstanding discard permits"); 237 237 238 238 return (bio_list_empty(&pool->limiter.new_waiters) && 239 239 bio_list_empty(&pool->discard_limiter.new_waiters)); ··· 277 277 if (bio == NULL) 278 278 return; 279 279 280 - ASSERT_LOG_ONLY((data_vio->remaining_discard <= 281 - (u32) (VDO_BLOCK_SIZE - data_vio->offset)), 282 - "data_vio to acknowledge is not an incomplete discard"); 280 + VDO_ASSERT_LOG_ONLY((data_vio->remaining_discard <= 281 + (u32) (VDO_BLOCK_SIZE - data_vio->offset)), 282 + "data_vio to acknowledge is not an incomplete discard"); 283 283 284 284 data_vio->user_bio = NULL; 285 285 vdo_count_bios(&vdo->stats.bios_acknowledged, bio); ··· 443 443 return; 444 444 } 445 445 446 - result = ASSERT(lock_holder->logical.locked, "logical block lock held"); 446 + result = VDO_ASSERT(lock_holder->logical.locked, "logical block lock held"); 447 447 if (result != VDO_SUCCESS) { 448 448 continue_data_vio_with_error(data_vio, result); 449 449 return; ··· 627 627 struct bio_list *waiters = &limiter->waiters; 628 628 data_vio_count_t available = limiter->limit - limiter->busy; 629 629 630 - ASSERT_LOG_ONLY((limiter->release_count <= limiter->busy), 631 - "Release count %u is not more than busy count %u", 632 - limiter->release_count, limiter->busy); 630 + VDO_ASSERT_LOG_ONLY((limiter->release_count <= limiter->busy), 631 + "Release count %u is not more than busy count %u", 632 + limiter->release_count, limiter->busy); 633 633 634 634 get_waiters(limiter); 635 635 for (; (limiter->release_count > 0) && !bio_list_empty(waiters); limiter->release_count--) ··· 850 850 if (result != VDO_SUCCESS) 851 851 return result; 852 852 853 - ASSERT_LOG_ONLY((discard_limit <= pool_size), 854 - "discard limit does not exceed pool size"); 853 + 
VDO_ASSERT_LOG_ONLY((discard_limit <= pool_size), 854 + "discard limit does not exceed pool size"); 855 855 initialize_limiter(&pool->discard_limiter, pool, assign_discard_permit, 856 856 discard_limit); 857 857 pool->discard_limiter.permitted_waiters = &pool->permitted_discards; ··· 908 908 BUG_ON(atomic_read(&pool->processing)); 909 909 910 910 spin_lock(&pool->lock); 911 - ASSERT_LOG_ONLY((pool->limiter.busy == 0), 912 - "data_vio pool must not have %u busy entries when being freed", 913 - pool->limiter.busy); 914 - ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) && 915 - bio_list_empty(&pool->limiter.new_waiters)), 916 - "data_vio pool must not have threads waiting to read or write when being freed"); 917 - ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) && 918 - bio_list_empty(&pool->discard_limiter.new_waiters)), 919 - "data_vio pool must not have threads waiting to discard when being freed"); 911 + VDO_ASSERT_LOG_ONLY((pool->limiter.busy == 0), 912 + "data_vio pool must not have %u busy entries when being freed", 913 + pool->limiter.busy); 914 + VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) && 915 + bio_list_empty(&pool->limiter.new_waiters)), 916 + "data_vio pool must not have threads waiting to read or write when being freed"); 917 + VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) && 918 + bio_list_empty(&pool->discard_limiter.new_waiters)), 919 + "data_vio pool must not have threads waiting to discard when being freed"); 920 920 spin_unlock(&pool->lock); 921 921 922 922 list_for_each_entry_safe(data_vio, tmp, &pool->available, pool_entry) { ··· 961 961 { 962 962 struct data_vio *data_vio; 963 963 964 - ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state), 965 - "data_vio_pool not quiescent on acquire"); 964 + VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state), 965 + "data_vio_pool not quiescent on acquire"); 966 966 967 967 bio->bi_private = (void *) jiffies; 968 968 spin_lock(&pool->lock); 
··· 998 998 999 999 static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name) 1000 1000 { 1001 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread), 1002 - "%s called on cpu thread", name); 1001 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread), 1002 + "%s called on cpu thread", name); 1003 1003 } 1004 1004 1005 1005 /** ··· 1173 1173 /* The lock is not locked, so it had better not be registered in the lock map. */ 1174 1174 struct data_vio *lock_holder = vdo_int_map_get(lock_map, lock->lbn); 1175 1175 1176 - ASSERT_LOG_ONLY((data_vio != lock_holder), 1177 - "no logical block lock held for block %llu", 1178 - (unsigned long long) lock->lbn); 1176 + VDO_ASSERT_LOG_ONLY((data_vio != lock_holder), 1177 + "no logical block lock held for block %llu", 1178 + (unsigned long long) lock->lbn); 1179 1179 return; 1180 1180 } 1181 1181 1182 1182 /* Release the lock by removing the lock from the map. */ 1183 1183 lock_holder = vdo_int_map_remove(lock_map, lock->lbn); 1184 - ASSERT_LOG_ONLY((data_vio == lock_holder), 1185 - "logical block lock mismatch for block %llu", 1186 - (unsigned long long) lock->lbn); 1184 + VDO_ASSERT_LOG_ONLY((data_vio == lock_holder), 1185 + "logical block lock mismatch for block %llu", 1186 + (unsigned long long) lock->lbn); 1187 1187 lock->locked = false; 1188 1188 } 1189 1189 ··· 1193 1193 struct data_vio *lock_holder, *next_lock_holder; 1194 1194 int result; 1195 1195 1196 - ASSERT_LOG_ONLY(lock->locked, "lbn_lock with waiters is not locked"); 1196 + VDO_ASSERT_LOG_ONLY(lock->locked, "lbn_lock with waiters is not locked"); 1197 1197 1198 1198 /* Another data_vio is waiting for the lock, transfer it in a single lock map operation. 
*/ 1199 1199 next_lock_holder = ··· 1210 1210 return; 1211 1211 } 1212 1212 1213 - ASSERT_LOG_ONLY((lock_holder == data_vio), 1214 - "logical block lock mismatch for block %llu", 1215 - (unsigned long long) lock->lbn); 1213 + VDO_ASSERT_LOG_ONLY((lock_holder == data_vio), 1214 + "logical block lock mismatch for block %llu", 1215 + (unsigned long long) lock->lbn); 1216 1216 lock->locked = false; 1217 1217 1218 1218 /* ··· 1275 1275 { 1276 1276 struct vdo_completion *completion = &data_vio->vio.completion; 1277 1277 1278 - ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL, 1279 - "complete data_vio has no allocation lock"); 1280 - ASSERT_LOG_ONLY(data_vio->hash_lock == NULL, 1281 - "complete data_vio has no hash lock"); 1278 + VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL, 1279 + "complete data_vio has no allocation lock"); 1280 + VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL, 1281 + "complete data_vio has no hash lock"); 1282 1282 if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) || 1283 1283 (completion->result != VDO_SUCCESS)) { 1284 1284 struct data_vio_pool *pool = completion->vdo->data_vio_pool; ··· 1404 1404 { 1405 1405 struct allocation *allocation = &data_vio->allocation; 1406 1406 1407 - ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK), 1408 - "data_vio does not have an allocation"); 1407 + VDO_ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK), 1408 + "data_vio does not have an allocation"); 1409 1409 allocation->write_lock_type = write_lock_type; 1410 1410 allocation->zone = vdo_get_next_allocation_zone(data_vio->logical.zone); 1411 1411 allocation->first_allocation_zone = allocation->zone->zone_number; ··· 1796 1796 */ 1797 1797 void launch_compress_data_vio(struct data_vio *data_vio) 1798 1798 { 1799 - ASSERT_LOG_ONLY(!data_vio->is_duplicate, "compressing a non-duplicate block"); 1800 - ASSERT_LOG_ONLY(data_vio->hash_lock != NULL, 1801 - "data_vio to compress has a hash_lock"); 1802 - ASSERT_LOG_ONLY(data_vio_has_allocation(data_vio), 
1803 - "data_vio to compress has an allocation"); 1799 + VDO_ASSERT_LOG_ONLY(!data_vio->is_duplicate, "compressing a non-duplicate block"); 1800 + VDO_ASSERT_LOG_ONLY(data_vio->hash_lock != NULL, 1801 + "data_vio to compress has a hash_lock"); 1802 + VDO_ASSERT_LOG_ONLY(data_vio_has_allocation(data_vio), 1803 + "data_vio to compress has an allocation"); 1804 1804 1805 1805 /* 1806 1806 * There are 4 reasons why a data_vio which has reached this point will not be eligible for ··· 1841 1841 struct data_vio *data_vio = as_data_vio(completion); 1842 1842 1843 1843 assert_data_vio_on_cpu_thread(data_vio); 1844 - ASSERT_LOG_ONLY(!data_vio->is_zero, "zero blocks should not be hashed"); 1844 + VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "zero blocks should not be hashed"); 1845 1845 1846 1846 murmurhash3_128(data_vio->vio.data, VDO_BLOCK_SIZE, 0x62ea60be, 1847 1847 &data_vio->record_name); ··· 1856 1856 static void prepare_for_dedupe(struct data_vio *data_vio) 1857 1857 { 1858 1858 /* We don't care what thread we are on. 
*/ 1859 - ASSERT_LOG_ONLY(!data_vio->is_zero, "must not prepare to dedupe zero blocks"); 1859 + VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "must not prepare to dedupe zero blocks"); 1860 1860 1861 1861 /* 1862 1862 * Before we can dedupe, we need to know the record name, so the first ··· 1929 1929 struct data_vio *data_vio = as_data_vio(completion); 1930 1930 struct vdo *vdo = completion->vdo; 1931 1931 1932 - ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) || 1933 - (vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)), 1934 - "%s() called on bio ack queue", __func__); 1935 - ASSERT_LOG_ONLY(data_vio_has_flush_generation_lock(data_vio), 1936 - "write VIO to be acknowledged has a flush generation lock"); 1932 + VDO_ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) || 1933 + (vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)), 1934 + "%s() called on bio ack queue", __func__); 1935 + VDO_ASSERT_LOG_ONLY(data_vio_has_flush_generation_lock(data_vio), 1936 + "write VIO to be acknowledged has a flush generation lock"); 1937 1937 acknowledge_data_vio(data_vio); 1938 1938 if (data_vio->new_mapped.pbn == VDO_ZERO_BLOCK) { 1939 1939 /* This is a zero write or discard */ ··· 1998 1998 1999 1999 static int assert_is_discard(struct data_vio *data_vio) 2000 2000 { 2001 - int result = ASSERT(data_vio->is_discard, 2002 - "data_vio with no block map page is a discard"); 2001 + int result = VDO_ASSERT(data_vio->is_discard, 2002 + "data_vio with no block map page is a discard"); 2003 2003 2004 2004 return ((result == VDO_SUCCESS) ? result : VDO_READ_ONLY); 2005 2005 }
+34 -34
drivers/md/dm-vdo/data-vio.h
··· 280 280 281 281 static inline struct data_vio *vio_as_data_vio(struct vio *vio) 282 282 { 283 - ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio"); 283 + VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio"); 284 284 return container_of(vio, struct data_vio, vio); 285 285 } 286 286 ··· 374 374 * It's odd to use the LBN, but converting the record name to hex is a bit clunky for an 375 375 * inline, and the LBN better than nothing as an identifier. 376 376 */ 377 - ASSERT_LOG_ONLY((expected == thread_id), 378 - "data_vio for logical block %llu on thread %u, should be on hash zone thread %u", 379 - (unsigned long long) data_vio->logical.lbn, thread_id, expected); 377 + VDO_ASSERT_LOG_ONLY((expected == thread_id), 378 + "data_vio for logical block %llu on thread %u, should be on hash zone thread %u", 379 + (unsigned long long) data_vio->logical.lbn, thread_id, expected); 380 380 } 381 381 382 382 static inline void set_data_vio_hash_zone_callback(struct data_vio *data_vio, ··· 402 402 thread_id_t expected = data_vio->logical.zone->thread_id; 403 403 thread_id_t thread_id = vdo_get_callback_thread_id(); 404 404 405 - ASSERT_LOG_ONLY((expected == thread_id), 406 - "data_vio for logical block %llu on thread %u, should be on thread %u", 407 - (unsigned long long) data_vio->logical.lbn, thread_id, expected); 405 + VDO_ASSERT_LOG_ONLY((expected == thread_id), 406 + "data_vio for logical block %llu on thread %u, should be on thread %u", 407 + (unsigned long long) data_vio->logical.lbn, thread_id, expected); 408 408 } 409 409 410 410 static inline void set_data_vio_logical_callback(struct data_vio *data_vio, ··· 430 430 thread_id_t expected = data_vio->allocation.zone->thread_id; 431 431 thread_id_t thread_id = vdo_get_callback_thread_id(); 432 432 433 - ASSERT_LOG_ONLY((expected == thread_id), 434 - "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u", 435 - (unsigned long long) data_vio->allocation.pbn, 
thread_id, 436 - expected); 433 + VDO_ASSERT_LOG_ONLY((expected == thread_id), 434 + "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u", 435 + (unsigned long long) data_vio->allocation.pbn, thread_id, 436 + expected); 437 437 } 438 438 439 439 static inline void set_data_vio_allocated_zone_callback(struct data_vio *data_vio, ··· 460 460 thread_id_t expected = data_vio->duplicate.zone->thread_id; 461 461 thread_id_t thread_id = vdo_get_callback_thread_id(); 462 462 463 - ASSERT_LOG_ONLY((expected == thread_id), 464 - "data_vio for duplicate physical block %llu on thread %u, should be on thread %u", 465 - (unsigned long long) data_vio->duplicate.pbn, thread_id, 466 - expected); 463 + VDO_ASSERT_LOG_ONLY((expected == thread_id), 464 + "data_vio for duplicate physical block %llu on thread %u, should be on thread %u", 465 + (unsigned long long) data_vio->duplicate.pbn, thread_id, 466 + expected); 467 467 } 468 468 469 469 static inline void set_data_vio_duplicate_zone_callback(struct data_vio *data_vio, ··· 490 490 thread_id_t expected = data_vio->mapped.zone->thread_id; 491 491 thread_id_t thread_id = vdo_get_callback_thread_id(); 492 492 493 - ASSERT_LOG_ONLY((expected == thread_id), 494 - "data_vio for mapped physical block %llu on thread %u, should be on thread %u", 495 - (unsigned long long) data_vio->mapped.pbn, thread_id, expected); 493 + VDO_ASSERT_LOG_ONLY((expected == thread_id), 494 + "data_vio for mapped physical block %llu on thread %u, should be on thread %u", 495 + (unsigned long long) data_vio->mapped.pbn, thread_id, expected); 496 496 } 497 497 498 498 static inline void set_data_vio_mapped_zone_callback(struct data_vio *data_vio, ··· 507 507 thread_id_t expected = data_vio->new_mapped.zone->thread_id; 508 508 thread_id_t thread_id = vdo_get_callback_thread_id(); 509 509 510 - ASSERT_LOG_ONLY((expected == thread_id), 511 - "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u", 512 - 
(unsigned long long) data_vio->new_mapped.pbn, thread_id, 513 - expected); 510 + VDO_ASSERT_LOG_ONLY((expected == thread_id), 511 + "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u", 512 + (unsigned long long) data_vio->new_mapped.pbn, thread_id, 513 + expected); 514 514 } 515 515 516 516 static inline void set_data_vio_new_mapped_zone_callback(struct data_vio *data_vio, ··· 525 525 thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread; 526 526 thread_id_t thread_id = vdo_get_callback_thread_id(); 527 527 528 - ASSERT_LOG_ONLY((journal_thread == thread_id), 529 - "data_vio for logical block %llu on thread %u, should be on journal thread %u", 530 - (unsigned long long) data_vio->logical.lbn, thread_id, 531 - journal_thread); 528 + VDO_ASSERT_LOG_ONLY((journal_thread == thread_id), 529 + "data_vio for logical block %llu on thread %u, should be on journal thread %u", 530 + (unsigned long long) data_vio->logical.lbn, thread_id, 531 + journal_thread); 532 532 } 533 533 534 534 static inline void set_data_vio_journal_callback(struct data_vio *data_vio, ··· 555 555 thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread; 556 556 thread_id_t thread_id = vdo_get_callback_thread_id(); 557 557 558 - ASSERT_LOG_ONLY((packer_thread == thread_id), 559 - "data_vio for logical block %llu on thread %u, should be on packer thread %u", 560 - (unsigned long long) data_vio->logical.lbn, thread_id, 561 - packer_thread); 558 + VDO_ASSERT_LOG_ONLY((packer_thread == thread_id), 559 + "data_vio for logical block %llu on thread %u, should be on packer thread %u", 560 + (unsigned long long) data_vio->logical.lbn, thread_id, 561 + packer_thread); 562 562 } 563 563 564 564 static inline void set_data_vio_packer_callback(struct data_vio *data_vio, ··· 585 585 thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread; 586 586 thread_id_t thread_id = vdo_get_callback_thread_id(); 587 
587 588 - ASSERT_LOG_ONLY((cpu_thread == thread_id), 589 - "data_vio for logical block %llu on thread %u, should be on cpu thread %u", 590 - (unsigned long long) data_vio->logical.lbn, thread_id, 591 - cpu_thread); 588 + VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id), 589 + "data_vio for logical block %llu on thread %u, should be on cpu thread %u", 590 + (unsigned long long) data_vio->logical.lbn, thread_id, 591 + cpu_thread); 592 592 } 593 593 594 594 static inline void set_data_vio_cpu_callback(struct data_vio *data_vio,
+80 -81
drivers/md/dm-vdo/dedupe.c
··· 327 327 328 328 static inline void assert_in_hash_zone(struct hash_zone *zone, const char *name) 329 329 { 330 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id), 331 - "%s called on hash zone thread", name); 330 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id), 331 + "%s called on hash zone thread", name); 332 332 } 333 333 334 334 static inline bool change_context_state(struct dedupe_context *context, int old, int new) ··· 404 404 { 405 405 /* Not safe to access the agent field except from the hash zone. */ 406 406 assert_data_vio_in_hash_zone(data_vio); 407 - ASSERT_LOG_ONLY(data_vio == data_vio->hash_lock->agent, 408 - "%s must be for the hash lock agent", where); 407 + VDO_ASSERT_LOG_ONLY(data_vio == data_vio->hash_lock->agent, 408 + "%s must be for the hash lock agent", where); 409 409 } 410 410 411 411 /** ··· 416 416 */ 417 417 static void set_duplicate_lock(struct hash_lock *hash_lock, struct pbn_lock *pbn_lock) 418 418 { 419 - ASSERT_LOG_ONLY((hash_lock->duplicate_lock == NULL), 420 - "hash lock must not already hold a duplicate lock"); 421 - 419 + VDO_ASSERT_LOG_ONLY((hash_lock->duplicate_lock == NULL), 420 + "hash lock must not already hold a duplicate lock"); 422 421 pbn_lock->holder_count += 1; 423 422 hash_lock->duplicate_lock = pbn_lock; 424 423 } ··· 445 446 struct hash_lock *old_lock = data_vio->hash_lock; 446 447 447 448 if (old_lock != NULL) { 448 - ASSERT_LOG_ONLY(data_vio->hash_zone != NULL, 449 - "must have a hash zone when holding a hash lock"); 450 - ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry), 451 - "must be on a hash lock ring when holding a hash lock"); 452 - ASSERT_LOG_ONLY(old_lock->reference_count > 0, 453 - "hash lock reference must be counted"); 449 + VDO_ASSERT_LOG_ONLY(data_vio->hash_zone != NULL, 450 + "must have a hash zone when holding a hash lock"); 451 + VDO_ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry), 452 + "must be on a hash lock ring when holding a hash 
lock"); 453 + VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 0, 454 + "hash lock reference must be counted"); 454 455 455 456 if ((old_lock->state != VDO_HASH_LOCK_BYPASSING) && 456 457 (old_lock->state != VDO_HASH_LOCK_UNLOCKING)) { ··· 458 459 * If the reference count goes to zero in a non-terminal state, we're most 459 460 * likely leaking this lock. 460 461 */ 461 - ASSERT_LOG_ONLY(old_lock->reference_count > 1, 462 - "hash locks should only become unreferenced in a terminal state, not state %s", 463 - get_hash_lock_state_name(old_lock->state)); 462 + VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 1, 463 + "hash locks should only become unreferenced in a terminal state, not state %s", 464 + get_hash_lock_state_name(old_lock->state)); 464 465 } 465 466 466 467 list_del_init(&data_vio->hash_lock_entry); ··· 640 641 641 642 assert_hash_lock_agent(agent, __func__); 642 643 643 - ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, 644 - "must have released the duplicate lock for the hash lock"); 644 + VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, 645 + "must have released the duplicate lock for the hash lock"); 645 646 646 647 if (!lock->verified) { 647 648 /* ··· 695 696 struct hash_lock *lock = agent->hash_lock; 696 697 697 698 assert_data_vio_in_duplicate_zone(agent); 698 - ASSERT_LOG_ONLY(lock->duplicate_lock != NULL, 699 - "must have a duplicate lock to release"); 699 + VDO_ASSERT_LOG_ONLY(lock->duplicate_lock != NULL, 700 + "must have a duplicate lock to release"); 700 701 701 702 vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, agent->duplicate.pbn, 702 703 vdo_forget(lock->duplicate_lock)); ··· 798 799 { 799 800 lock->state = VDO_HASH_LOCK_UPDATING; 800 801 801 - ASSERT_LOG_ONLY(lock->verified, "new advice should have been verified"); 802 - ASSERT_LOG_ONLY(lock->update_advice, "should only update advice if needed"); 802 + VDO_ASSERT_LOG_ONLY(lock->verified, "new advice should have been verified"); 803 + VDO_ASSERT_LOG_ONLY(lock->update_advice, 
"should only update advice if needed"); 803 804 804 805 agent->last_async_operation = VIO_ASYNC_OP_UPDATE_DEDUPE_INDEX; 805 806 set_data_vio_hash_zone_callback(agent, finish_updating); ··· 821 822 { 822 823 struct data_vio *agent = data_vio; 823 824 824 - ASSERT_LOG_ONLY(lock->agent == NULL, "shouldn't have an agent in DEDUPING"); 825 - ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters), 826 - "shouldn't have any lock waiters in DEDUPING"); 825 + VDO_ASSERT_LOG_ONLY(lock->agent == NULL, "shouldn't have an agent in DEDUPING"); 826 + VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters), 827 + "shouldn't have any lock waiters in DEDUPING"); 827 828 828 829 /* Just release the lock reference if other data_vios are still deduping. */ 829 830 if (lock->reference_count > 1) { ··· 878 879 * Borrow and prepare a lock from the pool so we don't have to do two int_map accesses 879 880 * in the common case of no lock contention. 880 881 */ 881 - result = ASSERT(!list_empty(&zone->lock_pool), 882 - "never need to wait for a free hash lock"); 882 + result = VDO_ASSERT(!list_empty(&zone->lock_pool), 883 + "never need to wait for a free hash lock"); 883 884 if (result != VDO_SUCCESS) 884 885 return result; 885 886 ··· 901 902 902 903 if (replace_lock != NULL) { 903 904 /* On mismatch put the old lock back and return a severe error */ 904 - ASSERT_LOG_ONLY(lock == replace_lock, 905 - "old lock must have been in the lock map"); 905 + VDO_ASSERT_LOG_ONLY(lock == replace_lock, 906 + "old lock must have been in the lock map"); 906 907 /* TODO: Check earlier and bail out? */ 907 - ASSERT_LOG_ONLY(replace_lock->registered, 908 - "old lock must have been marked registered"); 908 + VDO_ASSERT_LOG_ONLY(replace_lock->registered, 909 + "old lock must have been marked registered"); 909 910 replace_lock->registered = false; 910 911 } 911 912 ··· 1017 1018 * deduplicate against it. 
1018 1019 */ 1019 1020 if (lock->duplicate_lock == NULL) { 1020 - ASSERT_LOG_ONLY(!vdo_is_state_compressed(agent->new_mapped.state), 1021 - "compression must have shared a lock"); 1022 - ASSERT_LOG_ONLY(agent_is_done, 1023 - "agent must have written the new duplicate"); 1021 + VDO_ASSERT_LOG_ONLY(!vdo_is_state_compressed(agent->new_mapped.state), 1022 + "compression must have shared a lock"); 1023 + VDO_ASSERT_LOG_ONLY(agent_is_done, 1024 + "agent must have written the new duplicate"); 1024 1025 transfer_allocation_lock(agent); 1025 1026 } 1026 1027 1027 - ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(lock->duplicate_lock), 1028 - "duplicate_lock must be a PBN read lock"); 1028 + VDO_ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(lock->duplicate_lock), 1029 + "duplicate_lock must be a PBN read lock"); 1029 1030 1030 1031 /* 1031 1032 * This state is not like any of the other states. There is no designated agent--the agent ··· 1203 1204 agent->scratch_block); 1204 1205 1205 1206 lock->state = VDO_HASH_LOCK_VERIFYING; 1206 - ASSERT_LOG_ONLY(!lock->verified, "hash lock only verifies advice once"); 1207 + VDO_ASSERT_LOG_ONLY(!lock->verified, "hash lock only verifies advice once"); 1207 1208 1208 1209 agent->last_async_operation = VIO_ASYNC_OP_VERIFY_DUPLICATION; 1209 1210 result = vio_reset_bio(vio, buffer, verify_endio, REQ_OP_READ, ··· 1233 1234 assert_hash_lock_agent(agent, __func__); 1234 1235 1235 1236 if (!agent->is_duplicate) { 1236 - ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, 1237 - "must not hold duplicate_lock if not flagged as a duplicate"); 1237 + VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, 1238 + "must not hold duplicate_lock if not flagged as a duplicate"); 1238 1239 /* 1239 1240 * LOCKING -> WRITING transition: The advice block is being modified or has no 1240 1241 * available references, so try to write or compress the data, remembering to ··· 1246 1247 return; 1247 1248 } 1248 1249 1249 - ASSERT_LOG_ONLY(lock->duplicate_lock != NULL, 1250 - "must hold 
duplicate_lock if flagged as a duplicate"); 1250 + VDO_ASSERT_LOG_ONLY(lock->duplicate_lock != NULL, 1251 + "must hold duplicate_lock if flagged as a duplicate"); 1251 1252 1252 1253 if (!lock->verified) { 1253 1254 /* ··· 1417 1418 */ 1418 1419 static void start_locking(struct hash_lock *lock, struct data_vio *agent) 1419 1420 { 1420 - ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, 1421 - "must not acquire a duplicate lock when already holding it"); 1421 + VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL, 1422 + "must not acquire a duplicate lock when already holding it"); 1422 1423 1423 1424 lock->state = VDO_HASH_LOCK_LOCKING; 1424 1425 ··· 1724 1725 */ 1725 1726 static void report_bogus_lock_state(struct hash_lock *lock, struct data_vio *data_vio) 1726 1727 { 1727 - ASSERT_LOG_ONLY(false, "hash lock must not be in unimplemented state %s", 1728 - get_hash_lock_state_name(lock->state)); 1728 + VDO_ASSERT_LOG_ONLY(false, "hash lock must not be in unimplemented state %s", 1729 + get_hash_lock_state_name(lock->state)); 1729 1730 continue_data_vio_with_error(data_vio, VDO_LOCK_ERROR); 1730 1731 } 1731 1732 ··· 1747 1748 1748 1749 switch (lock->state) { 1749 1750 case VDO_HASH_LOCK_WRITING: 1750 - ASSERT_LOG_ONLY(data_vio == lock->agent, 1751 - "only the lock agent may continue the lock"); 1751 + VDO_ASSERT_LOG_ONLY(data_vio == lock->agent, 1752 + "only the lock agent may continue the lock"); 1752 1753 finish_writing(lock, data_vio); 1753 1754 break; 1754 1755 ··· 1814 1815 int result; 1815 1816 1816 1817 /* FIXME: BUG_ON() and/or enter read-only mode? 
*/ 1817 - result = ASSERT(data_vio->hash_lock == NULL, 1818 - "must not already hold a hash lock"); 1818 + result = VDO_ASSERT(data_vio->hash_lock == NULL, 1819 + "must not already hold a hash lock"); 1819 1820 if (result != VDO_SUCCESS) 1820 1821 return result; 1821 1822 1822 - result = ASSERT(list_empty(&data_vio->hash_lock_entry), 1823 - "must not already be a member of a hash lock ring"); 1823 + result = VDO_ASSERT(list_empty(&data_vio->hash_lock_entry), 1824 + "must not already be a member of a hash lock ring"); 1824 1825 if (result != VDO_SUCCESS) 1825 1826 return result; 1826 1827 1827 - return ASSERT(data_vio->recovery_sequence_number == 0, 1828 - "must not hold a recovery lock when getting a hash lock"); 1828 + return VDO_ASSERT(data_vio->recovery_sequence_number == 0, 1829 + "must not hold a recovery lock when getting a hash lock"); 1829 1830 } 1830 1831 1831 1832 /** ··· 1932 1933 struct hash_lock *removed; 1933 1934 1934 1935 removed = vdo_int_map_remove(zone->hash_lock_map, lock_key); 1935 - ASSERT_LOG_ONLY(lock == removed, 1936 - "hash lock being released must have been mapped"); 1936 + VDO_ASSERT_LOG_ONLY(lock == removed, 1937 + "hash lock being released must have been mapped"); 1937 1938 } else { 1938 - ASSERT_LOG_ONLY(lock != vdo_int_map_get(zone->hash_lock_map, lock_key), 1939 - "unregistered hash lock must not be in the lock map"); 1939 + VDO_ASSERT_LOG_ONLY(lock != vdo_int_map_get(zone->hash_lock_map, lock_key), 1940 + "unregistered hash lock must not be in the lock map"); 1940 1941 } 1941 1942 1942 - ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters), 1943 - "hash lock returned to zone must have no waiters"); 1944 - ASSERT_LOG_ONLY((lock->duplicate_lock == NULL), 1945 - "hash lock returned to zone must not reference a PBN lock"); 1946 - ASSERT_LOG_ONLY((lock->state == VDO_HASH_LOCK_BYPASSING), 1947 - "returned hash lock must not be in use with state %s", 1948 - get_hash_lock_state_name(lock->state)); 1949 - 
ASSERT_LOG_ONLY(list_empty(&lock->pool_node), 1950 - "hash lock returned to zone must not be in a pool ring"); 1951 - ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring), 1952 - "hash lock returned to zone must not reference DataVIOs"); 1943 + VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters), 1944 + "hash lock returned to zone must have no waiters"); 1945 + VDO_ASSERT_LOG_ONLY((lock->duplicate_lock == NULL), 1946 + "hash lock returned to zone must not reference a PBN lock"); 1947 + VDO_ASSERT_LOG_ONLY((lock->state == VDO_HASH_LOCK_BYPASSING), 1948 + "returned hash lock must not be in use with state %s", 1949 + get_hash_lock_state_name(lock->state)); 1950 + VDO_ASSERT_LOG_ONLY(list_empty(&lock->pool_node), 1951 + "hash lock returned to zone must not be in a pool ring"); 1952 + VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring), 1953 + "hash lock returned to zone must not reference DataVIOs"); 1953 1954 1954 1955 return_hash_lock_to_pool(zone, lock); 1955 1956 } ··· 1964 1965 struct allocation *allocation = &data_vio->allocation; 1965 1966 struct hash_lock *hash_lock = data_vio->hash_lock; 1966 1967 1967 - ASSERT_LOG_ONLY(data_vio->new_mapped.pbn == allocation->pbn, 1968 - "transferred lock must be for the block written"); 1968 + VDO_ASSERT_LOG_ONLY(data_vio->new_mapped.pbn == allocation->pbn, 1969 + "transferred lock must be for the block written"); 1969 1970 1970 1971 allocation->pbn = VDO_ZERO_BLOCK; 1971 1972 1972 - ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(allocation->lock), 1973 - "must have downgraded the allocation lock before transfer"); 1973 + VDO_ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(allocation->lock), 1974 + "must have downgraded the allocation lock before transfer"); 1974 1975 1975 1976 hash_lock->duplicate = data_vio->new_mapped; 1976 1977 data_vio->duplicate = data_vio->new_mapped; ··· 1996 1997 { 1997 1998 bool claimed; 1998 1999 1999 - ASSERT_LOG_ONLY(vdo_get_duplicate_lock(data_vio) == NULL, 2000 - "a duplicate PBN lock should not exist when 
writing"); 2001 - ASSERT_LOG_ONLY(vdo_is_state_compressed(data_vio->new_mapped.state), 2002 - "lock transfer must be for a compressed write"); 2000 + VDO_ASSERT_LOG_ONLY(vdo_get_duplicate_lock(data_vio) == NULL, 2001 + "a duplicate PBN lock should not exist when writing"); 2002 + VDO_ASSERT_LOG_ONLY(vdo_is_state_compressed(data_vio->new_mapped.state), 2003 + "lock transfer must be for a compressed write"); 2003 2004 assert_data_vio_in_new_mapped_zone(data_vio); 2004 2005 2005 2006 /* First sharer downgrades the lock. */ ··· 2019 2020 * deduplicating against it before our incRef. 2020 2021 */ 2021 2022 claimed = vdo_claim_pbn_lock_increment(pbn_lock); 2022 - ASSERT_LOG_ONLY(claimed, "impossible to fail to claim an initial increment"); 2023 + VDO_ASSERT_LOG_ONLY(claimed, "impossible to fail to claim an initial increment"); 2023 2024 } 2024 2025 2025 2026 static void dedupe_kobj_release(struct kobject *directory) ··· 2295 2296 */ 2296 2297 if (!change_context_state(context, DEDUPE_CONTEXT_TIMED_OUT, 2297 2298 DEDUPE_CONTEXT_TIMED_OUT_COMPLETE)) { 2298 - ASSERT_LOG_ONLY(false, "uds request was timed out (state %d)", 2299 - atomic_read(&context->state)); 2299 + VDO_ASSERT_LOG_ONLY(false, "uds request was timed out (state %d)", 2300 + atomic_read(&context->state)); 2300 2301 } 2301 2302 2302 2303 uds_funnel_queue_put(context->zone->timed_out_complete, &context->queue_entry); ··· 2340 2341 2341 2342 if (recycled > 0) 2342 2343 WRITE_ONCE(zone->active, zone->active - recycled); 2343 - ASSERT_LOG_ONLY(READ_ONCE(zone->active) == 0, "all contexts inactive"); 2344 + VDO_ASSERT_LOG_ONLY(READ_ONCE(zone->active) == 0, "all contexts inactive"); 2344 2345 vdo_finish_draining(&zone->state); 2345 2346 } 2346 2347
+19 -19
drivers/md/dm-vdo/dm-vdo-target.c
··· 904 904 struct vdo_work_queue *current_work_queue; 905 905 const struct admin_state_code *code = vdo_get_admin_state_code(&vdo->admin.state); 906 906 907 - ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s", 908 - code->name); 907 + VDO_ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s", 908 + code->name); 909 909 910 910 /* Count all incoming bios. */ 911 911 vdo_count_bios(&vdo->stats.bios_in, bio); ··· 1244 1244 /* Assert that we are operating on the correct thread for the current phase. */ 1245 1245 static void assert_admin_phase_thread(struct vdo *vdo, const char *what) 1246 1246 { 1247 - ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo), 1248 - "%s on correct thread for %s", what, 1249 - ADMIN_PHASE_NAMES[vdo->admin.phase]); 1247 + VDO_ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo), 1248 + "%s on correct thread for %s", what, 1249 + ADMIN_PHASE_NAMES[vdo->admin.phase]); 1250 1250 } 1251 1251 1252 1252 /** ··· 1424 1424 { 1425 1425 mutex_lock(&instances_lock); 1426 1426 if (instance >= instances.bit_count) { 1427 - ASSERT_LOG_ONLY(false, 1428 - "instance number %u must be less than bit count %u", 1429 - instance, instances.bit_count); 1427 + VDO_ASSERT_LOG_ONLY(false, 1428 + "instance number %u must be less than bit count %u", 1429 + instance, instances.bit_count); 1430 1430 } else if (test_bit(instance, instances.words) == 0) { 1431 - ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance); 1431 + VDO_ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance); 1432 1432 } else { 1433 1433 __clear_bit(instance, instances.words); 1434 1434 instances.count -= 1; ··· 1577 1577 if (instance >= instances.bit_count) { 1578 1578 /* Nothing free after next, so wrap around to instance zero. 
*/ 1579 1579 instance = find_first_zero_bit(instances.words, instances.bit_count); 1580 - result = ASSERT(instance < instances.bit_count, 1581 - "impossibly, no zero bit found"); 1582 - if (result != UDS_SUCCESS) 1580 + result = VDO_ASSERT(instance < instances.bit_count, 1581 + "impossibly, no zero bit found"); 1582 + if (result != VDO_SUCCESS) 1583 1583 return result; 1584 1584 } 1585 1585 ··· 1729 1729 1730 1730 uds_log_info("Preparing to resize physical to %llu", 1731 1731 (unsigned long long) new_physical_blocks); 1732 - ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks), 1733 - "New physical size is larger than current physical size"); 1732 + VDO_ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks), 1733 + "New physical size is larger than current physical size"); 1734 1734 result = perform_admin_operation(vdo, PREPARE_GROW_PHYSICAL_PHASE_START, 1735 1735 check_may_grow_physical, 1736 1736 finish_operation_callback, ··· 1829 1829 1830 1830 uds_log_info("Preparing to resize logical to %llu", 1831 1831 (unsigned long long) config->logical_blocks); 1832 - ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks), 1833 - "New logical size is larger than current size"); 1832 + VDO_ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks), 1833 + "New logical size is larger than current size"); 1834 1834 1835 1835 result = vdo_prepare_to_grow_block_map(vdo->block_map, 1836 1836 config->logical_blocks); ··· 2890 2890 if (dm_registered) 2891 2891 dm_unregister_target(&vdo_target_bio); 2892 2892 2893 - ASSERT_LOG_ONLY(instances.count == 0, 2894 - "should have no instance numbers still in use, but have %u", 2895 - instances.count); 2893 + VDO_ASSERT_LOG_ONLY(instances.count == 0, 2894 + "should have no instance numbers still in use, but have %u", 2895 + instances.count); 2896 2896 vdo_free(instances.words); 2897 2897 memset(&instances, 0, sizeof(struct instance_tracker)); 2898 2898
+82 -82
drivers/md/dm-vdo/encodings.c
··· 320 320 321 321 decode_volume_geometry(block, &offset, geometry, header.version.major_version); 322 322 323 - result = ASSERT(header.size == offset + sizeof(u32), 324 - "should have decoded up to the geometry checksum"); 323 + result = VDO_ASSERT(header.size == offset + sizeof(u32), 324 + "should have decoded up to the geometry checksum"); 325 325 if (result != VDO_SUCCESS) 326 326 return result; 327 327 ··· 380 380 initial_offset = *offset; 381 381 382 382 decode_u64_le(buffer, offset, &flat_page_origin); 383 - result = ASSERT(flat_page_origin == VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN, 384 - "Flat page origin must be %u (recorded as %llu)", 385 - VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN, 386 - (unsigned long long) state->flat_page_origin); 387 - if (result != UDS_SUCCESS) 383 + result = VDO_ASSERT(flat_page_origin == VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN, 384 + "Flat page origin must be %u (recorded as %llu)", 385 + VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN, 386 + (unsigned long long) state->flat_page_origin); 387 + if (result != VDO_SUCCESS) 388 388 return result; 389 389 390 390 decode_u64_le(buffer, offset, &flat_page_count); 391 - result = ASSERT(flat_page_count == 0, 392 - "Flat page count must be 0 (recorded as %llu)", 393 - (unsigned long long) state->flat_page_count); 394 - if (result != UDS_SUCCESS) 391 + result = VDO_ASSERT(flat_page_count == 0, 392 + "Flat page count must be 0 (recorded as %llu)", 393 + (unsigned long long) state->flat_page_count); 394 + if (result != VDO_SUCCESS) 395 395 return result; 396 396 397 397 decode_u64_le(buffer, offset, &root_origin); 398 398 decode_u64_le(buffer, offset, &root_count); 399 399 400 - result = ASSERT(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset, 401 - "decoded block map component size must match header size"); 400 + result = VDO_ASSERT(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset, 401 + "decoded block map component size must match header size"); 402 402 if (result != VDO_SUCCESS) 403 403 return result; 404 404 ··· 
425 425 encode_u64_le(buffer, offset, state.root_origin); 426 426 encode_u64_le(buffer, offset, state.root_count); 427 427 428 - ASSERT_LOG_ONLY(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset, 429 - "encoded block map component size must match header size"); 428 + VDO_ASSERT_LOG_ONLY(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset, 429 + "encoded block map component size must match header size"); 430 430 } 431 431 432 432 /** ··· 477 477 encode_u64_le(buffer, offset, state.logical_blocks_used); 478 478 encode_u64_le(buffer, offset, state.block_map_data_blocks); 479 479 480 - ASSERT_LOG_ONLY(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset, 481 - "encoded recovery journal component size must match header size"); 480 + VDO_ASSERT_LOG_ONLY(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset, 481 + "encoded recovery journal component size must match header size"); 482 482 } 483 483 484 484 /** ··· 508 508 decode_u64_le(buffer, offset, &logical_blocks_used); 509 509 decode_u64_le(buffer, offset, &block_map_data_blocks); 510 510 511 - result = ASSERT(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset, 512 - "decoded recovery journal component size must match header size"); 513 - if (result != UDS_SUCCESS) 511 + result = VDO_ASSERT(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset, 512 + "decoded recovery journal component size must match header size"); 513 + if (result != VDO_SUCCESS) 514 514 return result; 515 515 516 516 *state = (struct recovery_journal_state_7_0) { ··· 566 566 encode_u64_le(buffer, offset, state.last_block); 567 567 buffer[(*offset)++] = state.zone_count; 568 568 569 - ASSERT_LOG_ONLY(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset, 570 - "encoded block map component size must match header size"); 569 + VDO_ASSERT_LOG_ONLY(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset, 570 + "encoded block map component size must match header size"); 571 571 } 572 
572 573 573 /** ··· 618 618 decode_u64_le(buffer, offset, &last_block); 619 619 zone_count = buffer[(*offset)++]; 620 620 621 - result = ASSERT(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset, 622 - "decoded slab depot component size must match header size"); 623 - if (result != UDS_SUCCESS) 621 + result = VDO_ASSERT(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset, 622 + "decoded slab depot component size must match header size"); 623 + if (result != VDO_SUCCESS) 624 624 return result; 625 625 626 626 *state = (struct slab_depot_state_2_0) { ··· 970 970 struct partition *partition; 971 971 int result = vdo_get_partition(layout, id, &partition); 972 972 973 - ASSERT_LOG_ONLY(result == VDO_SUCCESS, "layout has expected partition: %u", id); 973 + VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "layout has expected partition: %u", id); 974 974 975 975 return partition; 976 976 } ··· 982 982 struct header header = VDO_LAYOUT_HEADER_3_0; 983 983 984 984 BUILD_BUG_ON(sizeof(enum partition_id) != sizeof(u8)); 985 - ASSERT_LOG_ONLY(layout->num_partitions <= U8_MAX, 986 - "layout partition count must fit in a byte"); 985 + VDO_ASSERT_LOG_ONLY(layout->num_partitions <= U8_MAX, 986 + "layout partition count must fit in a byte"); 987 987 988 988 vdo_encode_header(buffer, offset, &header); 989 989 ··· 992 992 encode_u64_le(buffer, offset, layout->last_free); 993 993 buffer[(*offset)++] = layout->num_partitions; 994 994 995 - ASSERT_LOG_ONLY(sizeof(struct layout_3_0) == *offset - initial_offset, 996 - "encoded size of a layout header must match structure"); 995 + VDO_ASSERT_LOG_ONLY(sizeof(struct layout_3_0) == *offset - initial_offset, 996 + "encoded size of a layout header must match structure"); 997 997 998 998 for (partition = layout->head; partition != NULL; partition = partition->next) { 999 999 buffer[(*offset)++] = partition->id; ··· 1003 1003 encode_u64_le(buffer, offset, partition->count); 1004 1004 } 1005 1005 1006 - ASSERT_LOG_ONLY(header.size == 
*offset - initial_offset, 1007 - "encoded size of a layout must match header size"); 1006 + VDO_ASSERT_LOG_ONLY(header.size == *offset - initial_offset, 1007 + "encoded size of a layout must match header size"); 1008 1008 } 1009 1009 1010 1010 static int decode_layout(u8 *buffer, size_t *offset, physical_block_number_t start, ··· 1035 1035 .partition_count = partition_count, 1036 1036 }; 1037 1037 1038 - result = ASSERT(sizeof(struct layout_3_0) == *offset - initial_offset, 1039 - "decoded size of a layout header must match structure"); 1038 + result = VDO_ASSERT(sizeof(struct layout_3_0) == *offset - initial_offset, 1039 + "decoded size of a layout header must match structure"); 1040 1040 if (result != VDO_SUCCESS) 1041 1041 return result; 1042 1042 ··· 1208 1208 struct slab_config slab_config; 1209 1209 int result; 1210 1210 1211 - result = ASSERT(config->slab_size > 0, "slab size unspecified"); 1212 - if (result != UDS_SUCCESS) 1213 - return result; 1214 - 1215 - result = ASSERT(is_power_of_2(config->slab_size), 1216 - "slab size must be a power of two"); 1217 - if (result != UDS_SUCCESS) 1218 - return result; 1219 - 1220 - result = ASSERT(config->slab_size <= (1 << MAX_VDO_SLAB_BITS), 1221 - "slab size must be less than or equal to 2^%d", 1222 - MAX_VDO_SLAB_BITS); 1211 + result = VDO_ASSERT(config->slab_size > 0, "slab size unspecified"); 1223 1212 if (result != VDO_SUCCESS) 1224 1213 return result; 1225 1214 1226 - result = ASSERT(config->slab_journal_blocks >= MINIMUM_VDO_SLAB_JOURNAL_BLOCKS, 1227 - "slab journal size meets minimum size"); 1228 - if (result != UDS_SUCCESS) 1215 + result = VDO_ASSERT(is_power_of_2(config->slab_size), 1216 + "slab size must be a power of two"); 1217 + if (result != VDO_SUCCESS) 1229 1218 return result; 1230 1219 1231 - result = ASSERT(config->slab_journal_blocks <= config->slab_size, 1232 - "slab journal size is within expected bound"); 1233 - if (result != UDS_SUCCESS) 1220 + result = VDO_ASSERT(config->slab_size <= (1 << 
MAX_VDO_SLAB_BITS), 1221 + "slab size must be less than or equal to 2^%d", 1222 + MAX_VDO_SLAB_BITS); 1223 + if (result != VDO_SUCCESS) 1224 + return result; 1225 + 1226 + result = VDO_ASSERT(config->slab_journal_blocks >= MINIMUM_VDO_SLAB_JOURNAL_BLOCKS, 1227 + "slab journal size meets minimum size"); 1228 + if (result != VDO_SUCCESS) 1229 + return result; 1230 + 1231 + result = VDO_ASSERT(config->slab_journal_blocks <= config->slab_size, 1232 + "slab journal size is within expected bound"); 1233 + if (result != VDO_SUCCESS) 1234 1234 return result; 1235 1235 1236 1236 result = vdo_configure_slab(config->slab_size, config->slab_journal_blocks, ··· 1238 1238 if (result != VDO_SUCCESS) 1239 1239 return result; 1240 1240 1241 - result = ASSERT((slab_config.data_blocks >= 1), 1242 - "slab must be able to hold at least one block"); 1243 - if (result != UDS_SUCCESS) 1241 + result = VDO_ASSERT((slab_config.data_blocks >= 1), 1242 + "slab must be able to hold at least one block"); 1243 + if (result != VDO_SUCCESS) 1244 1244 return result; 1245 1245 1246 - result = ASSERT(config->physical_blocks > 0, "physical blocks unspecified"); 1247 - if (result != UDS_SUCCESS) 1246 + result = VDO_ASSERT(config->physical_blocks > 0, "physical blocks unspecified"); 1247 + if (result != VDO_SUCCESS) 1248 1248 return result; 1249 1249 1250 - result = ASSERT(config->physical_blocks <= MAXIMUM_VDO_PHYSICAL_BLOCKS, 1251 - "physical block count %llu exceeds maximum %llu", 1252 - (unsigned long long) config->physical_blocks, 1253 - (unsigned long long) MAXIMUM_VDO_PHYSICAL_BLOCKS); 1254 - if (result != UDS_SUCCESS) 1250 + result = VDO_ASSERT(config->physical_blocks <= MAXIMUM_VDO_PHYSICAL_BLOCKS, 1251 + "physical block count %llu exceeds maximum %llu", 1252 + (unsigned long long) config->physical_blocks, 1253 + (unsigned long long) MAXIMUM_VDO_PHYSICAL_BLOCKS); 1254 + if (result != VDO_SUCCESS) 1255 1255 return VDO_OUT_OF_RANGE; 1256 1256 1257 1257 if (physical_block_count != 
config->physical_blocks) { ··· 1262 1262 } 1263 1263 1264 1264 if (logical_block_count > 0) { 1265 - result = ASSERT((config->logical_blocks > 0), 1266 - "logical blocks unspecified"); 1267 - if (result != UDS_SUCCESS) 1265 + result = VDO_ASSERT((config->logical_blocks > 0), 1266 + "logical blocks unspecified"); 1267 + if (result != VDO_SUCCESS) 1268 1268 return result; 1269 1269 1270 1270 if (logical_block_count != config->logical_blocks) { ··· 1275 1275 } 1276 1276 } 1277 1277 1278 - result = ASSERT(config->logical_blocks <= MAXIMUM_VDO_LOGICAL_BLOCKS, 1279 - "logical blocks too large"); 1280 - if (result != UDS_SUCCESS) 1278 + result = VDO_ASSERT(config->logical_blocks <= MAXIMUM_VDO_LOGICAL_BLOCKS, 1279 + "logical blocks too large"); 1280 + if (result != VDO_SUCCESS) 1281 1281 return result; 1282 1282 1283 - result = ASSERT(config->recovery_journal_size > 0, 1284 - "recovery journal size unspecified"); 1285 - if (result != UDS_SUCCESS) 1283 + result = VDO_ASSERT(config->recovery_journal_size > 0, 1284 + "recovery journal size unspecified"); 1285 + if (result != VDO_SUCCESS) 1286 1286 return result; 1287 1287 1288 - result = ASSERT(is_power_of_2(config->recovery_journal_size), 1289 - "recovery journal size must be a power of two"); 1290 - if (result != UDS_SUCCESS) 1288 + result = VDO_ASSERT(is_power_of_2(config->recovery_journal_size), 1289 + "recovery journal size must be a power of two"); 1290 + if (result != VDO_SUCCESS) 1291 1291 return result; 1292 1292 1293 1293 return result; ··· 1341 1341 if (result != VDO_SUCCESS) 1342 1342 return result; 1343 1343 1344 - ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE, 1345 - "All decoded component data was used"); 1344 + VDO_ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE, 1345 + "All decoded component data was used"); 1346 1346 return VDO_SUCCESS; 1347 1347 } 1348 1348 ··· 1416 1416 encode_slab_depot_state_2_0(buffer, offset, states->slab_depot); 1417 
1417 encode_block_map_state_2_0(buffer, offset, states->block_map); 1418 1418 1419 - ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE, 1420 - "All super block component data was encoded"); 1419 + VDO_ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE, 1420 + "All super block component data was encoded"); 1421 1421 } 1422 1422 1423 1423 /** ··· 1440 1440 * Even though the buffer is a full block, to avoid the potential corruption from a torn 1441 1441 * write, the entire encoding must fit in the first sector. 1442 1442 */ 1443 - ASSERT_LOG_ONLY(offset <= VDO_SECTOR_SIZE, 1444 - "entire superblock must fit in one sector"); 1443 + VDO_ASSERT_LOG_ONLY(offset <= VDO_SECTOR_SIZE, 1444 + "entire superblock must fit in one sector"); 1445 1445 } 1446 1446 1447 1447 /** ··· 1476 1476 checksum = vdo_crc32(buffer, offset); 1477 1477 decode_u32_le(buffer, &offset, &saved_checksum); 1478 1478 1479 - result = ASSERT(offset == VDO_SUPER_BLOCK_FIXED_SIZE + VDO_COMPONENT_DATA_SIZE, 1480 - "must have decoded entire superblock payload"); 1479 + result = VDO_ASSERT(offset == VDO_SUPER_BLOCK_FIXED_SIZE + VDO_COMPONENT_DATA_SIZE, 1480 + "must have decoded entire superblock payload"); 1481 1481 if (result != VDO_SUCCESS) 1482 1482 return result; 1483 1483
+3 -2
drivers/md/dm-vdo/errors.c
··· 281 281 .infos = infos, 282 282 }; 283 283 284 - result = ASSERT(first_error < next_free_error, "well-defined error block range"); 285 - if (result != UDS_SUCCESS) 284 + result = VDO_ASSERT(first_error < next_free_error, 285 + "well-defined error block range"); 286 + if (result != VDO_SUCCESS) 286 287 return result; 287 288 288 289 if (registered_errors.count == registered_errors.allocated) {
+11 -11
drivers/md/dm-vdo/flush.c
··· 59 59 */ 60 60 static inline void assert_on_flusher_thread(struct flusher *flusher, const char *caller) 61 61 { 62 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id), 63 - "%s() called from flusher thread", caller); 62 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id), 63 + "%s() called from flusher thread", caller); 64 64 } 65 65 66 66 /** ··· 272 272 int result; 273 273 274 274 assert_on_flusher_thread(flusher, __func__); 275 - result = ASSERT(vdo_is_state_normal(&flusher->state), 276 - "flusher is in normal operation"); 275 + result = VDO_ASSERT(vdo_is_state_normal(&flusher->state), 276 + "flusher is in normal operation"); 277 277 if (result != VDO_SUCCESS) { 278 278 vdo_enter_read_only_mode(flusher->vdo, result); 279 279 vdo_complete_flush(flush); ··· 330 330 if (flush->flush_generation >= oldest_active_generation) 331 331 return; 332 332 333 - ASSERT_LOG_ONLY((flush->flush_generation == 334 - flusher->first_unacknowledged_generation), 335 - "acknowledged next expected flush, %llu, was: %llu", 336 - (unsigned long long) flusher->first_unacknowledged_generation, 337 - (unsigned long long) flush->flush_generation); 333 + VDO_ASSERT_LOG_ONLY((flush->flush_generation == 334 + flusher->first_unacknowledged_generation), 335 + "acknowledged next expected flush, %llu, was: %llu", 336 + (unsigned long long) flusher->first_unacknowledged_generation, 337 + (unsigned long long) flush->flush_generation); 338 338 vdo_waitq_dequeue_waiter(&flusher->pending_flushes); 339 339 vdo_complete_flush(flush); 340 340 flusher->first_unacknowledged_generation++; ··· 400 400 struct flusher *flusher = vdo->flusher; 401 401 const struct admin_state_code *code = vdo_get_admin_state_code(&flusher->state); 402 402 403 - ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s", 404 - code->name); 403 + VDO_ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s", 404 + code->name); 405 405 406 406 
spin_lock(&flusher->lock); 407 407
+11 -11
drivers/md/dm-vdo/funnel-workqueue.c
··· 110 110 static void enqueue_work_queue_completion(struct simple_work_queue *queue, 111 111 struct vdo_completion *completion) 112 112 { 113 - ASSERT_LOG_ONLY(completion->my_queue == NULL, 114 - "completion %px (fn %px) to enqueue (%px) is not already queued (%px)", 115 - completion, completion->callback, queue, completion->my_queue); 113 + VDO_ASSERT_LOG_ONLY(completion->my_queue == NULL, 114 + "completion %px (fn %px) to enqueue (%px) is not already queued (%px)", 115 + completion, completion->callback, queue, completion->my_queue); 116 116 if (completion->priority == VDO_WORK_Q_DEFAULT_PRIORITY) 117 117 completion->priority = queue->common.type->default_priority; 118 118 119 - if (ASSERT(completion->priority <= queue->common.type->max_priority, 120 - "priority is in range for queue") != VDO_SUCCESS) 119 + if (VDO_ASSERT(completion->priority <= queue->common.type->max_priority, 120 + "priority is in range for queue") != VDO_SUCCESS) 121 121 completion->priority = 0; 122 122 123 123 completion->my_queue = &queue->common; ··· 222 222 static void process_completion(struct simple_work_queue *queue, 223 223 struct vdo_completion *completion) 224 224 { 225 - if (ASSERT(completion->my_queue == &queue->common, 226 - "completion %px from queue %px marked as being in this queue (%px)", 227 - completion, queue, completion->my_queue) == UDS_SUCCESS) 225 + if (VDO_ASSERT(completion->my_queue == &queue->common, 226 + "completion %px from queue %px marked as being in this queue (%px)", 227 + completion, queue, completion->my_queue) == VDO_SUCCESS) 228 228 completion->my_queue = NULL; 229 229 230 230 vdo_run_completion(completion); ··· 319 319 struct task_struct *thread = NULL; 320 320 int result; 321 321 322 - ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY), 323 - "queue priority count %u within limit %u", type->max_priority, 324 - VDO_WORK_Q_MAX_PRIORITY); 322 + VDO_ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY), 323 + "queue priority count 
%u within limit %u", type->max_priority, 324 + VDO_WORK_Q_MAX_PRIORITY); 325 325 326 326 result = vdo_allocate(1, struct simple_work_queue, "simple work queue", &queue); 327 327 if (result != VDO_SUCCESS)
+4 -4
drivers/md/dm-vdo/io-submitter.c
··· 94 94 */ 95 95 static void assert_in_bio_zone(struct vio *vio) 96 96 { 97 - ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context"); 97 + VDO_ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context"); 98 98 assert_vio_in_bio_zone(vio); 99 99 } 100 100 ··· 300 300 mutex_unlock(&bio_queue_data->lock); 301 301 302 302 /* We don't care about failure of int_map_put in this case. */ 303 - ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds"); 303 + VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds"); 304 304 return merged; 305 305 } 306 306 ··· 345 345 const struct admin_state_code *code = vdo_get_admin_state(completion->vdo); 346 346 347 347 348 - ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name); 349 - ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio"); 348 + VDO_ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name); 349 + VDO_ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio"); 350 350 351 351 vdo_reset_completion(completion); 352 352 completion->error_handler = error_handler;
+11 -11
drivers/md/dm-vdo/logical-zone.c
··· 142 142 143 143 static inline void assert_on_zone_thread(struct logical_zone *zone, const char *what) 144 144 { 145 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id), 146 - "%s() called on correct thread", what); 145 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id), 146 + "%s() called on correct thread", what); 147 147 } 148 148 149 149 /** ··· 247 247 sequence_number_t expected_generation) 248 248 { 249 249 assert_on_zone_thread(zone, __func__); 250 - ASSERT_LOG_ONLY((zone->flush_generation == expected_generation), 251 - "logical zone %u flush generation %llu should be %llu before increment", 252 - zone->zone_number, (unsigned long long) zone->flush_generation, 253 - (unsigned long long) expected_generation); 250 + VDO_ASSERT_LOG_ONLY((zone->flush_generation == expected_generation), 251 + "logical zone %u flush generation %llu should be %llu before increment", 252 + zone->zone_number, (unsigned long long) zone->flush_generation, 253 + (unsigned long long) expected_generation); 254 254 255 255 zone->flush_generation++; 256 256 zone->ios_in_flush_generation = 0; ··· 267 267 struct logical_zone *zone = data_vio->logical.zone; 268 268 269 269 assert_on_zone_thread(zone, __func__); 270 - ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal"); 270 + VDO_ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal"); 271 271 272 272 data_vio->flush_generation = zone->flush_generation; 273 273 list_add_tail(&data_vio->write_entry, &zone->write_vios); ··· 332 332 return; 333 333 334 334 list_del_init(&data_vio->write_entry); 335 - ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation), 336 - "data_vio releasing lock on generation %llu is not older than oldest active generation %llu", 337 - (unsigned long long) data_vio->flush_generation, 338 - (unsigned long long) zone->oldest_active_generation); 335 + VDO_ASSERT_LOG_ONLY((zone->oldest_active_generation <= 
data_vio->flush_generation), 336 + "data_vio releasing lock on generation %llu is not older than oldest active generation %llu", 337 + (unsigned long long) data_vio->flush_generation, 338 + (unsigned long long) zone->oldest_active_generation); 339 339 340 340 if (!update_oldest_active_generation(zone) || zone->notifying) 341 341 return;
+6 -6
drivers/md/dm-vdo/memory-alloc.c
··· 385 385 386 386 void vdo_memory_exit(void) 387 387 { 388 - ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0, 389 - "kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel", 390 - memory_stats.kmalloc_bytes, memory_stats.kmalloc_blocks); 391 - ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0, 392 - "vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel", 393 - memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks); 388 + VDO_ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0, 389 + "kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel", 390 + memory_stats.kmalloc_bytes, memory_stats.kmalloc_blocks); 391 + VDO_ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0, 392 + "vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel", 393 + memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks); 394 394 uds_log_debug("peak usage %zd bytes", memory_stats.peak_bytes); 395 395 } 396 396
+6 -6
drivers/md/dm-vdo/packer.c
··· 86 86 */ 87 87 static inline void assert_on_packer_thread(struct packer *packer, const char *caller) 88 88 { 89 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id), 90 - "%s() called from packer thread", caller); 89 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id), 90 + "%s() called from packer thread", caller); 91 91 } 92 92 93 93 /** ··· 569 569 570 570 assert_on_packer_thread(packer, __func__); 571 571 572 - result = ASSERT((status.stage == DATA_VIO_COMPRESSING), 573 - "attempt to pack data_vio not ready for packing, stage: %u", 574 - status.stage); 572 + result = VDO_ASSERT((status.stage == DATA_VIO_COMPRESSING), 573 + "attempt to pack data_vio not ready for packing, stage: %u", 574 + status.stage); 575 575 if (result != VDO_SUCCESS) 576 576 return; 577 577 ··· 671 671 672 672 lock_holder = vdo_forget(data_vio->compression.lock_holder); 673 673 bin = lock_holder->compression.bin; 674 - ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin"); 674 + VDO_ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin"); 675 675 676 676 slot = lock_holder->compression.slot; 677 677 bin->slots_used--;
+9 -6
drivers/md/dm-vdo/permassert.h
··· 13 13 /* Utilities for asserting that certain conditions are met */ 14 14 15 15 #define STRINGIFY(X) #X 16 - #define STRINGIFY_VALUE(X) STRINGIFY(X) 17 16 18 17 /* 19 18 * A hack to apply the "warn if unused" attribute to an integral expression. ··· 22 23 * expression. With optimization enabled, this function contributes no additional instructions, but 23 24 * the warn_unused_result attribute still applies to the code calling it. 24 25 */ 25 - static inline int __must_check uds_must_use(int value) 26 + static inline int __must_check vdo_must_use(int value) 26 27 { 27 28 return value; 28 29 } 29 30 30 31 /* Assert that an expression is true and return an error if it is not. */ 31 - #define ASSERT(expr, ...) uds_must_use(__UDS_ASSERT(expr, __VA_ARGS__)) 32 + #define VDO_ASSERT(expr, ...) vdo_must_use(__VDO_ASSERT(expr, __VA_ARGS__)) 32 33 33 34 /* Log a message if the expression is not true. */ 34 - #define ASSERT_LOG_ONLY(expr, ...) __UDS_ASSERT(expr, __VA_ARGS__) 35 + #define VDO_ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__) 35 36 36 - #define __UDS_ASSERT(expr, ...) \ 37 - (likely(expr) ? UDS_SUCCESS \ 37 + /* For use by UDS */ 38 + #define ASSERT(expr, ...) VDO_ASSERT(expr, __VA_ARGS__) 39 + #define ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__) 40 + 41 + #define __VDO_ASSERT(expr, ...) \ 42 + (likely(expr) ? VDO_SUCCESS \ 38 43 : uds_assertion_failed(STRINGIFY(expr), __FILE__, __LINE__, __VA_ARGS__)) 39 44 40 45 /* Log an assertion failure message. */
+24 -24
drivers/md/dm-vdo/physical-zone.c
··· 80 80 */ 81 81 void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write) 82 82 { 83 - ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock), 84 - "PBN lock must not already have been downgraded"); 85 - ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK), 86 - "must not downgrade block map write locks"); 87 - ASSERT_LOG_ONLY(lock->holder_count == 1, 88 - "PBN write lock should have one holder but has %u", 89 - lock->holder_count); 83 + VDO_ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock), 84 + "PBN lock must not already have been downgraded"); 85 + VDO_ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK), 86 + "must not downgrade block map write locks"); 87 + VDO_ASSERT_LOG_ONLY(lock->holder_count == 1, 88 + "PBN write lock should have one holder but has %u", 89 + lock->holder_count); 90 90 /* 91 91 * data_vio write locks are downgraded in place--the writer retains the hold on the lock. 92 92 * If this was a compressed write, the holder has not yet journaled its own inc ref, ··· 128 128 */ 129 129 void vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock) 130 130 { 131 - ASSERT_LOG_ONLY(!lock->has_provisional_reference, 132 - "lock does not have a provisional reference"); 131 + VDO_ASSERT_LOG_ONLY(!lock->has_provisional_reference, 132 + "lock does not have a provisional reference"); 133 133 lock->has_provisional_reference = true; 134 134 } 135 135 ··· 221 221 INIT_LIST_HEAD(&idle->entry); 222 222 list_add_tail(&idle->entry, &pool->idle_list); 223 223 224 - ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed"); 224 + VDO_ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed"); 225 225 pool->borrowed -= 1; 226 226 } 227 227 ··· 267 267 if (pool == NULL) 268 268 return; 269 269 270 - ASSERT_LOG_ONLY(pool->borrowed == 0, 271 - "All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan", 272 - pool->borrowed); 270 + VDO_ASSERT_LOG_ONLY(pool->borrowed == 
0, 271 + "All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan", 272 + pool->borrowed); 273 273 vdo_free(pool); 274 274 } 275 275 ··· 298 298 "no free PBN locks left to borrow"); 299 299 pool->borrowed += 1; 300 300 301 - result = ASSERT(!list_empty(&pool->idle_list), 302 - "idle list should not be empty if pool not at capacity"); 301 + result = VDO_ASSERT(!list_empty(&pool->idle_list), 302 + "idle list should not be empty if pool not at capacity"); 303 303 if (result != VDO_SUCCESS) 304 304 return result; 305 305 ··· 447 447 448 448 result = borrow_pbn_lock_from_pool(zone->lock_pool, type, &new_lock); 449 449 if (result != VDO_SUCCESS) { 450 - ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock"); 450 + VDO_ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock"); 451 451 return result; 452 452 } 453 453 ··· 461 461 if (lock != NULL) { 462 462 /* The lock is already held, so we don't need the borrowed one. */ 463 463 return_pbn_lock_to_pool(zone->lock_pool, vdo_forget(new_lock)); 464 - result = ASSERT(lock->holder_count > 0, "physical block %llu lock held", 465 - (unsigned long long) pbn); 464 + result = VDO_ASSERT(lock->holder_count > 0, "physical block %llu lock held", 465 + (unsigned long long) pbn); 466 466 if (result != VDO_SUCCESS) 467 467 return result; 468 468 *lock_ptr = lock; ··· 485 485 int result; 486 486 struct pbn_lock *lock; 487 487 488 - ASSERT_LOG_ONLY(allocation->lock == NULL, 489 - "must not allocate a block while already holding a lock on one"); 488 + VDO_ASSERT_LOG_ONLY(allocation->lock == NULL, 489 + "must not allocate a block while already holding a lock on one"); 490 490 491 491 result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn); 492 492 if (result != VDO_SUCCESS) ··· 617 617 if (lock == NULL) 618 618 return; 619 619 620 - ASSERT_LOG_ONLY(lock->holder_count > 0, 621 - "should not be releasing a lock that is not held"); 620 + 
VDO_ASSERT_LOG_ONLY(lock->holder_count > 0, 621 + "should not be releasing a lock that is not held"); 622 622 623 623 lock->holder_count -= 1; 624 624 if (lock->holder_count > 0) { ··· 627 627 } 628 628 629 629 holder = vdo_int_map_remove(zone->pbn_operations, locked_pbn); 630 - ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu", 631 - (unsigned long long) locked_pbn); 630 + VDO_ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu", 631 + (unsigned long long) locked_pbn); 632 632 633 633 release_pbn_lock_provisional_reference(lock, locked_pbn, zone->allocator); 634 634 return_pbn_lock_to_pool(zone->lock_pool, lock);
+2 -2
drivers/md/dm-vdo/priority-table.c
··· 127 127 void vdo_priority_table_enqueue(struct priority_table *table, unsigned int priority, 128 128 struct list_head *entry) 129 129 { 130 - ASSERT_LOG_ONLY((priority <= table->max_priority), 131 - "entry priority must be valid for the table"); 130 + VDO_ASSERT_LOG_ONLY((priority <= table->max_priority), 131 + "entry priority must be valid for the table"); 132 132 133 133 /* Append the entry to the queue in the specified bucket. */ 134 134 list_move_tail(entry, &table->buckets[priority].queue);
+30 -30
drivers/md/dm-vdo/recovery-journal.c
··· 119 119 120 120 /* Pairs with barrier in vdo_release_journal_entry_lock() */ 121 121 smp_rmb(); 122 - ASSERT_LOG_ONLY((decrements <= journal_value), 123 - "journal zone lock counter must not underflow"); 122 + VDO_ASSERT_LOG_ONLY((decrements <= journal_value), 123 + "journal zone lock counter must not underflow"); 124 124 return (journal_value != decrements); 125 125 } 126 126 ··· 150 150 lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number); 151 151 current_value = get_counter(journal, lock_number, zone_type, zone_id); 152 152 153 - ASSERT_LOG_ONLY((*current_value >= 1), 154 - "decrement of lock counter must not underflow"); 153 + VDO_ASSERT_LOG_ONLY((*current_value >= 1), 154 + "decrement of lock counter must not underflow"); 155 155 *current_value -= 1; 156 156 157 157 if (zone_type == VDO_ZONE_TYPE_JOURNAL) { ··· 254 254 static void assert_on_journal_thread(struct recovery_journal *journal, 255 255 const char *function_name) 256 256 { 257 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id), 258 - "%s() called on journal thread", function_name); 257 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id), 258 + "%s() called on journal thread", function_name); 259 259 } 260 260 261 261 /** ··· 353 353 354 354 if (vdo_is_state_saving(&journal->state)) { 355 355 if (journal->active_block != NULL) { 356 - ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) || 357 - !is_block_dirty(journal->active_block)), 358 - "journal being saved has clean active block"); 356 + VDO_ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) || 357 + !is_block_dirty(journal->active_block)), 358 + "journal being saved has clean active block"); 359 359 recycle_journal_block(journal->active_block); 360 360 } 361 361 362 - ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks), 363 - "all blocks in a journal being saved must be inactive"); 362 + VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks), 363 + "all blocks in a journal being 
saved must be inactive"); 364 364 } 365 365 366 366 vdo_finish_draining_with_result(&journal->state, result); ··· 800 800 * requires opening before use. 801 801 */ 802 802 if (!vdo_is_state_quiescent(&journal->state)) { 803 - ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks), 804 - "journal being freed has no active tail blocks"); 803 + VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks), 804 + "journal being freed has no active tail blocks"); 805 805 } else if (!vdo_is_state_saved(&journal->state) && 806 806 !list_empty(&journal->active_tail_blocks)) { 807 807 uds_log_warning("journal being freed has uncommitted entries"); ··· 989 989 atomic_t *decrement_counter = get_decrement_counter(journal, lock_number); 990 990 991 991 journal_value = get_counter(journal, lock_number, VDO_ZONE_TYPE_JOURNAL, 0); 992 - ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)), 993 - "count to be initialized not in use"); 992 + VDO_ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)), 993 + "count to be initialized not in use"); 994 994 *journal_value = journal->entries_per_block + 1; 995 995 atomic_set(decrement_counter, 0); 996 996 } ··· 1175 1175 int result = (is_read_only(journal) ? VDO_READ_ONLY : VDO_SUCCESS); 1176 1176 bool has_decrement; 1177 1177 1178 - ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point, 1179 - &data_vio->recovery_journal_point), 1180 - "DataVIOs released from recovery journal in order. Recovery journal point is (%llu, %u), but commit waiter point is (%llu, %u)", 1181 - (unsigned long long) journal->commit_point.sequence_number, 1182 - journal->commit_point.entry_count, 1183 - (unsigned long long) data_vio->recovery_journal_point.sequence_number, 1184 - data_vio->recovery_journal_point.entry_count); 1178 + VDO_ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point, 1179 + &data_vio->recovery_journal_point), 1180 + "DataVIOs released from recovery journal in order. 
Recovery journal point is (%llu, %u), but commit waiter point is (%llu, %u)", 1181 + (unsigned long long) journal->commit_point.sequence_number, 1182 + journal->commit_point.entry_count, 1183 + (unsigned long long) data_vio->recovery_journal_point.sequence_number, 1184 + data_vio->recovery_journal_point.entry_count); 1185 1185 1186 1186 journal->commit_point = data_vio->recovery_journal_point; 1187 1187 data_vio->last_async_operation = VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS; ··· 1281 1281 journal->last_write_acknowledged = block->sequence_number; 1282 1282 1283 1283 last_active_block = get_journal_block(&journal->active_tail_blocks); 1284 - ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number), 1285 - "completed journal write is still active"); 1284 + VDO_ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number), 1285 + "completed journal write is still active"); 1286 1286 1287 1287 notify_commit_waiters(journal); 1288 1288 ··· 1456 1456 return; 1457 1457 } 1458 1458 1459 - ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0, 1460 - "journal lock not held for new entry"); 1459 + VDO_ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0, 1460 + "journal lock not held for new entry"); 1461 1461 1462 1462 vdo_advance_journal_point(&journal->append_point, journal->entries_per_block); 1463 1463 vdo_waitq_enqueue_waiter(&journal->entry_waiters, &data_vio->waiter); ··· 1564 1564 if (sequence_number == 0) 1565 1565 return; 1566 1566 1567 - ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL), 1568 - "invalid lock count increment from journal zone"); 1567 + VDO_ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL), 1568 + "invalid lock count increment from journal zone"); 1569 1569 1570 1570 lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number); 1571 1571 current_value = get_counter(journal, lock_number, zone_type, zone_id); 1572 - ASSERT_LOG_ONLY(*current_value < U16_MAX, 1573 - "increment of lock 
counter must not overflow"); 1572 + VDO_ASSERT_LOG_ONLY(*current_value < U16_MAX, 1573 + "increment of lock counter must not overflow"); 1574 1574 1575 1575 if (*current_value == 0) { 1576 1576 /*
+6 -6
drivers/md/dm-vdo/repair.c
··· 976 976 if (needs_sort) { 977 977 struct numbered_block_mapping *just_sorted_entry = 978 978 sort_next_heap_element(repair); 979 - ASSERT_LOG_ONLY(just_sorted_entry < current_entry, 980 - "heap is returning elements in an unexpected order"); 979 + VDO_ASSERT_LOG_ONLY(just_sorted_entry < current_entry, 980 + "heap is returning elements in an unexpected order"); 981 981 } 982 982 983 983 current_entry--; ··· 1129 1129 1130 1130 repair->current_entry = &repair->entries[repair->block_map_entry_count - 1]; 1131 1131 first_sorted_entry = sort_next_heap_element(repair); 1132 - ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry, 1133 - "heap is returning elements in an unexpected order"); 1132 + VDO_ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry, 1133 + "heap is returning elements in an unexpected order"); 1134 1134 1135 1135 /* Prevent any page from being processed until all pages have been launched. */ 1136 1136 repair->launching = true; ··· 1489 1489 repair->block_map_entry_count++; 1490 1490 } 1491 1491 1492 - result = ASSERT((repair->block_map_entry_count <= repair->entry_count), 1493 - "approximate entry count is an upper bound"); 1492 + result = VDO_ASSERT((repair->block_map_entry_count <= repair->entry_count), 1493 + "approximate entry count is an upper bound"); 1494 1494 if (result != VDO_SUCCESS) 1495 1495 vdo_enter_read_only_mode(vdo, result); 1496 1496
+58 -58
drivers/md/dm-vdo/slab-depot.c
··· 149 149 struct slab_journal *dirty_journal; 150 150 struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals; 151 151 152 - ASSERT_LOG_ONLY(journal->recovery_lock == 0, "slab journal was clean"); 152 + VDO_ASSERT_LOG_ONLY(journal->recovery_lock == 0, "slab journal was clean"); 153 153 154 154 journal->recovery_lock = lock; 155 155 list_for_each_entry_reverse(dirty_journal, dirty_list, dirty_entry) { ··· 216 216 { 217 217 block_count_t hint; 218 218 219 - ASSERT_LOG_ONLY((free_blocks < (1 << 23)), "free blocks must be less than 2^23"); 219 + VDO_ASSERT_LOG_ONLY((free_blocks < (1 << 23)), "free blocks must be less than 2^23"); 220 220 221 221 if (free_blocks == 0) 222 222 return 0; ··· 532 532 return; 533 533 } 534 534 535 - ASSERT_LOG_ONLY((adjustment != 0), "adjustment must be non-zero"); 535 + VDO_ASSERT_LOG_ONLY((adjustment != 0), "adjustment must be non-zero"); 536 536 lock = get_lock(journal, sequence_number); 537 537 if (adjustment < 0) { 538 - ASSERT_LOG_ONLY((-adjustment <= lock->count), 539 - "adjustment %d of lock count %u for slab journal block %llu must not underflow", 540 - adjustment, lock->count, 541 - (unsigned long long) sequence_number); 538 + VDO_ASSERT_LOG_ONLY((-adjustment <= lock->count), 539 + "adjustment %d of lock count %u for slab journal block %llu must not underflow", 540 + adjustment, lock->count, 541 + (unsigned long long) sequence_number); 542 542 } 543 543 544 544 lock->count += adjustment; ··· 661 661 struct slab_journal *journal = &slab->journal; 662 662 sequence_number_t block; 663 663 664 - ASSERT_LOG_ONLY(journal->tail_header.entry_count == 0, 665 - "vdo_slab journal's active block empty before reopening"); 664 + VDO_ASSERT_LOG_ONLY(journal->tail_header.entry_count == 0, 665 + "vdo_slab journal's active block empty before reopening"); 666 666 journal->head = journal->tail; 667 667 initialize_journal_state(journal); 668 668 669 669 /* Ensure no locks are spuriously held on an empty journal. 
*/ 670 670 for (block = 1; block <= journal->size; block++) { 671 - ASSERT_LOG_ONLY((get_lock(journal, block)->count == 0), 672 - "Scrubbed journal's block %llu is not locked", 673 - (unsigned long long) block); 671 + VDO_ASSERT_LOG_ONLY((get_lock(journal, block)->count == 0), 672 + "Scrubbed journal's block %llu is not locked", 673 + (unsigned long long) block); 674 674 } 675 675 676 676 add_entries(journal); ··· 757 757 /* Copy the tail block into the vio. */ 758 758 memcpy(pooled->vio.data, journal->block, VDO_BLOCK_SIZE); 759 759 760 - ASSERT_LOG_ONLY(unused_entries >= 0, "vdo_slab journal block is not overfull"); 760 + VDO_ASSERT_LOG_ONLY(unused_entries >= 0, "vdo_slab journal block is not overfull"); 761 761 if (unused_entries > 0) { 762 762 /* 763 763 * Release the per-entry locks for any unused entries in the block we are about to ··· 907 907 struct packed_slab_journal_block *block = journal->block; 908 908 int result; 909 909 910 - result = ASSERT(vdo_before_journal_point(&journal->tail_header.recovery_point, 911 - &recovery_point), 912 - "recovery journal point is monotonically increasing, recovery point: %llu.%u, block recovery point: %llu.%u", 913 - (unsigned long long) recovery_point.sequence_number, 914 - recovery_point.entry_count, 915 - (unsigned long long) journal->tail_header.recovery_point.sequence_number, 916 - journal->tail_header.recovery_point.entry_count); 910 + result = VDO_ASSERT(vdo_before_journal_point(&journal->tail_header.recovery_point, 911 + &recovery_point), 912 + "recovery journal point is monotonically increasing, recovery point: %llu.%u, block recovery point: %llu.%u", 913 + (unsigned long long) recovery_point.sequence_number, 914 + recovery_point.entry_count, 915 + (unsigned long long) journal->tail_header.recovery_point.sequence_number, 916 + journal->tail_header.recovery_point.entry_count); 917 917 if (result != VDO_SUCCESS) { 918 918 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result); 919 919 return; 920 
920 } 921 921 922 922 if (operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) { 923 - result = ASSERT((journal->tail_header.entry_count < 924 - journal->full_entries_per_block), 925 - "block has room for full entries"); 923 + result = VDO_ASSERT((journal->tail_header.entry_count < 924 + journal->full_entries_per_block), 925 + "block has room for full entries"); 926 926 if (result != VDO_SUCCESS) { 927 927 vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, 928 928 result); ··· 1371 1371 */ 1372 1372 static void prioritize_slab(struct vdo_slab *slab) 1373 1373 { 1374 - ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), 1375 - "a slab must not already be on a ring when prioritizing"); 1374 + VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), 1375 + "a slab must not already be on a ring when prioritizing"); 1376 1376 slab->priority = calculate_slab_priority(slab); 1377 1377 vdo_priority_table_enqueue(slab->allocator->prioritized_slabs, 1378 1378 slab->priority, &slab->allocq_entry); ··· 1655 1655 * the last time it was clean. We must release the per-entry slab journal lock for 1656 1656 * the entry associated with the update we are now doing. 1657 1657 */ 1658 - result = ASSERT(is_valid_journal_point(slab_journal_point), 1659 - "Reference count adjustments need slab journal points."); 1658 + result = VDO_ASSERT(is_valid_journal_point(slab_journal_point), 1659 + "Reference count adjustments need slab journal points."); 1660 1660 if (result != VDO_SUCCESS) 1661 1661 return result; 1662 1662 ··· 1825 1825 * scrubbing thresholds, this should never happen. 
1826 1826 */ 1827 1827 if (lock->count > 0) { 1828 - ASSERT_LOG_ONLY((journal->head + journal->size) == journal->tail, 1829 - "New block has locks, but journal is not full"); 1828 + VDO_ASSERT_LOG_ONLY((journal->head + journal->size) == journal->tail, 1829 + "New block has locks, but journal is not full"); 1830 1830 1831 1831 /* 1832 1832 * The blocking threshold must let the journal fill up if the new 1833 1833 * block has locks; if the blocking threshold is smaller than the 1834 1834 * journal size, the new block cannot possibly have locks already. 1835 1835 */ 1836 - ASSERT_LOG_ONLY((journal->blocking_threshold >= journal->size), 1837 - "New block can have locks already iff blocking threshold is at the end of the journal"); 1836 + VDO_ASSERT_LOG_ONLY((journal->blocking_threshold >= journal->size), 1837 + "New block can have locks already iff blocking threshold is at the end of the journal"); 1838 1838 1839 1839 WRITE_ONCE(journal->events->disk_full_count, 1840 1840 journal->events->disk_full_count + 1); ··· 2361 2361 int result; 2362 2362 size_t index, bytes; 2363 2363 2364 - result = ASSERT(slab->reference_blocks == NULL, 2365 - "vdo_slab %u doesn't allocate refcounts twice", 2366 - slab->slab_number); 2364 + result = VDO_ASSERT(slab->reference_blocks == NULL, 2365 + "vdo_slab %u doesn't allocate refcounts twice", 2366 + slab->slab_number); 2367 2367 if (result != VDO_SUCCESS) 2368 2368 return result; 2369 2369 ··· 2503 2503 * 1. This is impossible, due to the scrubbing threshold, on a real system, so 2504 2504 * don't bother reading the (bogus) data off disk. 
2505 2505 */ 2506 - ASSERT_LOG_ONLY(((journal->size < 16) || 2507 - (journal->scrubbing_threshold < (journal->size - 1))), 2508 - "Scrubbing threshold protects against reads of unwritten slab journal blocks"); 2506 + VDO_ASSERT_LOG_ONLY(((journal->size < 16) || 2507 + (journal->scrubbing_threshold < (journal->size - 1))), 2508 + "Scrubbing threshold protects against reads of unwritten slab journal blocks"); 2509 2509 vdo_finish_loading_with_result(&slab->state, 2510 2510 allocate_counters_if_clean(slab)); 2511 2511 return; ··· 2519 2519 { 2520 2520 struct slab_scrubber *scrubber = &slab->allocator->scrubber; 2521 2521 2522 - ASSERT_LOG_ONLY((slab->status != VDO_SLAB_REBUILT), 2523 - "slab to be scrubbed is unrecovered"); 2522 + VDO_ASSERT_LOG_ONLY((slab->status != VDO_SLAB_REBUILT), 2523 + "slab to be scrubbed is unrecovered"); 2524 2524 2525 2525 if (slab->status != VDO_SLAB_REQUIRES_SCRUBBING) 2526 2526 return; ··· 2547 2547 block_count_t free_blocks; 2548 2548 int result; 2549 2549 2550 - ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), 2550 + VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry), 2551 2551 "a requeued slab must not already be on a ring"); 2552 2552 2553 2553 if (vdo_is_read_only(allocator->depot->vdo)) 2554 2554 return; 2555 2555 2556 2556 free_blocks = slab->free_blocks; 2557 - result = ASSERT((free_blocks <= allocator->depot->slab_config.data_blocks), 2558 - "rebuilt slab %u must have a valid free block count (has %llu, expected maximum %llu)", 2559 - slab->slab_number, (unsigned long long) free_blocks, 2560 - (unsigned long long) allocator->depot->slab_config.data_blocks); 2557 + result = VDO_ASSERT((free_blocks <= allocator->depot->slab_config.data_blocks), 2558 + "rebuilt slab %u must have a valid free block count (has %llu, expected maximum %llu)", 2559 + slab->slab_number, (unsigned long long) free_blocks, 2560 + (unsigned long long) allocator->depot->slab_config.data_blocks); 2561 2561 if (result != VDO_SUCCESS) { 2562 2562 
vdo_enter_read_only_mode(allocator->depot->vdo, result); 2563 2563 return; ··· 2880 2880 * At the end of rebuild, the reference counters should be accurate to the end of the 2881 2881 * journal we just applied. 2882 2882 */ 2883 - result = ASSERT(!vdo_before_journal_point(&last_entry_applied, 2884 - &ref_counts_point), 2885 - "Refcounts are not more accurate than the slab journal"); 2883 + result = VDO_ASSERT(!vdo_before_journal_point(&last_entry_applied, 2884 + &ref_counts_point), 2885 + "Refcounts are not more accurate than the slab journal"); 2886 2886 if (result != VDO_SUCCESS) { 2887 2887 abort_scrubbing(scrubber, result); 2888 2888 return; ··· 2993 2993 static inline void assert_on_allocator_thread(thread_id_t thread_id, 2994 2994 const char *function_name) 2995 2995 { 2996 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == thread_id), 2997 - "%s called on correct thread", function_name); 2996 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == thread_id), 2997 + "%s called on correct thread", function_name); 2998 2998 } 2999 2999 3000 3000 static void register_slab_with_allocator(struct block_allocator *allocator, ··· 3142 3142 if (!search_reference_blocks(slab, &free_index)) 3143 3143 return VDO_NO_SPACE; 3144 3144 3145 - ASSERT_LOG_ONLY((slab->counters[free_index] == EMPTY_REFERENCE_COUNT), 3146 - "free block must have ref count of zero"); 3145 + VDO_ASSERT_LOG_ONLY((slab->counters[free_index] == EMPTY_REFERENCE_COUNT), 3146 + "free block must have ref count of zero"); 3147 3147 make_provisional_reference(slab, free_index); 3148 3148 adjust_free_block_count(slab, false); 3149 3149 ··· 3850 3850 sequence_number_t recovery_lock) 3851 3851 { 3852 3852 if (recovery_lock > journal->recovery_lock) { 3853 - ASSERT_LOG_ONLY((recovery_lock < journal->recovery_lock), 3854 - "slab journal recovery lock is not older than the recovery journal head"); 3853 + VDO_ASSERT_LOG_ONLY((recovery_lock < journal->recovery_lock), 3854 + "slab journal recovery lock is not older than the recovery journal head"); 3855 3855 return false; 3856 3856 } 3857 3857 ··· 4665 4665 return VDO_INCREMENT_TOO_SMALL; 4666 4666 4667 4667 /* Generate the depot configuration for the new block count. */ 4668 - ASSERT_LOG_ONLY(depot->first_block == partition->offset, 4669 - "New slab depot partition doesn't change origin"); 4668 + VDO_ASSERT_LOG_ONLY(depot->first_block == partition->offset, 4669 + "New slab depot partition doesn't change origin"); 4670 4670 result = vdo_configure_slab_depot(partition, depot->slab_config, 4671 4671 depot->zone_count, &new_state); 4672 4672 if (result != VDO_SUCCESS) ··· 4740 4740 */ 4741 4741 void vdo_use_new_slabs(struct slab_depot *depot, struct vdo_completion *parent) 4742 4742 { 4743 - ASSERT_LOG_ONLY(depot->new_slabs != NULL, "Must have new slabs to use"); 4743 + VDO_ASSERT_LOG_ONLY(depot->new_slabs != NULL, "Must have new slabs to use"); 4744 4744 vdo_schedule_operation(depot->action_manager, 4745 4745 VDO_ADMIN_STATE_SUSPENDED_OPERATION, 4746 4746 NULL, register_new_slabs, ··· 4796 4796 return; 4797 4797 4798 4798 case VDO_DRAIN_ALLOCATOR_STEP_FINISHED: 4799 - ASSERT_LOG_ONLY(!is_vio_pool_busy(allocator->vio_pool), 4800 - "vio pool not busy"); 4799 + VDO_ASSERT_LOG_ONLY(!is_vio_pool_busy(allocator->vio_pool), 4800 + "vio pool not busy"); 4801 4801 vdo_finish_draining_with_result(&allocator->state, completion->result); 4802 4802 return; 4803 4803
+2 -2
drivers/md/dm-vdo/thread-registry.c
··· 44 44 list_add_tail_rcu(&new_thread->links, &registry->links); 45 45 spin_unlock(&registry->lock); 46 46 47 - ASSERT_LOG_ONLY(!found_it, "new thread not already in registry"); 47 + VDO_ASSERT_LOG_ONLY(!found_it, "new thread not already in registry"); 48 48 if (found_it) { 49 49 /* Ensure no RCU iterators see it before re-initializing. */ 50 50 synchronize_rcu(); ··· 67 67 } 68 68 spin_unlock(&registry->lock); 69 69 70 - ASSERT_LOG_ONLY(found_it, "thread found in registry"); 70 + VDO_ASSERT_LOG_ONLY(found_it, "thread found in registry"); 71 71 if (found_it) { 72 72 /* Ensure no RCU iterators see it before re-initializing. */ 73 73 synchronize_rcu();
+16 -16
drivers/md/dm-vdo/vdo.c
··· 425 425 type = &default_queue_type; 426 426 427 427 if (thread->queue != NULL) { 428 - return ASSERT(vdo_work_queue_type_is(thread->queue, type), 429 - "already constructed vdo thread %u is of the correct type", 430 - thread_id); 428 + return VDO_ASSERT(vdo_work_queue_type_is(thread->queue, type), 429 + "already constructed vdo thread %u is of the correct type", 430 + thread_id); 431 431 } 432 432 433 433 thread->vdo = vdo; ··· 448 448 int result; 449 449 450 450 write_lock(&registry.lock); 451 - result = ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL, 452 - "VDO not already registered"); 451 + result = VDO_ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL, 452 + "VDO not already registered"); 453 453 if (result == VDO_SUCCESS) { 454 454 INIT_LIST_HEAD(&vdo->registration); 455 455 list_add_tail(&vdo->registration, &registry.links); ··· 1050 1050 struct read_only_listener *read_only_listener; 1051 1051 int result; 1052 1052 1053 - result = ASSERT(thread_id != vdo->thread_config.dedupe_thread, 1054 - "read only listener not registered on dedupe thread"); 1053 + result = VDO_ASSERT(thread_id != vdo->thread_config.dedupe_thread, 1054 + "read only listener not registered on dedupe thread"); 1055 1055 if (result != VDO_SUCCESS) 1056 1056 return result; 1057 1057 ··· 1704 1704 */ 1705 1705 void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name) 1706 1706 { 1707 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread), 1708 - "%s called on admin thread", name); 1707 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread), 1708 + "%s called on admin thread", name); 1709 1709 } 1710 1710 1711 1711 /** ··· 1718 1718 void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone, 1719 1719 const char *name) 1720 1720 { 1721 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == 1722 - vdo->thread_config.logical_threads[logical_zone]), 1723 - "%s called on logical thread", name); 1721 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == 1722 + vdo->thread_config.logical_threads[logical_zone]), 1723 + "%s called on logical thread", name); 1724 1724 } 1725 1725 1726 1726 /** ··· 1733 1733 void vdo_assert_on_physical_zone_thread(const struct vdo *vdo, 1734 1734 zone_count_t physical_zone, const char *name) 1735 1735 { 1736 - ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == 1737 - vdo->thread_config.physical_threads[physical_zone]), 1738 - "%s called on physical thread", name); 1736 + VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == 1737 + vdo->thread_config.physical_threads[physical_zone]), 1738 + "%s called on physical thread", name); 1739 1739 } 1740 1740 1741 1741 /** ··· 1773 1773 1774 1774 /* With the PBN already checked, we should always succeed in finding a slab. */ 1775 1775 slab = vdo_get_slab(vdo->depot, pbn); 1776 - result = ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs"); 1776 + result = VDO_ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs"); 1777 1777 if (result != VDO_SUCCESS) 1778 1778 return result; 1779 1779
+20 -20
drivers/md/dm-vdo/vio.c
··· 82 82 struct bio *bio; 83 83 int result; 84 84 85 - result = ASSERT(block_count <= MAX_BLOCKS_PER_VIO, 86 - "block count %u does not exceed maximum %u", block_count, 87 - MAX_BLOCKS_PER_VIO); 85 + result = VDO_ASSERT(block_count <= MAX_BLOCKS_PER_VIO, 86 + "block count %u does not exceed maximum %u", block_count, 87 + MAX_BLOCKS_PER_VIO); 88 88 if (result != VDO_SUCCESS) 89 89 return result; 90 90 91 - result = ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)), 92 - "%d is a metadata type", vio_type); 91 + result = VDO_ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)), 92 + "%d is a metadata type", vio_type); 93 93 if (result != VDO_SUCCESS) 94 94 return result; 95 95 ··· 363 363 return; 364 364 365 365 /* Remove all available vios from the object pool. */ 366 - ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting), 367 - "VIO pool must not have any waiters when being freed"); 368 - ASSERT_LOG_ONLY((pool->busy_count == 0), 369 - "VIO pool must not have %zu busy entries when being freed", 370 - pool->busy_count); 371 - ASSERT_LOG_ONLY(list_empty(&pool->busy), 372 - "VIO pool must not have busy entries when being freed"); 366 + VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting), 367 + "VIO pool must not have any waiters when being freed"); 368 + VDO_ASSERT_LOG_ONLY((pool->busy_count == 0), 369 + "VIO pool must not have %zu busy entries when being freed", 370 + pool->busy_count); 371 + VDO_ASSERT_LOG_ONLY(list_empty(&pool->busy), 372 + "VIO pool must not have busy entries when being freed"); 373 373 374 374 list_for_each_entry_safe(pooled, tmp, &pool->available, pool_entry) { 375 375 list_del(&pooled->pool_entry); ··· 377 377 pool->size--; 378 378 } 379 379 380 - ASSERT_LOG_ONLY(pool->size == 0, 381 - "VIO pool must not have missing entries when being freed"); 380 + VDO_ASSERT_LOG_ONLY(pool->size == 0, 381 + "VIO pool must not have missing entries when being freed"); 382 382 383 383 
vdo_free(vdo_forget(pool->buffer)); 384 384 vdo_free(pool); ··· 403 403 { 404 404 struct pooled_vio *pooled; 405 405 406 - ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()), 407 - "acquire from active vio_pool called from correct thread"); 406 + VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()), 407 + "acquire from active vio_pool called from correct thread"); 408 408 409 409 if (list_empty(&pool->available)) { 410 410 vdo_waitq_enqueue_waiter(&pool->waiting, waiter); ··· 424 424 */ 425 425 void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio) 426 426 { 427 - ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()), 428 - "vio pool entry returned on same thread as it was acquired"); 427 + VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()), 428 + "vio pool entry returned on same thread as it was acquired"); 429 429 430 430 vio->vio.completion.error_handler = NULL; 431 431 vio->vio.completion.parent = NULL; ··· 465 465 * shouldn't exist. 466 466 */ 467 467 default: 468 - ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush", 469 - bio_op(bio)); 468 + VDO_ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush", 469 + bio_op(bio)); 470 470 } 471 471 472 472 if ((bio->bi_opf & REQ_PREFLUSH) != 0)
+4 -4
drivers/md/dm-vdo/vio.h
··· 67 67 thread_id_t expected = get_vio_bio_zone_thread_id(vio); 68 68 thread_id_t thread_id = vdo_get_callback_thread_id(); 69 69 70 - ASSERT_LOG_ONLY((expected == thread_id), 71 - "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u", 72 - (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id, 73 - expected); 70 + VDO_ASSERT_LOG_ONLY((expected == thread_id), 71 + "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u", 72 + (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id, 73 + expected); 74 74 } 75 75 76 76 int vdo_create_bio(struct bio **bio_ptr);