Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dm cache: prefix all DMERR and DMINFO messages with cache device name

Having the DM device name associated with the ERR or INFO message is
very helpful.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>

+64 -38
drivers/md/dm-cache-target.c
··· 968 968 return cache->features.mode; 969 969 } 970 970 971 + static const char *cache_device_name(struct cache *cache) 972 + { 973 + return dm_device_name(dm_table_get_md(cache->ti->table)); 974 + } 975 + 971 976 static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode) 972 977 { 973 978 const char *descs[] = { ··· 982 977 }; 983 978 984 979 dm_table_event(cache->ti->table); 985 - DMINFO("switching cache to %s mode", descs[(int)mode]); 980 + DMINFO("%s: switching cache to %s mode", 981 + cache_device_name(cache), descs[(int)mode]); 986 982 } 987 983 988 984 static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode) ··· 992 986 enum cache_metadata_mode old_mode = get_cache_mode(cache); 993 987 994 988 if (new_mode == CM_WRITE && needs_check) { 995 - DMERR("unable to switch cache to write mode until repaired."); 989 + DMERR("%s: unable to switch cache to write mode until repaired.", 990 + cache_device_name(cache)); 996 991 if (old_mode != new_mode) 997 992 new_mode = old_mode; 998 993 else ··· 1023 1016 1024 1017 static void abort_transaction(struct cache *cache) 1025 1018 { 1019 + const char *dev_name = cache_device_name(cache); 1020 + 1026 1021 if (get_cache_mode(cache) >= CM_READ_ONLY) 1027 1022 return; 1028 1023 1029 1024 if (dm_cache_metadata_set_needs_check(cache->cmd)) { 1030 - DMERR("failed to set 'needs_check' flag in metadata"); 1025 + DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); 1031 1026 set_cache_mode(cache, CM_FAIL); 1032 1027 } 1033 1028 1034 - DMERR_LIMIT("aborting current metadata transaction"); 1029 + DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); 1035 1030 if (dm_cache_metadata_abort(cache->cmd)) { 1036 - DMERR("failed to abort metadata transaction"); 1031 + DMERR("%s: failed to abort metadata transaction", dev_name); 1037 1032 set_cache_mode(cache, CM_FAIL); 1038 1033 } 1039 1034 } 1040 1035 1041 1036 static void metadata_operation_failed(struct cache 
*cache, const char *op, int r) 1042 1037 { 1043 - DMERR_LIMIT("metadata operation '%s' failed: error = %d", op, r); 1038 + DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d", 1039 + cache_device_name(cache), op, r); 1044 1040 abort_transaction(cache); 1045 1041 set_cache_mode(cache, CM_READ_ONLY); 1046 1042 } ··· 1130 1120 static void migration_failure(struct dm_cache_migration *mg) 1131 1121 { 1132 1122 struct cache *cache = mg->cache; 1123 + const char *dev_name = cache_device_name(cache); 1133 1124 1134 1125 if (mg->writeback) { 1135 - DMWARN_LIMIT("writeback failed; couldn't copy block"); 1126 + DMERR_LIMIT("%s: writeback failed; couldn't copy block", dev_name); 1136 1127 set_dirty(cache, mg->old_oblock, mg->cblock); 1137 1128 cell_defer(cache, mg->old_ocell, false); 1138 1129 1139 1130 } else if (mg->demote) { 1140 - DMWARN_LIMIT("demotion failed; couldn't copy block"); 1131 + DMERR_LIMIT("%s: demotion failed; couldn't copy block", dev_name); 1141 1132 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); 1142 1133 1143 1134 cell_defer(cache, mg->old_ocell, mg->promote ? 
false : true); 1144 1135 if (mg->promote) 1145 1136 cell_defer(cache, mg->new_ocell, true); 1146 1137 } else { 1147 - DMWARN_LIMIT("promotion failed; couldn't copy block"); 1138 + DMERR_LIMIT("%s: promotion failed; couldn't copy block", dev_name); 1148 1139 policy_remove_mapping(cache->policy, mg->new_oblock); 1149 1140 cell_defer(cache, mg->new_ocell, true); 1150 1141 } ··· 1168 1157 } else if (mg->demote) { 1169 1158 r = dm_cache_remove_mapping(cache->cmd, mg->cblock); 1170 1159 if (r) { 1171 - DMWARN_LIMIT("demotion failed; couldn't update on disk metadata"); 1160 + DMERR_LIMIT("%s: demotion failed; couldn't update on disk metadata", 1161 + cache_device_name(cache)); 1172 1162 metadata_operation_failed(cache, "dm_cache_remove_mapping", r); 1173 1163 policy_force_mapping(cache->policy, mg->new_oblock, 1174 1164 mg->old_oblock); ··· 1181 1169 } else { 1182 1170 r = dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock); 1183 1171 if (r) { 1184 - DMWARN_LIMIT("promotion failed; couldn't update on disk metadata"); 1172 + DMERR_LIMIT("%s: promotion failed; couldn't update on disk metadata", 1173 + cache_device_name(cache)); 1185 1174 metadata_operation_failed(cache, "dm_cache_insert_mapping", r); 1186 1175 policy_remove_mapping(cache->policy, mg->new_oblock); 1187 1176 free_io_migration(mg); ··· 1202 1189 struct cache *cache = mg->cache; 1203 1190 1204 1191 if (mg->writeback) { 1205 - DMWARN("writeback unexpectedly triggered commit"); 1192 + DMWARN_LIMIT("%s: writeback unexpectedly triggered commit", 1193 + cache_device_name(cache)); 1206 1194 return; 1207 1195 1208 1196 } else if (mg->demote) { ··· 1279 1265 } 1280 1266 1281 1267 if (r < 0) { 1282 - DMERR_LIMIT("issuing migration failed"); 1268 + DMERR_LIMIT("%s: issuing migration failed", cache_device_name(cache)); 1283 1269 migration_failure(mg); 1284 1270 } 1285 1271 } ··· 1877 1863 break; 1878 1864 1879 1865 default: 1880 - DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__, 1866 + 
DMERR_LIMIT("%s: %s: erroring bio, unknown policy op: %u", 1867 + cache_device_name(cache), __func__, 1881 1868 (unsigned) lookup_result.op); 1882 1869 bio_io_error(bio); 1883 1870 } ··· 2116 2101 r = 0; 2117 2102 2118 2103 } else { 2119 - DMERR("policy_remove_cblock failed"); 2104 + DMERR("%s: policy_remove_cblock failed", cache_device_name(cache)); 2120 2105 break; 2121 2106 } 2122 2107 ··· 3069 3054 return DM_MAPIO_SUBMITTED; 3070 3055 3071 3056 } else if (r) { 3072 - DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r); 3057 + DMERR_LIMIT("%s: Unexpected return from cache replacement policy: %d", 3058 + cache_device_name(cache), r); 3073 3059 cell_defer(cache, cell, false); 3074 3060 bio_io_error(bio); 3075 3061 return DM_MAPIO_SUBMITTED; ··· 3129 3113 break; 3130 3114 3131 3115 default: 3132 - DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__, 3116 + DMERR_LIMIT("%s: %s: erroring bio: unknown policy op: %u", 3117 + cache_device_name(cache), __func__, 3133 3118 (unsigned) lookup_result.op); 3134 3119 cell_defer(cache, cell, false); 3135 3120 bio_io_error(bio); ··· 3190 3173 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, 3191 3174 cache->discard_nr_blocks); 3192 3175 if (r) { 3193 - DMERR("could not resize on-disk discard bitset"); 3176 + DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache)); 3194 3177 metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r); 3195 3178 return r; 3196 3179 } ··· 3232 3215 3233 3216 r1 = write_dirty_bitset(cache); 3234 3217 if (r1) 3235 - DMERR("could not write dirty bitset"); 3218 + DMERR("%s: could not write dirty bitset", cache_device_name(cache)); 3236 3219 3237 3220 r2 = write_discard_bitset(cache); 3238 3221 if (r2) 3239 - DMERR("could not write discard bitset"); 3222 + DMERR("%s: could not write discard bitset", cache_device_name(cache)); 3240 3223 3241 3224 save_stats(cache); 3242 3225 3243 3226 r3 = write_hints(cache); 3244 
3227 if (r3) 3245 - DMERR("could not write hints"); 3228 + DMERR("%s: could not write hints", cache_device_name(cache)); 3246 3229 3247 3230 /* 3248 3231 * If writing the above metadata failed, we still commit, but don't ··· 3251 3234 */ 3252 3235 r4 = commit(cache, !r1 && !r2 && !r3); 3253 3236 if (r4) 3254 - DMERR("could not write cache metadata."); 3237 + DMERR("%s: could not write cache metadata", cache_device_name(cache)); 3255 3238 3256 3239 return !r1 && !r2 && !r3 && !r4; 3257 3240 } ··· 3391 3374 while (from_cblock(new_size) < from_cblock(cache->cache_size)) { 3392 3375 new_size = to_cblock(from_cblock(new_size) + 1); 3393 3376 if (is_dirty(cache, new_size)) { 3394 - DMERR("unable to shrink cache; cache block %llu is dirty", 3377 + DMERR("%s: unable to shrink cache; cache block %llu is dirty", 3378 + cache_device_name(cache), 3395 3379 (unsigned long long) from_cblock(new_size)); 3396 3380 return false; 3397 3381 } ··· 3407 3389 3408 3390 r = dm_cache_resize(cache->cmd, new_size); 3409 3391 if (r) { 3410 - DMERR("could not resize cache metadata"); 3392 + DMERR("%s: could not resize cache metadata", cache_device_name(cache)); 3411 3393 metadata_operation_failed(cache, "dm_cache_resize", r); 3412 3394 return r; 3413 3395 } ··· 3446 3428 r = dm_cache_load_mappings(cache->cmd, cache->policy, 3447 3429 load_mapping, cache); 3448 3430 if (r) { 3449 - DMERR("could not load cache mappings"); 3431 + DMERR("%s: could not load cache mappings", cache_device_name(cache)); 3450 3432 metadata_operation_failed(cache, "dm_cache_load_mappings", r); 3451 3433 return r; 3452 3434 } ··· 3467 3449 discard_load_info_init(cache, &li); 3468 3450 r = dm_cache_load_discards(cache->cmd, load_discard, &li); 3469 3451 if (r) { 3470 - DMERR("could not load origin discards"); 3452 + DMERR("%s: could not load origin discards", cache_device_name(cache)); 3471 3453 metadata_operation_failed(cache, "dm_cache_load_discards", r); 3472 3454 return r; 3473 3455 } ··· 3521 3503 if (!(status_flags 
& DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) 3522 3504 (void) commit(cache, false); 3523 3505 3524 - r = dm_cache_get_free_metadata_block_count(cache->cmd, 3525 - &nr_free_blocks_metadata); 3506 + r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata); 3526 3507 if (r) { 3527 - DMERR("could not get metadata free block count"); 3508 + DMERR("%s: dm_cache_get_free_metadata_block_count returned %d", 3509 + cache_device_name(cache), r); 3528 3510 goto err; 3529 3511 } 3530 3512 3531 3513 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata); 3532 3514 if (r) { 3533 - DMERR("could not get metadata device size"); 3515 + DMERR("%s: dm_cache_get_metadata_dev_size returned %d", 3516 + cache_device_name(cache), r); 3534 3517 goto err; 3535 3518 } 3536 3519 ··· 3562 3543 DMEMIT("1 writeback "); 3563 3544 3564 3545 else { 3565 - DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode); 3546 + DMERR("%s: internal error: unknown io mode: %d", 3547 + cache_device_name(cache), (int) cache->features.io_mode); 3566 3548 goto err; 3567 3549 } 3568 3550 ··· 3573 3553 if (sz < maxlen) { 3574 3554 r = policy_emit_config_values(cache->policy, result, maxlen, &sz); 3575 3555 if (r) 3576 - DMERR("policy_emit_config_values returned %d", r); 3556 + DMERR("%s: policy_emit_config_values returned %d", 3557 + cache_device_name(cache), r); 3577 3558 } 3578 3559 3579 3560 if (get_cache_mode(cache) == CM_READ_ONLY) ··· 3643 3622 return 0; 3644 3623 } 3645 3624 3646 - DMERR("invalid cblock range '%s'", str); 3625 + DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str); 3647 3626 return -EINVAL; 3648 3627 } 3649 3628 ··· 3654 3633 uint64_t n = from_cblock(cache->cache_size); 3655 3634 3656 3635 if (b >= n) { 3657 - DMERR("begin cblock out of range: %llu >= %llu", b, n); 3636 + DMERR("%s: begin cblock out of range: %llu >= %llu", 3637 + cache_device_name(cache), b, n); 3658 3638 return -EINVAL; 3659 3639 } 3660 3640 3661 
3641 if (e > n) { 3662 - DMERR("end cblock out of range: %llu > %llu", e, n); 3642 + DMERR("%s: end cblock out of range: %llu > %llu", 3643 + cache_device_name(cache), e, n); 3663 3644 return -EINVAL; 3664 3645 } 3665 3646 3666 3647 if (b >= e) { 3667 - DMERR("invalid cblock range: %llu >= %llu", b, e); 3648 + DMERR("%s: invalid cblock range: %llu >= %llu", 3649 + cache_device_name(cache), b, e); 3668 3650 return -EINVAL; 3669 3651 } 3670 3652 ··· 3701 3677 struct cblock_range range; 3702 3678 3703 3679 if (!passthrough_mode(&cache->features)) { 3704 - DMERR("cache has to be in passthrough mode for invalidation"); 3680 + DMERR("%s: cache has to be in passthrough mode for invalidation", 3681 + cache_device_name(cache)); 3705 3682 return -EPERM; 3706 3683 } 3707 3684 ··· 3742 3717 return -EINVAL; 3743 3718 3744 3719 if (get_cache_mode(cache) >= CM_READ_ONLY) { 3745 - DMERR("unable to service cache target messages in READ_ONLY or FAIL mode"); 3720 + DMERR("%s: unable to service cache target messages in READ_ONLY or FAIL mode", 3721 + cache_device_name(cache)); 3746 3722 return -EOPNOTSUPP; 3747 3723 } 3748 3724