Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: Convert from DRM_* to dev_*

Convert generic DRM_* logging calls to the corresponding dev_* calls so that log messages include device context information.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Asad Kamal <asad.kamal@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Lijo Lazar and committed by
Alex Deucher
a3e510fd 1bec2f27

+320 -219
+167 -104
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 1288 1288 */ 1289 1289 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg) 1290 1290 { 1291 - DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); 1291 + dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg); 1292 1292 BUG(); 1293 1293 return 0; 1294 1294 } 1295 1295 1296 1296 static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg) 1297 1297 { 1298 - DRM_ERROR("Invalid callback to read register 0x%llX\n", reg); 1298 + dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg); 1299 1299 BUG(); 1300 1300 return 0; 1301 1301 } ··· 1312 1312 */ 1313 1313 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) 1314 1314 { 1315 - DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", 1316 - reg, v); 1315 + dev_err(adev->dev, 1316 + "Invalid callback to write register 0x%04X with 0x%08X\n", reg, 1317 + v); 1317 1318 BUG(); 1318 1319 } 1319 1320 1320 1321 static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v) 1321 1322 { 1322 - DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n", 1323 - reg, v); 1323 + dev_err(adev->dev, 1324 + "Invalid callback to write register 0x%llX with 0x%08X\n", reg, 1325 + v); 1324 1326 BUG(); 1325 1327 } 1326 1328 ··· 1338 1336 */ 1339 1337 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg) 1340 1338 { 1341 - DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg); 1339 + dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n", 1340 + reg); 1342 1341 BUG(); 1343 1342 return 0; 1344 1343 } 1345 1344 1346 1345 static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg) 1347 1346 { 1348 - DRM_ERROR("Invalid callback to read register 0x%llX\n", reg); 1347 + dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg); 1349 1348 BUG(); 1350 1349 return 0; 1351 1350 } ··· 1363 1360 
*/ 1364 1361 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v) 1365 1362 { 1366 - DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n", 1367 - reg, v); 1363 + dev_err(adev->dev, 1364 + "Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n", 1365 + reg, v); 1368 1366 BUG(); 1369 1367 } 1370 1368 1371 1369 static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v) 1372 1370 { 1373 - DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n", 1374 - reg, v); 1371 + dev_err(adev->dev, 1372 + "Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n", 1373 + reg, v); 1375 1374 BUG(); 1376 1375 } 1377 1376 ··· 1391 1386 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev, 1392 1387 uint32_t block, uint32_t reg) 1393 1388 { 1394 - DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n", 1395 - reg, block); 1389 + dev_err(adev->dev, 1390 + "Invalid callback to read register 0x%04X in block 0x%04X\n", 1391 + reg, block); 1396 1392 BUG(); 1397 1393 return 0; 1398 1394 } ··· 1413 1407 uint32_t block, 1414 1408 uint32_t reg, uint32_t v) 1415 1409 { 1416 - DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n", 1417 - reg, block, v); 1410 + dev_err(adev->dev, 1411 + "Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n", 1412 + reg, block, v); 1418 1413 BUG(); 1419 1414 } 1420 1415 ··· 1701 1694 1702 1695 /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */ 1703 1696 if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) 1704 - DRM_WARN("System can't access extended configuration space, please check!!\n"); 1697 + dev_warn( 1698 + adev->dev, 1699 + "System can't access extended configuration space, please check!!\n"); 1705 1700 1706 1701 /* skip if the bios has already enabled large BAR */ 1707 1702 if (adev->gmc.real_vram_size && ··· 
1743 1734 1744 1735 r = pci_resize_resource(adev->pdev, 0, rbar_size); 1745 1736 if (r == -ENOSPC) 1746 - DRM_INFO("Not enough PCI address space for a large BAR."); 1737 + dev_info(adev->dev, 1738 + "Not enough PCI address space for a large BAR."); 1747 1739 else if (r && r != -ENOTSUPP) 1748 - DRM_ERROR("Problem resizing BAR0 (%d).", r); 1740 + dev_err(adev->dev, "Problem resizing BAR0 (%d).", r); 1749 1741 1750 1742 pci_assign_unassigned_bus_resources(adev->pdev->bus); 1751 1743 ··· 1848 1838 case 0: 1849 1839 return false; 1850 1840 default: 1851 - DRM_ERROR("Invalid value for amdgpu.seamless: %d\n", 1852 - amdgpu_seamless); 1841 + dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n", 1842 + amdgpu_seamless); 1853 1843 return false; 1854 1844 } 1855 1845 ··· 2025 2015 return; 2026 2016 2027 2017 if (!is_os_64) { 2028 - DRM_WARN("Not 64-bit OS, feature not supported\n"); 2018 + dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n"); 2029 2019 goto def_value; 2030 2020 } 2031 2021 si_meminfo(&si); ··· 2040 2030 if (total_memory < dram_size_seven_GB) 2041 2031 goto def_value1; 2042 2032 } else { 2043 - DRM_WARN("Smu memory pool size not supported\n"); 2033 + dev_warn(adev->dev, "Smu memory pool size not supported\n"); 2044 2034 goto def_value; 2045 2035 } 2046 2036 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28; ··· 2048 2038 return; 2049 2039 2050 2040 def_value1: 2051 - DRM_WARN("No enough system memory\n"); 2041 + dev_warn(adev->dev, "No enough system memory\n"); 2052 2042 def_value: 2053 2043 adev->pm.smu_prv_buffer_size = 0; 2054 2044 } ··· 2212 2202 amdgpu_device_load_pci_state(pdev); 2213 2203 r = pci_enable_device(pdev); 2214 2204 if (r) 2215 - DRM_WARN("pci_enable_device failed (%d)\n", r); 2205 + dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n", 2206 + r); 2216 2207 amdgpu_device_resume(dev, true); 2217 2208 2218 2209 dev->switch_power_state = DRM_SWITCH_POWER_ON; 2219 2210 } else { 2220 - pr_info("switched off\n"); 
2211 + dev_info(&pdev->dev, "switched off\n"); 2221 2212 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 2222 2213 amdgpu_device_prepare(dev); 2223 2214 amdgpu_device_suspend(dev, true); ··· 2285 2274 r = adev->ip_blocks[i].version->funcs->set_clockgating_state( 2286 2275 &adev->ip_blocks[i], state); 2287 2276 if (r) 2288 - DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n", 2289 - adev->ip_blocks[i].version->funcs->name, r); 2277 + dev_err(adev->dev, 2278 + "set_clockgating_state of IP block <%s> failed %d\n", 2279 + adev->ip_blocks[i].version->funcs->name, r); 2290 2280 } 2291 2281 return r; 2292 2282 } ··· 2320 2308 r = adev->ip_blocks[i].version->funcs->set_powergating_state( 2321 2309 &adev->ip_blocks[i], state); 2322 2310 if (r) 2323 - DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n", 2324 - adev->ip_blocks[i].version->funcs->name, r); 2311 + dev_err(adev->dev, 2312 + "set_powergating_state of IP block <%s> failed %d\n", 2313 + adev->ip_blocks[i].version->funcs->name, r); 2325 2314 } 2326 2315 return r; 2327 2316 } ··· 2538 2525 } 2539 2526 } 2540 2527 2541 - DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", 2542 - amdgpu_virtual_display, pci_address_name, 2543 - adev->enable_virtual_display, adev->mode_info.num_crtc); 2528 + dev_info( 2529 + adev->dev, 2530 + "virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", 2531 + amdgpu_virtual_display, pci_address_name, 2532 + adev->enable_virtual_display, adev->mode_info.num_crtc); 2544 2533 2545 2534 kfree(pciaddstr); 2546 2535 } ··· 2553 2538 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) { 2554 2539 adev->mode_info.num_crtc = 1; 2555 2540 adev->enable_virtual_display = true; 2556 - DRM_INFO("virtual_display:%d, num_crtc:%d\n", 2557 - adev->enable_virtual_display, adev->mode_info.num_crtc); 2541 + dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n", 2542 + adev->enable_virtual_display, 2543 + adev->mode_info.num_crtc); 2558 2544 
} 2559 2545 } 2560 2546 ··· 2800 2784 ip_block = &adev->ip_blocks[i]; 2801 2785 2802 2786 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 2803 - DRM_WARN("disabled ip block: %d <%s>\n", 2804 - i, adev->ip_blocks[i].version->funcs->name); 2787 + dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i, 2788 + adev->ip_blocks[i].version->funcs->name); 2805 2789 adev->ip_blocks[i].status.valid = false; 2806 2790 } else if (ip_block->version->funcs->early_init) { 2807 2791 r = ip_block->version->funcs->early_init(ip_block); 2808 2792 if (r == -ENOENT) { 2809 2793 adev->ip_blocks[i].status.valid = false; 2810 2794 } else if (r) { 2811 - DRM_ERROR("early_init of IP block <%s> failed %d\n", 2812 - adev->ip_blocks[i].version->funcs->name, r); 2795 + dev_err(adev->dev, 2796 + "early_init of IP block <%s> failed %d\n", 2797 + adev->ip_blocks[i].version->funcs->name, 2798 + r); 2813 2799 total = false; 2814 2800 } else { 2815 2801 adev->ip_blocks[i].status.valid = true; ··· 2892 2874 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { 2893 2875 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); 2894 2876 if (r) { 2895 - DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2896 - adev->ip_blocks[i].version->funcs->name, r); 2877 + dev_err(adev->dev, 2878 + "hw_init of IP block <%s> failed %d\n", 2879 + adev->ip_blocks[i].version->funcs->name, 2880 + r); 2897 2881 return r; 2898 2882 } 2899 2883 adev->ip_blocks[i].status.hw = true; ··· 2919 2899 continue; 2920 2900 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); 2921 2901 if (r) { 2922 - DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2923 - adev->ip_blocks[i].version->funcs->name, r); 2902 + dev_err(adev->dev, 2903 + "hw_init of IP block <%s> failed %d\n", 2904 + adev->ip_blocks[i].version->funcs->name, r); 2924 2905 return r; 2925 2906 } 2926 2907 adev->ip_blocks[i].status.hw = true; ··· 2959 2938 } else { 2960 2939 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); 
2961 2940 if (r) { 2962 - DRM_ERROR("hw_init of IP block <%s> failed %d\n", 2963 - adev->ip_blocks[i].version->funcs->name, r); 2941 + dev_err(adev->dev, 2942 + "hw_init of IP block <%s> failed %d\n", 2943 + adev->ip_blocks[i] 2944 + .version->funcs->name, 2945 + r); 2964 2946 return r; 2965 2947 } 2966 2948 adev->ip_blocks[i].status.hw = true; ··· 3018 2994 3019 2995 r = drm_sched_init(&ring->sched, &args); 3020 2996 if (r) { 3021 - DRM_ERROR("Failed to create scheduler on ring %s.\n", 3022 - ring->name); 2997 + dev_err(adev->dev, 2998 + "Failed to create scheduler on ring %s.\n", 2999 + ring->name); 3023 3000 return r; 3024 3001 } 3025 3002 r = amdgpu_uvd_entity_init(adev, ring); 3026 3003 if (r) { 3027 - DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n", 3028 - ring->name); 3004 + dev_err(adev->dev, 3005 + "Failed to create UVD scheduling entity on ring %s.\n", 3006 + ring->name); 3029 3007 return r; 3030 3008 } 3031 3009 r = amdgpu_vce_entity_init(adev, ring); 3032 3010 if (r) { 3033 - DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n", 3034 - ring->name); 3011 + dev_err(adev->dev, 3012 + "Failed to create VCE scheduling entity on ring %s.\n", 3013 + ring->name); 3035 3014 return r; 3036 3015 } 3037 3016 } ··· 3072 3045 if (adev->ip_blocks[i].version->funcs->sw_init) { 3073 3046 r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]); 3074 3047 if (r) { 3075 - DRM_ERROR("sw_init of IP block <%s> failed %d\n", 3076 - adev->ip_blocks[i].version->funcs->name, r); 3048 + dev_err(adev->dev, 3049 + "sw_init of IP block <%s> failed %d\n", 3050 + adev->ip_blocks[i].version->funcs->name, 3051 + r); 3077 3052 goto init_failed; 3078 3053 } 3079 3054 } ··· 3089 3060 /* need to do common hw init early so everything is set up for gmc */ 3090 3061 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); 3091 3062 if (r) { 3092 - DRM_ERROR("hw_init %d failed %d\n", i, r); 3063 + dev_err(adev->dev, "hw_init %d failed %d\n", 
i, 3064 + r); 3093 3065 goto init_failed; 3094 3066 } 3095 3067 adev->ip_blocks[i].status.hw = true; ··· 3102 3072 3103 3073 r = amdgpu_device_mem_scratch_init(adev); 3104 3074 if (r) { 3105 - DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r); 3075 + dev_err(adev->dev, 3076 + "amdgpu_mem_scratch_init failed %d\n", 3077 + r); 3106 3078 goto init_failed; 3107 3079 } 3108 3080 r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]); 3109 3081 if (r) { 3110 - DRM_ERROR("hw_init %d failed %d\n", i, r); 3082 + dev_err(adev->dev, "hw_init %d failed %d\n", i, 3083 + r); 3111 3084 goto init_failed; 3112 3085 } 3113 3086 r = amdgpu_device_wb_init(adev); 3114 3087 if (r) { 3115 - DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); 3088 + dev_err(adev->dev, 3089 + "amdgpu_device_wb_init failed %d\n", r); 3116 3090 goto init_failed; 3117 3091 } 3118 3092 adev->ip_blocks[i].status.hw = true; ··· 3128 3094 AMDGPU_GEM_DOMAIN_GTT, 3129 3095 AMDGPU_CSA_SIZE); 3130 3096 if (r) { 3131 - DRM_ERROR("allocate CSA failed %d\n", r); 3097 + dev_err(adev->dev, 3098 + "allocate CSA failed %d\n", r); 3132 3099 goto init_failed; 3133 3100 } 3134 3101 } 3135 3102 3136 3103 r = amdgpu_seq64_init(adev); 3137 3104 if (r) { 3138 - DRM_ERROR("allocate seq64 failed %d\n", r); 3105 + dev_err(adev->dev, "allocate seq64 failed %d\n", 3106 + r); 3139 3107 goto init_failed; 3140 3108 } 3141 3109 } ··· 3327 3291 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i], 3328 3292 state); 3329 3293 if (r) { 3330 - DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", 3331 - adev->ip_blocks[i].version->funcs->name, r); 3294 + dev_err(adev->dev, 3295 + "set_clockgating_state(gate) of IP block <%s> failed %d\n", 3296 + adev->ip_blocks[i].version->funcs->name, 3297 + r); 3332 3298 return r; 3333 3299 } 3334 3300 } ··· 3366 3328 r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i], 3367 3329 state); 3368 3330 if (r) { 3369 - 
DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n", 3370 - adev->ip_blocks[i].version->funcs->name, r); 3331 + dev_err(adev->dev, 3332 + "set_powergating_state(gate) of IP block <%s> failed %d\n", 3333 + adev->ip_blocks[i].version->funcs->name, 3334 + r); 3371 3335 return r; 3372 3336 } 3373 3337 } ··· 3435 3395 if (adev->ip_blocks[i].version->funcs->late_init) { 3436 3396 r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]); 3437 3397 if (r) { 3438 - DRM_ERROR("late_init of IP block <%s> failed %d\n", 3439 - adev->ip_blocks[i].version->funcs->name, r); 3398 + dev_err(adev->dev, 3399 + "late_init of IP block <%s> failed %d\n", 3400 + adev->ip_blocks[i].version->funcs->name, 3401 + r); 3440 3402 return r; 3441 3403 } 3442 3404 } ··· 3447 3405 3448 3406 r = amdgpu_ras_late_init(adev); 3449 3407 if (r) { 3450 - DRM_ERROR("amdgpu_ras_late_init failed %d", r); 3408 + dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r); 3451 3409 return r; 3452 3410 } 3453 3411 ··· 3461 3419 3462 3420 r = amdgpu_device_enable_mgpu_fan_boost(); 3463 3421 if (r) 3464 - DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); 3422 + dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r); 3465 3423 3466 3424 /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */ 3467 3425 if (amdgpu_passthrough(adev) && ··· 3494 3452 r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 3495 3453 AMDGPU_XGMI_PSTATE_MIN); 3496 3454 if (r) { 3497 - DRM_ERROR("pstate setting failed (%d).\n", r); 3455 + dev_err(adev->dev, 3456 + "pstate setting failed (%d).\n", 3457 + r); 3498 3458 break; 3499 3459 } 3500 3460 } ··· 3510 3466 3511 3467 static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block) 3512 3468 { 3469 + struct amdgpu_device *adev = ip_block->adev; 3513 3470 int r; 3514 3471 3515 3472 if (!ip_block->version->funcs->hw_fini) { 3516 - DRM_ERROR("hw_fini of IP block <%s> not defined\n", 3517 - ip_block->version->funcs->name); 
3473 + dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n", 3474 + ip_block->version->funcs->name); 3518 3475 } else { 3519 3476 r = ip_block->version->funcs->hw_fini(ip_block); 3520 3477 /* XXX handle errors */ 3521 3478 if (r) { 3522 - DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", 3523 - ip_block->version->funcs->name, r); 3479 + dev_dbg(adev->dev, 3480 + "hw_fini of IP block <%s> failed %d\n", 3481 + ip_block->version->funcs->name, r); 3524 3482 } 3525 3483 } 3526 3484 ··· 3563 3517 3564 3518 r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]); 3565 3519 if (r) { 3566 - DRM_DEBUG("early_fini of IP block <%s> failed %d\n", 3567 - adev->ip_blocks[i].version->funcs->name, r); 3520 + dev_dbg(adev->dev, 3521 + "early_fini of IP block <%s> failed %d\n", 3522 + adev->ip_blocks[i].version->funcs->name, r); 3568 3523 } 3569 3524 } 3570 3525 ··· 3587 3540 3588 3541 if (amdgpu_sriov_vf(adev)) { 3589 3542 if (amdgpu_virt_release_full_gpu(adev, false)) 3590 - DRM_ERROR("failed to release exclusive mode on fini\n"); 3543 + dev_err(adev->dev, 3544 + "failed to release exclusive mode on fini\n"); 3591 3545 } 3592 3546 3593 3547 return 0; ··· 3636 3588 r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]); 3637 3589 /* XXX handle errors */ 3638 3590 if (r) { 3639 - DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", 3640 - adev->ip_blocks[i].version->funcs->name, r); 3591 + dev_dbg(adev->dev, 3592 + "sw_fini of IP block <%s> failed %d\n", 3593 + adev->ip_blocks[i].version->funcs->name, 3594 + r); 3641 3595 } 3642 3596 } 3643 3597 adev->ip_blocks[i].status.sw = false; ··· 3672 3622 3673 3623 r = amdgpu_ib_ring_tests(adev); 3674 3624 if (r) 3675 - DRM_ERROR("ib ring test failed (%d).\n", r); 3625 + dev_err(adev->dev, "ib ring test failed (%d).\n", r); 3676 3626 } 3677 3627 3678 3628 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) ··· 3813 3763 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { 3814 
3764 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state); 3815 3765 if (r) { 3816 - DRM_ERROR("SMC failed to set mp1 state %d, %d\n", 3817 - adev->mp1_state, r); 3766 + dev_err(adev->dev, 3767 + "SMC failed to set mp1 state %d, %d\n", 3768 + adev->mp1_state, r); 3818 3769 return r; 3819 3770 } 3820 3771 } ··· 4147 4096 #else 4148 4097 default: 4149 4098 if (amdgpu_dc > 0) 4150 - DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n"); 4099 + dev_info_once( 4100 + adev->dev, 4101 + "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n"); 4151 4102 return false; 4152 4103 #endif 4153 4104 } ··· 4210 4157 4211 4158 fail: 4212 4159 if (adev->asic_reset_res) 4213 - DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", 4160 + dev_warn(adev->dev, 4161 + "ASIC reset failed with error, %d for drm dev, %s", 4214 4162 adev->asic_reset_res, adev_to_drm(adev)->unique); 4215 4163 amdgpu_put_xgmi_hive(hive); 4216 4164 } ··· 4335 4281 adev->gfx.mcbp = true; 4336 4282 4337 4283 if (adev->gfx.mcbp) 4338 - DRM_INFO("MCBP is enabled\n"); 4284 + dev_info(adev->dev, "MCBP is enabled\n"); 4339 4285 } 4340 4286 4341 4287 /** ··· 4403 4349 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 4404 4350 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 4405 4351 4406 - DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 4407 - amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 4408 - pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 4352 + dev_info( 4353 + adev->dev, 4354 + "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 4355 + amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 4356 + pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 4409 4357 4410 4358 /* mutex initialization are all done here so we 4411 4359 * can recall function without having locking issues ··· 4524 
4468 if (!adev->rmmio) 4525 4469 return -ENOMEM; 4526 4470 4527 - DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); 4528 - DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size); 4471 + dev_info(adev->dev, "register mmio base: 0x%08X\n", 4472 + (uint32_t)adev->rmmio_base); 4473 + dev_info(adev->dev, "register mmio size: %u\n", 4474 + (unsigned int)adev->rmmio_size); 4529 4475 4530 4476 /* 4531 4477 * Reset domain needs to be present early, before XGMI hive discovered ··· 4664 4606 r = -EINVAL; 4665 4607 goto failed; 4666 4608 } 4667 - DRM_INFO("GPU posting now...\n"); 4609 + dev_info(adev->dev, "GPU posting now...\n"); 4668 4610 r = amdgpu_device_asic_init(adev); 4669 4611 if (r) { 4670 4612 dev_err(adev->dev, "gpu post error!\n"); ··· 4774 4716 4775 4717 r = amdgpu_pm_sysfs_init(adev); 4776 4718 if (r) 4777 - DRM_ERROR("registering pm sysfs failed (%d).\n", r); 4719 + dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r); 4778 4720 4779 4721 r = amdgpu_ucode_sysfs_init(adev); 4780 4722 if (r) { 4781 4723 adev->ucode_sysfs_en = false; 4782 - DRM_ERROR("Creating firmware sysfs failed (%d).\n", r); 4724 + dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r); 4783 4725 } else 4784 4726 adev->ucode_sysfs_en = true; 4785 4727 ··· 5028 4970 5029 4971 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); 5030 4972 if (ret) 5031 - DRM_WARN("evicting device resources failed\n"); 4973 + dev_warn(adev->dev, "evicting device resources failed\n"); 5032 4974 return ret; 5033 4975 } 5034 4976 ··· 5151 5093 } 5152 5094 5153 5095 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) 5154 - DRM_WARN("smart shift update failed\n"); 5096 + dev_warn(adev->dev, "smart shift update failed\n"); 5155 5097 5156 5098 if (notify_clients) 5157 5099 drm_client_dev_suspend(adev_to_drm(adev), false); ··· 5320 5262 adev->in_suspend = false; 5321 5263 5322 5264 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) 5323 - DRM_WARN("smart 
shift update failed\n"); 5265 + dev_warn(adev->dev, "smart shift update failed\n"); 5324 5266 5325 5267 return 0; 5326 5268 } ··· 5851 5793 amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job); 5852 5794 5853 5795 if (vram_lost) { 5854 - DRM_INFO("VRAM is lost due to GPU reset!\n"); 5796 + dev_info( 5797 + tmp_adev->dev, 5798 + "VRAM is lost due to GPU reset!\n"); 5855 5799 amdgpu_inc_vram_lost(tmp_adev); 5856 5800 } 5857 5801 ··· 6365 6305 } else { 6366 6306 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); 6367 6307 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0)) 6368 - DRM_WARN("smart shift update failed\n"); 6308 + dev_warn(tmp_adev->dev, 6309 + "smart shift update failed\n"); 6369 6310 } 6370 6311 } 6371 6312 ··· 6447 6386 */ 6448 6387 if (need_emergency_restart && amdgpu_ras_get_context(adev) && 6449 6388 amdgpu_ras_get_context(adev)->reboot) { 6450 - DRM_WARN("Emergency reboot."); 6389 + dev_warn(adev->dev, "Emergency reboot."); 6451 6390 6452 6391 ksys_sync_helper(); 6453 6392 emergency_restart(); ··· 7103 7042 adev->pci_state = pci_store_saved_state(pdev); 7104 7043 7105 7044 if (!adev->pci_state) { 7106 - DRM_ERROR("Failed to store PCI saved state"); 7045 + dev_err(adev->dev, "Failed to store PCI saved state"); 7107 7046 return false; 7108 7047 } 7109 7048 } else { 7110 - DRM_WARN("Failed to save PCI state, err:%d\n", r); 7049 + dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r); 7111 7050 return false; 7112 7051 } 7113 7052 ··· 7128 7067 if (!r) { 7129 7068 pci_restore_state(pdev); 7130 7069 } else { 7131 - DRM_WARN("Failed to load PCI state, err:%d\n", r); 7070 + dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r); 7132 7071 return false; 7133 7072 } 7134 7073 ··· 7374 7313 dep = amdgpu_sync_peek_fence(&isolation->prev, ring); 7375 7314 r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT); 7376 7315 if (r) 7377 - DRM_WARN("OOM 
tracking isolation\n"); 7316 + dev_warn(adev->dev, "OOM tracking isolation\n"); 7378 7317 7379 7318 out_grab_ref: 7380 7319 dma_fence_get(dep); ··· 7442 7381 tmp_ = RREG32(reg_addr); 7443 7382 loop--; 7444 7383 if (!loop) { 7445 - DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn", 7446 - inst, reg_name, (uint32_t)expected_value, 7447 - (uint32_t)(tmp_ & (mask))); 7384 + dev_warn( 7385 + adev->dev, 7386 + "Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn", 7387 + inst, reg_name, (uint32_t)expected_value, 7388 + (uint32_t)(tmp_ & (mask))); 7448 7389 ret = -ETIMEDOUT; 7449 7390 break; 7450 7391 }
+10 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
··· 41 41 if (index < adev->doorbell.num_kernel_doorbells) 42 42 return readl(adev->doorbell.cpu_addr + index); 43 43 44 - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); 44 + dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n", 45 + index); 45 46 return 0; 46 47 } 47 48 ··· 64 63 if (index < adev->doorbell.num_kernel_doorbells) 65 64 writel(v, adev->doorbell.cpu_addr + index); 66 65 else 67 - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); 66 + dev_err(adev->dev, 67 + "writing beyond doorbell aperture: 0x%08x!\n", index); 68 68 } 69 69 70 70 /** ··· 85 83 if (index < adev->doorbell.num_kernel_doorbells) 86 84 return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index)); 87 85 88 - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); 86 + dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n", 87 + index); 89 88 return 0; 90 89 } 91 90 ··· 108 105 if (index < adev->doorbell.num_kernel_doorbells) 109 106 atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v); 110 107 else 111 - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); 108 + dev_err(adev->dev, 109 + "writing beyond doorbell aperture: 0x%08x!\n", index); 112 110 } 113 111 114 112 /** ··· 170 166 NULL, 171 167 (void **)&adev->doorbell.cpu_addr); 172 168 if (r) { 173 - DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r); 169 + dev_err(adev->dev, 170 + "Failed to allocate kernel doorbells, err=%d\n", r); 174 171 return r; 175 172 } 176 173
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
··· 295 295 fence_drv.fallback_timer); 296 296 297 297 if (amdgpu_fence_process(ring)) 298 - DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name); 298 + dev_warn(ring->adev->dev, 299 + "Fence fallback timer expired on ring %s\n", 300 + ring->name); 299 301 } 300 302 301 303 /**
+16 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
··· 144 144 145 145 /* If algo exists, it means that the i2c_adapter's initialized */ 146 146 if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) { 147 - DRM_WARN("Cannot access FRU, EEPROM accessor not initialized"); 147 + dev_warn(adev->dev, 148 + "Cannot access FRU, EEPROM accessor not initialized"); 148 149 return -ENODEV; 149 150 } 150 151 ··· 153 152 len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf, 154 153 sizeof(buf)); 155 154 if (len != 8) { 156 - DRM_ERROR("Couldn't read the IPMI Common Header: %d", len); 155 + dev_err(adev->dev, "Couldn't read the IPMI Common Header: %d", 156 + len); 157 157 return len < 0 ? len : -EIO; 158 158 } 159 159 160 160 if (buf[0] != 1) { 161 - DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]); 161 + dev_err(adev->dev, "Bad IPMI Common Header version: 0x%02x", 162 + buf[0]); 162 163 return -EIO; 163 164 } 164 165 165 166 for (csum = 0; len > 0; len--) 166 167 csum += buf[len - 1]; 167 168 if (csum) { 168 - DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum); 169 + dev_err(adev->dev, "Bad IPMI Common Header checksum: 0x%02x", 170 + csum); 169 171 return -EIO; 170 172 } 171 173 ··· 183 179 /* Read the header of the PIA. */ 184 180 len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3); 185 181 if (len != 3) { 186 - DRM_ERROR("Couldn't read the Product Info Area header: %d", len); 182 + dev_err(adev->dev, 183 + "Couldn't read the Product Info Area header: %d", len); 187 184 return len < 0 ? 
len : -EIO; 188 185 } 189 186 190 187 if (buf[0] != 1) { 191 - DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]); 188 + dev_err(adev->dev, "Bad IPMI Product Info Area version: 0x%02x", 189 + buf[0]); 192 190 return -EIO; 193 191 } 194 192 ··· 203 197 len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size); 204 198 if (len != size) { 205 199 kfree(pia); 206 - DRM_ERROR("Couldn't read the Product Info Area: %d", len); 200 + dev_err(adev->dev, "Couldn't read the Product Info Area: %d", 201 + len); 207 202 return len < 0 ? len : -EIO; 208 203 } 209 204 210 205 for (csum = 0; size > 0; size--) 211 206 csum += pia[size - 1]; 212 207 if (csum) { 213 - DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum); 208 + dev_err(adev->dev, "Bad Product Info Area checksum: 0x%02x", 209 + csum); 214 210 kfree(pia); 215 211 return -EIO; 216 212 }
+10 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 149 149 static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev) 150 150 { 151 151 if (amdgpu_compute_multipipe != -1) { 152 - DRM_INFO("amdgpu: forcing compute pipe policy %d\n", 152 + dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n", 153 153 amdgpu_compute_multipipe); 154 154 return amdgpu_compute_multipipe == 1; 155 155 } ··· 674 674 * generation exposes more than 64 queues. If so, the 675 675 * definition of queue_mask needs updating */ 676 676 if (WARN_ON(i > (sizeof(queue_mask)*8))) { 677 - DRM_ERROR("Invalid KCQ enabled: %d\n", i); 677 + dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i); 678 678 break; 679 679 } 680 680 ··· 683 683 684 684 amdgpu_device_flush_hdp(adev, NULL); 685 685 686 - DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, 687 - kiq_ring->queue); 686 + dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me, 687 + kiq_ring->pipe, kiq_ring->queue); 688 688 689 689 spin_lock(&kiq->ring_lock); 690 690 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * 691 691 adev->gfx.num_compute_rings + 692 692 kiq->pmf->set_resources_size); 693 693 if (r) { 694 - DRM_ERROR("Failed to lock KIQ (%d).\n", r); 694 + dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r); 695 695 spin_unlock(&kiq->ring_lock); 696 696 return r; 697 697 } ··· 712 712 r = amdgpu_ring_test_helper(kiq_ring); 713 713 spin_unlock(&kiq->ring_lock); 714 714 if (r) 715 - DRM_ERROR("KCQ enable failed\n"); 715 + dev_err(adev->dev, "KCQ enable failed\n"); 716 716 717 717 return r; 718 718 } ··· 734 734 r = amdgpu_mes_map_legacy_queue(adev, 735 735 &adev->gfx.gfx_ring[j]); 736 736 if (r) { 737 - DRM_ERROR("failed to map gfx queue\n"); 737 + dev_err(adev->dev, "failed to map gfx queue\n"); 738 738 return r; 739 739 } 740 740 } ··· 748 748 r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * 749 749 adev->gfx.num_gfx_rings); 750 750 if (r) { 751 - DRM_ERROR("Failed to lock KIQ (%d).\n", r); 751 + 
dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r); 752 752 spin_unlock(&kiq->ring_lock); 753 753 return r; 754 754 } ··· 769 769 r = amdgpu_ring_test_helper(kiq_ring); 770 770 spin_unlock(&kiq->ring_lock); 771 771 if (r) 772 - DRM_ERROR("KGQ enable failed\n"); 772 + dev_err(adev->dev, "KGQ enable failed\n"); 773 773 774 774 return r; 775 775 } ··· 1030 1030 1031 1031 ih_data.head = *ras_if; 1032 1032 1033 - DRM_ERROR("CP ECC ERROR IRQ\n"); 1033 + dev_err(adev->dev, "CP ECC ERROR IRQ\n"); 1034 1034 amdgpu_ras_interrupt_dispatch(adev, &ih_data); 1035 1035 return 0; 1036 1036 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
··· 218 218 219 219 restart_ih: 220 220 count = AMDGPU_IH_MAX_NUM_IVS; 221 - DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); 221 + dev_dbg(adev->dev, "%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); 222 222 223 223 /* Order reading of wptr vs. reading of IH ring data */ 224 224 rmb();
+14 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
··· 142 142 r = src->funcs->set(adev, src, k, 143 143 AMDGPU_IRQ_STATE_DISABLE); 144 144 if (r) 145 - DRM_ERROR("error disabling interrupt (%d)\n", 146 - r); 145 + dev_err(adev->dev, 146 + "error disabling interrupt (%d)\n", 147 + r); 147 148 } 148 149 } 149 150 } ··· 316 315 adev->irq.irq = irq; 317 316 adev_to_drm(adev)->max_vblank_count = 0x00ffffff; 318 317 319 - DRM_DEBUG("amdgpu: irq initialized.\n"); 318 + dev_dbg(adev->dev, "amdgpu: irq initialized.\n"); 320 319 return 0; 321 320 322 321 free_vectors: ··· 462 461 src_id = entry.src_id; 463 462 464 463 if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) { 465 - DRM_DEBUG("Invalid client_id in IV: %d\n", client_id); 464 + dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id); 466 465 467 466 } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { 468 - DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); 467 + dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id); 469 468 470 469 } else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) || 471 470 (client_id == SOC15_IH_CLIENTID_ISP)) && ··· 473 472 generic_handle_domain_irq(adev->irq.domain, src_id); 474 473 475 474 } else if (!adev->irq.client[client_id].sources) { 476 - DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n", 477 - client_id, src_id); 475 + dev_dbg(adev->dev, 476 + "Unregistered interrupt client_id: %d src_id: %d\n", 477 + client_id, src_id); 478 478 479 479 } else if ((src = adev->irq.client[client_id].sources[src_id])) { 480 480 r = src->funcs->process(adev, src, &entry); 481 481 if (r < 0) 482 - DRM_ERROR("error processing interrupt (%d)\n", r); 482 + dev_err(adev->dev, "error processing interrupt (%d)\n", 483 + r); 483 484 else if (r) 484 485 handled = true; 485 486 486 487 } else { 487 - DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n", 488 + dev_dbg(adev->dev, 489 + "Unregistered interrupt src_id: %d of client_id:%d\n", 488 490 src_id, client_id); 489 491 } 490 492 ··· 736 732 adev->irq.domain = irq_domain_add_linear(NULL, 
AMDGPU_MAX_IRQ_SRC_ID, 737 733 &amdgpu_hw_irqdomain_ops, adev); 738 734 if (!adev->irq.domain) { 739 - DRM_ERROR("GPU irq add domain failed\n"); 735 + dev_err(adev->dev, "GPU irq add domain failed\n"); 740 736 return -ENODEV; 741 737 } 742 738
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
··· 463 463 adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count, 464 464 sizeof(uint32_t), GFP_KERNEL); 465 465 if (!adev->jpeg.ip_dump) { 466 - DRM_ERROR("Failed to allocate memory for JPEG IP Dump\n"); 466 + dev_err(adev->dev, 467 + "Failed to allocate memory for JPEG IP Dump\n"); 467 468 return -ENOMEM; 468 469 } 469 470 adev->jpeg.reg_list = reg;
+12 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
··· 47 47 /* Bitmap for dynamic allocation of kernel doorbells */ 48 48 mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL); 49 49 if (!mes->doorbell_bitmap) { 50 - DRM_ERROR("Failed to allocate MES doorbell bitmap\n"); 50 + dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n"); 51 51 return -ENOMEM; 52 52 } 53 53 ··· 256 256 r = adev->mes.funcs->suspend_gang(&adev->mes, &input); 257 257 amdgpu_mes_unlock(&adev->mes); 258 258 if (r) 259 - DRM_ERROR("failed to suspend all gangs"); 259 + dev_err(adev->dev, "failed to suspend all gangs"); 260 260 261 261 return r; 262 262 } ··· 280 280 r = adev->mes.funcs->resume_gang(&adev->mes, &input); 281 281 amdgpu_mes_unlock(&adev->mes); 282 282 if (r) 283 - DRM_ERROR("failed to resume all gangs"); 283 + dev_err(adev->dev, "failed to resume all gangs"); 284 284 285 285 return r; 286 286 } ··· 304 304 r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input); 305 305 amdgpu_mes_unlock(&adev->mes); 306 306 if (r) 307 - DRM_ERROR("failed to map legacy queue\n"); 307 + dev_err(adev->dev, "failed to map legacy queue\n"); 308 308 309 309 return r; 310 310 } ··· 329 329 r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input); 330 330 amdgpu_mes_unlock(&adev->mes); 331 331 if (r) 332 - DRM_ERROR("failed to unmap legacy queue\n"); 332 + dev_err(adev->dev, "failed to unmap legacy queue\n"); 333 333 334 334 return r; 335 335 } ··· 361 361 r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); 362 362 amdgpu_mes_unlock(&adev->mes); 363 363 if (r) 364 - DRM_ERROR("failed to reset legacy queue\n"); 364 + dev_err(adev->dev, "failed to reset legacy queue\n"); 365 365 366 366 return r; 367 367 } ··· 469 469 int r; 470 470 471 471 if (!adev->mes.funcs->misc_op) { 472 - DRM_ERROR("mes set shader debugger is not supported!\n"); 472 + dev_err(adev->dev, 473 + "mes set shader debugger is not supported!\n"); 473 474 return -EINVAL; 474 475 } 475 476 ··· 494 493 495 494 r = 
adev->mes.funcs->misc_op(&adev->mes, &op_input); 496 495 if (r) 497 - DRM_ERROR("failed to set_shader_debugger\n"); 496 + dev_err(adev->dev, "failed to set_shader_debugger\n"); 498 497 499 498 amdgpu_mes_unlock(&adev->mes); 500 499 ··· 508 507 int r; 509 508 510 509 if (!adev->mes.funcs->misc_op) { 511 - DRM_ERROR("mes flush shader debugger is not supported!\n"); 510 + dev_err(adev->dev, 511 + "mes flush shader debugger is not supported!\n"); 512 512 return -EINVAL; 513 513 } 514 514 ··· 521 519 522 520 r = adev->mes.funcs->misc_op(&adev->mes, &op_input); 523 521 if (r) 524 - DRM_ERROR("failed to set_shader_debugger\n"); 522 + dev_err(adev->dev, "failed to set_shader_debugger\n"); 525 523 526 524 amdgpu_mes_unlock(&adev->mes); 527 525
+43 -34
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
··· 277 277 up_read(&adev->reset_domain->sem); 278 278 279 279 if (res < 0) { 280 - DRM_ERROR("Failed to write EEPROM table header:%d", res); 280 + dev_err(adev->dev, "Failed to write EEPROM table header:%d", 281 + res); 281 282 } else if (res < RAS_TABLE_HEADER_SIZE) { 282 - DRM_ERROR("Short write:%d out of %d\n", 283 - res, RAS_TABLE_HEADER_SIZE); 283 + dev_err(adev->dev, "Short write:%d out of %d\n", res, 284 + RAS_TABLE_HEADER_SIZE); 284 285 res = -EIO; 285 286 } else { 286 287 res = 0; ··· 324 323 325 324 buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); 326 325 if (!buf) { 327 - DRM_ERROR("Failed to alloc buf to write table ras info\n"); 326 + dev_err(adev->dev, 327 + "Failed to alloc buf to write table ras info\n"); 328 328 return -ENOMEM; 329 329 } 330 330 ··· 340 338 up_read(&adev->reset_domain->sem); 341 339 342 340 if (res < 0) { 343 - DRM_ERROR("Failed to write EEPROM table ras info:%d", res); 341 + dev_err(adev->dev, "Failed to write EEPROM table ras info:%d", 342 + res); 344 343 } else if (res < RAS_TABLE_V2_1_INFO_SIZE) { 345 - DRM_ERROR("Short write:%d out of %d\n", 346 - res, RAS_TABLE_V2_1_INFO_SIZE); 344 + dev_err(adev->dev, "Short write:%d out of %d\n", res, 345 + RAS_TABLE_V2_1_INFO_SIZE); 347 346 res = -EIO; 348 347 } else { 349 348 res = 0; ··· 612 609 buf, buf_size); 613 610 up_read(&adev->reset_domain->sem); 614 611 if (res < 0) { 615 - DRM_ERROR("Writing %d EEPROM table records error:%d", 616 - num, res); 612 + dev_err(adev->dev, "Writing %d EEPROM table records error:%d", 613 + num, res); 617 614 } else if (res < buf_size) { 618 615 /* Short write, return error. 
619 616 */ 620 - DRM_ERROR("Wrote %d records out of %d", 621 - res / RAS_TABLE_RECORD_SIZE, num); 617 + dev_err(adev->dev, "Wrote %d records out of %d", 618 + res / RAS_TABLE_RECORD_SIZE, num); 622 619 res = -EIO; 623 620 } else { 624 621 res = 0; ··· 791 788 buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE; 792 789 buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); 793 790 if (!buf) { 794 - DRM_ERROR("allocating memory for table of size %d bytes failed\n", 795 - control->tbl_hdr.tbl_size); 791 + dev_err(adev->dev, 792 + "allocating memory for table of size %d bytes failed\n", 793 + control->tbl_hdr.tbl_size); 796 794 res = -ENOMEM; 797 795 goto Out; 798 796 } ··· 805 801 buf, buf_size); 806 802 up_read(&adev->reset_domain->sem); 807 803 if (res < 0) { 808 - DRM_ERROR("EEPROM failed reading records:%d\n", 809 - res); 804 + dev_err(adev->dev, "EEPROM failed reading records:%d\n", res); 810 805 goto Out; 811 806 } else if (res < buf_size) { 812 - DRM_ERROR("EEPROM read %d out of %d bytes\n", 813 - res, buf_size); 807 + dev_err(adev->dev, "EEPROM read %d out of %d bytes\n", res, 808 + buf_size); 814 809 res = -EIO; 815 810 goto Out; 816 811 } ··· 870 867 return 0; 871 868 872 869 if (num == 0) { 873 - DRM_ERROR("will not append 0 records\n"); 870 + dev_err(adev->dev, "will not append 0 records\n"); 874 871 return -EINVAL; 875 872 } else if (num > control->ras_max_record_count) { 876 - DRM_ERROR("cannot append %d records than the size of table %d\n", 877 - num, control->ras_max_record_count); 873 + dev_err(adev->dev, 874 + "cannot append %d records than the size of table %d\n", 875 + num, control->ras_max_record_count); 878 876 return -EINVAL; 879 877 } 880 878 ··· 929 925 buf, buf_size); 930 926 up_read(&adev->reset_domain->sem); 931 927 if (res < 0) { 932 - DRM_ERROR("Reading %d EEPROM table records error:%d", 933 - num, res); 928 + dev_err(adev->dev, "Reading %d EEPROM table records error:%d", 929 + num, res); 934 930 } else if (res < 
buf_size) { 935 931 /* Short read, return error. 936 932 */ 937 - DRM_ERROR("Read %d records out of %d", 938 - res / RAS_TABLE_RECORD_SIZE, num); 933 + dev_err(adev->dev, "Read %d records out of %d", 934 + res / RAS_TABLE_RECORD_SIZE, num); 939 935 res = -EIO; 940 936 } else { 941 937 res = 0; ··· 969 965 return 0; 970 966 971 967 if (num == 0) { 972 - DRM_ERROR("will not read 0 records\n"); 968 + dev_err(adev->dev, "will not read 0 records\n"); 973 969 return -EINVAL; 974 970 } else if (num > control->ras_num_recs) { 975 - DRM_ERROR("too many records to read:%d available:%d\n", 976 - num, control->ras_num_recs); 971 + dev_err(adev->dev, "too many records to read:%d available:%d\n", 972 + num, control->ras_num_recs); 977 973 return -EINVAL; 978 974 } 979 975 ··· 1305 1301 1306 1302 buf = kzalloc(buf_size, GFP_KERNEL); 1307 1303 if (!buf) { 1308 - DRM_ERROR("Out of memory checking RAS table checksum.\n"); 1304 + dev_err(adev->dev, 1305 + "Out of memory checking RAS table checksum.\n"); 1309 1306 return -ENOMEM; 1310 1307 } 1311 1308 ··· 1315 1310 control->ras_header_offset, 1316 1311 buf, buf_size); 1317 1312 if (res < buf_size) { 1318 - DRM_ERROR("Partial read for checksum, res:%d\n", res); 1313 + dev_err(adev->dev, "Partial read for checksum, res:%d\n", res); 1319 1314 /* On partial reads, return -EIO. 
1320 1315 */ 1321 1316 if (res >= 0) ··· 1340 1335 1341 1336 buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); 1342 1337 if (!buf) { 1343 - DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n"); 1338 + dev_err(adev->dev, 1339 + "Failed to alloc buf to read EEPROM table ras info\n"); 1344 1340 return -ENOMEM; 1345 1341 } 1346 1342 ··· 1353 1347 control->i2c_address + control->ras_info_offset, 1354 1348 buf, RAS_TABLE_V2_1_INFO_SIZE); 1355 1349 if (res < RAS_TABLE_V2_1_INFO_SIZE) { 1356 - DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res); 1350 + dev_err(adev->dev, 1351 + "Failed to read EEPROM table ras info, res:%d", res); 1357 1352 res = res >= 0 ? -EIO : res; 1358 1353 goto Out; 1359 1354 } ··· 1395 1388 control->i2c_address + control->ras_header_offset, 1396 1389 buf, RAS_TABLE_HEADER_SIZE); 1397 1390 if (res < RAS_TABLE_HEADER_SIZE) { 1398 - DRM_ERROR("Failed to read EEPROM table header, res:%d", res); 1391 + dev_err(adev->dev, "Failed to read EEPROM table header, res:%d", 1392 + res); 1399 1393 return res >= 0 ? -EIO : res; 1400 1394 } 1401 1395 ··· 1461 1453 control->ras_num_mca_recs * adev->umc.retire_unit; 1462 1454 1463 1455 if (hdr->header == RAS_TABLE_HDR_VAL) { 1464 - DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records", 1465 - control->ras_num_bad_pages); 1456 + dev_dbg(adev->dev, 1457 + "Found existing EEPROM table with %d records", 1458 + control->ras_num_bad_pages); 1466 1459 1467 1460 if (hdr->version >= RAS_TABLE_VER_V2_1) { 1468 1461 res = __read_table_ras_info(control);
+31 -25
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 299 299 struct amdgpu_bo *abo_src, *abo_dst; 300 300 301 301 if (!adev->mman.buffer_funcs_enabled) { 302 - DRM_ERROR("Trying to move memory with ring turned off.\n"); 302 + dev_err(adev->dev, 303 + "Trying to move memory with ring turned off.\n"); 303 304 return -EINVAL; 304 305 } 305 306 ··· 935 934 if (gtt->userptr) { 936 935 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm); 937 936 if (r) { 938 - DRM_ERROR("failed to pin userptr\n"); 937 + dev_err(adev->dev, "failed to pin userptr\n"); 939 938 return r; 940 939 } 941 940 } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) { ··· 1782 1781 &ctx->c2p_bo, 1783 1782 NULL); 1784 1783 if (ret) { 1785 - DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); 1784 + dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret); 1786 1785 amdgpu_ttm_training_reserve_vram_fini(adev); 1787 1786 return ret; 1788 1787 } ··· 1794 1793 adev, adev->gmc.real_vram_size - reserve_size, 1795 1794 reserve_size, &adev->mman.fw_reserved_memory, NULL); 1796 1795 if (ret) { 1797 - DRM_ERROR("alloc tmr failed(%d)!\n", ret); 1796 + dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret); 1798 1797 amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, 1799 1798 NULL, NULL); 1800 1799 return ret; ··· 1865 1864 adev->need_swiotlb, 1866 1865 dma_addressing_limited(adev->dev)); 1867 1866 if (r) { 1868 - DRM_ERROR("failed initializing buffer object driver(%d).\n", r); 1867 + dev_err(adev->dev, 1868 + "failed initializing buffer object driver(%d).\n", r); 1869 1869 return r; 1870 1870 } 1871 1871 1872 1872 r = amdgpu_ttm_pools_init(adev); 1873 1873 if (r) { 1874 - DRM_ERROR("failed to init ttm pools(%d).\n", r); 1874 + dev_err(adev->dev, "failed to init ttm pools(%d).\n", r); 1875 1875 return r; 1876 1876 } 1877 1877 adev->mman.initialized = true; ··· 1880 1878 /* Initialize VRAM pool with all of VRAM divided into pages */ 1881 1879 r = amdgpu_vram_mgr_init(adev); 1882 1880 if (r) { 1883 - DRM_ERROR("Failed initializing VRAM heap.\n"); 1881 + dev_err(adev->dev, "Failed 
initializing VRAM heap.\n"); 1884 1882 return r; 1885 1883 } 1886 1884 ··· 1960 1958 DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n"); 1961 1959 } 1962 1960 1963 - DRM_INFO("amdgpu: %uM of VRAM memory ready\n", 1961 + dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n", 1964 1962 (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024))); 1965 1963 1966 1964 /* Compute GTT size, either based on TTM limit ··· 1983 1981 /* Initialize GTT memory pool */ 1984 1982 r = amdgpu_gtt_mgr_init(adev, gtt_size); 1985 1983 if (r) { 1986 - DRM_ERROR("Failed initializing GTT heap.\n"); 1984 + dev_err(adev->dev, "Failed initializing GTT heap.\n"); 1987 1985 return r; 1988 1986 } 1989 - DRM_INFO("amdgpu: %uM of GTT memory ready.\n", 1987 + dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n", 1990 1988 (unsigned int)(gtt_size / (1024 * 1024))); 1991 1989 1992 1990 if (adev->flags & AMD_IS_APU) { ··· 1997 1995 /* Initialize doorbell pool on PCI BAR */ 1998 1996 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE); 1999 1997 if (r) { 2000 - DRM_ERROR("Failed initializing doorbell heap.\n"); 1998 + dev_err(adev->dev, "Failed initializing doorbell heap.\n"); 2001 1999 return r; 2002 2000 } 2003 2001 2004 2002 /* Create a boorbell page for kernel usages */ 2005 2003 r = amdgpu_doorbell_create_kernel_doorbells(adev); 2006 2004 if (r) { 2007 - DRM_ERROR("Failed to initialize kernel doorbells.\n"); 2005 + dev_err(adev->dev, "Failed to initialize kernel doorbells.\n"); 2008 2006 return r; 2009 2007 } 2010 2008 2011 2009 /* Initialize preemptible memory pool */ 2012 2010 r = amdgpu_preempt_mgr_init(adev); 2013 2011 if (r) { 2014 - DRM_ERROR("Failed initializing PREEMPT heap.\n"); 2012 + dev_err(adev->dev, "Failed initializing PREEMPT heap.\n"); 2015 2013 return r; 2016 2014 } 2017 2015 2018 2016 /* Initialize various on-chip memory pools */ 2019 2017 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size); 2020 2018 if (r) { 2021 
- DRM_ERROR("Failed initializing GDS heap.\n"); 2019 + dev_err(adev->dev, "Failed initializing GDS heap.\n"); 2022 2020 return r; 2023 2021 } 2024 2022 2025 2023 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size); 2026 2024 if (r) { 2027 - DRM_ERROR("Failed initializing gws heap.\n"); 2025 + dev_err(adev->dev, "Failed initializing gws heap.\n"); 2028 2026 return r; 2029 2027 } 2030 2028 2031 2029 r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size); 2032 2030 if (r) { 2033 - DRM_ERROR("Failed initializing oa heap.\n"); 2031 + dev_err(adev->dev, "Failed initializing oa heap.\n"); 2034 2032 return r; 2035 2033 } 2036 2034 if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, ··· 2093 2091 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL); 2094 2092 ttm_device_fini(&adev->mman.bdev); 2095 2093 adev->mman.initialized = false; 2096 - DRM_INFO("amdgpu: ttm finalized\n"); 2094 + dev_info(adev->dev, "amdgpu: ttm finalized\n"); 2097 2095 } 2098 2096 2099 2097 /** ··· 2125 2123 DRM_SCHED_PRIORITY_KERNEL, &sched, 2126 2124 1, NULL); 2127 2125 if (r) { 2128 - DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", 2129 - r); 2126 + dev_err(adev->dev, 2127 + "Failed setting up TTM BO move entity (%d)\n", 2128 + r); 2130 2129 return; 2131 2130 } 2132 2131 ··· 2135 2132 DRM_SCHED_PRIORITY_NORMAL, &sched, 2136 2133 1, NULL); 2137 2134 if (r) { 2138 - DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", 2139 - r); 2135 + dev_err(adev->dev, 2136 + "Failed setting up TTM BO move entity (%d)\n", 2137 + r); 2140 2138 goto error_free_entity; 2141 2139 } 2142 2140 } else { ··· 2208 2204 int r; 2209 2205 2210 2206 if (!direct_submit && !ring->sched.ready) { 2211 - DRM_ERROR("Trying to move memory with ring turned off.\n"); 2207 + dev_err(adev->dev, 2208 + "Trying to move memory with ring turned off.\n"); 2212 2209 return -EINVAL; 2213 2210 } 2214 2211 ··· 2244 2239 2245 2240 error_free: 2246 2241 amdgpu_job_free(job); 2247 - DRM_ERROR("Error 
scheduling IBs (%d)\n", r); 2242 + dev_err(adev->dev, "Error scheduling IBs (%d)\n", r); 2248 2243 return r; 2249 2244 } 2250 2245 ··· 2363 2358 int r; 2364 2359 2365 2360 if (!adev->mman.buffer_funcs_enabled) { 2366 - DRM_ERROR("Trying to clear memory with ring turned off.\n"); 2361 + dev_err(adev->dev, 2362 + "Trying to clear memory with ring turned off.\n"); 2367 2363 return -EINVAL; 2368 2364 } 2369 2365 ··· 2424 2418 man = ttm_manager_type(&adev->mman.bdev, mem_type); 2425 2419 break; 2426 2420 default: 2427 - DRM_ERROR("Trying to evict invalid memory type\n"); 2421 + dev_err(adev->dev, "Trying to evict invalid memory type\n"); 2428 2422 return -EINVAL; 2429 2423 } 2430 2424
+11 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 2395 2395 else 2396 2396 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; 2397 2397 2398 - DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", 2399 - vm_size, adev->vm_manager.num_level + 1, 2400 - adev->vm_manager.block_size, 2401 - adev->vm_manager.fragment_size); 2398 + dev_info( 2399 + adev->dev, 2400 + "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", 2401 + vm_size, adev->vm_manager.num_level + 1, 2402 + adev->vm_manager.block_size, adev->vm_manager.fragment_size); 2402 2403 } 2403 2404 2404 2405 /** ··· 2565 2564 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2566 2565 AMDGPU_VM_USE_CPU_FOR_GFX); 2567 2566 2568 - DRM_DEBUG_DRIVER("VM update mode is %s\n", 2569 - vm->use_cpu_for_update ? "CPU" : "SDMA"); 2567 + dev_dbg(adev->dev, "VM update mode is %s\n", 2568 + vm->use_cpu_for_update ? "CPU" : "SDMA"); 2570 2569 WARN_ONCE((vm->use_cpu_for_update && 2571 2570 !amdgpu_gmc_vram_full_visible(&adev->gmc)), 2572 2571 "CPU update of VM recommended only for large BAR system\n"); ··· 2608 2607 2609 2608 r = amdgpu_vm_create_task_info(vm); 2610 2609 if (r) 2611 - DRM_DEBUG("Failed to create task info for VM\n"); 2610 + dev_dbg(adev->dev, "Failed to create task info for VM\n"); 2612 2611 2613 2612 amdgpu_bo_unreserve(vm->root.bo); 2614 2613 amdgpu_bo_unref(&root_bo); ··· 2659 2658 /* Update VM state */ 2660 2659 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2661 2660 AMDGPU_VM_USE_CPU_FOR_COMPUTE); 2662 - DRM_DEBUG_DRIVER("VM update mode is %s\n", 2663 - vm->use_cpu_for_update ? "CPU" : "SDMA"); 2661 + dev_dbg(adev->dev, "VM update mode is %s\n", 2662 + vm->use_cpu_for_update ? 
"CPU" : "SDMA"); 2664 2663 WARN_ONCE((vm->use_cpu_for_update && 2665 2664 !amdgpu_gmc_vram_full_visible(&adev->gmc)), 2666 2665 "CPU update of VM recommended only for large BAR system\n"); ··· 2983 2982 error_unlock: 2984 2983 amdgpu_bo_unreserve(root); 2985 2984 if (r < 0) 2986 - DRM_ERROR("Can't handle page fault (%d)\n", r); 2985 + dev_err(adev->dev, "Can't handle page fault (%d)\n", r); 2987 2986 2988 2987 error_unref: 2989 2988 amdgpu_bo_unref(&root);