Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'amd-drm-fixes-5.11-2020-12-16' of git://people.freedesktop.org/~agd5f/linux into drm-next

amd-drm-fixes-5.11-2020-12-16:

amdgpu:
- Fix an eDP regression for DCE asics
- SMU fixes for sienna cichlid
- Misc W=1 fixes
- SDMA 5.2 reset fix
- Suspend/resume fix
- Misc display fixes
- Misc runtime PM fixes and cleanups
- Dimgrey Cavefish fixes
- printk cleanup
- Documentation warning fixes

amdkfd:
- Error logging fix
- Fix pipe offset calculation

radeon:
- printk cleanup

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201216192421.18627-1-alexander.deucher@amd.com

+603 -277
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1024 1024 /* enable runtime pm on the device */ 1025 1025 bool runpm; 1026 1026 bool in_runpm; 1027 + bool has_pr3; 1027 1028 1028 1029 bool pm_sysfs_en; 1029 1030 bool ucode_sysfs_en; ··· 1231 1230 const u32 *registers, 1232 1231 const u32 array_size); 1233 1232 1233 + bool amdgpu_device_supports_atpx(struct drm_device *dev); 1234 1234 bool amdgpu_device_supports_boco(struct drm_device *dev); 1235 1235 bool amdgpu_device_supports_baco(struct drm_device *dev); 1236 1236 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, ··· 1315 1313 1316 1314 void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev, 1317 1315 struct amdgpu_dm_backlight_caps *caps); 1318 - bool amdgpu_acpi_is_s0ix_supported(void); 1316 + bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev); 1319 1317 #else 1320 1318 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } 1321 1319 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } 1322 - static inline bool amdgpu_acpi_is_s0ix_supported(void) { return false; } 1320 + static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; } 1323 1321 #endif 1324 1322 1325 1323 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+5 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
··· 901 901 * 902 902 * returns true if supported, false if not. 903 903 */ 904 - bool amdgpu_acpi_is_s0ix_supported(void) 904 + bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) 905 905 { 906 - if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) 907 - return true; 906 + if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) { 907 + if (adev->flags & AMD_IS_APU) 908 + return true; 909 + } 908 910 909 911 return false; 910 912 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 1213 1213 1214 1214 ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg); 1215 1215 if (ret) { 1216 - pr_debug("Insufficient system memory\n"); 1216 + pr_debug("Insufficient memory\n"); 1217 1217 goto err_reserve_limit; 1218 1218 } 1219 1219
+30 -13
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 212 212 amdgpu_device_get_serial_number, NULL); 213 213 214 214 /** 215 - * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control 215 + * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control 216 + * 217 + * @dev: drm_device pointer 218 + * 219 + * Returns true if the device is a dGPU with HG/PX power control, 220 + * otherwise return false. 221 + */ 222 + bool amdgpu_device_supports_atpx(struct drm_device *dev) 223 + { 224 + struct amdgpu_device *adev = drm_to_adev(dev); 225 + 226 + if (adev->flags & AMD_IS_PX) 227 + return true; 228 + return false; 229 + } 230 + 231 + /** 232 + * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources 216 233 * 217 234 * @dev: drm_device pointer 218 235 * ··· 240 223 { 241 224 struct amdgpu_device *adev = drm_to_adev(dev); 242 225 243 - if (adev->flags & AMD_IS_PX) 226 + if (adev->has_pr3) 244 227 return true; 245 228 return false; 246 229 } ··· 1415 1398 struct drm_device *dev = pci_get_drvdata(pdev); 1416 1399 int r; 1417 1400 1418 - if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF) 1401 + if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF) 1419 1402 return; 1420 1403 1421 1404 if (state == VGA_SWITCHEROO_ON) { ··· 2667 2650 { 2668 2651 int i, r; 2669 2652 2670 - if (!amdgpu_acpi_is_s0ix_supported() || amdgpu_in_reset(adev)) { 2653 + if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) { 2671 2654 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); 2672 2655 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); 2673 2656 } ··· 3194 3177 struct drm_device *ddev = adev_to_drm(adev); 3195 3178 struct pci_dev *pdev = adev->pdev; 3196 3179 int r, i; 3197 - bool boco = false; 3180 + bool atpx = false; 3198 3181 u32 max_MBps; 3199 3182 3200 3183 adev->shutdown = false; ··· 3366 3349 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 3367 3350 vga_client_register(adev->pdev, adev, NULL, 
amdgpu_device_vga_set_decode); 3368 3351 3369 - if (amdgpu_device_supports_boco(ddev)) 3370 - boco = true; 3352 + if (amdgpu_device_supports_atpx(ddev)) 3353 + atpx = true; 3371 3354 if (amdgpu_has_atpx() && 3372 3355 (amdgpu_is_atpx_hybrid() || 3373 3356 amdgpu_has_atpx_dgpu_power_cntl()) && 3374 3357 !pci_is_thunderbolt_attached(adev->pdev)) 3375 3358 vga_switcheroo_register_client(adev->pdev, 3376 - &amdgpu_switcheroo_ops, boco); 3377 - if (boco) 3359 + &amdgpu_switcheroo_ops, atpx); 3360 + if (atpx) 3378 3361 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 3379 3362 3380 3363 if (amdgpu_emu_mode == 1) { ··· 3557 3540 3558 3541 failed: 3559 3542 amdgpu_vf_error_trans_all(adev); 3560 - if (boco) 3543 + if (atpx) 3561 3544 vga_switcheroo_fini_domain_pm_ops(adev->dev); 3562 3545 3563 3546 failed_unmap: ··· 3621 3604 amdgpu_has_atpx_dgpu_power_cntl()) && 3622 3605 !pci_is_thunderbolt_attached(adev->pdev)) 3623 3606 vga_switcheroo_unregister_client(adev->pdev); 3624 - if (amdgpu_device_supports_boco(adev_to_drm(adev))) 3607 + if (amdgpu_device_supports_atpx(adev_to_drm(adev))) 3625 3608 vga_switcheroo_fini_domain_pm_ops(adev->dev); 3626 3609 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) 3627 3610 vga_client_register(adev->pdev, NULL, NULL, NULL); ··· 3727 3710 3728 3711 amdgpu_fence_driver_suspend(adev); 3729 3712 3730 - if (!amdgpu_acpi_is_s0ix_supported() || amdgpu_in_reset(adev)) 3713 + if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) 3731 3714 r = amdgpu_device_ip_suspend_phase2(adev); 3732 3715 else 3733 3716 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry); ··· 3761 3744 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 3762 3745 return 0; 3763 3746 3764 - if (amdgpu_acpi_is_s0ix_supported()) 3747 + if (amdgpu_acpi_is_s0ix_supported(adev)) 3765 3748 amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry); 3766 3749 3767 3750 /* post card */
+12 -11
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 1340 1340 } 1341 1341 1342 1342 adev->in_runpm = true; 1343 - if (amdgpu_device_supports_boco(drm_dev)) 1343 + if (amdgpu_device_supports_atpx(drm_dev)) 1344 1344 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1345 1345 drm_kms_helper_poll_disable(drm_dev); 1346 1346 ··· 1348 1348 if (ret) 1349 1349 return ret; 1350 1350 1351 - if (amdgpu_device_supports_boco(drm_dev)) { 1351 + if (amdgpu_device_supports_atpx(drm_dev)) { 1352 1352 /* Only need to handle PCI state in the driver for ATPX 1353 1353 * PCI core handles it for _PR3. 1354 1354 */ 1355 - if (amdgpu_is_atpx_hybrid()) { 1356 - pci_ignore_hotplug(pdev); 1357 - } else { 1355 + if (!amdgpu_is_atpx_hybrid()) { 1358 1356 amdgpu_device_cache_pci_state(pdev); 1359 1357 pci_disable_device(pdev); 1360 1358 pci_ignore_hotplug(pdev); ··· 1376 1378 if (!adev->runpm) 1377 1379 return -EINVAL; 1378 1380 1379 - if (amdgpu_device_supports_boco(drm_dev)) { 1381 + if (amdgpu_device_supports_atpx(drm_dev)) { 1380 1382 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1381 1383 1382 1384 /* Only need to handle PCI state in the driver for ATPX 1383 1385 * PCI core handles it for _PR3. 1384 1386 */ 1385 - if (amdgpu_is_atpx_hybrid()) { 1386 - pci_set_master(pdev); 1387 - } else { 1387 + if (!amdgpu_is_atpx_hybrid()) { 1388 1388 pci_set_power_state(pdev, PCI_D0); 1389 1389 amdgpu_device_load_pci_state(pdev); 1390 1390 ret = pci_enable_device(pdev); 1391 1391 if (ret) 1392 1392 return ret; 1393 - pci_set_master(pdev); 1394 1393 } 1394 + pci_set_master(pdev); 1395 + } else if (amdgpu_device_supports_boco(drm_dev)) { 1396 + /* Only need to handle PCI state in the driver for ATPX 1397 + * PCI core handles it for _PR3. 
1398 + */ 1399 + pci_set_master(pdev); 1395 1400 } else if (amdgpu_device_supports_baco(drm_dev)) { 1396 1401 amdgpu_device_baco_exit(drm_dev); 1397 1402 } 1398 1403 ret = amdgpu_device_resume(drm_dev, false); 1399 1404 drm_kms_helper_poll_enable(drm_dev); 1400 - if (amdgpu_device_supports_boco(drm_dev)) 1405 + if (amdgpu_device_supports_atpx(drm_dev)) 1401 1406 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON; 1402 1407 adev->in_runpm = false; 1403 1408 return 0;
+5 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 496 496 break; 497 497 } 498 498 499 - if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) 499 + if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) { 500 500 size = 0; 501 - else 501 + } else { 502 502 size = amdgpu_gmc_get_vbios_fb_size(adev); 503 503 504 - if (adev->mman.keep_stolen_vga_memory) 505 - size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION); 504 + if (adev->mman.keep_stolen_vga_memory) 505 + size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION); 506 + } 506 507 507 508 /* set to 0 if the pre-OS buffer uses up most of vram */ 508 509 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+14 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 133 133 int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) 134 134 { 135 135 struct drm_device *dev; 136 + struct pci_dev *parent; 136 137 int r, acpi_status; 137 138 138 139 dev = adev_to_drm(adev); ··· 144 143 ((flags & AMD_IS_APU) == 0) && 145 144 !pci_is_thunderbolt_attached(dev->pdev)) 146 145 flags |= AMD_IS_PX; 146 + 147 + parent = pci_upstream_bridge(adev->pdev); 148 + adev->has_pr3 = parent ? pci_pr3_present(parent) : false; 147 149 148 150 /* amdgpu_device_init should report only fatal error 149 151 * like memory allocation failure or iomapping failure, ··· 160 156 goto out; 161 157 } 162 158 163 - if (amdgpu_device_supports_boco(dev) && 164 - (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */ 159 + if (amdgpu_device_supports_atpx(dev) && 160 + (amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */ 165 161 adev->runpm = true; 162 + dev_info(adev->dev, "Using ATPX for runtime pm\n"); 163 + } else if (amdgpu_device_supports_boco(dev) && 164 + (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */ 165 + adev->runpm = true; 166 + dev_info(adev->dev, "Using BOCO for runtime pm\n"); 166 167 } else if (amdgpu_device_supports_baco(dev) && 167 168 (amdgpu_runtime_pm != 0)) { 168 169 switch (adev->asic_type) { ··· 189 180 adev->runpm = true; 190 181 break; 191 182 } 183 + if (adev->runpm) 184 + dev_info(adev->dev, "Using BACO for runtime pm\n"); 192 185 } 193 186 194 187 /* Call ACPI methods: require modeset init ··· 203 192 204 193 if (adev->runpm) { 205 194 /* only need to skip on ATPX */ 206 - if (amdgpu_device_supports_boco(dev) && 195 + if (amdgpu_device_supports_atpx(dev) && 207 196 !amdgpu_is_atpx_hybrid()) 208 197 dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); 209 198 pm_runtime_use_autosuspend(dev->dev);
+3 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
··· 358 358 } 359 359 ), 360 360 TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx," 361 - " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid, 361 + " flags:0x%llx, incr:%llu, dst:\n%s%s", __entry->pid, 362 362 __entry->vm_ctx, __entry->start, __entry->end, 363 363 __entry->flags, __entry->incr, __print_array( 364 - __get_dynamic_array(dst), __entry->nptes, 8)) 364 + __get_dynamic_array(dst), min(__entry->nptes, 32u), 8), 365 + __entry->nptes > 32 ? "..." : "") 365 366 ); 366 367 367 368 TRACE_EVENT(amdgpu_vm_set_ptes,
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 240 240 241 241 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; 242 242 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; 243 - DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", 243 + DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n", 244 244 version_major, version_minor, family_id); 245 245 246 246 /* ··· 267 267 dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; 268 268 enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f; 269 269 enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3; 270 - DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n", 270 + DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n", 271 271 enc_major, enc_minor, dec_minor, family_id); 272 272 273 273 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 179 179 version_major = (ucode_version >> 20) & 0xfff; 180 180 version_minor = (ucode_version >> 8) & 0xfff; 181 181 binary_id = ucode_version & 0xff; 182 - DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n", 182 + DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n", 183 183 version_major, version_minor, binary_id); 184 184 adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) | 185 185 (binary_id << 8));
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
··· 181 181 enc_major = fw_check; 182 182 dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf; 183 183 vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf; 184 - DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n", 184 + DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n", 185 185 enc_major, enc_minor, dec_ver, vep, fw_rev); 186 186 } else { 187 187 unsigned int version_major, version_minor, family_id; ··· 189 189 family_id = le32_to_cpu(hdr->ucode_version) & 0xff; 190 190 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; 191 191 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; 192 - DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n", 192 + DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n", 193 193 version_major, version_minor, family_id); 194 194 } 195 195
+1
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
··· 136 136 break; 137 137 case CHIP_SIENNA_CICHLID: 138 138 case CHIP_NAVY_FLOUNDER: 139 + case CHIP_DIMGREY_CAVEFISH: 139 140 mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw]; 140 141 break; 141 142 default:
+10 -1
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
··· 187 187 188 188 static int xgpu_ai_request_reset(struct amdgpu_device *adev) 189 189 { 190 - return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); 190 + int ret, i = 0; 191 + 192 + while (i < AI_MAILBOX_POLL_MSG_REP_MAX) { 193 + ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); 194 + if (!ret) 195 + break; 196 + i++; 197 + } 198 + 199 + return ret; 191 200 } 192 201 193 202 static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
+2 -1
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
··· 25 25 #define __MXGPU_AI_H__ 26 26 27 27 #define AI_MAILBOX_POLL_ACK_TIMEDOUT 500 28 - #define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000 28 + #define AI_MAILBOX_POLL_MSG_TIMEDOUT 6000 29 29 #define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000 30 + #define AI_MAILBOX_POLL_MSG_REP_MAX 11 30 31 31 32 enum idh_request { 32 33 IDH_REQ_GPU_INIT_ACCESS = 1,
+10 -1
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
··· 200 200 201 201 static int xgpu_nv_request_reset(struct amdgpu_device *adev) 202 202 { 203 - return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); 203 + int ret, i = 0; 204 + 205 + while (i < NV_MAILBOX_POLL_MSG_REP_MAX) { 206 + ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); 207 + if (!ret) 208 + break; 209 + i++; 210 + } 211 + 212 + return ret; 204 213 } 205 214 206 215 static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
+1
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
··· 27 27 #define NV_MAILBOX_POLL_ACK_TIMEDOUT 500 28 28 #define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000 29 29 #define NV_MAILBOX_POLL_FLR_TIMEDOUT 5000 30 + #define NV_MAILBOX_POLL_MSG_REP_MAX 11 30 31 31 32 enum idh_request { 32 33 IDH_REQ_GPU_INIT_ACCESS = 1,
+1
drivers/gpu/drm/amd/amdgpu/nv.c
··· 362 362 switch (adev->asic_type) { 363 363 case CHIP_SIENNA_CICHLID: 364 364 case CHIP_NAVY_FLOUNDER: 365 + case CHIP_DIMGREY_CAVEFISH: 365 366 return AMD_RESET_METHOD_MODE1; 366 367 default: 367 368 if (smu_baco_is_support(smu))
+35 -7
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
··· 153 153 struct amdgpu_firmware_info *info = NULL; 154 154 const struct common_firmware_header *header = NULL; 155 155 156 + if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID)) 157 + return 0; 158 + 156 159 DRM_DEBUG("\n"); 157 160 158 161 switch (adev->asic_type) { ··· 810 807 return 0; 811 808 } 812 809 810 + static int sdma_v5_2_soft_reset(void *handle) 811 + { 812 + struct amdgpu_device *adev = (struct amdgpu_device *)handle; 813 + u32 grbm_soft_reset; 814 + u32 tmp; 815 + int i; 816 + 817 + for (i = 0; i < adev->sdma.num_instances; i++) { 818 + grbm_soft_reset = REG_SET_FIELD(0, 819 + GRBM_SOFT_RESET, SOFT_RESET_SDMA0, 820 + 1); 821 + grbm_soft_reset <<= i; 822 + 823 + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 824 + tmp |= grbm_soft_reset; 825 + DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp); 826 + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); 827 + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 828 + 829 + udelay(50); 830 + 831 + tmp &= ~grbm_soft_reset; 832 + WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); 833 + tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); 834 + 835 + udelay(50); 836 + } 837 + 838 + return 0; 839 + } 840 + 813 841 /** 814 842 * sdma_v5_2_start - setup and start the async dma engines 815 843 * ··· 872 838 msleep(1000); 873 839 } 874 840 841 + sdma_v5_2_soft_reset(adev); 875 842 /* unhalt the MEs */ 876 843 sdma_v5_2_enable(adev, true); 877 844 /* enable sdma ring preemption */ ··· 1399 1364 udelay(1); 1400 1365 } 1401 1366 return -ETIMEDOUT; 1402 - } 1403 - 1404 - static int sdma_v5_2_soft_reset(void *handle) 1405 - { 1406 - /* todo */ 1407 - 1408 - return 0; 1409 1367 } 1410 1368 1411 1369 static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
+1 -1
drivers/gpu/drm/amd/amdkfd/Kconfig
··· 1 1 # SPDX-License-Identifier: MIT 2 2 # 3 - # Heterogenous system architecture configuration 3 + # Heterogeneous system architecture configuration 4 4 # 5 5 6 6 config HSA_AMD
+2 -2
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 72 72 static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) 73 73 { 74 74 int i; 75 - int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec 76 - + pipe * dqm->dev->shared_resources.num_queue_per_pipe; 75 + int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec 76 + + pipe) * dqm->dev->shared_resources.num_queue_per_pipe; 77 77 78 78 /* queue is available for KFD usage if bit is 1 */ 79 79 for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
+4 -25
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 196 196 197 197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector); 198 198 199 - static int amdgpu_dm_atomic_commit(struct drm_device *dev, 200 - struct drm_atomic_state *state, 201 - bool nonblock); 202 - 203 199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state); 204 200 205 201 static int amdgpu_dm_atomic_check(struct drm_device *dev, ··· 2208 2212 .get_format_info = amd_get_format_info, 2209 2213 .output_poll_changed = drm_fb_helper_output_poll_changed, 2210 2214 .atomic_check = amdgpu_dm_atomic_check, 2211 - .atomic_commit = amdgpu_dm_atomic_commit, 2215 + .atomic_commit = drm_atomic_helper_commit, 2212 2216 }; 2213 2217 2214 2218 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { ··· 5120 5124 int preferred_refresh = 0; 5121 5125 #if defined(CONFIG_DRM_AMD_DC_DCN) 5122 5126 struct dsc_dec_dpcd_caps dsc_caps; 5123 - #endif 5124 5127 uint32_t link_bandwidth_kbps; 5125 - 5128 + #endif 5126 5129 struct dc_sink *sink = NULL; 5127 5130 if (aconnector == NULL) { 5128 5131 DRM_ERROR("aconnector is NULL!\n"); ··· 5203 5208 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 5204 5209 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, 5205 5210 &dsc_caps); 5206 - #endif 5207 5211 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, 5208 5212 dc_link_get_link_cap(aconnector->dc_link)); 5209 5213 5210 - #if defined(CONFIG_DRM_AMD_DC_DCN) 5211 5214 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) { 5212 5215 /* Set DSC policy according to dsc_clock_en */ 5213 5216 dc_dsc_policy_set_enable_dsc_when_not_needed( ··· 5342 5349 } 5343 5350 5344 5351 #ifdef CONFIG_DEBUG_FS 5345 - int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc, 5352 + static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc, 5346 5353 struct drm_crtc_state *crtc_state, 5347 5354 struct drm_property *property, 5348 5355 
uint64_t val) ··· 5366 5373 return 0; 5367 5374 } 5368 5375 5369 - int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc, 5376 + static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc, 5370 5377 const struct drm_crtc_state *state, 5371 5378 struct drm_property *property, 5372 5379 uint64_t *val) ··· 8061 8068 struct dc_stream_state *stream_state) 8062 8069 { 8063 8070 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); 8064 - } 8065 - 8066 - static int amdgpu_dm_atomic_commit(struct drm_device *dev, 8067 - struct drm_atomic_state *state, 8068 - bool nonblock) 8069 - { 8070 - /* 8071 - * Add check here for SoC's that support hardware cursor plane, to 8072 - * unset legacy_cursor_update 8073 - */ 8074 - 8075 - return drm_atomic_helper_commit(dev, state, nonblock); 8076 - 8077 - /*TODO Handle EINTR, reenable IRQ*/ 8078 8071 } 8079 8072 8080 8073 /**
+20 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 337 337 const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; 338 338 339 339 #ifdef CONFIG_DEBUG_FS 340 - /* set the crc calculation window*/ 340 + /** 341 + * @crc_win_x_start_property: 342 + * 343 + * X start of the crc calculation window 344 + */ 341 345 struct drm_property *crc_win_x_start_property; 346 + /** 347 + * @crc_win_y_start_property: 348 + * 349 + * Y start of the crc calculation window 350 + */ 342 351 struct drm_property *crc_win_y_start_property; 352 + /** 353 + * @crc_win_x_end_property: 354 + * 355 + * X end of the crc calculation window 356 + */ 343 357 struct drm_property *crc_win_x_end_property; 358 + /** 359 + * @crc_win_y_end_property: 360 + * 361 + * Y end of the crc calculation window 362 + */ 344 363 struct drm_property *crc_win_y_end_property; 345 364 #endif 346 365 /**
+12 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
··· 81 81 return pipe_crc_sources; 82 82 } 83 83 84 + static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state) 85 + { 86 + dm_crtc_state->crc_window.x_start = 0; 87 + dm_crtc_state->crc_window.y_start = 0; 88 + dm_crtc_state->crc_window.x_end = 0; 89 + dm_crtc_state->crc_window.y_end = 0; 90 + } 91 + 84 92 bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state) 85 93 { 86 94 bool ret = true; ··· 149 141 mutex_lock(&adev->dm.dc_lock); 150 142 151 143 /* Enable CRTC CRC generation if necessary. */ 152 - if (dm_is_crc_source_crtc(source)) { 144 + if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { 145 + if (!enable) 146 + amdgpu_dm_set_crc_window_default(dm_crtc_state); 147 + 153 148 if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) { 154 149 crc_window = &tmp_window; 155 150
+6 -6
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
··· 746 746 .wm_inst = WM_B, 747 747 .wm_type = WM_TYPE_PSTATE_CHG, 748 748 .pstate_latency_us = 11.72, 749 - .sr_exit_time_us = 10.12, 750 - .sr_enter_plus_exit_time_us = 11.48, 749 + .sr_exit_time_us = 11.12, 750 + .sr_enter_plus_exit_time_us = 12.48, 751 751 .valid = true, 752 752 }, 753 753 { 754 754 .wm_inst = WM_C, 755 755 .wm_type = WM_TYPE_PSTATE_CHG, 756 756 .pstate_latency_us = 11.72, 757 - .sr_exit_time_us = 10.12, 758 - .sr_enter_plus_exit_time_us = 11.48, 757 + .sr_exit_time_us = 11.12, 758 + .sr_enter_plus_exit_time_us = 12.48, 759 759 .valid = true, 760 760 }, 761 761 { 762 762 .wm_inst = WM_D, 763 763 .wm_type = WM_TYPE_PSTATE_CHG, 764 764 .pstate_latency_us = 11.72, 765 - .sr_exit_time_us = 10.12, 766 - .sr_enter_plus_exit_time_us = 11.48, 765 + .sr_exit_time_us = 11.12, 766 + .sr_enter_plus_exit_time_us = 12.48, 767 767 .valid = true, 768 768 }, 769 769 }
+20
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 2625 2625 } 2626 2626 } 2627 2627 2628 + if (update_type != UPDATE_TYPE_FAST) { 2629 + // If changing VTG FP2: wait until back in vactive to program FP2 2630 + // Need to ensure that pipe unlock happens soon after to minimize race condition 2631 + for (i = 0; i < dc->res_pool->pipe_count; i++) { 2632 + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2633 + 2634 + if (pipe_ctx->top_pipe || pipe_ctx->stream != stream) 2635 + continue; 2636 + 2637 + if (!pipe_ctx->update_flags.bits.global_sync) 2638 + continue; 2639 + 2640 + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); 2641 + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); 2642 + 2643 + pipe_ctx->stream_res.tg->funcs->set_vtg_params( 2644 + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true); 2645 + } 2646 + } 2647 + 2628 2648 if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock) 2629 2649 dc->hwss.interdependent_update_lock(dc, context, false); 2630 2650 else
-3
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 3267 3267 } 3268 3268 } 3269 3269 3270 - #if defined(CONFIG_DRM_AMD_DC_DCN3_0) 3271 - #endif 3272 - 3273 3270 /* turn off otg test pattern if enable */ 3274 3271 if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) 3275 3272 pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+1 -1
drivers/gpu/drm/amd/display/dc/dc.h
··· 42 42 #include "inc/hw/dmcu.h" 43 43 #include "dml/display_mode_lib.h" 44 44 45 - #define DC_VER "3.2.115" 45 + #define DC_VER "3.2.116" 46 46 47 47 #define MAX_SURFACES 3 48 48 #define MAX_PLANES 6
+1 -1
drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
··· 1268 1268 tg110->min_h_front_porch = 0; 1269 1269 tg110->min_h_back_porch = 0; 1270 1270 1271 - tg110->min_h_sync_width = 8; 1271 + tg110->min_h_sync_width = 4; 1272 1272 tg110->min_v_sync_width = 1; 1273 1273 tg110->min_v_blank = 3; 1274 1274 }
+3 -3
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
··· 124 124 * still not asserted, we are probably stuck and going to hang 125 125 * 126 126 * TODO: Figure out why it takes ~100us on linux 127 - * pstate takes around ~100us on linux. Unknown currently as to 128 - * why it takes that long on linux 127 + * pstate takes around ~100us (up to 200us) on linux. Unknown currently 128 + * as to why it takes that long on linux 129 129 */ 130 130 const unsigned int pstate_wait_timeout_us = 200; 131 - const unsigned int pstate_wait_expected_timeout_us = 40; 131 + const unsigned int pstate_wait_expected_timeout_us = 180; 132 132 static unsigned int max_sampled_pstate_wait_us; /* data collection */ 133 133 static bool forced_pstate_allow; /* help with revert wa */ 134 134
+1 -1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 2736 2736 pipe_ctx->pipe_dlg_param.vupdate_width); 2737 2737 2738 2738 pipe_ctx->stream_res.tg->funcs->set_vtg_params( 2739 - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); 2739 + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true); 2740 2740 2741 2741 if (hws->funcs.setup_vupdate_interrupt) 2742 2742 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+9 -6
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
··· 272 272 vupdate_offset, 273 273 vupdate_width); 274 274 275 - optc->funcs->set_vtg_params(optc, dc_crtc_timing); 275 + optc->funcs->set_vtg_params(optc, dc_crtc_timing, true); 276 276 277 277 /* TODO 278 278 * patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1 ··· 312 312 } 313 313 314 314 void optc1_set_vtg_params(struct timing_generator *optc, 315 - const struct dc_crtc_timing *dc_crtc_timing) 315 + const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2) 316 316 { 317 317 struct dc_crtc_timing patched_crtc_timing; 318 318 uint32_t asic_blank_end; ··· 348 348 } 349 349 } 350 350 351 - REG_UPDATE_2(CONTROL, 352 - VTG0_FP2, v_fp2, 353 - VTG0_VCOUNT_INIT, v_init); 351 + if (program_fp2) 352 + REG_UPDATE_2(CONTROL, 353 + VTG0_FP2, v_fp2, 354 + VTG0_VCOUNT_INIT, v_init); 355 + else 356 + REG_UPDATE(CONTROL, VTG0_VCOUNT_INIT, v_init); 354 357 } 355 358 356 359 void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable) ··· 1543 1540 optc1->min_h_blank = 32; 1544 1541 optc1->min_v_blank = 3; 1545 1542 optc1->min_v_blank_interlace = 5; 1546 - optc1->min_h_sync_width = 8; 1543 + optc1->min_h_sync_width = 4; 1547 1544 optc1->min_v_sync_width = 1; 1548 1545 } 1549 1546
+1 -1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
··· 700 700 bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); 701 701 702 702 void optc1_set_vtg_params(struct timing_generator *optc, 703 - const struct dc_crtc_timing *dc_crtc_timing); 703 + const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2); 704 704 705 705 #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
+6
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
··· 81 81 SRI(DP_MSE_RATE_UPDATE, DP, id), \ 82 82 SRI(DP_PIXEL_FORMAT, DP, id), \ 83 83 SRI(DP_SEC_CNTL, DP, id), \ 84 + SRI(DP_SEC_CNTL1, DP, id), \ 84 85 SRI(DP_SEC_CNTL2, DP, id), \ 86 + SRI(DP_SEC_CNTL5, DP, id), \ 85 87 SRI(DP_SEC_CNTL6, DP, id), \ 86 88 SRI(DP_STEER_FIFO, DP, id), \ 87 89 SRI(DP_VID_M, DP, id), \ ··· 128 126 uint32_t DP_MSE_RATE_UPDATE; 129 127 uint32_t DP_PIXEL_FORMAT; 130 128 uint32_t DP_SEC_CNTL; 129 + uint32_t DP_SEC_CNTL1; 131 130 uint32_t DP_SEC_CNTL2; 131 + uint32_t DP_SEC_CNTL5; 132 132 uint32_t DP_SEC_CNTL6; 133 133 uint32_t DP_STEER_FIFO; 134 134 uint32_t DP_VID_M; ··· 415 411 type DP_SEC_GSP3_ENABLE;\ 416 412 type DP_SEC_GSP4_ENABLE;\ 417 413 type DP_SEC_GSP5_ENABLE;\ 414 + type DP_SEC_GSP5_LINE_NUM;\ 415 + type DP_SEC_GSP5_LINE_REFERENCE;\ 418 416 type DP_SEC_GSP6_ENABLE;\ 419 417 type DP_SEC_GSP7_ENABLE;\ 420 418 type DP_SEC_GSP7_PPS;\
+9 -20
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
··· 1595 1595 pipe_ctx->pipe_dlg_param.vupdate_width); 1596 1596 1597 1597 pipe_ctx->stream_res.tg->funcs->set_vtg_params( 1598 - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); 1598 + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false); 1599 1599 1600 1600 if (hws->funcs.setup_vupdate_interrupt) 1601 1601 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx); ··· 1695 1695 && context->res_ctx.pipe_ctx[i].stream) 1696 1696 hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); 1697 1697 1698 - /* wait for outstanding pending changes before adding or removing planes */ 1699 - for (i = 0; i < dc->res_pool->pipe_count; i++) { 1700 - if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable || 1701 - context->res_ctx.pipe_ctx[i].update_flags.bits.enable) { 1702 - dc->hwss.wait_for_pending_cleared(dc, context); 1703 - break; 1704 - } 1705 - } 1706 1698 1707 1699 /* Disconnect mpcc */ 1708 1700 for (i = 0; i < dc->res_pool->pipe_count; i++) ··· 1848 1856 pipe_ctx->pipe_dlg_param.vupdate_width); 1849 1857 1850 1858 pipe_ctx->stream_res.tg->funcs->set_vtg_params( 1851 - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); 1859 + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false); 1852 1860 1853 1861 if (pipe_ctx->prev_odm_pipe == NULL) 1854 1862 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank); ··· 2243 2251 { 2244 2252 const struct tg_color pipe_colors[6] = { 2245 2253 {MAX_TG_COLOR_VALUE, 0, 0}, // red 2246 - {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow 2247 - {0, MAX_TG_COLOR_VALUE, 0}, // blue 2254 + {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE / 4, 0}, // orange 2255 + {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, // yellow 2256 + {0, MAX_TG_COLOR_VALUE, 0}, // green 2257 + {0, 0, MAX_TG_COLOR_VALUE}, // blue 2248 2258 {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple 2249 - {0, 0, MAX_TG_COLOR_VALUE}, // green 2250 - {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange 2251 2259 }; 2252 2260 2253 2261 struct 
pipe_ctx *top_pipe = pipe_ctx; ··· 2272 2280 2273 2281 // input to MPCC is always RGB, by default leave black_color at 0 2274 2282 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { 2275 - hws->funcs.get_hdr_visual_confirm_color( 2276 - pipe_ctx, &blnd_cfg.black_color); 2283 + hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color); 2277 2284 } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { 2278 - hws->funcs.get_surface_visual_confirm_color( 2279 - pipe_ctx, &blnd_cfg.black_color); 2285 + hws->funcs.get_surface_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color); 2280 2286 } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) { 2281 - dcn20_get_mpctree_visual_confirm_color( 2282 - pipe_ctx, &blnd_cfg.black_color); 2287 + dcn20_get_mpctree_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color); 2283 2288 } 2284 2289 2285 2290 if (per_pixel_alpha)
+2
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
··· 83 83 SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE, mask_sh),\ 84 84 SE_SF(DIG0_DIG_FE_CNTL, DOLBY_VISION_EN, mask_sh),\ 85 85 SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, mask_sh),\ 86 + SE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, mask_sh),\ 87 + SE_SF(DP0_DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, mask_sh),\ 86 88 SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh) 87 89 88 90 void dcn20_stream_encoder_construct(
+1
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
··· 32 32 const struct dccg_shift *dccg_shift, 33 33 const struct dccg_mask *dccg_mask); 34 34 35 + void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk); 35 36 36 37 #endif /* __DCN21_DCCG_H__ */
+1 -1
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
··· 668 668 is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal); 669 669 is_dp = dc_is_dp_signal(pipe_ctx->stream->signal); 670 670 671 - if (!is_hdmi_tmds) 671 + if (!is_hdmi_tmds && !is_dp) 672 672 return; 673 673 674 674 if (is_hdmi_tmds)
+1 -1
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
··· 350 350 optc1->min_h_blank = 32; 351 351 optc1->min_v_blank = 3; 352 352 optc1->min_v_blank_interlace = 5; 353 - optc1->min_h_sync_width = 8; 353 + optc1->min_h_sync_width = 4; 354 354 optc1->min_v_sync_width = 1; 355 355 } 356 356
+1 -1
drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
··· 271 271 struct dc_crtc_timing *hw_crtc_timing); 272 272 273 273 void (*set_vtg_params)(struct timing_generator *optc, 274 - const struct dc_crtc_timing *dc_crtc_timing); 274 + const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2); 275 275 276 276 void (*set_dsc_config)(struct timing_generator *optc, 277 277 enum optc_dsc_mode dsc_mode,
+35 -23
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
··· 47 47 48 48 /* Firmware versioning. */ 49 49 #ifdef DMUB_EXPOSE_VERSION 50 - #define DMUB_FW_VERSION_GIT_HASH 0x931573111 50 + #define DMUB_FW_VERSION_GIT_HASH 0xa18e25995 51 51 #define DMUB_FW_VERSION_MAJOR 0 52 52 #define DMUB_FW_VERSION_MINOR 0 53 - #define DMUB_FW_VERSION_REVISION 45 53 + #define DMUB_FW_VERSION_REVISION 46 54 54 #define DMUB_FW_VERSION_TEST 0 55 55 #define DMUB_FW_VERSION_VBIOS 0 56 56 #define DMUB_FW_VERSION_HOTFIX 0 ··· 514 514 515 515 enum aux_return_code_type { 516 516 AUX_RET_SUCCESS = 0, 517 + AUX_RET_ERROR_UNKNOWN, 518 + AUX_RET_ERROR_INVALID_REPLY, 517 519 AUX_RET_ERROR_TIMEOUT, 518 - AUX_RET_ERROR_NO_DATA, 520 + AUX_RET_ERROR_HPD_DISCON, 521 + AUX_RET_ERROR_ENGINE_ACQUIRE, 519 522 AUX_RET_ERROR_INVALID_OPERATION, 520 523 AUX_RET_ERROR_PROTOCOL_ERROR, 524 + }; 525 + 526 + enum aux_channel_type { 527 + AUX_CHANNEL_LEGACY_DDC, 528 + AUX_CHANNEL_DPIA 521 529 }; 522 530 523 531 /* DP AUX command */ ··· 540 532 541 533 struct dmub_cmd_dp_aux_control_data { 542 534 uint32_t handle; 543 - uint8_t port_index; 535 + uint8_t instance; 544 536 uint8_t sw_crc_enabled; 545 537 uint16_t timeout; 538 + enum aux_channel_type type; 546 539 struct aux_transaction_parameters dpaux; 547 540 }; 548 541 ··· 567 558 568 559 struct aux_reply_control_data { 569 560 uint32_t handle; 570 - uint8_t phy_port_index; 561 + uint8_t instance; 571 562 uint8_t result; 572 563 uint16_t pad; 573 564 }; ··· 590 581 }; 591 582 592 583 struct dp_hpd_data { 593 - uint8_t phy_port_index; 584 + uint8_t instance; 594 585 uint8_t hpd_type; 595 586 uint8_t hpd_status; 596 587 uint8_t pad; ··· 741 732 struct abm_config_table { 742 733 /* Parameters for crgb conversion */ 743 734 uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; // 0B 744 - uint16_t crgb_offset[NUM_POWER_FN_SEGS]; // 15B 745 - uint16_t crgb_slope[NUM_POWER_FN_SEGS]; // 31B 735 + uint16_t crgb_offset[NUM_POWER_FN_SEGS]; // 16B 736 + uint16_t crgb_slope[NUM_POWER_FN_SEGS]; // 32B 746 737 747 738 /* Parameters for custom 
curve */ 748 - uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; // 47B 749 - uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; // 79B 739 + uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; // 48B 740 + uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; // 78B 750 741 751 - uint16_t ambient_thresholds_lux[NUM_AMBI_LEVEL]; // 111B 752 - uint16_t min_abm_backlight; // 121B 742 + uint16_t ambient_thresholds_lux[NUM_AMBI_LEVEL]; // 112B 743 + uint16_t min_abm_backlight; // 122B 753 744 754 - uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 123B 755 - uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 143B 756 - uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 163B 757 - uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 183B 758 - uint8_t hybrid_factor[NUM_AGGR_LEVEL]; // 203B 759 - uint8_t contrast_factor[NUM_AGGR_LEVEL]; // 207B 760 - uint8_t deviation_gain[NUM_AGGR_LEVEL]; // 211B 761 - uint8_t min_knee[NUM_AGGR_LEVEL]; // 215B 762 - uint8_t max_knee[NUM_AGGR_LEVEL]; // 219B 763 - uint8_t iir_curve[NUM_AMBI_LEVEL]; // 223B 764 - uint8_t pad3[3]; // 228B 745 + uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 124B 746 + uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 144B 747 + uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 164B 748 + uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 184B 749 + uint8_t hybrid_factor[NUM_AGGR_LEVEL]; // 204B 750 + uint8_t contrast_factor[NUM_AGGR_LEVEL]; // 208B 751 + uint8_t deviation_gain[NUM_AGGR_LEVEL]; // 212B 752 + uint8_t min_knee[NUM_AGGR_LEVEL]; // 216B 753 + uint8_t max_knee[NUM_AGGR_LEVEL]; // 220B 754 + uint8_t iir_curve[NUM_AMBI_LEVEL]; // 224B 755 + uint8_t pad3[3]; // 229B 756 + 757 + uint16_t blRampReduction[NUM_AGGR_LEVEL]; // 232B 758 + uint16_t blRampStart[NUM_AGGR_LEVEL]; // 240B 765 759 }; 766 760 767 761 struct dmub_cmd_abm_set_pipe_data {
+17 -5
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
··· 30 30 #include "opp.h" 31 31 #include "color_gamma.h" 32 32 33 + /* When calculating LUT values the first region and at least one subsequent 34 + * region are calculated with full precision. These defines are a demarcation 35 + * of where the second region starts and ends. 36 + * These are hardcoded values to avoid recalculating them in loops. 37 + */ 38 + #define PRECISE_LUT_REGION_START 224 39 + #define PRECISE_LUT_REGION_END 239 40 + 33 41 static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2]; 34 42 35 43 // these are helpers for calculations to reduce stack usage ··· 354 346 dc_fixpt_recip(args->gamma)); 355 347 } 356 348 scratch_1 = dc_fixpt_add(one, args->a3); 357 - if (cal_buffer->buffer_index < 16) 349 + /* In the first region (first 16 points) and in the 350 + * region delimited by START/END we calculate with 351 + * full precision to avoid error accumulation. 352 + */ 353 + if ((cal_buffer->buffer_index >= PRECISE_LUT_REGION_START && 354 + cal_buffer->buffer_index <= PRECISE_LUT_REGION_END) || 355 + (cal_buffer->buffer_index < 16)) 358 356 scratch_2 = dc_fixpt_pow(args->arg, 359 357 dc_fixpt_recip(args->gamma)); 360 358 else ··· 411 397 dc_fixpt_recip(args->gamma))), 412 398 args->a2); 413 399 else 414 - return dc_fixpt_mul( 415 - args->arg, 416 - args->a1); 400 + return dc_fixpt_mul(args->arg, args->a1); 417 401 } 418 402 419 403 static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf, struct calculate_buffer *cal_buffer) ··· 729 717 BREAK_TO_DEBUGGER(); 730 718 result = dc_fixpt_zero; 731 719 } else { 732 - BREAK_TO_DEBUGGER(); 733 720 result = dc_fixpt_one; 734 721 } 735 722 ··· 987 976 cal_buffer->buffer_index = 0; // see var definition for more info 988 977 rgb += 32; // first 32 points have problems with fixed point, too small 989 978 coord_x += 32; 979 + 990 980 for (i = 32; i <= hw_points_num; i++) { 991 981 if (!is_clipped) { 992 982 if (use_eetf) {
+1
drivers/gpu/drm/amd/include/atomfirmware.h
··· 499 499 ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100, 500 500 ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200, 501 501 ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400, 502 + ATOM_FIRMWARE_CAP_ENABLE_2ND_USB20PORT = 0x0008000, 502 503 }; 503 504 504 505 enum atom_cooling_solution_id{
+1
drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
··· 227 227 uint32_t content_revision; 228 228 uint32_t fclk; 229 229 uint32_t lclk; 230 + uint32_t firmware_caps; 230 231 }; 231 232 232 233 enum smu_table_id
+3 -1
drivers/gpu/drm/amd/pm/inc/smu_types.h
··· 178 178 __SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW), \ 179 179 __SMU_DUMMY_MAP(GET_UMC_FW_WA), \ 180 180 __SMU_DUMMY_MAP(Mode1Reset), \ 181 - __SMU_DUMMY_MAP(Spare), \ 181 + __SMU_DUMMY_MAP(RlcPowerNotify), \ 182 182 __SMU_DUMMY_MAP(SetHardMinIspiclkByFreq), \ 183 183 __SMU_DUMMY_MAP(SetHardMinIspxclkByFreq), \ 184 184 __SMU_DUMMY_MAP(SetSoftMinSocclkByFreq), \ ··· 209 209 __SMU_DUMMY_MAP(SetSoftMinCclk), \ 210 210 __SMU_DUMMY_MAP(SetSoftMaxCclk), \ 211 211 __SMU_DUMMY_MAP(SetGpoFeaturePMask), \ 212 + __SMU_DUMMY_MAP(DisallowGpo), \ 213 + __SMU_DUMMY_MAP(Enable2ndUSB20Port), \ 212 214 213 215 #undef __SMU_DUMMY_MAP 214 216 #define __SMU_DUMMY_MAP(type) SMU_MSG_##type
+5 -1
drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h
··· 134 134 #define PPSMC_MSG_SetGpoFeaturePMask 0x45 135 135 #define PPSMC_MSG_SetSMBUSInterrupt 0x46 136 136 137 - #define PPSMC_Message_Count 0x47 137 + #define PPSMC_MSG_DisallowGpo 0x56 138 + 139 + #define PPSMC_MSG_Enable2ndUSB20Port 0x57 140 + 141 + #define PPSMC_Message_Count 0x58 138 142 139 143 #endif
+1 -1
drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
··· 41 41 #define PPSMC_MSG_PowerUpIspByTile 0x7 42 42 #define PPSMC_MSG_PowerDownVcn 0x8 // VCN is power gated by default 43 43 #define PPSMC_MSG_PowerUpVcn 0x9 44 - #define PPSMC_MSG_spare 0xA 44 + #define PPSMC_MSG_RlcPowerNotify 0xA 45 45 #define PPSMC_MSG_SetHardMinVcn 0xB // For wireless display 46 46 #define PPSMC_MSG_SetSoftMinGfxclk 0xC //Sets SoftMin for GFXCLK. Arg is in MHz 47 47 #define PPSMC_MSG_ActiveProcessNotify 0xD
+4 -6
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 847 847 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 848 848 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; 849 849 850 - if (!amdgpu_sriov_vf(adev) || (adev->asic_type != CHIP_NAVI12)) { 851 - ret = smu_init_microcode(smu); 852 - if (ret) { 853 - dev_err(adev->dev, "Failed to load smu firmware!\n"); 854 - return ret; 855 - } 850 + ret = smu_init_microcode(smu); 851 + if (ret) { 852 + dev_err(adev->dev, "Failed to load smu firmware!\n"); 853 + return ret; 856 854 } 857 855 858 856 ret = smu_smc_table_sw_init(smu);
+183 -104
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
··· 128 128 MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), 129 129 MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), 130 130 MSG_MAP(SetGpoFeaturePMask, PPSMC_MSG_SetGpoFeaturePMask, 0), 131 + MSG_MAP(DisallowGpo, PPSMC_MSG_DisallowGpo, 0), 132 + MSG_MAP(Enable2ndUSB20Port, PPSMC_MSG_Enable2ndUSB20Port, 0), 131 133 }; 132 134 133 135 static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = { ··· 304 302 table_context->power_play_table; 305 303 struct smu_baco_context *smu_baco = &smu->smu_baco; 306 304 305 + if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_HARDWAREDC) 306 + smu->dc_controlled_by_gpio = true; 307 + 307 308 if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO || 308 309 powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) 309 310 smu_baco->platform_support = true; ··· 382 377 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 383 378 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 384 379 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 385 - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 380 + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t), 386 381 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 387 382 SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), 388 383 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); ··· 391 386 SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, 392 387 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 393 388 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, 394 - sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE, 389 + sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE, 395 390 AMDGPU_GEM_DOMAIN_VRAM); 396 391 397 - smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 392 + smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); 398 393 if (!smu_table->metrics_table) 399 394 goto err0_out; 400 395 smu_table->metrics_time = 0; ··· 423 418 uint32_t *value) 424 419 { 
425 420 struct smu_table_context *smu_table= &smu->smu_table; 426 - SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 421 + SmuMetrics_t *metrics = 422 + &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); 427 423 int ret = 0; 428 424 429 425 mutex_lock(&smu->metrics_lock); ··· 1071 1065 1072 1066 pstate_table->gfxclk_pstate.min = gfx_table->min; 1073 1067 pstate_table->gfxclk_pstate.peak = gfx_table->max; 1068 + if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK) 1069 + pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK; 1074 1070 1075 1071 pstate_table->uclk_pstate.min = mem_table->min; 1076 1072 pstate_table->uclk_pstate.peak = mem_table->max; 1073 + if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK) 1074 + pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK; 1077 1075 1078 1076 pstate_table->socclk_pstate.min = soc_table->min; 1079 1077 pstate_table->socclk_pstate.peak = soc_table->max; 1078 + if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK) 1079 + pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK; 1080 1080 1081 1081 return 0; 1082 1082 } ··· 1168 1156 1169 1157 static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *buf) 1170 1158 { 1171 - DpmActivityMonitorCoeffInt_t activity_monitor; 1159 + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 1160 + DpmActivityMonitorCoeffInt_t *activity_monitor = 1161 + &(activity_monitor_external.DpmActivityMonitorCoeffInt); 1172 1162 uint32_t i, size = 0; 1173 1163 int16_t workload_type = 0; 1174 1164 static const char *profile_name[] = { ··· 1212 1198 1213 1199 result = smu_cmn_update_table(smu, 1214 1200 SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type, 1215 - (void *)(&activity_monitor), false); 1201 + (void *)(&activity_monitor_external), false); 1216 1202 if (result) { 1217 1203 dev_err(smu->adev->dev, "[%s] Failed 
to get activity monitor!", __func__); 1218 1204 return result; ··· 1225 1211 " ", 1226 1212 0, 1227 1213 "GFXCLK", 1228 - activity_monitor.Gfx_FPS, 1229 - activity_monitor.Gfx_MinFreqStep, 1230 - activity_monitor.Gfx_MinActiveFreqType, 1231 - activity_monitor.Gfx_MinActiveFreq, 1232 - activity_monitor.Gfx_BoosterFreqType, 1233 - activity_monitor.Gfx_BoosterFreq, 1234 - activity_monitor.Gfx_PD_Data_limit_c, 1235 - activity_monitor.Gfx_PD_Data_error_coeff, 1236 - activity_monitor.Gfx_PD_Data_error_rate_coeff); 1214 + activity_monitor->Gfx_FPS, 1215 + activity_monitor->Gfx_MinFreqStep, 1216 + activity_monitor->Gfx_MinActiveFreqType, 1217 + activity_monitor->Gfx_MinActiveFreq, 1218 + activity_monitor->Gfx_BoosterFreqType, 1219 + activity_monitor->Gfx_BoosterFreq, 1220 + activity_monitor->Gfx_PD_Data_limit_c, 1221 + activity_monitor->Gfx_PD_Data_error_coeff, 1222 + activity_monitor->Gfx_PD_Data_error_rate_coeff); 1237 1223 1238 1224 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", 1239 1225 " ", 1240 1226 1, 1241 1227 "SOCCLK", 1242 - activity_monitor.Fclk_FPS, 1243 - activity_monitor.Fclk_MinFreqStep, 1244 - activity_monitor.Fclk_MinActiveFreqType, 1245 - activity_monitor.Fclk_MinActiveFreq, 1246 - activity_monitor.Fclk_BoosterFreqType, 1247 - activity_monitor.Fclk_BoosterFreq, 1248 - activity_monitor.Fclk_PD_Data_limit_c, 1249 - activity_monitor.Fclk_PD_Data_error_coeff, 1250 - activity_monitor.Fclk_PD_Data_error_rate_coeff); 1228 + activity_monitor->Fclk_FPS, 1229 + activity_monitor->Fclk_MinFreqStep, 1230 + activity_monitor->Fclk_MinActiveFreqType, 1231 + activity_monitor->Fclk_MinActiveFreq, 1232 + activity_monitor->Fclk_BoosterFreqType, 1233 + activity_monitor->Fclk_BoosterFreq, 1234 + activity_monitor->Fclk_PD_Data_limit_c, 1235 + activity_monitor->Fclk_PD_Data_error_coeff, 1236 + activity_monitor->Fclk_PD_Data_error_rate_coeff); 1251 1237 1252 1238 size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d 
%7d\n", 1253 1239 " ", 1254 1240 2, 1255 1241 "MEMLK", 1256 - activity_monitor.Mem_FPS, 1257 - activity_monitor.Mem_MinFreqStep, 1258 - activity_monitor.Mem_MinActiveFreqType, 1259 - activity_monitor.Mem_MinActiveFreq, 1260 - activity_monitor.Mem_BoosterFreqType, 1261 - activity_monitor.Mem_BoosterFreq, 1262 - activity_monitor.Mem_PD_Data_limit_c, 1263 - activity_monitor.Mem_PD_Data_error_coeff, 1264 - activity_monitor.Mem_PD_Data_error_rate_coeff); 1242 + activity_monitor->Mem_FPS, 1243 + activity_monitor->Mem_MinFreqStep, 1244 + activity_monitor->Mem_MinActiveFreqType, 1245 + activity_monitor->Mem_MinActiveFreq, 1246 + activity_monitor->Mem_BoosterFreqType, 1247 + activity_monitor->Mem_BoosterFreq, 1248 + activity_monitor->Mem_PD_Data_limit_c, 1249 + activity_monitor->Mem_PD_Data_error_coeff, 1250 + activity_monitor->Mem_PD_Data_error_rate_coeff); 1265 1251 } 1266 1252 1267 1253 return size; ··· 1269 1255 1270 1256 static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) 1271 1257 { 1272 - DpmActivityMonitorCoeffInt_t activity_monitor; 1258 + 1259 + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 1260 + DpmActivityMonitorCoeffInt_t *activity_monitor = 1261 + &(activity_monitor_external.DpmActivityMonitorCoeffInt); 1273 1262 int workload_type, ret = 0; 1274 1263 1275 1264 smu->power_profile_mode = input[size]; ··· 1286 1269 1287 1270 ret = smu_cmn_update_table(smu, 1288 1271 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, 1289 - (void *)(&activity_monitor), false); 1272 + (void *)(&activity_monitor_external), false); 1290 1273 if (ret) { 1291 1274 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 1292 1275 return ret; ··· 1294 1277 1295 1278 switch (input[0]) { 1296 1279 case 0: /* Gfxclk */ 1297 - activity_monitor.Gfx_FPS = input[1]; 1298 - activity_monitor.Gfx_MinFreqStep = input[2]; 1299 - activity_monitor.Gfx_MinActiveFreqType = input[3]; 1300 - 
activity_monitor.Gfx_MinActiveFreq = input[4]; 1301 - activity_monitor.Gfx_BoosterFreqType = input[5]; 1302 - activity_monitor.Gfx_BoosterFreq = input[6]; 1303 - activity_monitor.Gfx_PD_Data_limit_c = input[7]; 1304 - activity_monitor.Gfx_PD_Data_error_coeff = input[8]; 1305 - activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; 1280 + activity_monitor->Gfx_FPS = input[1]; 1281 + activity_monitor->Gfx_MinFreqStep = input[2]; 1282 + activity_monitor->Gfx_MinActiveFreqType = input[3]; 1283 + activity_monitor->Gfx_MinActiveFreq = input[4]; 1284 + activity_monitor->Gfx_BoosterFreqType = input[5]; 1285 + activity_monitor->Gfx_BoosterFreq = input[6]; 1286 + activity_monitor->Gfx_PD_Data_limit_c = input[7]; 1287 + activity_monitor->Gfx_PD_Data_error_coeff = input[8]; 1288 + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9]; 1306 1289 break; 1307 1290 case 1: /* Socclk */ 1308 - activity_monitor.Fclk_FPS = input[1]; 1309 - activity_monitor.Fclk_MinFreqStep = input[2]; 1310 - activity_monitor.Fclk_MinActiveFreqType = input[3]; 1311 - activity_monitor.Fclk_MinActiveFreq = input[4]; 1312 - activity_monitor.Fclk_BoosterFreqType = input[5]; 1313 - activity_monitor.Fclk_BoosterFreq = input[6]; 1314 - activity_monitor.Fclk_PD_Data_limit_c = input[7]; 1315 - activity_monitor.Fclk_PD_Data_error_coeff = input[8]; 1316 - activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9]; 1291 + activity_monitor->Fclk_FPS = input[1]; 1292 + activity_monitor->Fclk_MinFreqStep = input[2]; 1293 + activity_monitor->Fclk_MinActiveFreqType = input[3]; 1294 + activity_monitor->Fclk_MinActiveFreq = input[4]; 1295 + activity_monitor->Fclk_BoosterFreqType = input[5]; 1296 + activity_monitor->Fclk_BoosterFreq = input[6]; 1297 + activity_monitor->Fclk_PD_Data_limit_c = input[7]; 1298 + activity_monitor->Fclk_PD_Data_error_coeff = input[8]; 1299 + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9]; 1317 1300 break; 1318 1301 case 2: /* Memlk */ 1319 - activity_monitor.Mem_FPS = 
input[1]; 1320 - activity_monitor.Mem_MinFreqStep = input[2]; 1321 - activity_monitor.Mem_MinActiveFreqType = input[3]; 1322 - activity_monitor.Mem_MinActiveFreq = input[4]; 1323 - activity_monitor.Mem_BoosterFreqType = input[5]; 1324 - activity_monitor.Mem_BoosterFreq = input[6]; 1325 - activity_monitor.Mem_PD_Data_limit_c = input[7]; 1326 - activity_monitor.Mem_PD_Data_error_coeff = input[8]; 1327 - activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; 1302 + activity_monitor->Mem_FPS = input[1]; 1303 + activity_monitor->Mem_MinFreqStep = input[2]; 1304 + activity_monitor->Mem_MinActiveFreqType = input[3]; 1305 + activity_monitor->Mem_MinActiveFreq = input[4]; 1306 + activity_monitor->Mem_BoosterFreqType = input[5]; 1307 + activity_monitor->Mem_BoosterFreq = input[6]; 1308 + activity_monitor->Mem_PD_Data_limit_c = input[7]; 1309 + activity_monitor->Mem_PD_Data_error_coeff = input[8]; 1310 + activity_monitor->Mem_PD_Data_error_rate_coeff = input[9]; 1328 1311 break; 1329 1312 } 1330 1313 1331 1314 ret = smu_cmn_update_table(smu, 1332 1315 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, 1333 - (void *)(&activity_monitor), true); 1316 + (void *)(&activity_monitor_external), true); 1334 1317 if (ret) { 1335 1318 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); 1336 1319 return ret; ··· 2599 2582 struct smu_table_context *smu_table = &smu->smu_table; 2600 2583 struct gpu_metrics_v1_0 *gpu_metrics = 2601 2584 (struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table; 2602 - SmuMetrics_t metrics; 2585 + SmuMetricsExternal_t metrics_external; 2586 + SmuMetrics_t *metrics = 2587 + &(metrics_external.SmuMetrics); 2603 2588 int ret = 0; 2604 2589 2605 2590 ret = smu_cmn_get_metrics_table(smu, 2606 - &metrics, 2591 + &metrics_external, 2607 2592 true); 2608 2593 if (ret) 2609 2594 return ret; 2610 2595 2611 2596 smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics); 2612 2597 2613 - gpu_metrics->temperature_edge = metrics.TemperatureEdge; 2614 
- gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot; 2615 - gpu_metrics->temperature_mem = metrics.TemperatureMem; 2616 - gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx; 2617 - gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc; 2618 - gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0; 2598 + gpu_metrics->temperature_edge = metrics->TemperatureEdge; 2599 + gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot; 2600 + gpu_metrics->temperature_mem = metrics->TemperatureMem; 2601 + gpu_metrics->temperature_vrgfx = metrics->TemperatureVrGfx; 2602 + gpu_metrics->temperature_vrsoc = metrics->TemperatureVrSoc; 2603 + gpu_metrics->temperature_vrmem = metrics->TemperatureVrMem0; 2619 2604 2620 - gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; 2621 - gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; 2622 - gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage; 2605 + gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; 2606 + gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; 2607 + gpu_metrics->average_mm_activity = metrics->VcnActivityPercentage; 2623 2608 2624 - gpu_metrics->average_socket_power = metrics.AverageSocketPower; 2625 - gpu_metrics->energy_accumulator = metrics.EnergyAccumulator; 2609 + gpu_metrics->average_socket_power = metrics->AverageSocketPower; 2610 + gpu_metrics->energy_accumulator = metrics->EnergyAccumulator; 2626 2611 2627 - if (metrics.AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD) 2628 - gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs; 2612 + if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD) 2613 + gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs; 2629 2614 else 2630 - gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs; 2631 - gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs; 2632 - 
gpu_metrics->average_vclk0_frequency = metrics.AverageVclk0Frequency; 2633 - gpu_metrics->average_dclk0_frequency = metrics.AverageDclk0Frequency; 2634 - gpu_metrics->average_vclk1_frequency = metrics.AverageVclk1Frequency; 2635 - gpu_metrics->average_dclk1_frequency = metrics.AverageDclk1Frequency; 2615 + gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs; 2616 + gpu_metrics->average_uclk_frequency = metrics->AverageUclkFrequencyPostDs; 2617 + gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency; 2618 + gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency; 2619 + gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; 2620 + gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; 2636 2621 2637 - gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK]; 2638 - gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK]; 2639 - gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK]; 2640 - gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK_0]; 2641 - gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK_0]; 2642 - gpu_metrics->current_vclk1 = metrics.CurrClock[PPCLK_VCLK_1]; 2643 - gpu_metrics->current_dclk1 = metrics.CurrClock[PPCLK_DCLK_1]; 2622 + gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; 2623 + gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; 2624 + gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; 2625 + gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; 2626 + gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; 2627 + gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1]; 2628 + gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1]; 2644 2629 2645 - gpu_metrics->throttle_status = metrics.ThrottlerStatus; 2630 + gpu_metrics->throttle_status = metrics->ThrottlerStatus; 2646 2631 2647 - gpu_metrics->current_fan_speed = metrics.CurrFanSpeed; 2632 + 
gpu_metrics->current_fan_speed = metrics->CurrFanSpeed; 2648 2633 2649 2634 gpu_metrics->pcie_link_width = 2650 2635 smu_v11_0_get_current_pcie_link_width(smu); ··· 2669 2650 static int sienna_cichlid_gpo_control(struct smu_context *smu, 2670 2651 bool enablement) 2671 2652 { 2653 + uint32_t smu_version; 2672 2654 int ret = 0; 2673 2655 2656 + 2674 2657 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) { 2675 - if (enablement) 2676 - ret = smu_cmn_send_smc_msg_with_param(smu, 2677 - SMU_MSG_SetGpoFeaturePMask, 2678 - GFX_GPO_PACE_MASK | GFX_GPO_DEM_MASK, 2679 - NULL); 2680 - else 2681 - ret = smu_cmn_send_smc_msg_with_param(smu, 2682 - SMU_MSG_SetGpoFeaturePMask, 2683 - 0, 2684 - NULL); 2658 + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); 2659 + if (ret) 2660 + return ret; 2661 + 2662 + if (enablement) { 2663 + if (smu_version < 0x003a2500) { 2664 + ret = smu_cmn_send_smc_msg_with_param(smu, 2665 + SMU_MSG_SetGpoFeaturePMask, 2666 + GFX_GPO_PACE_MASK | GFX_GPO_DEM_MASK, 2667 + NULL); 2668 + } else { 2669 + ret = smu_cmn_send_smc_msg_with_param(smu, 2670 + SMU_MSG_DisallowGpo, 2671 + 0, 2672 + NULL); 2673 + } 2674 + } else { 2675 + if (smu_version < 0x003a2500) { 2676 + ret = smu_cmn_send_smc_msg_with_param(smu, 2677 + SMU_MSG_SetGpoFeaturePMask, 2678 + 0, 2679 + NULL); 2680 + } else { 2681 + ret = smu_cmn_send_smc_msg_with_param(smu, 2682 + SMU_MSG_DisallowGpo, 2683 + 1, 2684 + NULL); 2685 + } 2686 + } 2685 2687 } 2686 2688 2687 2689 return ret; 2688 2690 } 2691 + 2692 + static int sienna_cichlid_notify_2nd_usb20_port(struct smu_context *smu) 2693 + { 2694 + uint32_t smu_version; 2695 + int ret = 0; 2696 + 2697 + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); 2698 + if (ret) 2699 + return ret; 2700 + 2701 + /* 2702 + * Message SMU_MSG_Enable2ndUSB20Port is supported by 58.45 2703 + * onwards PMFWs. 
2704 + */ 2705 + if (smu_version < 0x003A2D00) 2706 + return 0; 2707 + 2708 + return smu_cmn_send_smc_msg_with_param(smu, 2709 + SMU_MSG_Enable2ndUSB20Port, 2710 + smu->smu_table.boot_values.firmware_caps & ATOM_FIRMWARE_CAP_ENABLE_2ND_USB20PORT ? 2711 + 1 : 0, 2712 + NULL); 2713 + } 2714 + 2715 + static int sienna_cichlid_system_features_control(struct smu_context *smu, 2716 + bool en) 2717 + { 2718 + int ret = 0; 2719 + 2720 + if (en) { 2721 + ret = sienna_cichlid_notify_2nd_usb20_port(smu); 2722 + if (ret) 2723 + return ret; 2724 + } 2725 + 2726 + return smu_v11_0_system_features_control(smu, en); 2727 + } 2728 + 2689 2729 static const struct pptable_funcs sienna_cichlid_ppt_funcs = { 2690 2730 .get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask, 2691 2731 .set_default_dpm_table = sienna_cichlid_set_default_dpm_table, ··· 2785 2707 .set_driver_table_location = smu_v11_0_set_driver_table_location, 2786 2708 .set_tool_table_location = smu_v11_0_set_tool_table_location, 2787 2709 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, 2788 - .system_features_control = smu_v11_0_system_features_control, 2710 + .system_features_control = sienna_cichlid_system_features_control, 2789 2711 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 2790 2712 .send_smc_msg = smu_cmn_send_smc_msg, 2791 2713 .init_display_count = NULL, ··· 2818 2740 .get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq, 2819 2741 .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, 2820 2742 .run_btc = sienna_cichlid_run_btc, 2743 + .set_power_source = smu_v11_0_set_power_source, 2821 2744 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 2822 2745 .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, 2823 2746 .get_gpu_metrics = sienna_cichlid_get_gpu_metrics,
+4
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
··· 29 29 POWER_SOURCE_COUNT, 30 30 } POWER_SOURCE_e; 31 31 32 + #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK 1825 33 + #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960 34 + #define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000 35 + 32 36 extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu); 33 37 34 38 #endif
+45 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
··· 91 91 const struct common_firmware_header *header; 92 92 struct amdgpu_firmware_info *ucode = NULL; 93 93 94 + if (amdgpu_sriov_vf(adev) && 95 + ((adev->asic_type == CHIP_NAVI12) || 96 + (adev->asic_type == CHIP_SIENNA_CICHLID))) 97 + return 0; 98 + 94 99 switch (adev->asic_type) { 95 100 case CHIP_ARCTURUS: 96 101 chip_name = "arcturus"; ··· 559 554 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; 560 555 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; 561 556 smu->smu_table.boot_values.pp_table_id = 0; 557 + smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability; 562 558 break; 563 559 case 3: 564 560 default: ··· 575 569 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; 576 570 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; 577 571 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id; 572 + smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability; 578 573 } 579 574 580 575 smu->smu_table.boot_values.format_revision = header->format_revision; ··· 936 929 if (power_src < 0) 937 930 return -EINVAL; 938 931 932 + /* 933 + * BIT 24-31: ControllerId (only PPT0 is supported for now) 934 + * BIT 16-23: PowerSource 935 + */ 939 936 ret = smu_cmn_send_smc_msg_with_param(smu, 940 937 SMU_MSG_GetPptLimit, 941 - power_src << 16, 938 + (0 << 24) | (power_src << 16), 942 939 power_limit); 943 940 if (ret) 944 941 dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__); ··· 952 941 953 942 int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) 954 943 { 944 + int power_src; 955 945 int ret = 0; 956 946 957 947 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { ··· 960 948 return -EOPNOTSUPP; 961 949 } 962 950 951 + power_src = smu_cmn_to_asic_specific_index(smu, 952 + CMN2ASIC_MAPPING_PWR, 953 + smu->adev->pm.ac_power ? 
954 + SMU_POWER_SOURCE_AC : 955 + SMU_POWER_SOURCE_DC); 956 + if (power_src < 0) 957 + return -EINVAL; 958 + 959 + /* 960 + * BIT 24-31: ControllerId (only PPT0 is supported for now) 961 + * BIT 16-23: PowerSource 962 + * BIT 0-15: PowerLimit 963 + */ 964 + n &= 0xFFFF; 965 + n |= 0 << 24; 966 + n |= (power_src) << 16; 963 967 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL); 964 968 if (ret) { 965 969 dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); ··· 2088 2060 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement); 2089 2061 if (ret) { 2090 2062 dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable"); 2063 + return ret; 2064 + } 2065 + } 2066 + 2067 + if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) { 2068 + ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement); 2069 + if (ret) { 2070 + dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable"); 2071 + return ret; 2072 + } 2073 + } 2074 + 2075 + if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) { 2076 + ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement); 2077 + if (ret) { 2078 + dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable"); 2091 2079 return ret; 2092 2080 } 2093 2081 }
+8 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 64 64 MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0), 65 65 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), 66 66 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), 67 - MSG_MAP(Spare, PPSMC_MSG_spare, 0), 67 + MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0), 68 68 MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0), 69 69 MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0), 70 70 MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0), ··· 722 722 return 0; 723 723 } 724 724 725 + static int vangogh_system_features_control(struct smu_context *smu, bool en) 726 + { 727 + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify, 728 + en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL); 729 + } 730 + 725 731 static const struct pptable_funcs vangogh_ppt_funcs = { 726 732 727 733 .check_fw_status = smu_v11_0_check_fw_status, ··· 755 749 .print_clk_levels = vangogh_print_fine_grain_clk, 756 750 .set_default_dpm_table = vangogh_set_default_dpm_tables, 757 751 .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters, 752 + .system_features_control = vangogh_system_features_control, 758 753 }; 759 754 760 755 void vangogh_set_ppt_funcs(struct smu_context *smu)
+4
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
··· 32 32 #define VANGOGH_UMD_PSTATE_SOCCLK 678 33 33 #define VANGOGH_UMD_PSTATE_FCLK 800 34 34 35 + /* RLC Power Status */ 36 + #define RLC_STATUS_OFF 0 37 + #define RLC_STATUS_NORMAL 1 38 + 35 39 #endif
+1 -1
drivers/gpu/drm/radeon/radeon_uvd.c
··· 155 155 family_id = le32_to_cpu(hdr->ucode_version) & 0xff; 156 156 version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; 157 157 version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; 158 - DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n", 158 + DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n", 159 159 version_major, version_minor, family_id); 160 160 161 161 /*
+1 -1
drivers/gpu/drm/radeon/radeon_vce.c
··· 122 122 if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1) 123 123 return -EINVAL; 124 124 125 - DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n", 125 + DRM_INFO("Found VCE firmware/feedback version %d.%d.%d / %d!\n", 126 126 start, mid, end, rdev->vce.fb_version); 127 127 128 128 rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);