Merge tag 'amd-drm-next-5.13-2021-04-23' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.13-2021-04-23:

amdgpu:
- Fixes for Aldebaran
- Display LTTPR fixes
- eDP fixes
- Fixes for Vangogh
- RAS fixes
- ASPM support
- Renoir SMU fixes
- Modifier fixes
- Misc code cleanups
- Freesync fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210423223920.3786-1-alexander.deucher@amd.com

+1872 -463
-4
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
··· 234 }) 235 236 /* GPUVM API */ 237 - int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid, 238 - void **vm, void **process_info, 239 - struct dma_fence **ef); 240 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, 241 struct file *filp, u32 pasid, 242 void **vm, void **process_info, 243 struct dma_fence **ef); 244 - void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm); 245 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm); 246 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm); 247 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
··· 234 }) 235 236 /* GPUVM API */ 237 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, 238 struct file *filp, u32 pasid, 239 void **vm, void **process_info, 240 struct dma_fence **ef); 241 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm); 242 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm); 243 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+5 -58
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 445 mapping_flags |= AMDGPU_VM_MTYPE_UC; 446 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 447 if (bo_adev == adev) { 448 - mapping_flags |= AMDGPU_VM_MTYPE_RW; 449 if (adev->gmc.xgmi.connected_to_cpu) 450 snoop = true; 451 } else { 452 - mapping_flags |= AMDGPU_VM_MTYPE_NC; 453 if (amdgpu_xgmi_same_hive(adev, bo_adev)) 454 snoop = true; 455 } 456 } else { 457 snoop = true; 458 - if (adev->gmc.xgmi.connected_to_cpu) 459 - /* system memory uses NC on A+A */ 460 - mapping_flags |= AMDGPU_VM_MTYPE_NC; 461 - else 462 - mapping_flags |= coherent ? 463 - AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 464 } 465 break; 466 default: ··· 1034 return ret; 1035 } 1036 1037 - int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid, 1038 - void **vm, void **process_info, 1039 - struct dma_fence **ef) 1040 - { 1041 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 1042 - struct amdgpu_vm *new_vm; 1043 - int ret; 1044 - 1045 - new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL); 1046 - if (!new_vm) 1047 - return -ENOMEM; 1048 - 1049 - /* Initialize AMDGPU part of the VM */ 1050 - ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid); 1051 - if (ret) { 1052 - pr_err("Failed init vm ret %d\n", ret); 1053 - goto amdgpu_vm_init_fail; 1054 - } 1055 - 1056 - /* Initialize KFD part of the VM and process info */ 1057 - ret = init_kfd_vm(new_vm, process_info, ef); 1058 - if (ret) 1059 - goto init_kfd_vm_fail; 1060 - 1061 - *vm = (void *) new_vm; 1062 - 1063 - return 0; 1064 - 1065 - init_kfd_vm_fail: 1066 - amdgpu_vm_fini(adev, new_vm); 1067 - amdgpu_vm_init_fail: 1068 - kfree(new_vm); 1069 - return ret; 1070 - } 1071 - 1072 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, 1073 struct file *filp, u32 pasid, 1074 void **vm, void **process_info, ··· 1098 mutex_destroy(&process_info->lock); 1099 kfree(process_info); 1100 } 1101 - } 1102 - 1103 - void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm) 1104 - { 1105 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 1106 - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; 1107 - 1108 - if (WARN_ON(!kgd || !vm)) 1109 - return; 1110 - 1111 - pr_debug("Destroying process vm %p\n", vm); 1112 - 1113 - /* Release the VM context */ 1114 - amdgpu_vm_fini(adev, avm); 1115 - kfree(vm); 1116 } 1117 1118 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
··· 445 mapping_flags |= AMDGPU_VM_MTYPE_UC; 446 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 447 if (bo_adev == adev) { 448 + mapping_flags |= coherent ? 449 + AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; 450 if (adev->gmc.xgmi.connected_to_cpu) 451 snoop = true; 452 } else { 453 + mapping_flags |= AMDGPU_VM_MTYPE_UC; 454 if (amdgpu_xgmi_same_hive(adev, bo_adev)) 455 snoop = true; 456 } 457 } else { 458 snoop = true; 459 + mapping_flags |= coherent ? 460 + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; 461 } 462 break; 463 default: ··· 1037 return ret; 1038 } 1039 1040 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, 1041 struct file *filp, u32 pasid, 1042 void **vm, void **process_info, ··· 1136 mutex_destroy(&process_info->lock); 1137 kfree(process_info); 1138 } 1139 } 1140 1141 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
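With amdgpu_amdkfd_gpuvm_create_process_vm() and _destroy_process_vm() gone, the acquire/release pair left in the header above is the only way KFD obtains a per-process VM. A minimal caller sketch, assuming a kgd device, the process' DRM render-node file and a pasid are already at hand (names illustrative, error handling trimmed):

        void *vm, *process_info;
        struct dma_fence *ef;
        int r;

        /* reuse the VM of the process' render node instead of creating a new one */
        r = amdgpu_amdkfd_gpuvm_acquire_process_vm(kgd, drm_file, pasid,
                                                   &vm, &process_info, &ef);
        if (r)
                return r;

        /* ... allocate and map KFD BOs against vm ... */

        amdgpu_amdkfd_gpuvm_release_process_vm(kgd, vm);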
+13
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 908 &amdgpu_fb_funcs); 909 if (ret) 910 goto err; 911 912 ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj); 913 if (ret)
··· 908 &amdgpu_fb_funcs); 909 if (ret) 910 goto err; 911 + /* Verify that the modifier is supported. */ 912 + if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format, 913 + mode_cmd->modifier[0])) { 914 + struct drm_format_name_buf format_name; 915 + drm_dbg_kms(dev, 916 + "unsupported pixel format %s / modifier 0x%llx\n", 917 + drm_get_format_name(mode_cmd->pixel_format, 918 + &format_name), 919 + mode_cmd->modifier[0]); 920 + 921 + ret = -EINVAL; 922 + goto err; 923 + } 924 925 ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj); 926 if (ret)
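The added block rejects framebuffer creation up front when no plane on the device can scan out the requested format/modifier pair. The same DRM helper can be used wherever such a pair needs vetting; a hedged one-off sketch (format and modifier picked purely for illustration):

        if (!drm_any_plane_has_format(dev, DRM_FORMAT_XRGB8888,
                                      DRM_FORMAT_MOD_LINEAR))
                return -EINVAL;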
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
··· 291 break; 292 293 case TTM_PL_VRAM: 294 - r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev, 295 - dir, &sgt); 296 if (r) 297 return ERR_PTR(r); 298 break;
··· 291 break; 292 293 case TTM_PL_VRAM: 294 + r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0, 295 + bo->tbo.base.size, attach->dev, dir, &sgt); 296 if (r) 297 return ERR_PTR(r); 298 break;
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 185 int amdgpu_bad_page_threshold = -1; 186 struct amdgpu_watchdog_timer amdgpu_watchdog_timer = { 187 .timeout_fatal_disable = false, 188 - .period = 0x23, /* default to max. timeout = 1 << 0x23 cycles */ 189 }; 190 191 /** ··· 553 * DOC: timeout_period (uint) 554 * Modify the watchdog timeout max_cycles as (1 << period) 555 */ 556 - MODULE_PARM_DESC(timeout_period, "watchdog timeout period (1 to 0x23(default), timeout maxCycles = (1 << period)"); 557 module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644); 558 559 /**
··· 185 int amdgpu_bad_page_threshold = -1; 186 struct amdgpu_watchdog_timer amdgpu_watchdog_timer = { 187 .timeout_fatal_disable = false, 188 + .period = 0x0, /* default to 0x0 (timeout disable) */ 189 }; 190 191 /** ··· 553 * DOC: timeout_period (uint) 554 * Modify the watchdog timeout max_cycles as (1 << period) 555 */ 556 + MODULE_PARM_DESC(timeout_period, "watchdog timeout period (0 = timeout disabled, 1 ~ 0x23 = timeout maxcycles = (1 << period)"); 557 module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644); 558 559 /**
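For orientation, a hedged reading of the timeout_period semantics described by the DOC text and MODULE_PARM_DESC above:

        /* period == 0         -> watchdog timeout disabled (the new built-in default)
         * period in 1..0x23   -> timeout after max_cycles = (1ULL << period) cycles
         * period == 0x23 (35) -> 1ULL << 35 = 34,359,738,368 cycles (the old default) */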
+1 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
··· 205 struct drm_gem_object *gobj = NULL; 206 struct amdgpu_bo *abo = NULL; 207 int ret; 208 - unsigned long tmp; 209 210 memset(&mode_cmd, 0, sizeof(mode_cmd)); 211 mode_cmd.width = sizes->surface_width; ··· 245 246 info->fbops = &amdgpufb_ops; 247 248 - tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start; 249 - info->fix.smem_start = adev->gmc.aper_base + tmp; 250 info->fix.smem_len = amdgpu_bo_size(abo); 251 info->screen_base = amdgpu_bo_kptr(abo); 252 info->screen_size = amdgpu_bo_size(abo);
··· 205 struct drm_gem_object *gobj = NULL; 206 struct amdgpu_bo *abo = NULL; 207 int ret; 208 209 memset(&mode_cmd, 0, sizeof(mode_cmd)); 210 mode_cmd.width = sizes->surface_width; ··· 246 247 info->fbops = &amdgpufb_ops; 248 249 + info->fix.smem_start = amdgpu_gmc_vram_cpu_pa(adev, abo); 250 info->fix.smem_len = amdgpu_bo_size(abo); 251 info->screen_base = amdgpu_bo_kptr(abo); 252 info->screen_size = amdgpu_bo_size(abo);
+38 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 487 { 488 switch (adev->asic_type) { 489 case CHIP_RAVEN: 490 if (amdgpu_tmz == 0) { 491 adev->gmc.tmz_enabled = false; 492 dev_info(adev->dev, ··· 498 "Trusted Memory Zone (TMZ) feature enabled\n"); 499 } 500 break; 501 - case CHIP_RENOIR: 502 case CHIP_NAVI10: 503 case CHIP_NAVI14: 504 case CHIP_NAVI12: ··· 661 u64 vram_addr = adev->vm_manager.vram_base_offset - 662 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; 663 u64 vram_end = vram_addr + vram_size; 664 - u64 gart_ptb_gpu_pa = amdgpu_bo_gpu_offset(adev->gart.bo) + 665 - adev->vm_manager.vram_base_offset - adev->gmc.vram_start; 666 667 flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE; 668 flags |= AMDGPU_PTE_WRITEABLE; ··· 683 flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED; 684 /* Requires gart_ptb_gpu_pa to be 4K aligned */ 685 amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags); 686 }
··· 487 { 488 switch (adev->asic_type) { 489 case CHIP_RAVEN: 490 + case CHIP_RENOIR: 491 if (amdgpu_tmz == 0) { 492 adev->gmc.tmz_enabled = false; 493 dev_info(adev->dev, ··· 497 "Trusted Memory Zone (TMZ) feature enabled\n"); 498 } 499 break; 500 case CHIP_NAVI10: 501 case CHIP_NAVI14: 502 case CHIP_NAVI12: ··· 661 u64 vram_addr = adev->vm_manager.vram_base_offset - 662 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; 663 u64 vram_end = vram_addr + vram_size; 664 + u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo); 665 666 flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE; 667 flags |= AMDGPU_PTE_WRITEABLE; ··· 684 flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED; 685 /* Requires gart_ptb_gpu_pa to be 4K aligned */ 686 amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags); 687 + } 688 + 689 + /** 690 + * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC 691 + * address 692 + * 693 + * @adev: amdgpu_device pointer 694 + * @mc_addr: MC address of buffer 695 + */ 696 + uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr) 697 + { 698 + return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset; 699 + } 700 + 701 + /** 702 + * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from 703 + * GPU's view 704 + * 705 + * @adev: amdgpu_device pointer 706 + * @bo: amdgpu buffer object 707 + */ 708 + uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo) 709 + { 710 + return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo)); 711 + } 712 + 713 + /** 714 + * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address 715 + * from CPU's view 716 + * 717 + * @adev: amdgpu_device pointer 718 + * @bo: amdgpu buffer object 719 + */ 720 + uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo) 721 + { 722 + return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base; 723 }
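The three helpers added above centralize the MC-address arithmetic that the hunks below remove from the gfxhub/mmhub/gmc call sites. A short usage sketch, assuming adev and a pinned VRAM buffer object bo:

        /* physical address of the BO from the GPU's point of view */
        u64 gpu_pa = amdgpu_gmc_vram_pa(adev, bo);

        /* the same conversion starting from a raw MC address */
        u64 also_gpu_pa = amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));

        /* physical address of the BO from the CPU's point of view (BAR aperture) */
        u64 cpu_pa = amdgpu_gmc_vram_cpu_pa(adev, bo);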
+3 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
··· 218 */ 219 u64 fb_start; 220 u64 fb_end; 221 - /* In the case of use GART table for vmid0 FB access, [fb_start, fb_end] 222 - * will be squeezed to GART aperture. But we have a PSP FW issue to fix 223 - * for now. To temporarily workaround the PSP FW issue, added below two 224 - * variables to remember the original fb_start/end to re-enable FB 225 - * aperture to workaround the PSP FW issue. Will delete it after we 226 - * get a proper PSP FW fix. 227 - */ 228 - u64 fb_start_original; 229 - u64 fb_end_original; 230 unsigned vram_width; 231 u64 real_vram_size; 232 int vram_mtrr; ··· 332 void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev); 333 334 void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev); 335 #endif
··· 218 */ 219 u64 fb_start; 220 u64 fb_end; 221 unsigned vram_width; 222 u64 real_vram_size; 223 int vram_mtrr; ··· 341 void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev); 342 343 void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev); 344 + uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr); 345 + uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); 346 + uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); 347 #endif
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 76 } 77 78 ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo); 79 80 if (!vm) 81 ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
··· 76 } 77 78 ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo); 79 + /* flush the cache before commit the IB */ 80 + ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC; 81 82 if (!vm) 83 ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
··· 519 pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), 520 GFP_KERNEL); 521 522 - if (!pmu_entry->pmu.attr_groups) 523 goto err_attr_group; 524 525 snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix, 526 adev_to_drm(pmu_entry->adev)->primary->index);
··· 519 pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), 520 GFP_KERNEL); 521 522 + if (!pmu_entry->pmu.attr_groups) { 523 + ret = -ENOMEM; 524 goto err_attr_group; 525 + } 526 527 snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix, 528 adev_to_drm(pmu_entry->adev)->primary->index);
+10 -14
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 328 329 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 330 struct psp_gfx_cmd_resp *cmd, 331 - uint64_t tmr_mc, uint32_t size) 332 { 333 if (amdgpu_sriov_vf(psp->adev)) 334 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 335 else ··· 341 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 342 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 343 cmd->cmd.cmd_setup_tmr.buf_size = size; 344 } 345 346 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, ··· 414 AMDGPU_GEM_DOMAIN_VRAM, 415 &psp->tmr_bo, &psp->tmr_mc_addr, pptr); 416 417 - /* workaround the tmr_mc_addr: 418 - * PSP requires an address in FB aperture. Right now driver produce 419 - * tmr_mc_addr in the GART aperture. Convert it back to FB aperture 420 - * for PSP. Will revert it after we get a fix from PSP FW. 421 - */ 422 - if (psp->adev->asic_type == CHIP_ALDEBARAN) { 423 - psp->tmr_mc_addr -= psp->adev->gmc.fb_start; 424 - psp->tmr_mc_addr += psp->adev->gmc.fb_start_original; 425 - } 426 - 427 return ret; 428 } 429 ··· 463 if (!cmd) 464 return -ENOMEM; 465 466 - psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, 467 - amdgpu_bo_size(psp->tmr_bo)); 468 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", 469 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 470 ··· 557 struct psp_context *psp = &adev->psp; 558 struct psp_gfx_cmd_resp *cmd = psp->cmd; 559 560 - if (adev->asic_type != CHIP_SIENNA_CICHLID) 561 return 0; 562 563 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
··· 328 329 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 330 struct psp_gfx_cmd_resp *cmd, 331 + uint64_t tmr_mc, struct amdgpu_bo *tmr_bo) 332 { 333 + struct amdgpu_device *adev = psp->adev; 334 + uint32_t size = amdgpu_bo_size(tmr_bo); 335 + uint64_t tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo); 336 + 337 if (amdgpu_sriov_vf(psp->adev)) 338 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR; 339 else ··· 337 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc); 338 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc); 339 cmd->cmd.cmd_setup_tmr.buf_size = size; 340 + cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1; 341 + cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa); 342 + cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa); 343 } 344 345 static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd, ··· 407 AMDGPU_GEM_DOMAIN_VRAM, 408 &psp->tmr_bo, &psp->tmr_mc_addr, pptr); 409 410 return ret; 411 } 412 ··· 466 if (!cmd) 467 return -ENOMEM; 468 469 + psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo); 470 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n", 471 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr); 472 ··· 561 struct psp_context *psp = &adev->psp; 562 struct psp_gfx_cmd_resp *cmd = psp->cmd; 563 564 + if (adev->asic_type != CHIP_SIENNA_CICHLID || amdgpu_sriov_vf(adev)) 565 return 0; 566 567 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+44 -38
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 114 115 if (amdgpu_ras_check_bad_page(adev, address)) { 116 dev_warn(adev->dev, 117 - "RAS WARN: 0x%llx has been marked as bad page!\n", 118 address); 119 return 0; 120 } ··· 221 op = 1; 222 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2) 223 op = 2; 224 - else if (sscanf(str, "retire_page") == 0) 225 op = 3; 226 else if (str[0] && str[1] && str[2] && str[3]) 227 /* ascii string, but commands are not matched. */ 228 return -EINVAL; 229 230 if (op != -1) { 231 - 232 if (op == 3) { 233 - if (sscanf(str, "%*s %llu", &address) != 1) 234 - if (sscanf(str, "%*s 0x%llx", &address) != 1) 235 - return -EINVAL; 236 237 data->op = op; 238 data->inject.address = address; ··· 254 data->op = op; 255 256 if (op == 2) { 257 - if (sscanf(str, "%*s %*s %*s %u %llu %llu", 258 - &sub_block, &address, &value) != 3) 259 - if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", 260 - &sub_block, &address, &value) != 3) 261 - return -EINVAL; 262 data->head.sub_block_index = sub_block; 263 data->inject.address = address; 264 data->inject.value = value; ··· 277 /** 278 * DOC: AMDGPU RAS debugfs control interface 279 * 280 - * It accepts struct ras_debug_if who has two members. 281 * 282 * First member: ras_debug_if::head or ras_debug_if::inject. 283 * ··· 302 * 303 * How to use the interface? 304 * 305 - * Programs 306 * 307 - * Copy the struct ras_debug_if in your codes and initialize it. 308 - * Write the struct to the control node. 309 * 310 - * Shells 311 * 312 * .. code-block:: bash 313 * 314 - * echo op block [error [sub_block address value]] > .../ras/ras_ctrl 315 * 316 - * Parameters: 317 * 318 - * op: disable, enable, inject 319 - * disable: only block is needed 320 - * enable: block and error are needed 321 - * inject: error, address, value are needed 322 - * block: umc, sdma, gfx, ......... 323 * see ras_block_string[] for details 324 - * error: ue, ce 325 - * ue: multi_uncorrectable 326 - * ce: single_correctable 327 - * sub_block: 328 - * sub block index, pass 0 if there is no sub block 329 * 330 - * here are some examples for bash commands: 331 * 332 * .. code-block:: bash 333 * ··· 336 * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl 337 * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl 338 * 339 - * How to check the result? 340 * 341 - * For disable/enable, please check ras features at 342 * /sys/class/drm/card[0/1/2...]/device/ras/features 343 * 344 - * For inject, please check corresponding err count at 345 - * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count 346 * 347 * .. note:: 348 * Operations are only allowed on blocks which are supported. 349 - * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask 350 * to see which blocks support RAS on a particular asic. 351 * 352 */ ··· 367 if (ret) 368 return -EINVAL; 369 370 - if (data.op == 3) 371 - { 372 ret = amdgpu_reserve_page_direct(adev, data.inject.address); 373 - 374 - if (ret) 375 return size; 376 else 377 return ret; ··· 500 501 if (amdgpu_ras_query_error_status(obj->adev, &info)) 502 return -EINVAL; 503 504 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, 505 "ce", info.ce_count); ··· 1273 &amdgpu_ras_debugfs_ctrl_ops); 1274 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev, 1275 &amdgpu_ras_debugfs_eeprom_ops); 1276 1277 /* 1278 * After one uncorrectable error happens, usually GPU recovery will
··· 114 115 if (amdgpu_ras_check_bad_page(adev, address)) { 116 dev_warn(adev->dev, 117 + "RAS WARN: 0x%llx has already been marked as bad page!\n", 118 address); 119 return 0; 120 } ··· 221 op = 1; 222 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2) 223 op = 2; 224 + else if (strstr(str, "retire_page") != NULL) 225 op = 3; 226 else if (str[0] && str[1] && str[2] && str[3]) 227 /* ascii string, but commands are not matched. */ 228 return -EINVAL; 229 230 if (op != -1) { 231 if (op == 3) { 232 + if (sscanf(str, "%*s 0x%llx", &address) != 1 && 233 + sscanf(str, "%*s %llu", &address) != 1) 234 + return -EINVAL; 235 236 data->op = op; 237 data->inject.address = address; ··· 255 data->op = op; 256 257 if (op == 2) { 258 + if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx", 259 + &sub_block, &address, &value) != 3 && 260 + sscanf(str, "%*s %*s %*s %u %llu %llu", 261 + &sub_block, &address, &value) != 3) 262 + return -EINVAL; 263 data->head.sub_block_index = sub_block; 264 data->inject.address = address; 265 data->inject.value = value; ··· 278 /** 279 * DOC: AMDGPU RAS debugfs control interface 280 * 281 + * The control interface accepts struct ras_debug_if which has two members. 282 * 283 * First member: ras_debug_if::head or ras_debug_if::inject. 284 * ··· 303 * 304 * How to use the interface? 305 * 306 + * In a program 307 * 308 + * Copy the struct ras_debug_if in your code and initialize it. 309 + * Write the struct to the control interface. 310 * 311 + * From shell 312 * 313 * .. code-block:: bash 314 * 315 + * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 316 + * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 317 + * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl 318 * 319 + * Where N, is the card which you want to affect. 320 * 321 + * "disable" requires only the block. 322 + * "enable" requires the block and error type. 323 + * "inject" requires the block, error type, address, and value. 324 + * The block is one of: umc, sdma, gfx, etc. 325 * see ras_block_string[] for details 326 + * The error type is one of: ue, ce, where, 327 + * ue is multi-uncorrectable 328 + * ce is single-correctable 329 + * The sub-block is a the sub-block index, pass 0 if there is no sub-block. 330 + * The address and value are hexadecimal numbers, leading 0x is optional. 331 * 332 + * For instance, 333 * 334 * .. code-block:: bash 335 * ··· 336 * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl 337 * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl 338 * 339 + * How to check the result of the operation? 340 * 341 + * To check disable/enable, see "ras" features at, 342 * /sys/class/drm/card[0/1/2...]/device/ras/features 343 * 344 + * To check inject, see the corresponding error count at, 345 + * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count 346 * 347 * .. note:: 348 * Operations are only allowed on blocks which are supported. 349 + * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask 350 * to see which blocks support RAS on a particular asic. 
351 * 352 */ ··· 367 if (ret) 368 return -EINVAL; 369 370 + if (data.op == 3) { 371 ret = amdgpu_reserve_page_direct(adev, data.inject.address); 372 + if (!ret) 373 return size; 374 else 375 return ret; ··· 502 503 if (amdgpu_ras_query_error_status(obj->adev, &info)) 504 return -EINVAL; 505 + 506 + 507 + if (obj->adev->asic_type == CHIP_ALDEBARAN) { 508 + if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) 509 + DRM_WARN("Failed to reset error counter and error status"); 510 + } 511 512 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, 513 "ce", info.ce_count); ··· 1269 &amdgpu_ras_debugfs_ctrl_ops); 1270 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev, 1271 &amdgpu_ras_debugfs_eeprom_ops); 1272 + debugfs_create_u32("bad_page_cnt_threshold", 0444, dir, 1273 + &con->bad_page_cnt_threshold); 1274 1275 /* 1276 * After one uncorrectable error happens, usually GPU recovery will
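The DOC block above describes both a programmatic path and a shell path into ras_ctrl. A hedged sketch of the programmatic one, from a userspace tool that has copied the struct ras_debug_if definition and already opened the debugfs node as fd (field usage mirrors amdgpu_ras_debugfs_ctrl_parse_data() above; the fd handling is illustrative only):

        struct ras_debug_if data = { 0 };

        data.op = 2;                            /* 2 == inject, as parsed above */
        data.head.block = AMDGPU_RAS_BLOCK__UMC;
        data.head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
        data.head.sub_block_index = 0;          /* no sub-block */
        data.inject.address = 0x0;
        data.inject.value = 0x0;

        if (write(fd, &data, sizeof(data)) != sizeof(data))
                perror("write ras_ctrl");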
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 112 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo); 113 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, 114 struct ttm_resource *mem, 115 struct device *dev, 116 enum dma_data_direction dir, 117 struct sg_table **sgt);
··· 112 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo); 113 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, 114 struct ttm_resource *mem, 115 + u64 offset, u64 size, 116 struct device *dev, 117 enum dma_data_direction dir, 118 struct sg_table **sgt);
+24 -10
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 25 #include <linux/dma-mapping.h> 26 #include "amdgpu.h" 27 #include "amdgpu_vm.h" 28 #include "amdgpu_atomfirmware.h" 29 #include "atom.h" 30 ··· 566 * 567 * @adev: amdgpu device pointer 568 * @mem: TTM memory object 569 * @dev: the other device 570 * @dir: dma direction 571 * @sgt: resulting sg table ··· 576 */ 577 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, 578 struct ttm_resource *mem, 579 struct device *dev, 580 enum dma_data_direction dir, 581 struct sg_table **sgt) 582 { 583 - struct drm_mm_node *node; 584 struct scatterlist *sg; 585 int num_entries = 0; 586 - unsigned int pages; 587 int i, r; 588 589 *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL); 590 if (!*sgt) 591 return -ENOMEM; 592 593 - for (pages = mem->num_pages, node = mem->mm_node; 594 - pages; pages -= node->size, ++node) 595 - ++num_entries; 596 597 r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL); 598 if (r) 599 goto error_free; 600 601 for_each_sgtable_sg((*sgt), sg, i) 602 sg->length = 0; 603 604 - node = mem->mm_node; 605 for_each_sgtable_sg((*sgt), sg, i) { 606 - phys_addr_t phys = (node->start << PAGE_SHIFT) + 607 - adev->gmc.aper_base; 608 - size_t size = node->size << PAGE_SHIFT; 609 dma_addr_t addr; 610 611 - ++node; 612 addr = dma_map_resource(dev, phys, size, dir, 613 DMA_ATTR_SKIP_CPU_SYNC); 614 r = dma_mapping_error(dev, addr); ··· 626 sg_set_page(sg, NULL, size, 0); 627 sg_dma_address(sg) = addr; 628 sg_dma_len(sg) = size; 629 } 630 return 0; 631 632 error_unmap:
··· 25 #include <linux/dma-mapping.h> 26 #include "amdgpu.h" 27 #include "amdgpu_vm.h" 28 + #include "amdgpu_res_cursor.h" 29 #include "amdgpu_atomfirmware.h" 30 #include "atom.h" 31 ··· 565 * 566 * @adev: amdgpu device pointer 567 * @mem: TTM memory object 568 + * @offset: byte offset from the base of VRAM BO 569 + * @length: number of bytes to export in sg_table 570 * @dev: the other device 571 * @dir: dma direction 572 * @sgt: resulting sg table ··· 573 */ 574 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, 575 struct ttm_resource *mem, 576 + u64 offset, u64 length, 577 struct device *dev, 578 enum dma_data_direction dir, 579 struct sg_table **sgt) 580 { 581 + struct amdgpu_res_cursor cursor; 582 struct scatterlist *sg; 583 int num_entries = 0; 584 int i, r; 585 586 *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL); 587 if (!*sgt) 588 return -ENOMEM; 589 590 + /* Determine the number of DRM_MM nodes to export */ 591 + amdgpu_res_first(mem, offset, length, &cursor); 592 + while (cursor.remaining) { 593 + num_entries++; 594 + amdgpu_res_next(&cursor, cursor.size); 595 + } 596 597 r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL); 598 if (r) 599 goto error_free; 600 601 + /* Initialize scatterlist nodes of sg_table */ 602 for_each_sgtable_sg((*sgt), sg, i) 603 sg->length = 0; 604 605 + /* 606 + * Walk down DRM_MM nodes to populate scatterlist nodes 607 + * @note: Use iterator api to get first the DRM_MM node 608 + * and the number of bytes from it. Access the following 609 + * DRM_MM node(s) if more buffer needs to exported 610 + */ 611 + amdgpu_res_first(mem, offset, length, &cursor); 612 for_each_sgtable_sg((*sgt), sg, i) { 613 + phys_addr_t phys = cursor.start + adev->gmc.aper_base; 614 + size_t size = cursor.size; 615 dma_addr_t addr; 616 617 addr = dma_map_resource(dev, phys, size, dir, 618 DMA_ATTR_SKIP_CPU_SYNC); 619 r = dma_mapping_error(dev, addr); ··· 615 sg_set_page(sg, NULL, size, 0); 616 sg_dma_address(sg) = addr; 617 sg_dma_len(sg) = size; 618 + 619 + amdgpu_res_next(&cursor, cursor.size); 620 } 621 + 622 return 0; 623 624 error_unmap:
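With the new offset/length pair, amdgpu_vram_mgr_alloc_sgt() can export just a window of a VRAM BO; the dma-buf hunk earlier passes 0 and bo->tbo.base.size to keep the old whole-object behaviour. A minimal sketch of a partial export (error paths and the eventual amdgpu_vram_mgr_free_sgt() call elided):

        struct sg_table *sgt;
        int r;

        /* export 1 MiB starting 64 KiB into the BO for DMA towards attach->dev */
        r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem,
                                      64 << 10, 1 << 20,
                                      attach->dev, dir, &sgt);
        if (r)
                return ERR_PTR(r);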
+1 -1
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 3373 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), 3374 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280), 3375 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000), 3376 - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00001d00, 0x00000500), 3377 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400), 3378 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), 3379 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
··· 3373 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), 3374 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280), 3375 SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000), 3376 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x00001d00, 0x00000500), 3377 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400), 3378 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), 3379 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
+3
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 1587 err = 0; 1588 adev->gfx.mec2_fw = NULL; 1589 } 1590 } 1591 1592 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
··· 1587 err = 0; 1588 adev->gfx.mec2_fw = NULL; 1589 } 1590 + } else { 1591 + adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version; 1592 + adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version; 1593 } 1594 1595 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+11 -5
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
··· 994 return ret; 995 } 996 997 - static const struct soc15_reg_entry gfx_v9_4_rdrsp_status_regs = 998 { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 }; 999 1000 static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev) ··· 1007 1008 mutex_lock(&adev->grbm_idx_mutex); 1009 1010 - for (i = 0; i < gfx_v9_4_rdrsp_status_regs.se_num; i++) { 1011 - for (j = 0; j < gfx_v9_4_rdrsp_status_regs.instance; 1012 j++) { 1013 gfx_v9_4_select_se_sh(adev, i, 0, j); 1014 reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( 1015 - gfx_v9_4_rdrsp_status_regs)); 1016 - if (reg_value) 1017 dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n", 1018 j, reg_value); 1019 } 1020 } 1021
··· 994 return ret; 995 } 996 997 + static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs = 998 { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 }; 999 1000 static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev) ··· 1007 1008 mutex_lock(&adev->grbm_idx_mutex); 1009 1010 + for (i = 0; i < gfx_v9_4_ea_err_status_regs.se_num; i++) { 1011 + for (j = 0; j < gfx_v9_4_ea_err_status_regs.instance; 1012 j++) { 1013 gfx_v9_4_select_se_sh(adev, i, 0, j); 1014 reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( 1015 + gfx_v9_4_ea_err_status_regs)); 1016 + if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) || 1017 + REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) || 1018 + REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { 1019 + /* SDP read/write error/parity error in FUE_IS_FATAL mode 1020 + * can cause system fatal error in arcturas. Harvest the error 1021 + * status before GPU reset */ 1022 dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n", 1023 j, reg_value); 1024 + } 1025 } 1026 } 1027
+18 -14
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
··· 808 REG_SET_FIELD(0, ATC_L2_CACHE_4K_DSM_CNTL, WRITE_COUNTERS, 1) }, 809 }; 810 811 - static const struct soc15_reg_entry gfx_v9_4_2_rdrsp_status_regs = 812 { SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 }; 813 814 static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev, ··· 997 blk->clear); 998 999 /* print the edc count */ 1000 - gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt, 1001 - ded_cnt); 1002 } 1003 } 1004 ··· 1041 uint32_t i, j; 1042 1043 mutex_lock(&adev->grbm_idx_mutex); 1044 - for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) { 1045 - for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance; 1046 j++) { 1047 gfx_v9_4_2_select_se_sh(adev, i, 0, j); 1048 - WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10); 1049 } 1050 } 1051 gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); ··· 1090 1091 mutex_lock(&adev->grbm_idx_mutex); 1092 1093 - for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) { 1094 - for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance; 1095 j++) { 1096 gfx_v9_4_2_select_se_sh(adev, i, 0, j); 1097 reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( 1098 - gfx_v9_4_2_rdrsp_status_regs)); 1099 - if (reg_value) 1100 dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n", 1101 j, reg_value); 1102 /* clear after read */ 1103 - WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10); 1104 } 1105 } 1106 ··· 1116 uint32_t data; 1117 1118 data = RREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS); 1119 - if (!data) { 1120 dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data); 1121 WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3); 1122 } 1123 1124 data = RREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS); 1125 - if (!data) { 1126 dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data); 1127 WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3); 1128 } 1129 1130 data = RREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS); 1131 - if (!data) { 1132 dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data); 1133 WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3); 1134 }
··· 808 REG_SET_FIELD(0, ATC_L2_CACHE_4K_DSM_CNTL, WRITE_COUNTERS, 1) }, 809 }; 810 811 + static const struct soc15_reg_entry gfx_v9_4_2_ea_err_status_regs = 812 { SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 }; 813 814 static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev, ··· 997 blk->clear); 998 999 /* print the edc count */ 1000 + if (sec_cnt || ded_cnt) 1001 + gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt, 1002 + ded_cnt); 1003 } 1004 } 1005 ··· 1040 uint32_t i, j; 1041 1042 mutex_lock(&adev->grbm_idx_mutex); 1043 + for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) { 1044 + for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance; 1045 j++) { 1046 gfx_v9_4_2_select_se_sh(adev, i, 0, j); 1047 + WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10); 1048 } 1049 } 1050 gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); ··· 1089 1090 mutex_lock(&adev->grbm_idx_mutex); 1091 1092 + for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) { 1093 + for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance; 1094 j++) { 1095 gfx_v9_4_2_select_se_sh(adev, i, 0, j); 1096 reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( 1097 + gfx_v9_4_2_ea_err_status_regs)); 1098 + if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) || 1099 + REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) || 1100 + REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { 1101 dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n", 1102 j, reg_value); 1103 + } 1104 /* clear after read */ 1105 + WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10); 1106 } 1107 } 1108 ··· 1112 uint32_t data; 1113 1114 data = RREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS); 1115 + if (data) { 1116 dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data); 1117 WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3); 1118 } 1119 1120 data = RREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS); 1121 + if (data) { 1122 dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data); 1123 WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3); 1124 } 1125 1126 data = RREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS); 1127 + if (data) { 1128 dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data); 1129 WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3); 1130 }
+7 -17
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
··· 120 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 121 122 /* Set default page address. */ 123 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 124 - adev->vm_manager.vram_base_offset; 125 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 126 (u32)(value >> 12)); 127 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, ··· 140 * FB aperture and AGP aperture. Disable them. 141 */ 142 if (adev->gmc.pdb0_bo) { 143 - if (adev->asic_type == CHIP_ALDEBARAN) { 144 - WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24); 145 - WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24); 146 - WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0); 147 - WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF); 148 - WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18); 149 - WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18); 150 - } else { 151 - WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0); 152 - WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); 153 - WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0); 154 - WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF); 155 - WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); 156 - WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); 157 - } 158 } 159 } 160
··· 120 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 121 122 /* Set default page address. */ 123 + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 124 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 125 (u32)(value >> 12)); 126 WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, ··· 141 * FB aperture and AGP aperture. Disable them. 142 */ 143 if (adev->gmc.pdb0_bo) { 144 + WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0); 145 + WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); 146 + WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0); 147 + WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF); 148 + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); 149 + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); 150 } 151 } 152
+1 -2
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
··· 165 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 166 167 /* Set default page address. */ 168 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start 169 - + adev->vm_manager.vram_base_offset; 170 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 171 (u32)(value >> 12)); 172 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
··· 165 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 166 167 /* Set default page address. */ 168 + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 169 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 170 (u32)(value >> 12)); 171 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+1 -2
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
··· 164 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 165 166 /* Set default page address. */ 167 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start 168 - + adev->vm_manager.vram_base_offset; 169 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 170 (u32)(value >> 12)); 171 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
··· 164 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 165 166 /* Set default page address. */ 167 + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 168 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 169 (u32)(value >> 12)); 170 WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+1 -2
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 568 uint64_t *addr, uint64_t *flags) 569 { 570 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) 571 - *addr = adev->vm_manager.vram_base_offset + *addr - 572 - adev->gmc.vram_start; 573 BUG_ON(*addr & 0xFFFF00000000003FULL); 574 575 if (!adev->gmc.translate_further)
··· 568 uint64_t *addr, uint64_t *flags) 569 { 570 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) 571 + *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); 572 BUG_ON(*addr & 0xFFFF00000000003FULL); 573 574 if (!adev->gmc.translate_further)
+5 -4
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 574 * be updated to avoid reading an incorrect value due to 575 * the new fast GRBM interface. 576 */ 577 - if (entry->vmid_src == AMDGPU_GFXHUB_0) 578 RREG32(hub->vm_l2_pro_fault_status); 579 580 status = RREG32(hub->vm_l2_pro_fault_status); ··· 803 * be cleared to avoid a false ACK due to the new fast 804 * GRBM interface. 805 */ 806 - if (vmhub == AMDGPU_GFXHUB_0) 807 RREG32_NO_KIQ(hub->vm_inv_eng0_req + 808 hub->eng_distance * eng); 809 ··· 1050 uint64_t *addr, uint64_t *flags) 1051 { 1052 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) 1053 - *addr = adev->vm_manager.vram_base_offset + *addr - 1054 - adev->gmc.vram_start; 1055 BUG_ON(*addr & 0xFFFF00000000003FULL); 1056 1057 if (!adev->gmc.translate_further)
··· 574 * be updated to avoid reading an incorrect value due to 575 * the new fast GRBM interface. 576 */ 577 + if ((entry->vmid_src == AMDGPU_GFXHUB_0) && 578 + (adev->asic_type < CHIP_ALDEBARAN)) 579 RREG32(hub->vm_l2_pro_fault_status); 580 581 status = RREG32(hub->vm_l2_pro_fault_status); ··· 802 * be cleared to avoid a false ACK due to the new fast 803 * GRBM interface. 804 */ 805 + if ((vmhub == AMDGPU_GFXHUB_0) && 806 + (adev->asic_type < CHIP_ALDEBARAN)) 807 RREG32_NO_KIQ(hub->vm_inv_eng0_req + 808 hub->eng_distance * eng); 809 ··· 1048 uint64_t *addr, uint64_t *flags) 1049 { 1050 if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM)) 1051 + *addr = amdgpu_gmc_vram_mc2pa(adev, *addr); 1052 BUG_ON(*addr & 0xFFFF00000000003FULL); 1053 1054 if (!adev->gmc.translate_further)
+1 -2
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
··· 114 return; 115 116 /* Set default page address. */ 117 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 118 - adev->vm_manager.vram_base_offset; 119 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 120 (u32)(value >> 12)); 121 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
··· 114 return; 115 116 /* Set default page address. */ 117 + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 118 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 119 (u32)(value >> 12)); 120 WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+12 -12
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
··· 47 48 adev->gmc.fb_start = base; 49 adev->gmc.fb_end = top; 50 - adev->gmc.fb_start_original = base; 51 - adev->gmc.fb_end_original = top; 52 53 return base; 54 } ··· 124 if (adev->gmc.pdb0_bo) { 125 WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF); 126 WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0); 127 - WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24); 128 - WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24); 129 - WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18); 130 - WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18); 131 } 132 if (amdgpu_sriov_vf(adev)) 133 return; 134 135 /* Set default page address. */ 136 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 137 - adev->vm_manager.vram_base_offset; 138 WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 139 (u32)(value >> 12)); 140 WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, ··· 1284 } 1285 } 1286 1287 - static const struct soc15_reg_entry mmhub_v1_7_err_status_regs[] = { 1288 { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_ERR_STATUS), 0, 0, 0 }, 1289 { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_ERR_STATUS), 0, 0, 0 }, 1290 { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_ERR_STATUS), 0, 0, 0 }, ··· 1301 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) 1302 return; 1303 1304 - for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_err_status_regs); i++) { 1305 reg_value = 1306 - RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_err_status_regs[i])); 1307 - if (reg_value) 1308 dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n", 1309 i, reg_value); 1310 } 1311 } 1312
··· 47 48 adev->gmc.fb_start = base; 49 adev->gmc.fb_end = top; 50 51 return base; 52 } ··· 126 if (adev->gmc.pdb0_bo) { 127 WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF); 128 WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0); 129 + WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, 0); 130 + WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); 131 + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); 132 + WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); 133 } 134 if (amdgpu_sriov_vf(adev)) 135 return; 136 137 /* Set default page address. */ 138 + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 139 WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 140 (u32)(value >> 12)); 141 WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, ··· 1287 } 1288 } 1289 1290 + static const struct soc15_reg_entry mmhub_v1_7_ea_err_status_regs[] = { 1291 { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_ERR_STATUS), 0, 0, 0 }, 1292 { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_ERR_STATUS), 0, 0, 0 }, 1293 { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_ERR_STATUS), 0, 0, 0 }, ··· 1304 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) 1305 return; 1306 1307 + for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_ea_err_status_regs); i++) { 1308 reg_value = 1309 + RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_ea_err_status_regs[i])); 1310 + if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) || 1311 + REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) || 1312 + REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { 1313 dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n", 1314 i, reg_value); 1315 + } 1316 } 1317 } 1318
+1 -2
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
··· 210 } 211 212 /* Set default page address. */ 213 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 214 - adev->vm_manager.vram_base_offset; 215 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 216 (u32)(value >> 12)); 217 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
··· 210 } 211 212 /* Set default page address. */ 213 + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 214 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 215 (u32)(value >> 12)); 216 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+1 -2
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
··· 162 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 163 164 /* Set default page address. */ 165 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 166 - adev->vm_manager.vram_base_offset; 167 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 168 (u32)(value >> 12)); 169 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
··· 162 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 163 164 /* Set default page address. */ 165 + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 166 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 167 (u32)(value >> 12)); 168 WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+8 -3
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
··· 136 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 137 138 /* Set default page address. */ 139 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 140 - adev->vm_manager.vram_base_offset; 141 WREG32_SOC15_OFFSET( 142 MMHUB, 0, 143 mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, ··· 1645 for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_err_status_regs); i++) { 1646 reg_value = 1647 RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_err_status_regs[i])); 1648 - if (reg_value) 1649 dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n", 1650 i, reg_value); 1651 } 1652 } 1653
··· 136 max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 137 138 /* Set default page address. */ 139 + value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); 140 WREG32_SOC15_OFFSET( 141 MMHUB, 0, 142 mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, ··· 1646 for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_err_status_regs); i++) { 1647 reg_value = 1648 RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_err_status_regs[i])); 1649 + if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) || 1650 + REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) || 1651 + REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { 1652 + /* SDP read/write error/parity error in FUE_IS_FATAL mode 1653 + * can cause system fatal error in arcturas. Harvest the error 1654 + * status before GPU reset */ 1655 dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n", 1656 i, reg_value); 1657 + } 1658 } 1659 } 1660
+128
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
··· 31 #include "vega10_enum.h" 32 #include <uapi/linux/kfd_ioctl.h> 33 34 static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev) 35 { 36 WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, ··· 278 WREG32_PCIE(smnPCIE_CI_CNTL, data); 279 } 280 281 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { 282 .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset, 283 .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset, ··· 401 .ih_control = nbio_v6_1_ih_control, 402 .init_registers = nbio_v6_1_init_registers, 403 .remap_hdp_registers = nbio_v6_1_remap_hdp_registers, 404 };
··· 31 #include "vega10_enum.h" 32 #include <uapi/linux/kfd_ioctl.h> 33 34 + #define smnPCIE_LC_CNTL 0x11140280 35 + #define smnPCIE_LC_CNTL3 0x111402d4 36 + #define smnPCIE_LC_CNTL6 0x111402ec 37 + #define smnPCIE_LC_CNTL7 0x111402f0 38 + #define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c 39 + #define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK 0x00001000L 40 + #define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL 41 + #define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L 42 + #define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123530 43 + #define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c 44 + #define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324 45 + #define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4 46 + #define smnRCC_BIF_STRAP2 0x10123488 47 + #define smnRCC_BIF_STRAP3 0x1012348c 48 + #define smnRCC_BIF_STRAP5 0x10123494 49 + #define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L 50 + #define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL 51 + #define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L 52 + #define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0 53 + #define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10 54 + #define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0 55 + 56 static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev) 57 { 58 WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, ··· 256 WREG32_PCIE(smnPCIE_CI_CNTL, data); 257 } 258 259 + static void nbio_v6_1_program_ltr(struct amdgpu_device *adev) 260 + { 261 + uint32_t def, data; 262 + 263 + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB); 264 + 265 + def = data = RREG32_PCIE(smnRCC_BIF_STRAP2); 266 + data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK; 267 + if (def != data) 268 + WREG32_PCIE(smnRCC_BIF_STRAP2, data); 269 + 270 + def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL); 271 + data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK; 272 + if (def != data) 273 + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data); 274 + 275 + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); 276 + data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; 277 + if (def != data) 278 + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); 279 + } 280 + 281 + static void nbio_v6_1_program_aspm(struct amdgpu_device *adev) 282 + { 283 + uint32_t def, data; 284 + 285 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); 286 + data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; 287 + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; 288 + data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; 289 + if (def != data) 290 + WREG32_PCIE(smnPCIE_LC_CNTL, data); 291 + 292 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL7); 293 + data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK; 294 + if (def != data) 295 + WREG32_PCIE(smnPCIE_LC_CNTL7, data); 296 + 297 + def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); 298 + data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK; 299 + if (def != data) 300 + WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data); 301 + 302 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); 303 + data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; 304 + if (def != data) 305 + WREG32_PCIE(smnPCIE_LC_CNTL3, data); 306 + 307 + def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); 308 + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK; 309 + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK; 310 + if (def != data) 311 + WREG32_PCIE(smnRCC_BIF_STRAP3, data); 312 + 313 + def = data = RREG32_PCIE(smnRCC_BIF_STRAP5); 314 + 
data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK; 315 + if (def != data) 316 + WREG32_PCIE(smnRCC_BIF_STRAP5, data); 317 + 318 + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); 319 + data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; 320 + if (def != data) 321 + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); 322 + 323 + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001); 324 + 325 + def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2); 326 + data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | 327 + PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; 328 + data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK; 329 + if (def != data) 330 + WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data); 331 + 332 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL6); 333 + data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK | 334 + PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK; 335 + if (def != data) 336 + WREG32_PCIE(smnPCIE_LC_CNTL6, data); 337 + 338 + nbio_v6_1_program_ltr(adev); 339 + 340 + def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); 341 + data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; 342 + data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT; 343 + if (def != data) 344 + WREG32_PCIE(smnRCC_BIF_STRAP3, data); 345 + 346 + def = data = RREG32_PCIE(smnRCC_BIF_STRAP5); 347 + data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT; 348 + if (def != data) 349 + WREG32_PCIE(smnRCC_BIF_STRAP5, data); 350 + 351 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); 352 + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; 353 + data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; 354 + data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT; 355 + if (def != data) 356 + WREG32_PCIE(smnPCIE_LC_CNTL, data); 357 + 358 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); 359 + data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; 360 + if (def != data) 361 + WREG32_PCIE(smnPCIE_LC_CNTL3, data); 362 + } 363 + 364 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { 365 .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset, 366 .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset, ··· 274 .ih_control = nbio_v6_1_ih_control, 275 .init_registers = nbio_v6_1_init_registers, 276 .remap_hdp_registers = nbio_v6_1_remap_hdp_registers, 277 + .program_aspm = nbio_v6_1_program_aspm, 278 };
+125
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
··· 31 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" 32 #include <uapi/linux/kfd_ioctl.h> 33 34 #define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c 35 36 /* 37 * These are nbio v7_4_1 registers mask. Temporarily define these here since ··· 586 .ras_fini = amdgpu_nbio_ras_fini, 587 }; 588 589 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { 590 .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset, 591 .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset, ··· 711 .ih_control = nbio_v7_4_ih_control, 712 .init_registers = nbio_v7_4_init_registers, 713 .remap_hdp_registers = nbio_v7_4_remap_hdp_registers, 714 };
··· 31 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" 32 #include <uapi/linux/kfd_ioctl.h> 33 34 + #define smnPCIE_LC_CNTL 0x11140280 35 + #define smnPCIE_LC_CNTL3 0x111402d4 36 + #define smnPCIE_LC_CNTL6 0x111402ec 37 + #define smnPCIE_LC_CNTL7 0x111402f0 38 #define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c 39 + #define smnRCC_BIF_STRAP3 0x1012348c 40 + #define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL 41 + #define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L 42 + #define smnRCC_BIF_STRAP5 0x10123494 43 + #define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL 44 + #define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c 45 + #define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L 46 + #define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324 47 + #define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4 48 + #define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538 49 + #define smnRCC_BIF_STRAP2 0x10123488 50 + #define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L 51 + #define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0 52 + #define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10 53 + #define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0 54 55 /* 56 * These are nbio v7_4_1 registers mask. Temporarily define these here since ··· 567 .ras_fini = amdgpu_nbio_ras_fini, 568 }; 569 570 + static void nbio_v7_4_program_ltr(struct amdgpu_device *adev) 571 + { 572 + uint32_t def, data; 573 + 574 + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB); 575 + 576 + def = data = RREG32_PCIE(smnRCC_BIF_STRAP2); 577 + data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK; 578 + if (def != data) 579 + WREG32_PCIE(smnRCC_BIF_STRAP2, data); 580 + 581 + def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL); 582 + data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK; 583 + if (def != data) 584 + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data); 585 + 586 + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); 587 + data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; 588 + if (def != data) 589 + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); 590 + } 591 + 592 + static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) 593 + { 594 + uint32_t def, data; 595 + 596 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); 597 + data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; 598 + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; 599 + data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; 600 + if (def != data) 601 + WREG32_PCIE(smnPCIE_LC_CNTL, data); 602 + 603 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL7); 604 + data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK; 605 + if (def != data) 606 + WREG32_PCIE(smnPCIE_LC_CNTL7, data); 607 + 608 + def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); 609 + data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK; 610 + if (def != data) 611 + WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data); 612 + 613 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); 614 + data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; 615 + if (def != data) 616 + WREG32_PCIE(smnPCIE_LC_CNTL3, data); 617 + 618 + def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); 619 + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK; 620 + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK; 621 + if (def != data) 622 + WREG32_PCIE(smnRCC_BIF_STRAP3, data); 623 + 624 + def = data = RREG32_PCIE(smnRCC_BIF_STRAP5); 625 + data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK; 626 + if (def != data) 627 + 
WREG32_PCIE(smnRCC_BIF_STRAP5, data); 628 + 629 + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); 630 + data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; 631 + if (def != data) 632 + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); 633 + 634 + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001); 635 + 636 + def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2); 637 + data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | 638 + PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; 639 + data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK; 640 + if (def != data) 641 + WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data); 642 + 643 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL6); 644 + data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK | 645 + PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK; 646 + if (def != data) 647 + WREG32_PCIE(smnPCIE_LC_CNTL6, data); 648 + 649 + nbio_v7_4_program_ltr(adev); 650 + 651 + def = data = RREG32_PCIE(smnRCC_BIF_STRAP3); 652 + data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; 653 + data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT; 654 + if (def != data) 655 + WREG32_PCIE(smnRCC_BIF_STRAP3, data); 656 + 657 + def = data = RREG32_PCIE(smnRCC_BIF_STRAP5); 658 + data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT; 659 + if (def != data) 660 + WREG32_PCIE(smnRCC_BIF_STRAP5, data); 661 + 662 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); 663 + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; 664 + data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; 665 + data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT; 666 + if (def != data) 667 + WREG32_PCIE(smnPCIE_LC_CNTL, data); 668 + 669 + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); 670 + data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; 671 + if (def != data) 672 + WREG32_PCIE(smnPCIE_LC_CNTL3, data); 673 + } 674 + 675 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { 676 .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset, 677 .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset, ··· 587 .ih_control = nbio_v7_4_ih_control, 588 .init_registers = nbio_v7_4_init_registers, 589 .remap_hdp_registers = nbio_v7_4_remap_hdp_registers, 590 + .program_aspm = nbio_v7_4_program_aspm, 591 };
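Note on the pattern above: nearly every register touched by nbio_v7_4_program_aspm()/nbio_v7_4_program_ltr() uses the same read-modify-write idiom: snapshot the register, clear and set the relevant fields, then write back only if the value actually changed. The standalone C sketch below isolates that idiom; rreg()/wreg() and the fake register file are stand-ins for the driver's RREG32_PCIE()/WREG32_PCIE() helpers, not real amdgpu API.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver's RREG32_PCIE()/WREG32_PCIE() helpers. */
static uint32_t regs[4];
static uint32_t rreg(unsigned int off) { return regs[off]; }
static void wreg(unsigned int off, uint32_t val) { regs[off] = val; }

/* Read-modify-write: clear 'clr', set 'set', write back only on change. */
static void rmw(unsigned int off, uint32_t clr, uint32_t set)
{
        uint32_t def, data;

        def = data = rreg(off);
        data &= ~clr;
        data |= set;
        if (def != data)
                wreg(off, data);
}

int main(void)
{
        regs[0] = 0x0000ff00;                   /* pretend boot-time value */
        rmw(0, 0x0000ff00, 0x00000001);         /* clear a timer field, set a disable bit */
        printf("reg0 = 0x%08x\n", rreg(0));     /* prints reg0 = 0x00000001 */
        return 0;
}

The "if (def != data)" guard avoids redundant writes over the indirect SMN register path during init, which is why it is repeated for every register in the function above.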
+2 -8
drivers/gpu/drm/amd/amdgpu/nv.c
··· 601 if (amdgpu_aspm != 1) 602 return; 603 604 - if ((adev->asic_type >= CHIP_SIENNA_CICHLID) && 605 - !(adev->flags & AMD_IS_APU) && 606 (adev->nbio.funcs->program_aspm)) 607 adev->nbio.funcs->program_aspm(adev); 608 ··· 933 if (adev->gfx.funcs->update_perfmon_mgcg) 934 adev->gfx.funcs->update_perfmon_mgcg(adev, !enter); 935 936 - /* 937 - * The ASPM function is not fully enabled and verified on 938 - * Navi yet. Temporarily skip this until ASPM enabled. 939 - */ 940 - if ((adev->asic_type >= CHIP_SIENNA_CICHLID) && 941 - !(adev->flags & AMD_IS_APU) && 942 (adev->nbio.funcs->enable_aspm)) 943 adev->nbio.funcs->enable_aspm(adev, !enter); 944
··· 601 if (amdgpu_aspm != 1) 602 return; 603 604 + if (!(adev->flags & AMD_IS_APU) && 605 (adev->nbio.funcs->program_aspm)) 606 adev->nbio.funcs->program_aspm(adev); 607 ··· 934 if (adev->gfx.funcs->update_perfmon_mgcg) 935 adev->gfx.funcs->update_perfmon_mgcg(adev, !enter); 936 937 + if (!(adev->flags & AMD_IS_APU) && 938 (adev->nbio.funcs->enable_aspm)) 939 adev->nbio.funcs->enable_aspm(adev, !enter); 940
+10 -1
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
··· 185 uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of TMR buffer (must be 4 KB aligned) */ 186 uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of TMR buffer */ 187 uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB) */ 188 189 }; 190 - 191 192 /* FW types for GFX_CMD_ID_LOAD_IP_FW command. Limit 31. */ 193 enum psp_gfx_fw_type {
··· 185 uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of TMR buffer (must be 4 KB aligned) */ 186 uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of TMR buffer */ 187 uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB) */ 188 + union { 189 + struct { 190 + uint32_t sriov_enabled:1; /* whether the device runs under SR-IOV*/ 191 + uint32_t virt_phy_addr:1; /* driver passes both virtual and physical address to PSP*/ 192 + uint32_t reserved:30; 193 + } bitfield; 194 + uint32_t tmr_flags; 195 + }; 196 + uint32_t system_phy_addr_lo; /* bits [31:0] of system physical address of TMR buffer (must be 4 KB aligned) */ 197 + uint32_t system_phy_addr_hi; /* bits [63:32] of system physical address of TMR buffer */ 198 199 }; 200 201 /* FW types for GFX_CMD_ID_LOAD_IP_FW command. Limit 31. */ 202 enum psp_gfx_fw_type {
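The flags added to the TMR setup command are exposed both as named bits and as one 32-bit word through an anonymous union. A minimal host-side sketch of filling such a union follows; the struct only mirrors the layout shown above for illustration (C bit-field packing is compiler and endian dependent), and the flag values are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the layout of the flags union added above; illustration only. */
struct tmr_setup_flags {
        union {
                struct {
                        uint32_t sriov_enabled:1;
                        uint32_t virt_phy_addr:1;
                        uint32_t reserved:30;
                } bitfield;
                uint32_t tmr_flags;
        };
};

int main(void)
{
        struct tmr_setup_flags f = {0};

        f.bitfield.sriov_enabled = 1;   /* hypothetical: running under SR-IOV */
        f.bitfield.virt_phy_addr = 1;   /* hypothetical: pass GPU VA and system PA */

        printf("tmr_flags = 0x%08x\n", f.tmr_flags);    /* 0x00000003 on typical ABIs */
        return 0;
}

Driver-side code would set the named bits, while anything that marshals the command toward the PSP can treat tmr_flags as a single word.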
+28 -7
drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
··· 160 }; 161 162 static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev, 163 uint32_t value, 164 uint32_t instance, 165 uint32_t *sec_count) ··· 170 171 /* double bits error (multiple bits) error detection is not supported */ 172 for (i = 0; i < ARRAY_SIZE(sdma_v4_4_ras_fields); i++) { 173 /* the SDMA_EDC_COUNTER register in each sdma instance 174 * shares the same sed shift_mask 175 * */ ··· 201 reg_value = RREG32(reg_offset); 202 /* double bit error is not supported */ 203 if (reg_value) 204 - sdma_v4_4_get_ras_error_count(adev, reg_value, instance, &sec_count); 205 - /* err_data->ce_count should be initialized to 0 206 - * before calling into this function */ 207 - err_data->ce_count += sec_count; 208 - /* double bit error is not supported 209 - * set ue count to 0 */ 210 - err_data->ue_count = 0; 211 212 return 0; 213 };
··· 160 }; 161 162 static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev, 163 + uint32_t reg_offset, 164 uint32_t value, 165 uint32_t instance, 166 uint32_t *sec_count) ··· 169 170 /* double bits error (multiple bits) error detection is not supported */ 171 for (i = 0; i < ARRAY_SIZE(sdma_v4_4_ras_fields); i++) { 172 + if (sdma_v4_4_ras_fields[i].reg_offset != reg_offset) 173 + continue; 174 + 175 /* the SDMA_EDC_COUNTER register in each sdma instance 176 * shares the same sed shift_mask 177 * */ ··· 197 reg_value = RREG32(reg_offset); 198 /* double bit error is not supported */ 199 if (reg_value) 200 + sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER, reg_value, 201 + instance, &sec_count); 202 + 203 + reg_offset = sdma_v4_4_get_reg_offset(adev, instance, regSDMA0_EDC_COUNTER2); 204 + reg_value = RREG32(reg_offset); 205 + /* double bit error is not supported */ 206 + if (reg_value) 207 + sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER2, reg_value, 208 + instance, &sec_count); 209 + 210 + /* 211 + * err_data->ue_count should be initialized to 0 212 + * before calling into this function 213 + * 214 + * SDMA RAS supports single bit uncorrectable error detection. 215 + * So, increment uncorrectable error count. 216 + */ 217 + err_data->ue_count += sec_count; 218 + 219 + /* 220 + * SDMA RAS does not support correctable errors. 221 + * Set ce count to 0. 222 + */ 223 + err_data->ce_count = 0; 224 225 return 0; 226 };
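With the extra reg_offset argument, the counter walk above sums only the ras_fields entries that belong to the register that was actually read, so SDMA0_EDC_COUNTER and SDMA0_EDC_COUNTER2 no longer pick up each other's fields. A self-contained sketch of that table-driven extraction follows; the field table, offsets and register values are invented for illustration and do not match the real SDMA EDC layout.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ras_field {
        uint32_t reg_offset;    /* which EDC register this field lives in */
        uint32_t mask;
        uint32_t shift;
};

/* Invented layout: two 8-bit counters in register 0x10, one in 0x20. */
static const struct ras_field fields[] = {
        { 0x10, 0x000000ff, 0 },
        { 0x10, 0x0000ff00, 8 },
        { 0x20, 0x000000ff, 0 },
};

/* Sum only the fields that belong to the register that was read. */
static void count_sec(uint32_t reg_offset, uint32_t value, uint32_t *sec_count)
{
        size_t i;

        for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
                if (fields[i].reg_offset != reg_offset)
                        continue;
                *sec_count += (value & fields[i].mask) >> fields[i].shift;
        }
}

int main(void)
{
        uint32_t sec_count = 0;

        count_sec(0x10, 0x00000302, &sec_count);        /* 2 + 3 */
        count_sec(0x20, 0x00000001, &sec_count);        /* + 1 */
        printf("sec_count = %u\n", sec_count);          /* prints 6 */
        return 0;
}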
+28
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
··· 370 } 371 372 /** 373 * sdma_v5_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring 374 * 375 * @ring: amdgpu ring pointer ··· 1690 10 + 10 + 10, /* sdma_v5_2_ring_emit_fence x3 for user fence, vm fence */ 1691 .emit_ib_size = 7 + 6, /* sdma_v5_2_ring_emit_ib */ 1692 .emit_ib = sdma_v5_2_ring_emit_ib, 1693 .emit_fence = sdma_v5_2_ring_emit_fence, 1694 .emit_pipeline_sync = sdma_v5_2_ring_emit_pipeline_sync, 1695 .emit_vm_flush = sdma_v5_2_ring_emit_vm_flush,
··· 370 } 371 372 /** 373 + * sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse 374 + * 375 + * @ring: amdgpu ring pointer 376 + * 377 + * Emit a GCR_REQ packet that invalidates and writes back the GL caches, 378 + * so that the IBs which follow see coherent memory. 379 + * 380 + */ 381 + static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring) 382 + { 383 + uint32_t gcr_cntl = 384 + SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV | 385 + SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV | 386 + SDMA_GCR_GLI_INV(1); 387 + 388 + /* flush entire cache L0/L1/L2, this can be optimized by performance requirement */ 389 + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); 390 + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0)); 391 + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) | 392 + SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0)); 393 + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) | 394 + SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16)); 395 + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) | 396 + SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0)); 397 + } 398 + 399 + /** 400 * sdma_v5_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring 401 * 402 * @ring: amdgpu ring pointer ··· 1663 10 + 10 + 10, /* sdma_v5_2_ring_emit_fence x3 for user fence, vm fence */ 1664 .emit_ib_size = 7 + 6, /* sdma_v5_2_ring_emit_ib */ 1665 .emit_ib = sdma_v5_2_ring_emit_ib, 1666 + .emit_mem_sync = sdma_v5_2_ring_emit_mem_sync, 1667 .emit_fence = sdma_v5_2_ring_emit_fence, 1668 .emit_pipeline_sync = sdma_v5_2_ring_emit_pipeline_sync, 1669 .emit_vm_flush = sdma_v5_2_ring_emit_vm_flush,
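The GCR_REQ payload encoding above splits the cache-rinse control word: bits [15:0] of gcr_cntl land in PAYLOAD2 and bits [18:16] in PAYLOAD3, which is what the gcr_cntl and gcr_cntl >> 16 arguments express. A tiny standalone illustration of that split, with a made-up control value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t gcr_cntl = 0x5abcd;                    /* made-up control bits */
        uint32_t payload2 = gcr_cntl & 0xffff;          /* GCR_CONTROL_15_0  */
        uint32_t payload3 = (gcr_cntl >> 16) & 0x7;     /* GCR_CONTROL_18_16 */

        printf("payload2 = 0x%04x, payload3 = 0x%x\n", payload2, payload3);
        return 0;
}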
+4 -3
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 816 817 static void soc15_program_aspm(struct amdgpu_device *adev) 818 { 819 - 820 - if (amdgpu_aspm == 0) 821 return; 822 823 - /* todo */ 824 } 825 826 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
··· 816 817 static void soc15_program_aspm(struct amdgpu_device *adev) 818 { 819 + if (amdgpu_aspm != 1) 820 return; 821 822 + if (!(adev->flags & AMD_IS_APU) && 823 + (adev->nbio.funcs->program_aspm)) 824 + adev->nbio.funcs->program_aspm(adev); 825 } 826 827 static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
+191 -2
drivers/gpu/drm/amd/amdgpu/vi.c
··· 81 #include "mxgpu_vi.h" 82 #include "amdgpu_dm.h" 83 84 /* Topaz */ 85 static const struct amdgpu_video_codecs topaz_video_codecs_encode = 86 { ··· 1115 /* todo */ 1116 } 1117 1118 static void vi_program_aspm(struct amdgpu_device *adev) 1119 { 1120 1121 - if (amdgpu_aspm == 0) 1122 return; 1123 1124 - /* todo */ 1125 } 1126 1127 static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
··· 81 #include "mxgpu_vi.h" 82 #include "amdgpu_dm.h" 83 84 + #define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6 85 + #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L 86 + #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L 87 + #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK 0x00000004L 88 + #define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK 0x00000008L 89 + #define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK 0x00000010L 90 + #define ixPCIE_L1_PM_SUB_CNTL 0x378 91 + #define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK 0x00000004L 92 + #define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK 0x00000008L 93 + #define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK 0x00000001L 94 + #define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK 0x00000002L 95 + #define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK 0x00200000L 96 + #define LINK_CAP 0x64 97 + #define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L 98 + #define ixCPM_CONTROL 0x1400118 99 + #define ixPCIE_LC_CNTL7 0x100100BC 100 + #define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK 0x00000400L 101 + #define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT 0x00000007 102 + #define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT 0x00000009 103 + #define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK 0x01000000L 104 + #define PCIE_L1_PM_SUB_CNTL 0x378 105 + #define ASIC_IS_P22(asic_type, rid) ((asic_type >= CHIP_POLARIS10) && \ 106 + (asic_type <= CHIP_POLARIS12) && \ 107 + (rid >= 0x6E)) 108 /* Topaz */ 109 static const struct amdgpu_video_codecs topaz_video_codecs_encode = 110 { ··· 1091 /* todo */ 1092 } 1093 1094 + static void vi_enable_aspm(struct amdgpu_device *adev) 1095 + { 1096 + u32 data, orig; 1097 + 1098 + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL); 1099 + data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT << 1100 + PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; 1101 + data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT << 1102 + PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; 1103 + data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; 1104 + data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK; 1105 + if (orig != data) 1106 + WREG32_PCIE(ixPCIE_LC_CNTL, data); 1107 + } 1108 + 1109 static void vi_program_aspm(struct amdgpu_device *adev) 1110 { 1111 + u32 data, data1, orig; 1112 + bool bL1SS = false; 1113 + bool bClkReqSupport = true; 1114 1115 + if (amdgpu_aspm != 1) 1116 return; 1117 1118 + if (adev->flags & AMD_IS_APU || 1119 + adev->asic_type < CHIP_POLARIS10) 1120 + return; 1121 + 1122 + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL); 1123 + data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; 1124 + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; 1125 + data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; 1126 + if (orig != data) 1127 + WREG32_PCIE(ixPCIE_LC_CNTL, data); 1128 + 1129 + orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL); 1130 + data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK; 1131 + data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT; 1132 + data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK; 1133 + if (orig != data) 1134 + WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data); 1135 + 1136 + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3); 1137 + data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK; 1138 + if (orig != data) 1139 + WREG32_PCIE(ixPCIE_LC_CNTL3, data); 1140 + 1141 + orig = data = RREG32_PCIE(ixPCIE_P_CNTL); 1142 + data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK; 1143 + if (orig != data) 1144 + WREG32_PCIE(ixPCIE_P_CNTL, data); 1145 + 1146 + data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE); 1147 + pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, 
&data1); 1148 + if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK && 1149 + (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK | 1150 + PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK | 1151 + PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK | 1152 + PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) { 1153 + bL1SS = true; 1154 + } else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK | 1155 + PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK | 1156 + PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK | 1157 + PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) { 1158 + bL1SS = true; 1159 + } 1160 + 1161 + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6); 1162 + data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK; 1163 + if (orig != data) 1164 + WREG32_PCIE(ixPCIE_LC_CNTL6, data); 1165 + 1166 + orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL); 1167 + data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK; 1168 + if (orig != data) 1169 + WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data); 1170 + 1171 + pci_read_config_dword(adev->pdev, LINK_CAP, &data); 1172 + if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK)) 1173 + bClkReqSupport = false; 1174 + 1175 + if (bClkReqSupport) { 1176 + orig = data = RREG32_SMC(ixTHM_CLK_CNTL); 1177 + data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK); 1178 + data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) | 1179 + (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT); 1180 + if (orig != data) 1181 + WREG32_SMC(ixTHM_CLK_CNTL, data); 1182 + 1183 + orig = data = RREG32_SMC(ixMISC_CLK_CTRL); 1184 + data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK | 1185 + MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK); 1186 + data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) | 1187 + (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT); 1188 + data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT); 1189 + if (orig != data) 1190 + WREG32_SMC(ixMISC_CLK_CTRL, data); 1191 + 1192 + orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL); 1193 + data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK; 1194 + if (orig != data) 1195 + WREG32_SMC(ixCG_CLKPIN_CNTL, data); 1196 + 1197 + orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2); 1198 + data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK; 1199 + if (orig != data) 1200 + WREG32_SMC(ixCG_CLKPIN_CNTL, data); 1201 + 1202 + orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL); 1203 + data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK; 1204 + data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT); 1205 + if (orig != data) 1206 + WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data); 1207 + 1208 + orig = data = RREG32_PCIE(ixCPM_CONTROL); 1209 + data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK | 1210 + CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK); 1211 + if (orig != data) 1212 + WREG32_PCIE(ixCPM_CONTROL, data); 1213 + 1214 + orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL); 1215 + data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK; 1216 + data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT); 1217 + if (orig != data) 1218 + WREG32_PCIE(ixPCIE_CONFIG_CNTL, data); 1219 + 1220 + orig = data = RREG32(mmBIF_CLK_CTRL); 1221 + data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK; 1222 + if (orig != data) 1223 + WREG32(mmBIF_CLK_CTRL, data); 1224 + 1225 + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7); 1226 + data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK; 1227 + if (orig != data) 1228 + WREG32_PCIE(ixPCIE_LC_CNTL7, data); 1229 + 1230 + orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG); 1231 + data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK; 1232 + if (orig != data) 1233 + 
WREG32_PCIE(ixPCIE_HW_DEBUG, data); 1234 + 1235 + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2); 1236 + data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; 1237 + data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK; 1238 + if (bL1SS) 1239 + data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK; 1240 + if (orig != data) 1241 + WREG32_PCIE(ixPCIE_LC_CNTL2, data); 1242 + 1243 + } 1244 + 1245 + vi_enable_aspm(adev); 1246 + 1247 + data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL); 1248 + data1 = RREG32_PCIE(ixPCIE_LC_STATUS1); 1249 + if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) && 1250 + data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK && 1251 + data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) { 1252 + orig = data = RREG32_PCIE(ixPCIE_LC_CNTL); 1253 + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; 1254 + if (orig != data) 1255 + WREG32_PCIE(ixPCIE_LC_CNTL, data); 1256 + } 1257 + 1258 + if ((adev->asic_type == CHIP_POLARIS12 && 1259 + !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) || 1260 + ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) { 1261 + orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL); 1262 + data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK; 1263 + if (orig != data) 1264 + WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data); 1265 + } 1266 } 1267 1268 static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
+122 -7
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
··· 25 #include "soc15_int.h" 26 #include "kfd_device_queue_manager.h" 27 #include "kfd_smi_events.h" 28 29 static bool event_interrupt_isr_v9(struct kfd_dev *dev, 30 const uint32_t *ih_ring_entry, ··· 172 const uint32_t *ih_ring_entry) 173 { 174 uint16_t source_id, client_id, pasid, vmid; 175 - uint32_t context_id; 176 177 source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); 178 client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); 179 pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); 180 vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); 181 - context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry); 182 183 if (client_id == SOC15_IH_CLIENTID_GRBM_CP || 184 client_id == SOC15_IH_CLIENTID_SE0SH || ··· 188 client_id == SOC15_IH_CLIENTID_SE2SH || 189 client_id == SOC15_IH_CLIENTID_SE3SH) { 190 if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) 191 - kfd_signal_event_interrupt(pasid, context_id, 32); 192 - else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) 193 - kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24); 194 - else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) 195 kfd_signal_hw_exception_event(pasid); 196 } else if (client_id == SOC15_IH_CLIENTID_SDMA0 || 197 client_id == SOC15_IH_CLIENTID_SDMA1 || ··· 251 client_id == SOC15_IH_CLIENTID_SDMA6 || 252 client_id == SOC15_IH_CLIENTID_SDMA7) { 253 if (source_id == SOC15_INTSRC_SDMA_TRAP) 254 - kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28); 255 } else if (client_id == SOC15_IH_CLIENTID_VMC || 256 client_id == SOC15_IH_CLIENTID_VMC1 || 257 client_id == SOC15_IH_CLIENTID_UTCL2) {
··· 25 #include "soc15_int.h" 26 #include "kfd_device_queue_manager.h" 27 #include "kfd_smi_events.h" 28 + #include "amdgpu.h" 29 + 30 + enum SQ_INTERRUPT_WORD_ENCODING { 31 + SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0, 32 + SQ_INTERRUPT_WORD_ENCODING_INST, 33 + SQ_INTERRUPT_WORD_ENCODING_ERROR, 34 + }; 35 + 36 + enum SQ_INTERRUPT_ERROR_TYPE { 37 + SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0, 38 + SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST, 39 + SQ_INTERRUPT_ERROR_TYPE_MEMVIOL, 40 + SQ_INTERRUPT_ERROR_TYPE_EDC_FED, 41 + }; 42 + 43 + /* SQ_INTERRUPT_WORD_AUTO_CTXID */ 44 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE__SHIFT 0 45 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT__SHIFT 1 46 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL__SHIFT 2 47 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP__SHIFT 3 48 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP__SHIFT 4 49 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW__SHIFT 5 50 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW__SHIFT 6 51 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW__SHIFT 7 52 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR__SHIFT 8 53 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID__SHIFT 24 54 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING__SHIFT 26 55 + 56 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_MASK 0x00000001 57 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT_MASK 0x00000002 58 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL_MASK 0x00000004 59 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP_MASK 0x00000008 60 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP_MASK 0x00000010 61 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW_MASK 0x00000020 62 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW_MASK 0x00000040 63 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW_MASK 0x00000080 64 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR_MASK 0x00000100 65 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID_MASK 0x03000000 66 + #define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING_MASK 0x0c000000 67 + 68 + /* SQ_INTERRUPT_WORD_WAVE_CTXID */ 69 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA__SHIFT 0 70 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID__SHIFT 12 71 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV__SHIFT 13 72 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID__SHIFT 14 73 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID__SHIFT 18 74 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID__SHIFT 20 75 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT 24 76 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING__SHIFT 26 77 + 78 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA_MASK 0x00000fff 79 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID_MASK 0x00001000 80 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK 0x00002000 81 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID_MASK 0x0003c000 82 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID_MASK 0x000c0000 83 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID_MASK 0x00f00000 84 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000 85 + #define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000 86 + 87 + #define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \ 88 + ((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000)) 89 + 90 + #define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000 91 + #define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20 92 93 static bool event_interrupt_isr_v9(struct kfd_dev *dev, 94 const uint32_t *ih_ring_entry, ··· 108 const uint32_t *ih_ring_entry) 109 { 110 uint16_t 
source_id, client_id, pasid, vmid; 111 + uint32_t context_id0, context_id1; 112 + uint32_t sq_intr_err, sq_int_data, encoding; 113 114 source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); 115 client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); 116 pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); 117 vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); 118 + context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry); 119 + context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry); 120 121 if (client_id == SOC15_IH_CLIENTID_GRBM_CP || 122 client_id == SOC15_IH_CLIENTID_SE0SH || ··· 122 client_id == SOC15_IH_CLIENTID_SE2SH || 123 client_id == SOC15_IH_CLIENTID_SE3SH) { 124 if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) 125 + kfd_signal_event_interrupt(pasid, context_id0, 32); 126 + else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) { 127 + sq_int_data = KFD_CONTEXT_ID_GET_SQ_INT_DATA(context_id0, context_id1); 128 + encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING); 129 + switch (encoding) { 130 + case SQ_INTERRUPT_WORD_ENCODING_AUTO: 131 + pr_debug( 132 + "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n", 133 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID), 134 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE), 135 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT), 136 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL), 137 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP), 138 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP), 139 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW), 140 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW), 141 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW), 142 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR)); 143 + break; 144 + case SQ_INTERRUPT_WORD_ENCODING_INST: 145 + pr_debug("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n", 146 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID), 147 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA), 148 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID), 149 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV), 150 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID), 151 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID), 152 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID), 153 + sq_int_data); 154 + break; 155 + case SQ_INTERRUPT_WORD_ENCODING_ERROR: 156 + sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE); 157 + pr_warn("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n", 158 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID), 159 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA), 160 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID), 161 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV), 162 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID), 163 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID), 164 + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID), 165 + sq_intr_err); 166 + if (sq_intr_err != 
SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST && 167 + sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) { 168 + kfd_signal_hw_exception_event(pasid); 169 + amdgpu_amdkfd_gpu_reset(dev->kgd); 170 + return; 171 + } 172 + break; 173 + default: 174 + break; 175 + } 176 + kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24); 177 + } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) 178 kfd_signal_hw_exception_event(pasid); 179 } else if (client_id == SOC15_IH_CLIENTID_SDMA0 || 180 client_id == SOC15_IH_CLIENTID_SDMA1 || ··· 136 client_id == SOC15_IH_CLIENTID_SDMA6 || 137 client_id == SOC15_IH_CLIENTID_SDMA7) { 138 if (source_id == SOC15_INTSRC_SDMA_TRAP) 139 + kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28); 140 } else if (client_id == SOC15_IH_CLIENTID_VMC || 141 client_id == SOC15_IH_CLIENTID_VMC1 || 142 client_id == SOC15_IH_CLIENTID_UTCL2) {
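KFD_CONTEXT_ID_GET_SQ_INT_DATA reassembles the 24-bit SQ interrupt payload from the two IH context words: data[11:0] comes from CONTEXT_ID0[11:0], data[15:12] from CONTEXT_ID0[31:28] and data[23:16] from CONTEXT_ID1[7:0]; the error type then sits in bits [23:20] of that payload. A worked example with made-up ring-entry words:

#include <stdint.h>
#include <stdio.h>

/* Same packing as KFD_CONTEXT_ID_GET_SQ_INT_DATA above. */
static uint32_t get_sq_int_data(uint32_t ctx0, uint32_t ctx1)
{
        return (ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000);
}

#define ERR_TYPE_MASK   0xF00000
#define ERR_TYPE_SHIFT  20

int main(void)
{
        /* Made-up IH ring words, only to show where each chunk lands. */
        uint32_t ctx0 = 0xA0000123;     /* [11:0] = 0x123, [31:28] = 0xA */
        uint32_t ctx1 = 0x00000021;     /* [7:0]  = 0x21                 */
        uint32_t data = get_sq_int_data(ctx0, ctx1);

        printf("sq_int_data = 0x%06x\n", data);                         /* 0x21a123 */
        printf("err_type = %u\n",
               (data & ERR_TYPE_MASK) >> ERR_TYPE_SHIFT);               /* 2 == MEMVIOL */
        return 0;
}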
+10 -17
drivers/gpu/drm/amd/amdkfd/kfd_process.c
··· 935 pdd->dev->kgd, pdd->vm); 936 fput(pdd->drm_file); 937 } 938 - else if (pdd->vm) 939 - amdgpu_amdkfd_gpuvm_destroy_process_vm( 940 - pdd->dev->kgd, pdd->vm); 941 942 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base) 943 free_pages((unsigned long)pdd->qpd.cwsr_kaddr, ··· 1372 struct kfd_dev *dev; 1373 int ret; 1374 1375 if (pdd->vm) 1376 - return drm_file ? -EBUSY : 0; 1377 1378 p = pdd->process; 1379 dev = pdd->dev; 1380 1381 - if (drm_file) 1382 - ret = amdgpu_amdkfd_gpuvm_acquire_process_vm( 1383 - dev->kgd, drm_file, p->pasid, 1384 - &pdd->vm, &p->kgd_process_info, &p->ef); 1385 - else 1386 - ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid, 1387 - &pdd->vm, &p->kgd_process_info, &p->ef); 1388 if (ret) { 1389 pr_err("Failed to create process VM object\n"); 1390 return ret; ··· 1405 err_init_cwsr: 1406 err_reserve_ib_mem: 1407 kfd_process_device_free_bos(pdd); 1408 - if (!drm_file) 1409 - amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm); 1410 pdd->vm = NULL; 1411 1412 return ret; ··· 1429 return ERR_PTR(-ENOMEM); 1430 } 1431 1432 /* 1433 * signal runtime-pm system to auto resume and prevent 1434 * further runtime suspend once device pdd is created until ··· 1446 } 1447 1448 err = kfd_iommu_bind_process_to_device(pdd); 1449 - if (err) 1450 - goto out; 1451 - 1452 - err = kfd_process_device_init_vm(pdd, NULL); 1453 if (err) 1454 goto out; 1455
··· 935 pdd->dev->kgd, pdd->vm); 936 fput(pdd->drm_file); 937 } 938 939 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base) 940 free_pages((unsigned long)pdd->qpd.cwsr_kaddr, ··· 1375 struct kfd_dev *dev; 1376 int ret; 1377 1378 + if (!drm_file) 1379 + return -EINVAL; 1380 + 1381 if (pdd->vm) 1382 + return -EBUSY; 1383 1384 p = pdd->process; 1385 dev = pdd->dev; 1386 1387 + ret = amdgpu_amdkfd_gpuvm_acquire_process_vm( 1388 + dev->kgd, drm_file, p->pasid, 1389 + &pdd->vm, &p->kgd_process_info, &p->ef); 1390 if (ret) { 1391 pr_err("Failed to create process VM object\n"); 1392 return ret; ··· 1409 err_init_cwsr: 1410 err_reserve_ib_mem: 1411 kfd_process_device_free_bos(pdd); 1412 pdd->vm = NULL; 1413 1414 return ret; ··· 1435 return ERR_PTR(-ENOMEM); 1436 } 1437 1438 + if (!pdd->vm) 1439 + return ERR_PTR(-ENODEV); 1440 + 1441 /* 1442 * signal runtime-pm system to auto resume and prevent 1443 * further runtime suspend once device pdd is created until ··· 1449 } 1450 1451 err = kfd_iommu_bind_process_to_device(pdd); 1452 if (err) 1453 goto out; 1454
+27 -11
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 2552 struct drm_connector *connector = &aconnector->base; 2553 struct drm_device *dev = connector->dev; 2554 enum dc_connection_type new_connection_type = dc_connection_none; 2555 - #ifdef CONFIG_DRM_AMD_DC_HDCP 2556 struct amdgpu_device *adev = drm_to_adev(dev); 2557 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 2558 #endif 2559 2560 /* 2561 * In case of failure or MST no need to update connector status or notify the OS ··· 2698 union hpd_irq_data hpd_irq_data; 2699 2700 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 2701 2702 /* 2703 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio ··· 4232 { 4233 struct amdgpu_device *adev = drm_to_adev(plane->dev); 4234 const struct drm_format_info *info = drm_format_info(format); 4235 4236 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; 4237 ··· 4240 return false; 4241 4242 /* 4243 - * We always have to allow this modifier, because core DRM still 4244 - * checks LINEAR support if userspace does not provide modifers. 4245 */ 4246 - if (modifier == DRM_FORMAT_MOD_LINEAR) 4247 return true; 4248 4249 - /* 4250 - * The arbitrary tiling support for multiplane formats has not been hooked 4251 - * up. 4252 - */ 4253 - if (info->num_planes > 1) 4254 return false; 4255 4256 /* ··· 4274 if (modifier_has_dcc(modifier)) { 4275 /* Per radeonsi comments 16/64 bpp are more complicated. */ 4276 if (info->cpp[0] != 4) 4277 return false; 4278 } 4279 ··· 4479 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4480 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4481 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 4482 - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 4483 4484 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4485 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | ··· 4491 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4492 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4493 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 4494 - AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); 4495 4496 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4497 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
··· 2552 struct drm_connector *connector = &aconnector->base; 2553 struct drm_device *dev = connector->dev; 2554 enum dc_connection_type new_connection_type = dc_connection_none; 2555 struct amdgpu_device *adev = drm_to_adev(dev); 2556 + #ifdef CONFIG_DRM_AMD_DC_HDCP 2557 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); 2558 #endif 2559 + 2560 + if (adev->dm.disable_hpd_irq) 2561 + return; 2562 2563 /* 2564 * In case of failure or MST no need to update connector status or notify the OS ··· 2695 union hpd_irq_data hpd_irq_data; 2696 2697 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); 2698 + 2699 + if (adev->dm.disable_hpd_irq) 2700 + return; 2701 + 2702 2703 /* 2704 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio ··· 4225 { 4226 struct amdgpu_device *adev = drm_to_adev(plane->dev); 4227 const struct drm_format_info *info = drm_format_info(format); 4228 + int i; 4229 4230 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; 4231 ··· 4232 return false; 4233 4234 /* 4235 + * We always have to allow these modifiers: 4236 + * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. 4237 + * 2. Not passing any modifiers is the same as explicitly passing INVALID. 4238 */ 4239 + if (modifier == DRM_FORMAT_MOD_LINEAR || 4240 + modifier == DRM_FORMAT_MOD_INVALID) { 4241 return true; 4242 + } 4243 4244 + /* Check that the modifier is on the list of the plane's supported modifiers. */ 4245 + for (i = 0; i < plane->modifier_count; i++) { 4246 + if (modifier == plane->modifiers[i]) 4247 + break; 4248 + } 4249 + if (i == plane->modifier_count) 4250 return false; 4251 4252 /* ··· 4262 if (modifier_has_dcc(modifier)) { 4263 /* Per radeonsi comments 16/64 bpp are more complicated. */ 4264 if (info->cpp[0] != 4) 4265 + return false; 4266 + /* We support multi-planar formats, but not when combined with 4267 + * additional DCC metadata planes. */ 4268 + if (info->num_planes > 1) 4269 return false; 4270 } 4271 ··· 4463 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4464 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4465 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 4466 + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 4467 4468 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4469 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | ··· 4475 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | 4476 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | 4477 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | 4478 + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); 4479 4480 add_modifier(mods, size, capacity, AMD_FMT_MOD | 4481 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
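After the LINEAR/INVALID special cases, the new validation path above is a plain linear scan of the modifiers the plane advertised at init. A minimal standalone version of that check follows; the 64-bit values are placeholders, not real AMD_FMT_MOD encodings.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A modifier is acceptable only if the plane advertised it at init;
 * LINEAR/INVALID are special-cased by the caller, as in the hunk above. */
static bool plane_supports_modifier(uint64_t modifier,
                                    const uint64_t *mods, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++)
                if (mods[i] == modifier)
                        return true;
        return false;
}

int main(void)
{
        /* Placeholder values standing in for AMD_FMT_MOD encodings. */
        const uint64_t plane_mods[] = { 0x0200000000000001ULL, 0x0200000000000002ULL };

        printf("%d\n", plane_supports_modifier(0x0200000000000002ULL, plane_mods, 2)); /* 1 */
        printf("%d\n", plane_supports_modifier(0x0200000000000003ULL, plane_mods, 2)); /* 0 */
        return 0;
}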
+2 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 1 /* 2 - * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), ··· 410 */ 411 struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC]; 412 bool force_timing_sync; 413 bool dmcub_trace_event_en; 414 /** 415 * @da_list:
··· 1 /* 2 + * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), ··· 410 */ 411 struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC]; 412 bool force_timing_sync; 413 + bool disable_hpd_irq; 414 bool dmcub_trace_event_en; 415 /** 416 * @da_list:
+35
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
··· 3077 DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get, 3078 force_timing_sync_set, "%llu\n"); 3079 3080 /* 3081 * Sets the DC visual confirm debug option from the given string. 3082 * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm ··· 3244 3245 debugfs_create_file_unsafe("amdgpu_dm_dcc_en", 0644, root, adev, 3246 &dcc_en_bits_fops); 3247 }
··· 3077 DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get, 3078 force_timing_sync_set, "%llu\n"); 3079 3080 + 3081 + /* 3082 + * Disables all HPD and HPD RX interrupt handling in the 3083 + * driver when set to 1. Default is 0. 3084 + */ 3085 + static int disable_hpd_set(void *data, u64 val) 3086 + { 3087 + struct amdgpu_device *adev = data; 3088 + 3089 + adev->dm.disable_hpd_irq = (bool)val; 3090 + 3091 + return 0; 3092 + } 3093 + 3094 + 3095 + /* 3096 + * Returns 1 if HPD and HPRX interrupt handling is disabled, 3097 + * 0 otherwise. 3098 + */ 3099 + static int disable_hpd_get(void *data, u64 *val) 3100 + { 3101 + struct amdgpu_device *adev = data; 3102 + 3103 + *val = adev->dm.disable_hpd_irq; 3104 + 3105 + return 0; 3106 + } 3107 + 3108 + DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get, 3109 + disable_hpd_set, "%llu\n"); 3110 + 3111 /* 3112 * Sets the DC visual confirm debug option from the given string. 3113 * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm ··· 3213 3214 debugfs_create_file_unsafe("amdgpu_dm_dcc_en", 0644, root, adev, 3215 &dcc_en_bits_fops); 3216 + 3217 + debugfs_create_file_unsafe("amdgpu_dm_disable_hpd", 0644, root, adev, 3218 + &disable_hpd_ops); 3219 + 3220 }
+7 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
··· 434 int link_index = aconnector->dc_link->link_index; 435 struct mod_hdcp_display *display = &hdcp_work[link_index].display; 436 struct mod_hdcp_link *link = &hdcp_work[link_index].link; 437 438 if (config->dpms_off) { 439 hdcp_remove_display(hdcp_work, link_index, aconnector); ··· 460 display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; 461 link->adjust.auth_delay = 3; 462 link->adjust.hdcp1.disable = 0; 463 464 - hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false); 465 } 466 467
··· 434 int link_index = aconnector->dc_link->link_index; 435 struct mod_hdcp_display *display = &hdcp_work[link_index].display; 436 struct mod_hdcp_link *link = &hdcp_work[link_index].link; 437 + struct drm_connector_state *conn_state; 438 439 if (config->dpms_off) { 440 hdcp_remove_display(hdcp_work, link_index, aconnector); ··· 459 display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; 460 link->adjust.auth_delay = 3; 461 link->adjust.hdcp1.disable = 0; 462 + conn_state = aconnector->base.state; 463 464 + pr_debug("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index, 465 + (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1, 466 + (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1); 467 + 468 + hdcp_update_display(hdcp_work, link_index, aconnector, conn_state->hdcp_content_type, false); 469 } 470 471
+25
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 711 enable ? "en" : "dis", ret); 712 return ret; 713 }
··· 711 enable ? "en" : "dis", ret); 712 return ret; 713 } 714 + 715 + void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) 716 + { 717 + /* TODO: virtual DPCD */ 718 + struct dc_link *link = stream->link; 719 + union down_spread_ctrl old_downspread; 720 + union down_spread_ctrl new_downspread; 721 + 722 + if (link->aux_access_disabled) 723 + return; 724 + 725 + if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, 726 + &old_downspread.raw, 727 + sizeof(old_downspread))) 728 + return; 729 + 730 + new_downspread.raw = old_downspread.raw; 731 + new_downspread.bits.IGNORE_MSA_TIMING_PARAM = 732 + (stream->ignore_msa_timing_param) ? 1 : 0; 733 + 734 + if (new_downspread.raw != old_downspread.raw) 735 + dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, 736 + &new_downspread.raw, 737 + sizeof(new_downspread)); 738 + }
+7 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 229 (aconnector->edid->extensions + 1) * EDID_LENGTH, 230 &init_params); 231 232 dc_sink->priv = aconnector; 233 /* dc_link_add_remote_sink returns a new reference */ 234 aconnector->dc_sink = dc_sink; ··· 750 if (!dc_dsc_compute_bandwidth_range( 751 stream->sink->ctx->dc->res_pool->dscs[0], 752 stream->sink->ctx->dc->debug.dsc_min_slice_height_override, 753 - dsc_policy.min_target_bpp, 754 - dsc_policy.max_target_bpp, 755 &stream->sink->dsc_caps.dsc_dec_caps, 756 &stream->timing, &params[count].bw_range)) 757 params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
··· 229 (aconnector->edid->extensions + 1) * EDID_LENGTH, 230 &init_params); 231 232 + if (!dc_sink) { 233 + DRM_ERROR("Unable to add a remote sink\n"); 234 + return 0; 235 + } 236 + 237 dc_sink->priv = aconnector; 238 /* dc_link_add_remote_sink returns a new reference */ 239 aconnector->dc_sink = dc_sink; ··· 745 if (!dc_dsc_compute_bandwidth_range( 746 stream->sink->ctx->dc->res_pool->dscs[0], 747 stream->sink->ctx->dc->debug.dsc_min_slice_height_override, 748 + dsc_policy.min_target_bpp * 16, 749 + dsc_policy.max_target_bpp * 16, 750 &stream->sink->dsc_caps.dsc_dec_caps, 751 &stream->timing, &params[count].bw_range)) 752 params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
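The * 16 on the DSC policy limits reflects the usual DSC convention of carrying target bits-per-pixel in 1/16-bpp (x16 fixed-point) units, which appears to be what dc_dsc_compute_bandwidth_range() expects for its min/max arguments. A small conversion sketch with illustrative values (the unit convention is an assumption, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t bpp_to_x16(uint32_t bpp)        { return bpp * 16; }
static double x16_to_bpp(uint32_t bpp_x16)      { return bpp_x16 / 16.0; }

int main(void)
{
        uint32_t min_bpp = 8, max_bpp = 16;     /* illustrative policy values */

        printf("min: %u bpp -> %u (x16)\n", min_bpp, bpp_to_x16(min_bpp));      /* 128 */
        printf("max: %u bpp -> %u (x16)\n", max_bpp, bpp_to_x16(max_bpp));      /* 256 */
        printf("back: 136 (x16) -> %.2f bpp\n", x16_to_bpp(136));               /* 8.50 */
        return 0;
}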
+55 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
··· 128 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 129 struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; 130 struct dc *dc = clk_mgr_base->ctx->dc; 131 - int display_count; 132 bool update_dppclk = false; 133 bool update_dispclk = false; 134 bool dpp_clock_lowered = false; ··· 209 context, 210 clk_mgr_base->clks.dppclk_khz, 211 safe_to_lower); 212 213 clk_mgr_base->clks.actual_dppclk_khz = 214 rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); ··· 769 } 770 }; 771 772 static struct wm_table lpddr4_wm_table_rn = { 773 .entries = { 774 { ··· 990 } else { 991 if (is_green_sardine) 992 rn_bw_params.wm_table = ddr4_wm_table_gs; 993 - else 994 - rn_bw_params.wm_table = ddr4_wm_table_rn; 995 } 996 /* Saved clocks configured at boot for debug purposes */ 997 rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); ··· 1013 if (status == PP_SMU_RESULT_OK && 1014 ctx->dc_bios && ctx->dc_bios->integrated_info) { 1015 rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info); 1016 } 1017 } 1018
··· 128 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 129 struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; 130 struct dc *dc = clk_mgr_base->ctx->dc; 131 + int display_count, i; 132 bool update_dppclk = false; 133 bool update_dispclk = false; 134 bool dpp_clock_lowered = false; ··· 209 context, 210 clk_mgr_base->clks.dppclk_khz, 211 safe_to_lower); 212 + 213 + for (i = 0; i < context->stream_count; i++) { 214 + if (context->streams[i]->signal == SIGNAL_TYPE_EDP && 215 + context->streams[i]->apply_seamless_boot_optimization) { 216 + dc_wait_for_vblank(dc, context->streams[i]); 217 + break; 218 + } 219 + } 220 221 clk_mgr_base->clks.actual_dppclk_khz = 222 rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); ··· 761 } 762 }; 763 764 + static struct wm_table ddr4_1R_wm_table_rn = { 765 + .entries = { 766 + { 767 + .wm_inst = WM_A, 768 + .wm_type = WM_TYPE_PSTATE_CHG, 769 + .pstate_latency_us = 11.72, 770 + .sr_exit_time_us = 13.90, 771 + .sr_enter_plus_exit_time_us = 14.80, 772 + .valid = true, 773 + }, 774 + { 775 + .wm_inst = WM_B, 776 + .wm_type = WM_TYPE_PSTATE_CHG, 777 + .pstate_latency_us = 11.72, 778 + .sr_exit_time_us = 13.90, 779 + .sr_enter_plus_exit_time_us = 14.80, 780 + .valid = true, 781 + }, 782 + { 783 + .wm_inst = WM_C, 784 + .wm_type = WM_TYPE_PSTATE_CHG, 785 + .pstate_latency_us = 11.72, 786 + .sr_exit_time_us = 13.90, 787 + .sr_enter_plus_exit_time_us = 14.80, 788 + .valid = true, 789 + }, 790 + { 791 + .wm_inst = WM_D, 792 + .wm_type = WM_TYPE_PSTATE_CHG, 793 + .pstate_latency_us = 11.72, 794 + .sr_exit_time_us = 13.90, 795 + .sr_enter_plus_exit_time_us = 14.80, 796 + .valid = true, 797 + }, 798 + } 799 + }; 800 + 801 static struct wm_table lpddr4_wm_table_rn = { 802 .entries = { 803 { ··· 945 } else { 946 if (is_green_sardine) 947 rn_bw_params.wm_table = ddr4_wm_table_gs; 948 + else { 949 + if (ctx->dc->config.is_single_rank_dimm) 950 + rn_bw_params.wm_table = ddr4_1R_wm_table_rn; 951 + else 952 + rn_bw_params.wm_table = ddr4_wm_table_rn; 953 + } 954 } 955 /* Saved clocks configured at boot for debug purposes */ 956 rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info); ··· 964 if (status == PP_SMU_RESULT_OK && 965 ctx->dc_bios && ctx->dc_bios->integrated_info) { 966 rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info); 967 + /* treat memory config as single channel if memory is asymmetrics. */ 968 + if (ctx->dc->config.is_asymmetric_memory) 969 + clk_mgr->base.bw_params->num_channels = 1; 970 } 971 } 972
+2 -2
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
··· 432 clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); 433 } 434 435 - static bool dcn3_is_smu_prsent(struct clk_mgr *clk_mgr_base) 436 { 437 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 438 return clk_mgr->smu_present; ··· 500 .are_clock_states_equal = dcn3_are_clock_states_equal, 501 .enable_pme_wa = dcn3_enable_pme_wa, 502 .notify_link_rate_change = dcn30_notify_link_rate_change, 503 - .is_smu_present = dcn3_is_smu_prsent 504 }; 505 506 static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
··· 432 clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); 433 } 434 435 + static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base) 436 { 437 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 438 return clk_mgr->smu_present; ··· 500 .are_clock_states_equal = dcn3_are_clock_states_equal, 501 .enable_pme_wa = dcn3_enable_pme_wa, 502 .notify_link_rate_change = dcn30_notify_link_rate_change, 503 + .is_smu_present = dcn3_is_smu_present 504 }; 505 506 static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
+32 -5
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 55 #include "link_encoder.h" 56 #include "link_enc_cfg.h" 57 58 #include "dc_link_ddc.h" 59 #include "dm_helpers.h" 60 #include "mem_input.h" ··· 1323 struct dc_link *link = sink->link; 1324 unsigned int i, enc_inst, tg_inst = 0; 1325 1326 - // Seamless port only support single DP and EDP so far 1327 - if ((sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT && 1328 - sink->sink_signal != SIGNAL_TYPE_EDP) || 1329 - sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 1330 return false; 1331 1332 /* Check for enabled DIG to identify enabled display */ 1333 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) ··· 1399 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) 1400 return false; 1401 1402 if (dc_is_dp_signal(link->connector_signal)) { 1403 unsigned int pix_clk_100hz; 1404 ··· 1430 } 1431 1432 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { 1433 return false; 1434 } 1435 ··· 2687 plane_state->triplebuffer_flips = true; 2688 } 2689 } 2690 } 2691 } 2692 ··· 2834 2835 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || 2836 !pipe_ctx->stream || pipe_ctx->stream != stream || 2837 - !pipe_ctx->plane_state->update_flags.bits.addr_update) 2838 continue; 2839 2840 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) ··· 3217 return; 3218 } 3219 } 3220 } 3221 3222 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
··· 55 #include "link_encoder.h" 56 #include "link_enc_cfg.h" 57 58 + #include "dc_link.h" 59 #include "dc_link_ddc.h" 60 #include "dm_helpers.h" 61 #include "mem_input.h" ··· 1322 struct dc_link *link = sink->link; 1323 unsigned int i, enc_inst, tg_inst = 0; 1324 1325 + /* Support seamless boot on EDP displays only */ 1326 + if (sink->sink_signal != SIGNAL_TYPE_EDP) { 1327 return false; 1328 + } 1329 1330 /* Check for enabled DIG to identify enabled display */ 1331 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) ··· 1399 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) 1400 return false; 1401 1402 + /* block DSC for now, as VBIOS does not currently support DSC timings */ 1403 + if (crtc_timing->flags.DSC) 1404 + return false; 1405 + 1406 if (dc_is_dp_signal(link->connector_signal)) { 1407 unsigned int pix_clk_100hz; 1408 ··· 1426 } 1427 1428 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { 1429 + return false; 1430 + } 1431 + 1432 + if (is_edp_ilr_optimization_required(link, crtc_timing)) { 1433 + DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); 1434 return false; 1435 } 1436 ··· 2678 plane_state->triplebuffer_flips = true; 2679 } 2680 } 2681 + if (update_type == UPDATE_TYPE_FULL) { 2682 + /* force vsync flip when reconfiguring pipes to prevent underflow */ 2683 + plane_state->flip_immediate = false; 2684 + } 2685 } 2686 } 2687 ··· 2821 2822 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || 2823 !pipe_ctx->stream || pipe_ctx->stream != stream || 2824 + !pipe_ctx->plane_state->update_flags.bits.addr_update || 2825 + pipe_ctx->plane_state->skip_manual_trigger) 2826 continue; 2827 2828 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) ··· 3203 return; 3204 } 3205 } 3206 + } 3207 + 3208 + void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream) 3209 + { 3210 + int i; 3211 + 3212 + for (i = 0; i < dc->res_pool->pipe_count; i++) 3213 + if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { 3214 + struct timing_generator *tg = 3215 + dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; 3216 + tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK); 3217 + break; 3218 + } 3219 } 3220 3221 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
+25 -62
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 1679 static void enable_stream_features(struct pipe_ctx *pipe_ctx) 1680 { 1681 struct dc_stream_state *stream = pipe_ctx->stream; 1682 - struct dc_link *link = stream->link; 1683 - union down_spread_ctrl old_downspread; 1684 - union down_spread_ctrl new_downspread; 1685 1686 - core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL, 1687 - &old_downspread.raw, sizeof(old_downspread)); 1688 1689 - new_downspread.raw = old_downspread.raw; 1690 1691 - new_downspread.bits.IGNORE_MSA_TIMING_PARAM = 1692 - (stream->ignore_msa_timing_param) ? 1 : 0; 1693 1694 - if (new_downspread.raw != old_downspread.raw) { 1695 - core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, 1696 - &new_downspread.raw, sizeof(new_downspread)); 1697 } 1698 } 1699 ··· 2819 2820 psr_context->psr_level.u32all = 0; 2821 2822 - #if defined(CONFIG_DRM_AMD_DC_DCN) 2823 /*skip power down the single pipe since it blocks the cstate*/ 2824 - if ((link->ctx->asic_id.chip_family == FAMILY_RV) && 2825 - ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev)) 2826 psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; 2827 - #endif 2828 2829 /* SMU will perform additional powerdown sequence. 2830 * For unsupported ASICs, set psr_level flag to skip PSR ··· 3142 return DC_OK; 3143 } 3144 3145 - enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link) 3146 - { 3147 - int i; 3148 - struct pipe_ctx *pipe_ctx; 3149 - 3150 - // Clear all of MST payload then reallocate 3151 - for (i = 0; i < MAX_PIPES; i++) { 3152 - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; 3153 - 3154 - /* driver enable split pipe for external monitors 3155 - * we have to check pipe_ctx is split pipe or not 3156 - * If it's split pipe, driver using top pipe to 3157 - * reaallocate. 3158 - */ 3159 - if (!pipe_ctx || pipe_ctx->top_pipe) 3160 - continue; 3161 - 3162 - if (pipe_ctx->stream && pipe_ctx->stream->link == link && 3163 - pipe_ctx->stream->dpms_off == false && 3164 - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 3165 - deallocate_mst_payload(pipe_ctx); 3166 - } 3167 - } 3168 - 3169 - for (i = 0; i < MAX_PIPES; i++) { 3170 - pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; 3171 - 3172 - if (!pipe_ctx || pipe_ctx->top_pipe) 3173 - continue; 3174 - 3175 - if (pipe_ctx->stream && pipe_ctx->stream->link == link && 3176 - pipe_ctx->stream->dpms_off == false && 3177 - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 3178 - /* enable/disable PHY will clear connection between BE and FE 3179 - * need to restore it. 3180 - */ 3181 - link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, 3182 - pipe_ctx->stream_res.stream_enc->id, true); 3183 - dc_link_allocate_mst_payload(pipe_ctx); 3184 - } 3185 - } 3186 - 3187 - return DC_OK; 3188 - } 3189 3190 #if defined(CONFIG_DRM_AMD_DC_HDCP) 3191 static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) ··· 3255 3256 /* eDP lit up by bios already, no need to enable again. 
*/ 3257 if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && 3258 - apply_edp_fast_boot_optimization) { 3259 pipe_ctx->stream->dpms_off = false; 3260 #if defined(CONFIG_DRM_AMD_DC_HDCP) 3261 update_psp_stream_config(pipe_ctx, false); ··· 3318 /* Set DPS PPS SDP (AKA "info frames") */ 3319 if (pipe_ctx->stream->timing.flags.DSC) { 3320 if (dc_is_dp_signal(pipe_ctx->stream->signal) || 3321 - dc_is_virtual_signal(pipe_ctx->stream->signal)) 3322 dp_set_dsc_pps_sdp(pipe_ctx, true); 3323 } 3324 3325 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) ··· 3716 if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && 3717 link->local_sink && 3718 link->local_sink->edid_caps.panel_patch.disable_fec) || 3719 - link->connector_signal == SIGNAL_TYPE_EDP) // Disable FEC for eDP 3720 is_fec_disable = true; 3721 3722 if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
··· 1679 static void enable_stream_features(struct pipe_ctx *pipe_ctx) 1680 { 1681 struct dc_stream_state *stream = pipe_ctx->stream; 1682 1683 + if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) { 1684 + struct dc_link *link = stream->link; 1685 + union down_spread_ctrl old_downspread; 1686 + union down_spread_ctrl new_downspread; 1687 1688 + core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL, 1689 + &old_downspread.raw, sizeof(old_downspread)); 1690 1691 + new_downspread.raw = old_downspread.raw; 1692 1693 + new_downspread.bits.IGNORE_MSA_TIMING_PARAM = 1694 + (stream->ignore_msa_timing_param) ? 1 : 0; 1695 + 1696 + if (new_downspread.raw != old_downspread.raw) { 1697 + core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, 1698 + &new_downspread.raw, sizeof(new_downspread)); 1699 + } 1700 + 1701 + } else { 1702 + dm_helpers_mst_enable_stream_features(stream); 1703 } 1704 } 1705 ··· 2813 2814 psr_context->psr_level.u32all = 0; 2815 2816 /*skip power down the single pipe since it blocks the cstate*/ 2817 + if (link->ctx->asic_id.chip_family >= FAMILY_RV) 2818 psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; 2819 2820 /* SMU will perform additional powerdown sequence. 2821 * For unsupported ASICs, set psr_level flag to skip PSR ··· 3139 return DC_OK; 3140 } 3141 3142 3143 #if defined(CONFIG_DRM_AMD_DC_HDCP) 3144 static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) ··· 3296 3297 /* eDP lit up by bios already, no need to enable again. */ 3298 if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && 3299 + apply_edp_fast_boot_optimization && 3300 + !pipe_ctx->stream->timing.flags.DSC) { 3301 pipe_ctx->stream->dpms_off = false; 3302 #if defined(CONFIG_DRM_AMD_DC_HDCP) 3303 update_psp_stream_config(pipe_ctx, false); ··· 3358 /* Set DPS PPS SDP (AKA "info frames") */ 3359 if (pipe_ctx->stream->timing.flags.DSC) { 3360 if (dc_is_dp_signal(pipe_ctx->stream->signal) || 3361 + dc_is_virtual_signal(pipe_ctx->stream->signal)) { 3362 + dp_set_dsc_on_rx(pipe_ctx, true); 3363 dp_set_dsc_pps_sdp(pipe_ctx, true); 3364 + } 3365 } 3366 3367 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) ··· 3754 if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && 3755 link->local_sink && 3756 link->local_sink->edid_caps.panel_patch.disable_fec) || 3757 + (link->connector_signal == SIGNAL_TYPE_EDP && 3758 + link->dc->debug.force_enable_edp_fec == false)) // Disable FEC for eDP 3759 is_fec_disable = true; 3760 3761 if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
+19 -14
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 2506 hdmi_info.bits.ITC = itc_value; 2507 } 2508 2509 /* TODO : We should handle YCC quantization */ 2510 /* but we do not have matrix calculation */ 2511 - if (stream->qs_bit == 1 && 2512 - stream->qy_bit == 1) { 2513 if (color_space == COLOR_SPACE_SRGB || 2514 - color_space == COLOR_SPACE_2020_RGB_FULLRANGE) { 2515 - hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE; 2516 hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2517 - } else if (color_space == COLOR_SPACE_SRGB_LIMITED || 2518 - color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) { 2519 - hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE; 2520 hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2521 - } else { 2522 - hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; 2523 hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2524 - } 2525 - } else { 2526 - hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; 2527 - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2528 - } 2529 2530 ///VIC 2531 format = stream->timing.timing_3d_format;
··· 2506 hdmi_info.bits.ITC = itc_value; 2507 } 2508 2509 + if (stream->qs_bit == 1) { 2510 + if (color_space == COLOR_SPACE_SRGB || 2511 + color_space == COLOR_SPACE_2020_RGB_FULLRANGE) 2512 + hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE; 2513 + else if (color_space == COLOR_SPACE_SRGB_LIMITED || 2514 + color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) 2515 + hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE; 2516 + else 2517 + hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; 2518 + } else 2519 + hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; 2520 + 2521 /* TODO : We should handle YCC quantization */ 2522 /* but we do not have matrix calculation */ 2523 + if (stream->qy_bit == 1) { 2524 if (color_space == COLOR_SPACE_SRGB || 2525 + color_space == COLOR_SPACE_2020_RGB_FULLRANGE) 2526 hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2527 + else if (color_space == COLOR_SPACE_SRGB_LIMITED || 2528 + color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) 2529 hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2530 + else 2531 hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2532 + } else 2533 + hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2534 2535 ///VIC 2536 format = stream->timing.timing_3d_format;
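The dc_resource.c hunk above splits the AVI infoframe quantization handling: the RGB range bits (Q0_Q1) now depend only on the sink's qs_bit, while the YCC range bits (YQ0_YQ1) depend only on qy_bit, instead of both bits having to be set together. A minimal sketch of the same split decision, using illustrative stand-in enums rather than the driver's own types:

    #include <stdint.h>

    /* Illustrative stand-ins; the real driver uses its own enums and union bitfields. */
    enum rgb_range { RGB_DEFAULT, RGB_LIMITED, RGB_FULL };
    enum color_space { CS_SRGB, CS_SRGB_LIMITED, CS_2020_RGB_FULL, CS_2020_RGB_LIMITED, CS_OTHER };

    /* qs_bit set: the sink accepts an explicit RGB quantization range in the AVI infoframe. */
    static enum rgb_range pick_rgb_range(int qs_bit, enum color_space cs)
    {
        if (!qs_bit)
            return RGB_DEFAULT;
        if (cs == CS_SRGB || cs == CS_2020_RGB_FULL)
            return RGB_FULL;
        if (cs == CS_SRGB_LIMITED || cs == CS_2020_RGB_LIMITED)
            return RGB_LIMITED;
        return RGB_DEFAULT;
    }

    /* YCC quantization stays limited in every branch until matrix calculation exists,
     * which is why all YQ0_YQ1 assignments in the hunk resolve to the same value. */
    static int pick_ycc_limited_range(int qy_bit)
    {
        (void)qy_bit;
        return 1;
    }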
+11 -2
drivers/gpu/drm/amd/display/dc/dc.h
··· 45 /* forward declaration */ 46 struct aux_payload; 47 48 - #define DC_VER "3.2.130" 49 50 #define MAX_SURFACES 3 51 #define MAX_PLANES 6 ··· 293 bool gpu_vm_support; 294 bool disable_disp_pll_sharing; 295 bool fbc_support; 296 - bool optimize_edp_link_rate; 297 bool disable_fractional_pwm; 298 bool allow_seamless_boot_optimization; 299 bool power_down_display_on_boot; ··· 308 #endif 309 uint64_t vblank_alignment_dto_params; 310 uint8_t vblank_alignment_max_frame_time_diff; 311 }; 312 313 enum visual_confirm { ··· 542 543 /* Enable dmub aux for legacy ddc */ 544 bool enable_dmub_aux_for_legacy_ddc; 545 }; 546 547 struct dc_debug_data { ··· 719 void dc_deinit_callbacks(struct dc *dc); 720 void dc_destroy(struct dc **dc); 721 722 /******************************************************************************* 723 * Surface Interfaces 724 ******************************************************************************/ ··· 907 908 union surface_update_flags update_flags; 909 bool flip_int_enabled; 910 /* private to DC core */ 911 struct dc_plane_status status; 912 struct dc_context *ctx;
··· 45 /* forward declaration */ 46 struct aux_payload; 47 48 + #define DC_VER "3.2.132" 49 50 #define MAX_SURFACES 3 51 #define MAX_PLANES 6 ··· 293 bool gpu_vm_support; 294 bool disable_disp_pll_sharing; 295 bool fbc_support; 296 bool disable_fractional_pwm; 297 bool allow_seamless_boot_optimization; 298 bool power_down_display_on_boot; ··· 309 #endif 310 uint64_t vblank_alignment_dto_params; 311 uint8_t vblank_alignment_max_frame_time_diff; 312 + bool is_asymmetric_memory; 313 + bool is_single_rank_dimm; 314 }; 315 316 enum visual_confirm { ··· 541 542 /* Enable dmub aux for legacy ddc */ 543 bool enable_dmub_aux_for_legacy_ddc; 544 + bool optimize_edp_link_rate; /* eDP ILR */ 545 + /* force enable edp FEC */ 546 + bool force_enable_edp_fec; 547 + /* FEC/PSR1 sequence enable delay in 100us */ 548 + uint8_t fec_enable_delay_in100us; 549 }; 550 551 struct dc_debug_data { ··· 713 void dc_deinit_callbacks(struct dc *dc); 714 void dc_destroy(struct dc **dc); 715 716 + void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream); 717 /******************************************************************************* 718 * Surface Interfaces 719 ******************************************************************************/ ··· 900 901 union surface_update_flags update_flags; 902 bool flip_int_enabled; 903 + bool skip_manual_trigger; 904 + 905 /* private to DC core */ 906 struct dc_plane_status status; 907 struct dc_context *ctx;
-1
drivers/gpu/drm/amd/display/dc/dc_link.h
··· 276 bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); 277 bool dc_link_get_hpd_state(struct dc_link *dc_link); 278 enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); 279 - enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link); 280 281 /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). 282 * Return:
··· 276 bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); 277 bool dc_link_get_hpd_state(struct dc_link *dc_link); 278 enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); 279 280 /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). 281 * Return:
-1
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 238 bool apply_seamless_boot_optimization; 239 240 uint32_t stream_id; 241 - bool is_dsc_enabled; 242 243 struct test_pattern test_pattern; 244 union stream_update_flags update_flags;
··· 238 bool apply_seamless_boot_optimization; 239 240 uint32_t stream_id; 241 242 struct test_pattern test_pattern; 243 union stream_update_flags update_flags;
+2
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
··· 284 copy_settings_data->debug.u32All = 0; 285 copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR; 286 copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1; 287 288 dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 289 dc_dmub_srv_cmd_execute(dc->dmub_srv);
··· 284 copy_settings_data->debug.u32All = 0; 285 copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR; 286 copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1; 287 + copy_settings_data->fec_enable_status = (link->fec_state == dc_link_fec_enabled); 288 + copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us; 289 290 dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 291 dc_dmub_srv_cmd_execute(dc->dmub_srv);
+8 -2
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
··· 48 #include "stream_encoder.h" 49 #include "link_encoder.h" 50 #include "link_hwss.h" 51 #include "clock_source.h" 52 #include "clk_mgr.h" 53 #include "abm.h" ··· 1695 bool can_apply_edp_fast_boot = false; 1696 bool can_apply_seamless_boot = false; 1697 bool keep_edp_vdd_on = false; 1698 1699 get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num); 1700 get_edp_links(dc, edp_links, &edp_num); ··· 1717 /* Set optimization flag on eDP stream*/ 1718 if (edp_stream_num && edp_link->link_status.link_active) { 1719 edp_stream = edp_streams[0]; 1720 - edp_stream->apply_edp_fast_boot_optimization = true; 1721 - can_apply_edp_fast_boot = true; 1722 break; 1723 } 1724 }
··· 48 #include "stream_encoder.h" 49 #include "link_encoder.h" 50 #include "link_hwss.h" 51 + #include "dc_link_dp.h" 52 #include "clock_source.h" 53 #include "clk_mgr.h" 54 #include "abm.h" ··· 1694 bool can_apply_edp_fast_boot = false; 1695 bool can_apply_seamless_boot = false; 1696 bool keep_edp_vdd_on = false; 1697 + DC_LOGGER_INIT(); 1698 + 1699 1700 get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num); 1701 get_edp_links(dc, edp_links, &edp_num); ··· 1714 /* Set optimization flag on eDP stream*/ 1715 if (edp_stream_num && edp_link->link_status.link_active) { 1716 edp_stream = edp_streams[0]; 1717 + can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing); 1718 + edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; 1719 + if (can_apply_edp_fast_boot) 1720 + DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n"); 1721 + 1722 break; 1723 } 1724 }
+9 -6
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
··· 1 /* 2 - * Copyright 2012-17 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), ··· 181 else 182 Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0 183 */ 184 - if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width 185 - + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) { 186 - value = 1; 187 - } else 188 - value = 0; 189 REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value); 190 } 191
··· 1 /* 2 + * Copyright 2012-2021 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), ··· 181 else 182 Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0 183 */ 184 + if (pipe_dest->htotal != 0) { 185 + if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width 186 + + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) { 187 + value = 1; 188 + } else 189 + value = 0; 190 + } 191 + 192 REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value); 193 } 194
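The dcn20_hubp.c hunk guards the HUBP_VREADY_AT_OR_AFTER_VSYNC computation against a zero htotal, which would otherwise divide by zero before the timing is programmed. A small sketch of the guarded computation, with plain integers standing in for the pipe destination parameters:

    #include <stdint.h>

    /* Returns 1 when vready would land at or after vsync, 0 otherwise.
     * When htotal is not yet programmed, keep the register value at 0,
     * mirroring the divide-by-zero guard added in the hunk above. */
    static int vready_at_or_after_vsync(uint32_t vstartup_start, uint32_t vready_offset,
                                        uint32_t vupdate_width, uint32_t vupdate_offset,
                                        uint32_t htotal, uint32_t vblank_end)
    {
        if (htotal == 0)
            return 0;
        return (vstartup_start -
                (vready_offset + vupdate_width + vupdate_offset) / htotal) <= vblank_end;
    }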
+4 -3
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
··· 2201 pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2; 2202 break; 2203 case PIXEL_ENCODING_YCBCR422: 2204 - if (true) /* todo */ 2205 - pipes[pipe_cnt].dout.output_format = dm_s422; 2206 - else 2207 pipes[pipe_cnt].dout.output_format = dm_n422; 2208 pipes[pipe_cnt].dout.output_bpp = output_bpc * 2; 2209 break; 2210 default:
··· 2201 pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2; 2202 break; 2203 case PIXEL_ENCODING_YCBCR422: 2204 + if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC && 2205 + !res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.ycbcr422_simple) 2206 pipes[pipe_cnt].dout.output_format = dm_n422; 2207 + else 2208 + pipes[pipe_cnt].dout.output_format = dm_s422; 2209 pipes[pipe_cnt].dout.output_bpp = output_bpc * 2; 2210 break; 2211 default:
+2
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
··· 218 cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; 219 cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp; 220 cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16; 221 cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); 222 223 dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
··· 218 cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT; 219 cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp; 220 cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16; 221 + cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_1; 222 + cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_cntl->inst); 223 cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); 224 225 dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
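The dcn21_hwseq.c hunk versions the ABM set-backlight command and targets it at a specific panel through an instance mask instead of implicitly addressing panel 0. The mask carries one bit per panel-control hardware instance; a short standalone illustration of building and combining such masks (illustrative values, not driver state):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t panel0_mask = 0x01 << 0;   /* panel control HW instance 0 */
        uint8_t panel1_mask = 0x01 << 1;   /* panel control HW instance 1 */
        uint8_t both = panel0_mask | panel1_mask;

        /* prints: panel0=0x01 panel1=0x02 both=0x03 */
        printf("panel0=0x%02x panel1=0x%02x both=0x%02x\n",
               panel0_mask, panel1_mask, both);
        return 0;
    }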
+2
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
··· 99 .set_pipe = dcn21_set_pipe, 100 .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, 101 .get_dcc_en_bits = dcn10_get_dcc_en_bits, 102 }; 103 104 static const struct hwseq_private_funcs dcn301_private_funcs = {
··· 99 .set_pipe = dcn21_set_pipe, 100 .set_disp_pattern_generator = dcn30_set_disp_pattern_generator, 101 .get_dcc_en_bits = dcn10_get_dcc_en_bits, 102 + .optimize_pwr_state = dcn21_optimize_pwr_state, 103 + .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state, 104 }; 105 106 static const struct hwseq_private_funcs dcn301_private_funcs = {
+2
drivers/gpu/drm/amd/display/dc/dm_helpers.h
··· 147 bool dm_helpers_is_dp_sink_present( 148 struct dc_link *link); 149 150 enum dc_edid_status dm_helpers_read_local_edid( 151 struct dc_context *ctx, 152 struct dc_link *link,
··· 147 bool dm_helpers_is_dp_sink_present( 148 struct dc_link *link); 149 150 + void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream); 151 + 152 enum dc_edid_status dm_helpers_read_local_edid( 153 struct dc_context *ctx, 154 struct dc_link *link,
+29 -2
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
··· 47 48 /* Firmware versioning. */ 49 #ifdef DMUB_EXPOSE_VERSION 50 - #define DMUB_FW_VERSION_GIT_HASH 0x7f2db1846 51 #define DMUB_FW_VERSION_MAJOR 0 52 #define DMUB_FW_VERSION_MINOR 0 53 - #define DMUB_FW_VERSION_REVISION 59 54 #define DMUB_FW_VERSION_TEST 0 55 #define DMUB_FW_VERSION_VBIOS 0 56 #define DMUB_FW_VERSION_HOTFIX 0 ··· 119 120 /* Trace buffer offset for entry */ 121 #define TRACE_BUFFER_ENTRY_OFFSET 16 122 123 /** 124 * Physical framebuffer address location, 64-bit. ··· 1635 * Requested backlight level from user. 1636 */ 1637 uint32_t backlight_user_level; 1638 }; 1639 1640 /**
··· 47 48 /* Firmware versioning. */ 49 #ifdef DMUB_EXPOSE_VERSION 50 + #define DMUB_FW_VERSION_GIT_HASH 0x23db9b126 51 #define DMUB_FW_VERSION_MAJOR 0 52 #define DMUB_FW_VERSION_MINOR 0 53 + #define DMUB_FW_VERSION_REVISION 62 54 #define DMUB_FW_VERSION_TEST 0 55 #define DMUB_FW_VERSION_VBIOS 0 56 #define DMUB_FW_VERSION_HOTFIX 0 ··· 119 120 /* Trace buffer offset for entry */ 121 #define TRACE_BUFFER_ENTRY_OFFSET 16 122 + 123 + /** 124 + * ABM backlight control version legacy 125 + */ 126 + #define DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_UNKNOWN 0x0 127 + 128 + /** 129 + * ABM backlight control version with multi edp support 130 + */ 131 + #define DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_1 0x1 132 133 /** 134 * Physical framebuffer address location, 64-bit. ··· 1625 * Requested backlight level from user. 1626 */ 1627 uint32_t backlight_user_level; 1628 + 1629 + /** 1630 + * Backlight data version. 1631 + */ 1632 + uint8_t version; 1633 + 1634 + /** 1635 + * Panel Control HW instance mask. 1636 + * Bit 0 is Panel Control HW instance 0. 1637 + * Bit 1 is Panel Control HW instance 1. 1638 + */ 1639 + uint8_t panel_mask; 1640 + 1641 + /** 1642 + * Explicit padding to 4 byte boundary. 1643 + */ 1644 + uint8_t pad[2]; 1645 }; 1646 1647 /**
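In the dmub_cmd.h hunk, the two new uint8_t fields are followed by an explicit pad[2] so the backlight payload ends on a 4-byte boundary for the firmware interface. A compile-time check of that property on a reduced, illustrative copy of the struct (not the real header definition):

    #include <stdint.h>
    #include <assert.h>

    struct abm_set_backlight_data_sketch {
        uint32_t frame_ramp;
        uint32_t backlight_user_level;
        uint8_t  version;
        uint8_t  panel_mask;
        uint8_t  pad[2];   /* explicit padding to a 4-byte boundary */
    };

    /* On most ABIs the compiler would round the size up anyway, but the padding
     * bytes would be implicit and uninitialized; spelling them out keeps the
     * wire layout between driver and DMUB firmware unambiguous. */
    static_assert(sizeof(struct abm_set_backlight_data_sketch) % 4 == 0,
                  "ABM backlight payload must be a multiple of 4 bytes");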
-2
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
··· 427 event_ctx->unexpected_event = 1; 428 goto out; 429 } 430 - if (!mod_hdcp_is_link_encryption_enabled(hdcp)) 431 - goto out; 432 433 if (status == MOD_HDCP_STATUS_SUCCESS) 434 mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
··· 427 event_ctx->unexpected_event = 1; 428 goto out; 429 } 430 431 if (status == MOD_HDCP_STATUS_SUCCESS) 432 mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
-2
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
··· 564 event_ctx->unexpected_event = 1; 565 goto out; 566 } 567 - if (!mod_hdcp_is_link_encryption_enabled(hdcp)) 568 - goto out; 569 570 process_rxstatus(hdcp, event_ctx, input, &status); 571
··· 564 event_ctx->unexpected_event = 1; 565 goto out; 566 } 567 568 process_rxstatus(hdcp, event_ctx, input, &status); 569
+2
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
··· 791 TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) { 792 hdcp->connection.is_hdcp2_revoked = 1; 793 status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED; 794 } 795 } 796 mutex_unlock(&psp->hdcp_context.mutex);
··· 791 TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) { 792 hdcp->connection.is_hdcp2_revoked = 1; 793 status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED; 794 + } else { 795 + status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE; 796 } 797 } 798 mutex_unlock(&psp->hdcp_context.mutex);
+16
drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
··· 617 #define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x30000000L 618 #define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0xC0000000L 619 620 // addressBlock: gc_gfxudec 621 //GRBM_GFX_INDEX 622 #define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
··· 617 #define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x30000000L 618 #define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0xC0000000L 619 620 + //GCEA_ERR_STATUS 621 + #define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 622 + #define GCEA_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 623 + #define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 624 + #define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa 625 + #define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb 626 + #define GCEA_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc 627 + #define GCEA_ERR_STATUS__FUE_FLAG__SHIFT 0xd 628 + #define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL 629 + #define GCEA_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L 630 + #define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L 631 + #define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L 632 + #define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L 633 + #define GCEA_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L 634 + #define GCEA_ERR_STATUS__FUE_FLAG_MASK 0x00002000L 635 + 636 // addressBlock: gc_gfxudec 637 //GRBM_GFX_INDEX 638 #define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
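The new GCEA_ERR_STATUS definitions follow the usual register-header convention of one shift and one mask per field; a field is read by masking the register value and shifting it down. A small example using two of the definitions added above:

    #include <stdint.h>

    #define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
    #define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK   0x0000000FL
    #define GCEA_ERR_STATUS__FUE_FLAG__SHIFT         0xd
    #define GCEA_ERR_STATUS__FUE_FLAG_MASK           0x00002000L

    /* Extract the SDP read-response status field from a raw register read. */
    static uint32_t sdp_rdrsp_status(uint32_t reg)
    {
        return (reg & GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK) >>
               GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT;
    }

    /* Returns nonzero when the fatal/uncorrectable error flag is set. */
    static uint32_t fue_flag_set(uint32_t reg)
    {
        return (reg & GCEA_ERR_STATUS__FUE_FLAG_MASK) >>
               GCEA_ERR_STATUS__FUE_FLAG__SHIFT;
    }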
+41
drivers/gpu/drm/amd/include/atomfirmware.h
··· 3336 SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK 3337 }; 3338 3339 struct atom_get_smu_clock_info_output_parameters_v3_1 3340 { 3341 union {
··· 3336 SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK 3337 }; 3338 3339 + enum atom_smu12_syspll_id { 3340 + SMU12_SYSPLL0_ID = 0, 3341 + SMU12_SYSPLL1_ID = 1, 3342 + SMU12_SYSPLL2_ID = 2, 3343 + SMU12_SYSPLL3_0_ID = 3, 3344 + SMU12_SYSPLL3_1_ID = 4, 3345 + }; 3346 + 3347 + enum atom_smu12_syspll0_clock_id { 3348 + SMU12_SYSPLL0_SMNCLK_ID = 0, // SOCCLK 3349 + SMU12_SYSPLL0_SOCCLK_ID = 1, // SOCCLK 3350 + SMU12_SYSPLL0_MP0CLK_ID = 2, // MP0CLK 3351 + SMU12_SYSPLL0_MP1CLK_ID = 3, // MP1CLK 3352 + SMU12_SYSPLL0_MP2CLK_ID = 4, // MP2CLK 3353 + SMU12_SYSPLL0_VCLK_ID = 5, // VCLK 3354 + SMU12_SYSPLL0_LCLK_ID = 6, // LCLK 3355 + SMU12_SYSPLL0_DCLK_ID = 7, // DCLK 3356 + SMU12_SYSPLL0_ACLK_ID = 8, // ACLK 3357 + SMU12_SYSPLL0_ISPCLK_ID = 9, // ISPCLK 3358 + SMU12_SYSPLL0_SHUBCLK_ID = 10, // SHUBCLK 3359 + }; 3360 + 3361 + enum atom_smu12_syspll1_clock_id { 3362 + SMU12_SYSPLL1_DISPCLK_ID = 0, // DISPCLK 3363 + SMU12_SYSPLL1_DPPCLK_ID = 1, // DPPCLK 3364 + SMU12_SYSPLL1_DPREFCLK_ID = 2, // DPREFCLK 3365 + SMU12_SYSPLL1_DCFCLK_ID = 3, // DCFCLK 3366 + }; 3367 + 3368 + enum atom_smu12_syspll2_clock_id { 3369 + SMU12_SYSPLL2_Pre_GFXCLK_ID = 0, // Pre_GFXCLK 3370 + }; 3371 + 3372 + enum atom_smu12_syspll3_0_clock_id { 3373 + SMU12_SYSPLL3_0_FCLK_ID = 0, // FCLK 3374 + }; 3375 + 3376 + enum atom_smu12_syspll3_1_clock_id { 3377 + SMU12_SYSPLL3_1_UMCCLK_ID = 0, // UMCCLK 3378 + }; 3379 + 3380 struct atom_get_smu_clock_info_output_parameters_v3_1 3381 { 3382 union {
+3 -1
drivers/gpu/drm/amd/pm/amdgpu_pm.c
··· 1844 if (asic_type < CHIP_VEGA10) 1845 *states = ATTR_STATE_UNSUPPORTED; 1846 } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) { 1847 - if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS) 1848 *states = ATTR_STATE_UNSUPPORTED; 1849 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { 1850 if (asic_type < CHIP_VEGA20)
··· 1844 if (asic_type < CHIP_VEGA10) 1845 *states = ATTR_STATE_UNSUPPORTED; 1846 } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) { 1847 + if (asic_type < CHIP_VEGA10 || 1848 + asic_type == CHIP_ARCTURUS || 1849 + asic_type == CHIP_ALDEBARAN) 1850 *states = ATTR_STATE_UNSUPPORTED; 1851 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { 1852 if (asic_type < CHIP_VEGA20)
+39 -1
drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
··· 26 // *** IMPORTANT *** 27 // SMU TEAM: Always increment the interface version if 28 // any structure is changed in this file 29 - #define SMU13_DRIVER_IF_VERSION 2 30 31 typedef struct { 32 int32_t value; ··· 191 uint16_t SocTemperature; //[centi-Celsius] 192 uint16_t EdgeTemperature; 193 uint16_t ThrottlerStatus; 194 } SmuMetrics_t; 195 196
··· 26 // *** IMPORTANT *** 27 // SMU TEAM: Always increment the interface version if 28 // any structure is changed in this file 29 + #define SMU13_DRIVER_IF_VERSION 3 30 31 typedef struct { 32 int32_t value; ··· 191 uint16_t SocTemperature; //[centi-Celsius] 192 uint16_t EdgeTemperature; 193 uint16_t ThrottlerStatus; 194 + } SmuMetrics_legacy_t; 195 + 196 + typedef struct { 197 + uint16_t GfxclkFrequency; //[MHz] 198 + uint16_t SocclkFrequency; //[MHz] 199 + uint16_t VclkFrequency; //[MHz] 200 + uint16_t DclkFrequency; //[MHz] 201 + uint16_t MemclkFrequency; //[MHz] 202 + uint16_t spare; 203 + 204 + uint16_t GfxActivity; //[centi] 205 + uint16_t UvdActivity; //[centi] 206 + uint16_t C0Residency[4]; //percentage 207 + 208 + uint16_t Voltage[3]; //[mV] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX 209 + uint16_t Current[3]; //[mA] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX 210 + uint16_t Power[3]; //[mW] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX 211 + uint16_t CurrentSocketPower; //[mW] 212 + 213 + //3rd party tools in Windows need info in the case of APUs 214 + uint16_t CoreFrequency[4]; //[MHz] 215 + uint16_t CorePower[4]; //[mW] 216 + uint16_t CoreTemperature[4]; //[centi-Celsius] 217 + uint16_t L3Frequency[1]; //[MHz] 218 + uint16_t L3Temperature[1]; //[centi-Celsius] 219 + 220 + uint16_t GfxTemperature; //[centi-Celsius] 221 + uint16_t SocTemperature; //[centi-Celsius] 222 + uint16_t EdgeTemperature; 223 + uint16_t ThrottlerStatus; 224 + } SmuMetricsTable_t; 225 + 226 + typedef struct { 227 + SmuMetricsTable_t Current; 228 + SmuMetricsTable_t Average; 229 + //uint32_t AccCnt; 230 + uint32_t SampleStartTime; 231 + uint32_t SampleStopTime; 232 } SmuMetrics_t; 233 234
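The interface bump to version 3 renames the flat metrics layout to SmuMetrics_legacy_t and introduces a new SmuMetrics_t carrying a Current and an Average snapshot plus sample timestamps. Consumers therefore have to size and interpret the shared table by the reported interface version; a minimal sketch of that selection (the vangogh_ppt.c hunk further below does the same with the driver's real helpers):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative only: choose the metrics table size from the SMU interface
     * version, mirroring the "< 0x3 means legacy layout" cutoff used by the driver. */
    static size_t metrics_table_size(uint32_t if_version,
                                     size_t legacy_size, size_t current_size)
    {
        return (if_version < 0x3) ? legacy_size : current_size;
    }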
+1 -1
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
··· 32 #define SMU11_DRIVER_IF_VERSION_NV14 0x38 33 #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D 34 #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE 35 - #define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02 36 #define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF 37 38 /* MP Apertures */
··· 32 #define SMU11_DRIVER_IF_VERSION_NV14 0x38 33 #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D 34 #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE 35 + #define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03 36 #define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF 37 38 /* MP Apertures */
+2
drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
··· 60 61 int smu_v12_0_set_driver_table_location(struct smu_context *smu); 62 63 #endif 64 #endif
··· 60 61 int smu_v12_0_set_driver_table_location(struct smu_context *smu); 62 63 + int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu); 64 + 65 #endif 66 #endif
+1
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 2221 dev_err(smu->adev->dev, 2222 "New power limit (%d) is over the max allowed %d\n", 2223 limit, smu->max_power_limit); 2224 goto out; 2225 } 2226
··· 2221 dev_err(smu->adev->dev, 2222 "New power limit (%d) is over the max allowed %d\n", 2223 limit, smu->max_power_limit); 2224 + ret = -EINVAL; 2225 goto out; 2226 } 2227
+360 -54
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 194 { 195 struct smu_table_context *smu_table = &smu->smu_table; 196 struct smu_table *tables = smu_table->tables; 197 198 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 199 - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 200 - SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 201 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 202 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 203 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); ··· 212 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 213 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), 214 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 215 - smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 216 if (!smu_table->metrics_table) 217 goto err0_out; 218 smu_table->metrics_time = 0; ··· 251 return -ENOMEM; 252 } 253 254 - static int vangogh_get_smu_metrics_data(struct smu_context *smu, 255 MetricsMember_t member, 256 uint32_t *value) 257 { 258 struct smu_table_context *smu_table = &smu->smu_table; 259 - 260 - SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 261 int ret = 0; 262 263 mutex_lock(&smu->metrics_lock); ··· 270 } 271 272 switch (member) { 273 - case METRICS_AVERAGE_GFXCLK: 274 *value = metrics->GfxclkFrequency; 275 break; 276 case METRICS_AVERAGE_SOCCLK: ··· 282 case METRICS_AVERAGE_DCLK: 283 *value = metrics->DclkFrequency; 284 break; 285 - case METRICS_AVERAGE_UCLK: 286 *value = metrics->MemclkFrequency; 287 break; 288 case METRICS_AVERAGE_GFXACTIVITY: ··· 322 } 323 324 mutex_unlock(&smu->metrics_lock); 325 326 return ret; 327 } ··· 559 return 0; 560 } 561 562 - static int vangogh_print_clk_levels(struct smu_context *smu, 563 enum smu_clk_type clk_type, char *buf) 564 { 565 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 566 - SmuMetrics_t metrics; 567 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 568 int i, size = 0, ret = 0; 569 uint32_t cur_value = 0, value = 0, count = 0; ··· 656 } 657 658 return size; 659 } 660 661 static int vangogh_get_profiling_clk_mask(struct smu_context *smu, ··· 1092 return ret; 1093 break; 1094 case SMU_FCLK: 1095 - case SMU_MCLK: 1096 ret = smu_cmn_send_smc_msg_with_param(smu, 1097 SMU_MSG_SetHardMinFclkByFreq, 1098 min, NULL); ··· 1179 if (ret) 1180 return ret; 1181 break; 1182 - case SMU_MCLK: 1183 case SMU_FCLK: 1184 ret = vangogh_get_dpm_clk_limited(smu, 1185 clk_type, soft_min_level, &min_freq); ··· 1265 SMU_SOCCLK, 1266 SMU_VCLK, 1267 SMU_DCLK, 1268 - SMU_MCLK, 1269 SMU_FCLK, 1270 }; 1271 ··· 1293 enum smu_clk_type clk_type; 1294 uint32_t feature; 1295 } clk_feature_map[] = { 1296 - {SMU_MCLK, SMU_FEATURE_DPM_FCLK_BIT}, 1297 {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT}, 1298 {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, 1299 {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT}, ··· 1424 if (ret) 1425 return ret; 1426 1427 - vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); 1428 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1429 vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); 1430 vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); ··· 1463 if (ret) 1464 return ret; 1465 1466 - vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); 1467 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1468 break; 1469 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: ··· 1504 mutex_lock(&smu->sensor_lock); 1505 switch (sensor) { 1506 case AMDGPU_PP_SENSOR_GPU_LOAD: 1507 - ret = vangogh_get_smu_metrics_data(smu, 1508 METRICS_AVERAGE_GFXACTIVITY, 1509 (uint32_t *)data); 1510 *size = 4; 1511 break; 1512 case 
AMDGPU_PP_SENSOR_GPU_POWER: 1513 - ret = vangogh_get_smu_metrics_data(smu, 1514 METRICS_AVERAGE_SOCKETPOWER, 1515 (uint32_t *)data); 1516 *size = 4; 1517 break; 1518 case AMDGPU_PP_SENSOR_EDGE_TEMP: 1519 - ret = vangogh_get_smu_metrics_data(smu, 1520 METRICS_TEMPERATURE_EDGE, 1521 (uint32_t *)data); 1522 *size = 4; 1523 break; 1524 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 1525 - ret = vangogh_get_smu_metrics_data(smu, 1526 METRICS_TEMPERATURE_HOTSPOT, 1527 (uint32_t *)data); 1528 *size = 4; 1529 break; 1530 case AMDGPU_PP_SENSOR_GFX_MCLK: 1531 - ret = vangogh_get_smu_metrics_data(smu, 1532 - METRICS_AVERAGE_UCLK, 1533 (uint32_t *)data); 1534 *(uint32_t *)data *= 100; 1535 *size = 4; 1536 break; 1537 case AMDGPU_PP_SENSOR_GFX_SCLK: 1538 - ret = vangogh_get_smu_metrics_data(smu, 1539 - METRICS_AVERAGE_GFXCLK, 1540 (uint32_t *)data); 1541 *(uint32_t *)data *= 100; 1542 *size = 4; 1543 break; 1544 case AMDGPU_PP_SENSOR_VDDGFX: 1545 - ret = vangogh_get_smu_metrics_data(smu, 1546 METRICS_VOLTAGE_VDDGFX, 1547 (uint32_t *)data); 1548 *size = 4; 1549 break; 1550 case AMDGPU_PP_SENSOR_VDDNB: 1551 - ret = vangogh_get_smu_metrics_data(smu, 1552 METRICS_VOLTAGE_VDDSOC, 1553 (uint32_t *)data); 1554 *size = 4; 1555 break; 1556 case AMDGPU_PP_SENSOR_CPU_CLK: 1557 - ret = vangogh_get_smu_metrics_data(smu, 1558 METRICS_AVERAGE_CPUCLK, 1559 (uint32_t *)data); 1560 *size = smu->cpu_core_num * sizeof(uint16_t); ··· 1628 return 0; 1629 } 1630 1631 static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, 1632 void **table) 1633 { ··· 1697 1698 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 1699 1700 - gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1701 - gpu_metrics->temperature_soc = metrics.SocTemperature; 1702 memcpy(&gpu_metrics->temperature_core[0], 1703 - &metrics.CoreTemperature[0], 1704 - sizeof(uint16_t) * 8); 1705 - gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1706 - gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1]; 1707 1708 - gpu_metrics->average_gfx_activity = metrics.GfxActivity; 1709 - gpu_metrics->average_mm_activity = metrics.UvdActivity; 1710 1711 - gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1712 - gpu_metrics->average_cpu_power = metrics.Power[0]; 1713 - gpu_metrics->average_soc_power = metrics.Power[1]; 1714 - gpu_metrics->average_gfx_power = metrics.Power[2]; 1715 memcpy(&gpu_metrics->average_core_power[0], 1716 - &metrics.CorePower[0], 1717 - sizeof(uint16_t) * 8); 1718 1719 - gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 1720 - gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 1721 - gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 1722 - gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 1723 - gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 1724 - gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 1725 1726 memcpy(&gpu_metrics->current_coreclk[0], 1727 - &metrics.CoreFrequency[0], 1728 - sizeof(uint16_t) * 8); 1729 - gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1730 - gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1]; 1731 1732 - gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1733 1734 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1735 1736 *table = (void *)gpu_metrics; 1737 1738 return sizeof(struct gpu_metrics_v2_1); 1739 } 1740 1741 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, ··· 2182 .set_watermarks_table = vangogh_set_watermarks_table, 2183 
.set_driver_table_location = smu_v11_0_set_driver_table_location, 2184 .interrupt_work = smu_v11_0_interrupt_work, 2185 - .get_gpu_metrics = vangogh_get_gpu_metrics, 2186 .od_edit_dpm_table = vangogh_od_edit_dpm_table, 2187 - .print_clk_levels = vangogh_print_clk_levels, 2188 .set_default_dpm_table = vangogh_set_default_dpm_tables, 2189 .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters, 2190 .system_features_control = vangogh_system_features_control,
··· 194 { 195 struct smu_table_context *smu_table = &smu->smu_table; 196 struct smu_table *tables = smu_table->tables; 197 + struct amdgpu_device *adev = smu->adev; 198 + uint32_t if_version; 199 + uint32_t ret = 0; 200 + 201 + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 202 + if (ret) { 203 + dev_err(adev->dev, "Failed to get smu if version!\n"); 204 + goto err0_out; 205 + } 206 207 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), 208 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 209 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), 210 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); ··· 205 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 206 SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t), 207 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 208 + 209 + if (if_version < 0x3) { 210 + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t), 211 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 212 + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL); 213 + } else { 214 + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), 215 + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 216 + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 217 + } 218 if (!smu_table->metrics_table) 219 goto err0_out; 220 smu_table->metrics_time = 0; ··· 235 return -ENOMEM; 236 } 237 238 + static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, 239 MetricsMember_t member, 240 uint32_t *value) 241 { 242 struct smu_table_context *smu_table = &smu->smu_table; 243 + SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; 244 int ret = 0; 245 246 mutex_lock(&smu->metrics_lock); ··· 255 } 256 257 switch (member) { 258 + case METRICS_CURR_GFXCLK: 259 *value = metrics->GfxclkFrequency; 260 break; 261 case METRICS_AVERAGE_SOCCLK: ··· 267 case METRICS_AVERAGE_DCLK: 268 *value = metrics->DclkFrequency; 269 break; 270 + case METRICS_CURR_UCLK: 271 *value = metrics->MemclkFrequency; 272 break; 273 case METRICS_AVERAGE_GFXACTIVITY: ··· 307 } 308 309 mutex_unlock(&smu->metrics_lock); 310 + 311 + return ret; 312 + } 313 + 314 + static int vangogh_get_smu_metrics_data(struct smu_context *smu, 315 + MetricsMember_t member, 316 + uint32_t *value) 317 + { 318 + struct smu_table_context *smu_table = &smu->smu_table; 319 + SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; 320 + int ret = 0; 321 + 322 + mutex_lock(&smu->metrics_lock); 323 + 324 + ret = smu_cmn_get_metrics_table_locked(smu, 325 + NULL, 326 + false); 327 + if (ret) { 328 + mutex_unlock(&smu->metrics_lock); 329 + return ret; 330 + } 331 + 332 + switch (member) { 333 + case METRICS_CURR_GFXCLK: 334 + *value = metrics->Current.GfxclkFrequency; 335 + break; 336 + case METRICS_AVERAGE_SOCCLK: 337 + *value = metrics->Current.SocclkFrequency; 338 + break; 339 + case METRICS_AVERAGE_VCLK: 340 + *value = metrics->Current.VclkFrequency; 341 + break; 342 + case METRICS_AVERAGE_DCLK: 343 + *value = metrics->Current.DclkFrequency; 344 + break; 345 + case METRICS_CURR_UCLK: 346 + *value = metrics->Current.MemclkFrequency; 347 + break; 348 + case METRICS_AVERAGE_GFXACTIVITY: 349 + *value = metrics->Current.GfxActivity; 350 + break; 351 + case METRICS_AVERAGE_VCNACTIVITY: 352 + *value = metrics->Current.UvdActivity; 353 + break; 354 + case METRICS_AVERAGE_SOCKETPOWER: 355 + *value = (metrics->Current.CurrentSocketPower << 8) / 356 + 1000; 357 + break; 358 + case METRICS_TEMPERATURE_EDGE: 359 + *value = metrics->Current.GfxTemperature / 100 
* 360 + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 361 + break; 362 + case METRICS_TEMPERATURE_HOTSPOT: 363 + *value = metrics->Current.SocTemperature / 100 * 364 + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 365 + break; 366 + case METRICS_THROTTLER_STATUS: 367 + *value = metrics->Current.ThrottlerStatus; 368 + break; 369 + case METRICS_VOLTAGE_VDDGFX: 370 + *value = metrics->Current.Voltage[2]; 371 + break; 372 + case METRICS_VOLTAGE_VDDSOC: 373 + *value = metrics->Current.Voltage[1]; 374 + break; 375 + case METRICS_AVERAGE_CPUCLK: 376 + memcpy(value, &metrics->Current.CoreFrequency[0], 377 + smu->cpu_core_num * sizeof(uint16_t)); 378 + break; 379 + default: 380 + *value = UINT_MAX; 381 + break; 382 + } 383 + 384 + mutex_unlock(&smu->metrics_lock); 385 + 386 + return ret; 387 + } 388 + 389 + static int vangogh_common_get_smu_metrics_data(struct smu_context *smu, 390 + MetricsMember_t member, 391 + uint32_t *value) 392 + { 393 + struct amdgpu_device *adev = smu->adev; 394 + uint32_t if_version; 395 + int ret = 0; 396 + 397 + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 398 + if (ret) { 399 + dev_err(adev->dev, "Failed to get smu if version!\n"); 400 + return ret; 401 + } 402 + 403 + if (if_version < 0x3) 404 + ret = vangogh_get_legacy_smu_metrics_data(smu, member, value); 405 + else 406 + ret = vangogh_get_smu_metrics_data(smu, member, value); 407 408 return ret; 409 } ··· 447 return 0; 448 } 449 450 + static int vangogh_print_legacy_clk_levels(struct smu_context *smu, 451 enum smu_clk_type clk_type, char *buf) 452 { 453 DpmClocks_t *clk_table = smu->smu_table.clocks_table; 454 + SmuMetrics_legacy_t metrics; 455 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 456 int i, size = 0, ret = 0; 457 uint32_t cur_value = 0, value = 0, count = 0; ··· 544 } 545 546 return size; 547 + } 548 + 549 + static int vangogh_print_clk_levels(struct smu_context *smu, 550 + enum smu_clk_type clk_type, char *buf) 551 + { 552 + DpmClocks_t *clk_table = smu->smu_table.clocks_table; 553 + SmuMetrics_t metrics; 554 + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 555 + int i, size = 0, ret = 0; 556 + uint32_t cur_value = 0, value = 0, count = 0; 557 + bool cur_value_match_level = false; 558 + 559 + memset(&metrics, 0, sizeof(metrics)); 560 + 561 + ret = smu_cmn_get_metrics_table(smu, &metrics, false); 562 + if (ret) 563 + return ret; 564 + 565 + switch (clk_type) { 566 + case SMU_OD_SCLK: 567 + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 568 + size = sprintf(buf, "%s:\n", "OD_SCLK"); 569 + size += sprintf(buf + size, "0: %10uMhz\n", 570 + (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); 571 + size += sprintf(buf + size, "1: %10uMhz\n", 572 + (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); 573 + } 574 + break; 575 + case SMU_OD_CCLK: 576 + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 577 + size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); 578 + size += sprintf(buf + size, "0: %10uMhz\n", 579 + (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); 580 + size += sprintf(buf + size, "1: %10uMhz\n", 581 + (smu->cpu_actual_soft_max_freq > 0) ? 
smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); 582 + } 583 + break; 584 + case SMU_OD_RANGE: 585 + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 586 + size = sprintf(buf, "%s:\n", "OD_RANGE"); 587 + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", 588 + smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); 589 + size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n", 590 + smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); 591 + } 592 + break; 593 + case SMU_SOCCLK: 594 + /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ 595 + count = clk_table->NumSocClkLevelsEnabled; 596 + cur_value = metrics.Current.SocclkFrequency; 597 + break; 598 + case SMU_VCLK: 599 + count = clk_table->VcnClkLevelsEnabled; 600 + cur_value = metrics.Current.VclkFrequency; 601 + break; 602 + case SMU_DCLK: 603 + count = clk_table->VcnClkLevelsEnabled; 604 + cur_value = metrics.Current.DclkFrequency; 605 + break; 606 + case SMU_MCLK: 607 + count = clk_table->NumDfPstatesEnabled; 608 + cur_value = metrics.Current.MemclkFrequency; 609 + break; 610 + case SMU_FCLK: 611 + count = clk_table->NumDfPstatesEnabled; 612 + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value); 613 + if (ret) 614 + return ret; 615 + break; 616 + default: 617 + break; 618 + } 619 + 620 + switch (clk_type) { 621 + case SMU_SOCCLK: 622 + case SMU_VCLK: 623 + case SMU_DCLK: 624 + case SMU_MCLK: 625 + case SMU_FCLK: 626 + for (i = 0; i < count; i++) { 627 + ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); 628 + if (ret) 629 + return ret; 630 + if (!value) 631 + continue; 632 + size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, 633 + cur_value == value ? "*" : ""); 634 + if (cur_value == value) 635 + cur_value_match_level = true; 636 + } 637 + 638 + if (!cur_value_match_level) 639 + size += sprintf(buf + size, " %uMhz *\n", cur_value); 640 + break; 641 + default: 642 + break; 643 + } 644 + 645 + return size; 646 + } 647 + 648 + static int vangogh_common_print_clk_levels(struct smu_context *smu, 649 + enum smu_clk_type clk_type, char *buf) 650 + { 651 + struct amdgpu_device *adev = smu->adev; 652 + uint32_t if_version; 653 + int ret = 0; 654 + 655 + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 656 + if (ret) { 657 + dev_err(adev->dev, "Failed to get smu if version!\n"); 658 + return ret; 659 + } 660 + 661 + if (if_version < 0x3) 662 + ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf); 663 + else 664 + ret = vangogh_print_clk_levels(smu, clk_type, buf); 665 + 666 + return ret; 667 } 668 669 static int vangogh_get_profiling_clk_mask(struct smu_context *smu, ··· 860 return ret; 861 break; 862 case SMU_FCLK: 863 ret = smu_cmn_send_smc_msg_with_param(smu, 864 SMU_MSG_SetHardMinFclkByFreq, 865 min, NULL); ··· 948 if (ret) 949 return ret; 950 break; 951 case SMU_FCLK: 952 ret = vangogh_get_dpm_clk_limited(smu, 953 clk_type, soft_min_level, &min_freq); ··· 1035 SMU_SOCCLK, 1036 SMU_VCLK, 1037 SMU_DCLK, 1038 SMU_FCLK, 1039 }; 1040 ··· 1064 enum smu_clk_type clk_type; 1065 uint32_t feature; 1066 } clk_feature_map[] = { 1067 {SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT}, 1068 {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, 1069 {SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT}, ··· 1196 if (ret) 1197 return ret; 1198 1199 vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1200 vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); 1201 vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask); ··· 1236 if (ret) 1237 return ret; 1238 1239 
vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask); 1240 break; 1241 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: ··· 1278 mutex_lock(&smu->sensor_lock); 1279 switch (sensor) { 1280 case AMDGPU_PP_SENSOR_GPU_LOAD: 1281 + ret = vangogh_common_get_smu_metrics_data(smu, 1282 METRICS_AVERAGE_GFXACTIVITY, 1283 (uint32_t *)data); 1284 *size = 4; 1285 break; 1286 case AMDGPU_PP_SENSOR_GPU_POWER: 1287 + ret = vangogh_common_get_smu_metrics_data(smu, 1288 METRICS_AVERAGE_SOCKETPOWER, 1289 (uint32_t *)data); 1290 *size = 4; 1291 break; 1292 case AMDGPU_PP_SENSOR_EDGE_TEMP: 1293 + ret = vangogh_common_get_smu_metrics_data(smu, 1294 METRICS_TEMPERATURE_EDGE, 1295 (uint32_t *)data); 1296 *size = 4; 1297 break; 1298 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: 1299 + ret = vangogh_common_get_smu_metrics_data(smu, 1300 METRICS_TEMPERATURE_HOTSPOT, 1301 (uint32_t *)data); 1302 *size = 4; 1303 break; 1304 case AMDGPU_PP_SENSOR_GFX_MCLK: 1305 + ret = vangogh_common_get_smu_metrics_data(smu, 1306 + METRICS_CURR_UCLK, 1307 (uint32_t *)data); 1308 *(uint32_t *)data *= 100; 1309 *size = 4; 1310 break; 1311 case AMDGPU_PP_SENSOR_GFX_SCLK: 1312 + ret = vangogh_common_get_smu_metrics_data(smu, 1313 + METRICS_CURR_GFXCLK, 1314 (uint32_t *)data); 1315 *(uint32_t *)data *= 100; 1316 *size = 4; 1317 break; 1318 case AMDGPU_PP_SENSOR_VDDGFX: 1319 + ret = vangogh_common_get_smu_metrics_data(smu, 1320 METRICS_VOLTAGE_VDDGFX, 1321 (uint32_t *)data); 1322 *size = 4; 1323 break; 1324 case AMDGPU_PP_SENSOR_VDDNB: 1325 + ret = vangogh_common_get_smu_metrics_data(smu, 1326 METRICS_VOLTAGE_VDDSOC, 1327 (uint32_t *)data); 1328 *size = 4; 1329 break; 1330 case AMDGPU_PP_SENSOR_CPU_CLK: 1331 + ret = vangogh_common_get_smu_metrics_data(smu, 1332 METRICS_AVERAGE_CPUCLK, 1333 (uint32_t *)data); 1334 *size = smu->cpu_core_num * sizeof(uint16_t); ··· 1402 return 0; 1403 } 1404 1405 + static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu, 1406 + void **table) 1407 + { 1408 + struct smu_table_context *smu_table = &smu->smu_table; 1409 + struct gpu_metrics_v2_1 *gpu_metrics = 1410 + (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table; 1411 + SmuMetrics_legacy_t metrics; 1412 + int ret = 0; 1413 + 1414 + ret = smu_cmn_get_metrics_table(smu, &metrics, true); 1415 + if (ret) 1416 + return ret; 1417 + 1418 + smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 1419 + 1420 + gpu_metrics->temperature_gfx = metrics.GfxTemperature; 1421 + gpu_metrics->temperature_soc = metrics.SocTemperature; 1422 + memcpy(&gpu_metrics->temperature_core[0], 1423 + &metrics.CoreTemperature[0], 1424 + sizeof(uint16_t) * 4); 1425 + gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0]; 1426 + 1427 + gpu_metrics->average_gfx_activity = metrics.GfxActivity; 1428 + gpu_metrics->average_mm_activity = metrics.UvdActivity; 1429 + 1430 + gpu_metrics->average_socket_power = metrics.CurrentSocketPower; 1431 + gpu_metrics->average_cpu_power = metrics.Power[0]; 1432 + gpu_metrics->average_soc_power = metrics.Power[1]; 1433 + gpu_metrics->average_gfx_power = metrics.Power[2]; 1434 + memcpy(&gpu_metrics->average_core_power[0], 1435 + &metrics.CorePower[0], 1436 + sizeof(uint16_t) * 4); 1437 + 1438 + gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; 1439 + gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; 1440 + gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; 1441 + gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; 1442 + gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; 1443 + 
gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; 1444 + 1445 + memcpy(&gpu_metrics->current_coreclk[0], 1446 + &metrics.CoreFrequency[0], 1447 + sizeof(uint16_t) * 4); 1448 + gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0]; 1449 + 1450 + gpu_metrics->throttle_status = metrics.ThrottlerStatus; 1451 + 1452 + gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1453 + 1454 + *table = (void *)gpu_metrics; 1455 + 1456 + return sizeof(struct gpu_metrics_v2_1); 1457 + } 1458 + 1459 static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, 1460 void **table) 1461 { ··· 1417 1418 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1); 1419 1420 + gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature; 1421 + gpu_metrics->temperature_soc = metrics.Current.SocTemperature; 1422 memcpy(&gpu_metrics->temperature_core[0], 1423 + &metrics.Current.CoreTemperature[0], 1424 + sizeof(uint16_t) * 4); 1425 + gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0]; 1426 1427 + gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity; 1428 + gpu_metrics->average_mm_activity = metrics.Current.UvdActivity; 1429 1430 + gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower; 1431 + gpu_metrics->average_cpu_power = metrics.Current.Power[0]; 1432 + gpu_metrics->average_soc_power = metrics.Current.Power[1]; 1433 + gpu_metrics->average_gfx_power = metrics.Current.Power[2]; 1434 memcpy(&gpu_metrics->average_core_power[0], 1435 + &metrics.Average.CorePower[0], 1436 + sizeof(uint16_t) * 4); 1437 1438 + gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency; 1439 + gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency; 1440 + gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency; 1441 + gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency; 1442 + gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency; 1443 + gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency; 1444 + 1445 + gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency; 1446 + gpu_metrics->current_socclk = metrics.Current.SocclkFrequency; 1447 + gpu_metrics->current_uclk = metrics.Current.MemclkFrequency; 1448 + gpu_metrics->current_fclk = metrics.Current.MemclkFrequency; 1449 + gpu_metrics->current_vclk = metrics.Current.VclkFrequency; 1450 + gpu_metrics->current_dclk = metrics.Current.DclkFrequency; 1451 1452 memcpy(&gpu_metrics->current_coreclk[0], 1453 + &metrics.Current.CoreFrequency[0], 1454 + sizeof(uint16_t) * 4); 1455 + gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0]; 1456 1457 + gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus; 1458 1459 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1460 1461 *table = (void *)gpu_metrics; 1462 1463 return sizeof(struct gpu_metrics_v2_1); 1464 + } 1465 + 1466 + static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu, 1467 + void **table) 1468 + { 1469 + struct amdgpu_device *adev = smu->adev; 1470 + uint32_t if_version; 1471 + int ret = 0; 1472 + 1473 + ret = smu_cmn_get_smc_version(smu, &if_version, NULL); 1474 + if (ret) { 1475 + dev_err(adev->dev, "Failed to get smu if version!\n"); 1476 + return ret; 1477 + } 1478 + 1479 + if (if_version < 0x3) 1480 + ret = vangogh_get_legacy_gpu_metrics(smu, table); 1481 + else 1482 + ret = vangogh_get_gpu_metrics(smu, table); 1483 + 1484 + return ret; 1485 } 1486 1487 static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND 
type, ··· 1876 .set_watermarks_table = vangogh_set_watermarks_table, 1877 .set_driver_table_location = smu_v11_0_set_driver_table_location, 1878 .interrupt_work = smu_v11_0_interrupt_work, 1879 + .get_gpu_metrics = vangogh_common_get_gpu_metrics, 1880 .od_edit_dpm_table = vangogh_od_edit_dpm_table, 1881 + .print_clk_levels = vangogh_common_print_clk_levels, 1882 .set_default_dpm_table = vangogh_set_default_dpm_tables, 1883 .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters, 1884 .system_features_control = vangogh_system_features_control,
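With the vangogh_ppt.c changes, every metrics consumer goes through a vangogh_common_* wrapper that queries the SMC interface version and dispatches to either the legacy layout or the new Current/Average layout. A hedged usage fragment (not standalone code; it assumes the driver context and the functions defined in the hunk above) showing how a sensor-read path uses the wrapper:

    /* Fragment: read the current GFX and memory clocks through the common wrapper.
     * METRICS_CURR_GFXCLK / METRICS_CURR_UCLK are the selectors the patch now uses
     * for the SCLK/MCLK sensors; error handling is trimmed for brevity. */
    uint32_t gfxclk = 0, memclk = 0;

    vangogh_common_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK, &gfxclk);
    vangogh_common_get_smu_metrics_data(smu, METRICS_CURR_UCLK, &memclk);

    gfxclk *= 100;   /* scaled by 100, matching the sensor switch in the hunk above */
    memclk *= 100;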
+1
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
··· 1332 .gfx_state_change_set = renoir_gfx_state_change_set, 1333 .set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters, 1334 .od_edit_dpm_table = renoir_od_edit_dpm_table, 1335 }; 1336 1337 void renoir_set_ppt_funcs(struct smu_context *smu)
··· 1332 .gfx_state_change_set = renoir_gfx_state_change_set, 1333 .set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters, 1334 .od_edit_dpm_table = renoir_od_edit_dpm_table, 1335 + .get_vbios_bootup_values = smu_v12_0_get_vbios_bootup_values, 1336 }; 1337 1338 void renoir_set_ppt_funcs(struct smu_context *smu)
+123
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
··· 27 #include "amdgpu_smu.h" 28 #include "atomfirmware.h" 29 #include "amdgpu_atomfirmware.h" 30 #include "smu_v12_0.h" 31 #include "soc15_common.h" 32 #include "atom.h" ··· 278 } 279 280 return ret; 281 }
··· 27 #include "amdgpu_smu.h" 28 #include "atomfirmware.h" 29 #include "amdgpu_atomfirmware.h" 30 + #include "amdgpu_atombios.h" 31 #include "smu_v12_0.h" 32 #include "soc15_common.h" 33 #include "atom.h" ··· 277 } 278 279 return ret; 280 + } 281 + 282 + static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev, 283 + uint8_t clk_id, 284 + uint8_t syspll_id, 285 + uint32_t *clk_freq) 286 + { 287 + struct atom_get_smu_clock_info_parameters_v3_1 input = {0}; 288 + struct atom_get_smu_clock_info_output_parameters_v3_1 *output; 289 + int ret, index; 290 + 291 + input.clk_id = clk_id; 292 + input.syspll_id = syspll_id; 293 + input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; 294 + index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1, 295 + getsmuclockinfo); 296 + 297 + ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, 298 + (uint32_t *)&input); 299 + if (ret) 300 + return -EINVAL; 301 + 302 + output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input; 303 + *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; 304 + 305 + return 0; 306 + } 307 + 308 + int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu) 309 + { 310 + int ret, index; 311 + uint16_t size; 312 + uint8_t frev, crev; 313 + struct atom_common_table_header *header; 314 + struct atom_firmware_info_v3_1 *v_3_1; 315 + struct atom_firmware_info_v3_3 *v_3_3; 316 + 317 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 318 + firmwareinfo); 319 + 320 + ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, 321 + (uint8_t **)&header); 322 + if (ret) 323 + return ret; 324 + 325 + if (header->format_revision != 3) { 326 + dev_err(smu->adev->dev, "unknown atom_firmware_info version! 
for smu12\n"); 327 + return -EINVAL; 328 + } 329 + 330 + switch (header->content_revision) { 331 + case 0: 332 + case 1: 333 + case 2: 334 + v_3_1 = (struct atom_firmware_info_v3_1 *)header; 335 + smu->smu_table.boot_values.revision = v_3_1->firmware_revision; 336 + smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; 337 + smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; 338 + smu->smu_table.boot_values.socclk = 0; 339 + smu->smu_table.boot_values.dcefclk = 0; 340 + smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; 341 + smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; 342 + smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; 343 + smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; 344 + smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; 345 + smu->smu_table.boot_values.pp_table_id = 0; 346 + smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability; 347 + break; 348 + case 3: 349 + case 4: 350 + default: 351 + v_3_3 = (struct atom_firmware_info_v3_3 *)header; 352 + smu->smu_table.boot_values.revision = v_3_3->firmware_revision; 353 + smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; 354 + smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; 355 + smu->smu_table.boot_values.socclk = 0; 356 + smu->smu_table.boot_values.dcefclk = 0; 357 + smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; 358 + smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; 359 + smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; 360 + smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; 361 + smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; 362 + smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id; 363 + smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability; 364 + } 365 + 366 + smu->smu_table.boot_values.format_revision = header->format_revision; 367 + smu->smu_table.boot_values.content_revision = header->content_revision; 368 + 369 + smu_v12_0_atom_get_smu_clockinfo(smu->adev, 370 + (uint8_t)SMU12_SYSPLL0_SOCCLK_ID, 371 + (uint8_t)SMU12_SYSPLL0_ID, 372 + &smu->smu_table.boot_values.socclk); 373 + 374 + smu_v12_0_atom_get_smu_clockinfo(smu->adev, 375 + (uint8_t)SMU12_SYSPLL1_DCFCLK_ID, 376 + (uint8_t)SMU12_SYSPLL1_ID, 377 + &smu->smu_table.boot_values.dcefclk); 378 + 379 + smu_v12_0_atom_get_smu_clockinfo(smu->adev, 380 + (uint8_t)SMU12_SYSPLL0_VCLK_ID, 381 + (uint8_t)SMU12_SYSPLL0_ID, 382 + &smu->smu_table.boot_values.vclk); 383 + 384 + smu_v12_0_atom_get_smu_clockinfo(smu->adev, 385 + (uint8_t)SMU12_SYSPLL0_DCLK_ID, 386 + (uint8_t)SMU12_SYSPLL0_ID, 387 + &smu->smu_table.boot_values.dclk); 388 + 389 + if ((smu->smu_table.boot_values.format_revision == 3) && 390 + (smu->smu_table.boot_values.content_revision >= 2)) 391 + smu_v12_0_atom_get_smu_clockinfo(smu->adev, 392 + (uint8_t)SMU12_SYSPLL3_0_FCLK_ID, 393 + (uint8_t)SMU12_SYSPLL3_0_ID, 394 + &smu->smu_table.boot_values.fclk); 395 + 396 + smu_v12_0_atom_get_smu_clockinfo(smu->adev, 397 + (uint8_t)SMU12_SYSPLL0_LCLK_ID, 398 + (uint8_t)SMU12_SYSPLL0_ID, 399 + &smu->smu_table.boot_values.lclk); 400 + 401 + return 0; 402 }
+44 -11
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
··· 78 79 #define smnPCIE_ESM_CTRL 0x111003D0 80 81 static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = { 82 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0), 83 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), ··· 407 { 408 int ret = 0; 409 410 ret = smu_v13_0_setup_pptable(smu); 411 if (ret) 412 return ret; ··· 675 struct smu_13_0_dpm_context *dpm_context = NULL; 676 uint32_t display_levels; 677 uint32_t freq_values[3] = {0}; 678 679 if (amdgpu_ras_intr_triggered()) 680 return snprintf(buf, PAGE_SIZE, "unavailable\n"); ··· 703 704 display_levels = clocks.num_levels; 705 706 /* fine-grained dpm has only 2 levels */ 707 - if (now > single_dpm_table->dpm_levels[0].value && 708 - now < single_dpm_table->dpm_levels[1].value) { 709 display_levels = clocks.num_levels + 1; 710 - freq_values[0] = single_dpm_table->dpm_levels[0].value; 711 - freq_values[2] = single_dpm_table->dpm_levels[1].value; 712 freq_values[1] = now; 713 } 714 ··· 726 */ 727 if (display_levels == clocks.num_levels) { 728 for (i = 0; i < clocks.num_levels; i++) 729 - size += sprintf(buf + size, "%d: %uMhz %s\n", i, 730 - clocks.data[i].clocks_in_khz / 1000, 731 - (clocks.num_levels == 1) ? "*" : 732 (aldebaran_freqs_in_same_level( 733 - clocks.data[i].clocks_in_khz / 1000, 734 - now) ? "*" : "")); 735 } else { 736 for (i = 0; i < display_levels; i++) 737 size += sprintf(buf + size, "%d: %uMhz %s\n", i, ··· 1134 && (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) 1135 smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL); 1136 1137 1138 switch (level) { 1139 ··· 1178 if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 1179 min_clk = max(min, dpm_context->dpm_tables.gfx_table.min); 1180 max_clk = min(max, dpm_context->dpm_tables.gfx_table.max); 1181 - return smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk); 1182 } 1183 1184 if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { ··· 1205 ret = smu_cmn_send_smc_msg_with_param(smu, 1206 SMU_MSG_EnableDeterminism, 1207 max, NULL); 1208 - if (ret) 1209 dev_err(adev->dev, 1210 "Failed to enable determinism at GFX clock %d MHz\n", max); 1211 } 1212 } 1213
··· 78
79 #define smnPCIE_ESM_CTRL 0x111003D0
80
81 + #define CLOCK_VALID (1 << 31)
82 +
83 static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = {
84 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
85 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
··· 405 {
406 int ret = 0;
407
408 + /* VBIOS pptable is the first choice */
409 + smu->smu_table.boot_values.pp_table_id = 0;
410 +
411 ret = smu_v13_0_setup_pptable(smu);
412 if (ret)
413 return ret;
··· 670 struct smu_13_0_dpm_context *dpm_context = NULL;
671 uint32_t display_levels;
672 uint32_t freq_values[3] = {0};
673 + uint32_t min_clk, max_clk;
674
675 if (amdgpu_ras_intr_triggered())
676 return snprintf(buf, PAGE_SIZE, "unavailable\n");
··· 697
698 display_levels = clocks.num_levels;
699
700 + min_clk = smu->gfx_actual_hard_min_freq & CLOCK_VALID ?
701 + smu->gfx_actual_hard_min_freq & ~CLOCK_VALID :
702 + single_dpm_table->dpm_levels[0].value;
703 + max_clk = smu->gfx_actual_soft_max_freq & CLOCK_VALID ?
704 + smu->gfx_actual_soft_max_freq & ~CLOCK_VALID :
705 + single_dpm_table->dpm_levels[1].value;
706 +
707 + freq_values[0] = min_clk;
708 + freq_values[1] = max_clk;
709 +
710 /* fine-grained dpm has only 2 levels */
711 + if (now > min_clk && now < max_clk) {
712 display_levels = clocks.num_levels + 1;
713 + freq_values[2] = max_clk;
714 freq_values[1] = now;
715 }
716
··· 712 */
713 if (display_levels == clocks.num_levels) {
714 for (i = 0; i < clocks.num_levels; i++)
715 + size += sprintf(
716 + buf + size, "%d: %uMhz %s\n", i,
717 + freq_values[i],
718 + (clocks.num_levels == 1) ?
719 + "*" :
720 (aldebaran_freqs_in_same_level(
721 + freq_values[i], now) ?
722 + "*" :
723 + ""));
724 } else {
725 for (i = 0; i < display_levels; i++)
726 size += sprintf(buf + size, "%d: %uMhz %s\n", i,
··· 1117 && (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
1118 smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
1119
1120 + /* Reset user min/max gfx clock */
1121 + smu->gfx_actual_hard_min_freq = 0;
1122 + smu->gfx_actual_soft_max_freq = 0;
1123
1124 switch (level) {
1125
··· 1158 if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
1159 min_clk = max(min, dpm_context->dpm_tables.gfx_table.min);
1160 max_clk = min(max, dpm_context->dpm_tables.gfx_table.max);
1161 + ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
1162 + min_clk, max_clk);
1163 +
1164 + if (!ret) {
1165 + smu->gfx_actual_hard_min_freq = min_clk | CLOCK_VALID;
1166 + smu->gfx_actual_soft_max_freq = max_clk | CLOCK_VALID;
1167 + }
1168 + return ret;
1169 }
1170
1171 if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
··· 1178 ret = smu_cmn_send_smc_msg_with_param(smu,
1179 SMU_MSG_EnableDeterminism,
1180 max, NULL);
1181 + if (ret) {
1182 dev_err(adev->dev,
1183 "Failed to enable determinism at GFX clock %d MHz\n", max);
1184 + } else {
1185 + smu->gfx_actual_hard_min_freq =
1186 + min_clk | CLOCK_VALID;
1187 + smu->gfx_actual_soft_max_freq =
1188 + max | CLOCK_VALID;
1189 + }
1190 }
1191 }
1192
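The aldebaran hunks above cache the user-requested GFX limits in gfx_actual_hard_min_freq/gfx_actual_soft_max_freq and tag them with bit 31 (CLOCK_VALID), so the clock printout can tell a genuine user override apart from a stale or zero value. Below is a standalone sketch of that tag-and-mask idiom; the variable and function names are hypothetical, not driver API.

/*
 * Sketch only: bit 31 marks a stored frequency as a real user override,
 * and readers mask it off before using the value.
 */
#include <stdint.h>
#include <stdio.h>

#define CLOCK_VALID     (1U << 31)

static uint32_t stored_min_freq;        /* 0 means "never set by the user" */

static void store_user_min_freq(uint32_t mhz)
{
        /* remember the clock and tag it as user-provided */
        stored_min_freq = mhz | CLOCK_VALID;
}

static uint32_t effective_min_freq(uint32_t hw_default_mhz)
{
        /* fall back to the hardware default until the user sets a value */
        if (stored_min_freq & CLOCK_VALID)
                return stored_min_freq & ~CLOCK_VALID;
        return hw_default_mhz;
}

int main(void)
{
        printf("before override: %u MHz\n", effective_min_freq(500));
        store_user_min_freq(800);
        printf("after override:  %u MHz\n", effective_min_freq(500));
        return 0;
}

Packing the flag into the high bit works because the cached clocks stay far below 2^31; values that could occupy the full 32-bit range would need a separate validity field instead.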
-2
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 276 void *table;
277 uint16_t version_major, version_minor;
278
279 - /* temporarily hardcode to use vbios pptable */
280 - smu->smu_table.boot_values.pp_table_id = 0;
281
282 if (amdgpu_smu_pptable_id >= 0) {
283 smu->smu_table.boot_values.pp_table_id = amdgpu_smu_pptable_id;
··· 276 void *table;
277 uint16_t version_major, version_minor;
278
279
280 if (amdgpu_smu_pptable_id >= 0) {
281 smu->smu_table.boot_values.pp_table_id = amdgpu_smu_pptable_id;
-4
drivers/gpu/drm/radeon/cik.c
··· 7948 DRM_ERROR("Illegal register access in command stream\n");
7949 /* XXX check the bitfield order! */
7950 me_id = (ring_id & 0x60) >> 5;
7951 - pipe_id = (ring_id & 0x18) >> 3;
7952 - queue_id = (ring_id & 0x7) >> 0;
7953 switch (me_id) {
7954 case 0:
7955 /* This results in a full GPU reset, but all we need to do is soft
··· 7969 DRM_ERROR("Illegal instruction in command stream\n");
7970 /* XXX check the bitfield order! */
7971 me_id = (ring_id & 0x60) >> 5;
7972 - pipe_id = (ring_id & 0x18) >> 3;
7973 - queue_id = (ring_id & 0x7) >> 0;
7974 switch (me_id) {
7975 case 0:
7976 /* This results in a full GPU reset, but all we need to do is soft
··· 7948 DRM_ERROR("Illegal register access in command stream\n");
7949 /* XXX check the bitfield order! */
7950 me_id = (ring_id & 0x60) >> 5;
7951 switch (me_id) {
7952 case 0:
7953 /* This results in a full GPU reset, but all we need to do is soft
··· 7971 DRM_ERROR("Illegal instruction in command stream\n");
7972 /* XXX check the bitfield order! */
7973 me_id = (ring_id & 0x60) >> 5;
7974 switch (me_id) {
7975 case 0:
7976 /* This results in a full GPU reset, but all we need to do is soft
+1 -1
drivers/gpu/drm/radeon/si.c
··· 4511 } else {
4512 for (i = 0; i < (command & 0x1fffff); i++) {
4513 reg = start_reg + (4 * i);
4514 - if (!si_vm_reg_valid(reg)) {
4515 DRM_ERROR("CP DMA Bad DST register\n");
4516 return -EINVAL;
4517 }
··· 4511 } else {
4512 for (i = 0; i < (command & 0x1fffff); i++) {
4513 reg = start_reg + (4 * i);
4514 + if (!si_vm_reg_valid(reg)) {
4515 DRM_ERROR("CP DMA Bad DST register\n");
4516 return -EINVAL;
4517 }
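The si.c hunk touches the CP DMA destination check, which walks every register the packet would write and rejects the whole command if any register fails si_vm_reg_valid(). A rough userspace sketch of that whole-range validation follows; reg_allowed() and the register window are made-up stand-ins for the driver's real allow-list.

/*
 * Sketch only: validate every register in a command's destination range,
 * in the spirit of the CP DMA check above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool reg_allowed(uint32_t reg)
{
        /* made-up allow-list: a single writable window */
        return reg >= 0x8000 && reg < 0x9000;
}

/* the low 21 bits of the command word encode the number of dwords written */
static int check_dst_range(uint32_t start_reg, uint32_t command)
{
        uint32_t i, reg;

        for (i = 0; i < (command & 0x1fffff); i++) {
                reg = start_reg + (4 * i);
                if (!reg_allowed(reg)) {
                        fprintf(stderr, "bad DST register 0x%x\n", reg);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        /* 16 dwords starting at the base of the window: accepted */
        printf("in range: %d\n", check_dst_range(0x8000, 16));
        /* 16 dwords starting near the end: the walk leaves the window */
        printf("overflow: %d\n", check_dst_range(0x8fe0, 16));
        return 0;
}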