
Merge branch 'drm-next-5.2' of git://people.freedesktop.org/~agd5f/linux into drm-next

- SR-IOV fixes
- Raven flickering fix
- Misc spelling fixes
- Vega20 power fixes
- Freesync improvements
- DC fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190502193020.3562-1-alexander.deucher@amd.com

+734 -473
+37
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 335 335 amdgpu_bo_unref(&(bo)); 336 336 } 337 337 338 + uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd, 339 + enum kgd_engine_type type) 340 + { 341 + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; 342 + 343 + switch (type) { 344 + case KGD_ENGINE_PFP: 345 + return adev->gfx.pfp_fw_version; 346 + 347 + case KGD_ENGINE_ME: 348 + return adev->gfx.me_fw_version; 349 + 350 + case KGD_ENGINE_CE: 351 + return adev->gfx.ce_fw_version; 352 + 353 + case KGD_ENGINE_MEC1: 354 + return adev->gfx.mec_fw_version; 355 + 356 + case KGD_ENGINE_MEC2: 357 + return adev->gfx.mec2_fw_version; 358 + 359 + case KGD_ENGINE_RLC: 360 + return adev->gfx.rlc_fw_version; 361 + 362 + case KGD_ENGINE_SDMA1: 363 + return adev->sdma.instance[0].fw_version; 364 + 365 + case KGD_ENGINE_SDMA2: 366 + return adev->sdma.instance[1].fw_version; 367 + 368 + default: 369 + return 0; 370 + } 371 + 372 + return 0; 373 + } 374 + 338 375 void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, 339 376 struct kfd_local_mem_info *mem_info) 340 377 {
+14
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
··· 81 81 uint64_t vram_used; 82 82 }; 83 83 84 + enum kgd_engine_type { 85 + KGD_ENGINE_PFP = 1, 86 + KGD_ENGINE_ME, 87 + KGD_ENGINE_CE, 88 + KGD_ENGINE_MEC1, 89 + KGD_ENGINE_MEC2, 90 + KGD_ENGINE_RLC, 91 + KGD_ENGINE_SDMA1, 92 + KGD_ENGINE_SDMA2, 93 + KGD_ENGINE_MAX 94 + }; 95 + 84 96 struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, 85 97 struct mm_struct *mm); 86 98 bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm); ··· 154 142 void **mem_obj, uint64_t *gpu_addr, 155 143 void **cpu_ptr, bool mqd_gfx9); 156 144 void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); 145 + uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd, 146 + enum kgd_engine_type type); 157 147 void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, 158 148 struct kfd_local_mem_info *mem_info); 159 149 uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
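This commit replaces the per-ASIC kfd2kgd get_fw_version() hooks (deleted from the gfx_v7/v8/v9 files below) with the shared amdgpu_amdkfd_get_fw_version() helper added above, which returns the firmware versions already cached in struct amdgpu_device instead of re-parsing the firmware headers. A minimal usage sketch, assuming only the signature and enum shown in this diff (the report_fw_versions() wrapper is hypothetical):

	#include "amdgpu_amdkfd.h"

	/* Hypothetical caller: query cached firmware versions through the
	 * shared helper rather than a per-ASIC kfd2kgd hook. */
	static void report_fw_versions(struct kgd_dev *kgd)
	{
		uint32_t mec_fw = amdgpu_amdkfd_get_fw_version(kgd, KGD_ENGINE_MEC1);
		uint32_t sdma_fw = amdgpu_amdkfd_get_fw_version(kgd, KGD_ENGINE_SDMA1);

		/* The helper returns 0 for any engine type it does not handle. */
		pr_info("MEC1 fw %u, SDMA1 fw %u\n", mec_fw, sdma_fw);
	}

The kfd_device.c hunk later in this commit converts the real callers in exactly this way.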
-61
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
··· 22 22 23 23 #include <linux/fdtable.h> 24 24 #include <linux/uaccess.h> 25 - #include <linux/firmware.h> 26 25 #include <linux/mmu_context.h> 27 26 #include <drm/drmP.h> 28 27 #include "amdgpu.h" 29 28 #include "amdgpu_amdkfd.h" 30 29 #include "cikd.h" 31 30 #include "cik_sdma.h" 32 - #include "amdgpu_ucode.h" 33 31 #include "gfx_v7_0.h" 34 32 #include "gca/gfx_7_2_d.h" 35 33 #include "gca/gfx_7_2_enum.h" ··· 137 139 static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, 138 140 uint8_t vmid); 139 141 140 - static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); 141 142 static void set_scratch_backing_va(struct kgd_dev *kgd, 142 143 uint64_t va, uint32_t vmid); 143 144 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, ··· 188 191 .address_watch_get_offset = kgd_address_watch_get_offset, 189 192 .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid, 190 193 .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, 191 - .get_fw_version = get_fw_version, 192 194 .set_scratch_backing_va = set_scratch_backing_va, 193 195 .get_tile_config = get_tile_config, 194 196 .set_vm_context_page_table_base = set_vm_context_page_table_base, ··· 786 790 lock_srbm(kgd, 0, 0, 0, vmid); 787 791 WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); 788 792 unlock_srbm(kgd); 789 - } 790 - 791 - static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) 792 - { 793 - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; 794 - const union amdgpu_firmware_header *hdr; 795 - 796 - switch (type) { 797 - case KGD_ENGINE_PFP: 798 - hdr = (const union amdgpu_firmware_header *) 799 - adev->gfx.pfp_fw->data; 800 - break; 801 - 802 - case KGD_ENGINE_ME: 803 - hdr = (const union amdgpu_firmware_header *) 804 - adev->gfx.me_fw->data; 805 - break; 806 - 807 - case KGD_ENGINE_CE: 808 - hdr = (const union amdgpu_firmware_header *) 809 - adev->gfx.ce_fw->data; 810 - break; 811 - 812 - case KGD_ENGINE_MEC1: 813 - hdr = (const union amdgpu_firmware_header *) 814 - adev->gfx.mec_fw->data; 815 - break; 816 - 817 - case KGD_ENGINE_MEC2: 818 - hdr = (const union amdgpu_firmware_header *) 819 - adev->gfx.mec2_fw->data; 820 - break; 821 - 822 - case KGD_ENGINE_RLC: 823 - hdr = (const union amdgpu_firmware_header *) 824 - adev->gfx.rlc_fw->data; 825 - break; 826 - 827 - case KGD_ENGINE_SDMA1: 828 - hdr = (const union amdgpu_firmware_header *) 829 - adev->sdma.instance[0].fw->data; 830 - break; 831 - 832 - case KGD_ENGINE_SDMA2: 833 - hdr = (const union amdgpu_firmware_header *) 834 - adev->sdma.instance[1].fw->data; 835 - break; 836 - 837 - default: 838 - return 0; 839 - } 840 - 841 - if (hdr == NULL) 842 - return 0; 843 - 844 - /* Only 12 bit in use*/ 845 - return hdr->common.ucode_version; 846 793 } 847 794 848 795 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-61
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
··· 23 23 #include <linux/module.h> 24 24 #include <linux/fdtable.h> 25 25 #include <linux/uaccess.h> 26 - #include <linux/firmware.h> 27 26 #include <linux/mmu_context.h> 28 27 #include <drm/drmP.h> 29 28 #include "amdgpu.h" 30 29 #include "amdgpu_amdkfd.h" 31 - #include "amdgpu_ucode.h" 32 30 #include "gfx_v8_0.h" 33 31 #include "gca/gfx_8_0_sh_mask.h" 34 32 #include "gca/gfx_8_0_d.h" ··· 93 95 uint8_t vmid); 94 96 static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, 95 97 uint8_t vmid); 96 - static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); 97 98 static void set_scratch_backing_va(struct kgd_dev *kgd, 98 99 uint64_t va, uint32_t vmid); 99 100 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, ··· 145 148 get_atc_vmid_pasid_mapping_pasid, 146 149 .get_atc_vmid_pasid_mapping_valid = 147 150 get_atc_vmid_pasid_mapping_valid, 148 - .get_fw_version = get_fw_version, 149 151 .set_scratch_backing_va = set_scratch_backing_va, 150 152 .get_tile_config = get_tile_config, 151 153 .set_vm_context_page_table_base = set_vm_context_page_table_base, ··· 745 749 lock_srbm(kgd, 0, 0, 0, vmid); 746 750 WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); 747 751 unlock_srbm(kgd); 748 - } 749 - 750 - static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) 751 - { 752 - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; 753 - const union amdgpu_firmware_header *hdr; 754 - 755 - switch (type) { 756 - case KGD_ENGINE_PFP: 757 - hdr = (const union amdgpu_firmware_header *) 758 - adev->gfx.pfp_fw->data; 759 - break; 760 - 761 - case KGD_ENGINE_ME: 762 - hdr = (const union amdgpu_firmware_header *) 763 - adev->gfx.me_fw->data; 764 - break; 765 - 766 - case KGD_ENGINE_CE: 767 - hdr = (const union amdgpu_firmware_header *) 768 - adev->gfx.ce_fw->data; 769 - break; 770 - 771 - case KGD_ENGINE_MEC1: 772 - hdr = (const union amdgpu_firmware_header *) 773 - adev->gfx.mec_fw->data; 774 - break; 775 - 776 - case KGD_ENGINE_MEC2: 777 - hdr = (const union amdgpu_firmware_header *) 778 - adev->gfx.mec2_fw->data; 779 - break; 780 - 781 - case KGD_ENGINE_RLC: 782 - hdr = (const union amdgpu_firmware_header *) 783 - adev->gfx.rlc_fw->data; 784 - break; 785 - 786 - case KGD_ENGINE_SDMA1: 787 - hdr = (const union amdgpu_firmware_header *) 788 - adev->sdma.instance[0].fw->data; 789 - break; 790 - 791 - case KGD_ENGINE_SDMA2: 792 - hdr = (const union amdgpu_firmware_header *) 793 - adev->sdma.instance[1].fw->data; 794 - break; 795 - 796 - default: 797 - return 0; 798 - } 799 - 800 - if (hdr == NULL) 801 - return 0; 802 - 803 - /* Only 12 bit in use*/ 804 - return hdr->common.ucode_version; 805 752 } 806 753 807 754 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-54
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
··· 25 25 #include <linux/module.h> 26 26 #include <linux/fdtable.h> 27 27 #include <linux/uaccess.h> 28 - #include <linux/firmware.h> 29 28 #include <linux/mmu_context.h> 30 29 #include <drm/drmP.h> 31 30 #include "amdgpu.h" 32 31 #include "amdgpu_amdkfd.h" 33 - #include "amdgpu_ucode.h" 34 32 #include "soc15_hw_ip.h" 35 33 #include "gc/gc_9_0_offset.h" 36 34 #include "gc/gc_9_0_sh_mask.h" ··· 109 111 uint8_t vmid); 110 112 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, 111 113 uint64_t page_table_base); 112 - static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type); 113 114 static void set_scratch_backing_va(struct kgd_dev *kgd, 114 115 uint64_t va, uint32_t vmid); 115 116 static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); ··· 155 158 get_atc_vmid_pasid_mapping_pasid, 156 159 .get_atc_vmid_pasid_mapping_valid = 157 160 get_atc_vmid_pasid_mapping_valid, 158 - .get_fw_version = get_fw_version, 159 161 .set_scratch_backing_va = set_scratch_backing_va, 160 162 .get_tile_config = amdgpu_amdkfd_get_tile_config, 161 163 .set_vm_context_page_table_base = set_vm_context_page_table_base, ··· 868 872 * passed to the shader by the CP. It's the user mode driver's 869 873 * responsibility. 870 874 */ 871 - } 872 - 873 - /* FIXME: Does this need to be ASIC-specific code? */ 874 - static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) 875 - { 876 - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; 877 - const union amdgpu_firmware_header *hdr; 878 - 879 - switch (type) { 880 - case KGD_ENGINE_PFP: 881 - hdr = (const union amdgpu_firmware_header *)adev->gfx.pfp_fw->data; 882 - break; 883 - 884 - case KGD_ENGINE_ME: 885 - hdr = (const union amdgpu_firmware_header *)adev->gfx.me_fw->data; 886 - break; 887 - 888 - case KGD_ENGINE_CE: 889 - hdr = (const union amdgpu_firmware_header *)adev->gfx.ce_fw->data; 890 - break; 891 - 892 - case KGD_ENGINE_MEC1: 893 - hdr = (const union amdgpu_firmware_header *)adev->gfx.mec_fw->data; 894 - break; 895 - 896 - case KGD_ENGINE_MEC2: 897 - hdr = (const union amdgpu_firmware_header *)adev->gfx.mec2_fw->data; 898 - break; 899 - 900 - case KGD_ENGINE_RLC: 901 - hdr = (const union amdgpu_firmware_header *)adev->gfx.rlc_fw->data; 902 - break; 903 - 904 - case KGD_ENGINE_SDMA1: 905 - hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[0].fw->data; 906 - break; 907 - 908 - case KGD_ENGINE_SDMA2: 909 - hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[1].fw->data; 910 - break; 911 - 912 - default: 913 - return 0; 914 - } 915 - 916 - if (hdr == NULL) 917 - return 0; 918 - 919 - /* Only 12 bit in use*/ 920 - return hdr->common.ucode_version; 921 875 } 922 876 923 877 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 3437 3437 3438 3438 vram_lost = amdgpu_device_check_vram_lost(tmp_adev); 3439 3439 if (vram_lost) { 3440 - DRM_ERROR("VRAM is lost!\n"); 3440 + DRM_INFO("VRAM is lost due to GPU reset!\n"); 3441 3441 atomic_inc(&tmp_adev->vram_lost_counter); 3442 3442 } 3443 3443
+3 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 88 88 if (bo->gem_base.import_attach) 89 89 drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg); 90 90 drm_gem_object_release(&bo->gem_base); 91 - amdgpu_bo_unref(&bo->parent); 91 + /* in case amdgpu_device_recover_vram got a NULL bo->parent */ 92 92 if (!list_empty(&bo->shadow_list)) { 93 93 mutex_lock(&adev->shadow_list_lock); 94 94 list_del_init(&bo->shadow_list); 95 95 mutex_unlock(&adev->shadow_list_lock); 96 96 } 97 + amdgpu_bo_unref(&bo->parent); 98 + 97 99 kfree(bo->metadata); 98 100 kfree(bo); 99 101 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 144 144 struct amdgpu_device *adev = ddev->dev_private; 145 145 enum amd_pm_state_type pm; 146 146 147 - if (adev->smu.ppt_funcs->get_current_power_state) 147 + if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state) 148 148 pm = amdgpu_smu_get_current_power_state(adev); 149 149 else if (adev->powerplay.pp_funcs->get_current_power_state) 150 150 pm = amdgpu_dpm_get_current_power_state(adev);
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
··· 36 36 /* enable virtual display */ 37 37 adev->mode_info.num_crtc = 1; 38 38 adev->enable_virtual_display = true; 39 + adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC; 39 40 adev->cg_flags = 0; 40 41 adev->pg_flags = 0; 41 42 }
+1 -1
drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
··· 515 515 516 516 /* wait until RCV_MSG become 3 */ 517 517 if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) { 518 - pr_err("failed to recieve FLR_CMPL\n"); 518 + pr_err("failed to receive FLR_CMPL\n"); 519 519 return; 520 520 } 521 521
+2 -4
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
··· 156 156 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), 157 157 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), 158 158 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), 159 - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xFE000000, 0x00000000), 160 159 }; 161 160 162 161 static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { ··· 185 186 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), 186 187 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), 187 188 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0), 188 - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xFE000000, 0x00000000), 189 189 }; 190 190 191 191 static const struct soc15_reg_golden golden_settings_sdma_rv1[] = ··· 849 851 wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL); 850 852 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, 851 853 SDMA0_GFX_RB_WPTR_POLL_CNTL, 852 - F32_POLL_ENABLE, amdgpu_sriov_vf(adev)); 854 + F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0); 853 855 WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl); 854 856 855 857 /* enable DMA RB */ ··· 940 942 wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL); 941 943 wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, 942 944 SDMA0_PAGE_RB_WPTR_POLL_CNTL, 943 - F32_POLL_ENABLE, amdgpu_sriov_vf(adev)); 945 + F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0); 944 946 WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl); 945 947 946 948 /* enable DMA RB */
+8 -1
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 470 470 case CHIP_VEGA12: 471 471 soc15_asic_get_baco_capability(adev, &baco_reset); 472 472 break; 473 + case CHIP_VEGA20: 474 + if (adev->psp.sos_fw_version >= 0x80067) 475 + soc15_asic_get_baco_capability(adev, &baco_reset); 476 + else 477 + baco_reset = false; 478 + break; 473 479 default: 474 480 baco_reset = false; 475 481 break; ··· 901 895 902 896 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; 903 897 } else if (adev->pdev->device == 0x15d8) { 904 - adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS | 898 + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | 899 + AMD_CG_SUPPORT_GFX_MGLS | 905 900 AMD_CG_SUPPORT_GFX_CP_LS | 906 901 AMD_CG_SUPPORT_GFX_3D_CGCG | 907 902 AMD_CG_SUPPORT_GFX_3D_CGLS |
+1 -1
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
··· 283 283 } 284 284 285 285 if (vce_v2_0_wait_for_idle(adev)) { 286 - DRM_INFO("VCE is busy, Can't set clock gateing"); 286 + DRM_INFO("VCE is busy, Can't set clock gating"); 287 287 return 0; 288 288 } 289 289
+6 -9
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
··· 382 382 static int vce_v4_0_stop(struct amdgpu_device *adev) 383 383 { 384 384 385 + /* Disable VCPU */ 385 386 WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001); 386 387 387 388 /* hold on ECPU */ ··· 390 389 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 391 390 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); 392 391 393 - /* clear BUSY flag */ 394 - WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK); 392 + /* clear VCE_STATUS */ 393 + WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0); 395 394 396 395 /* Set Clock-Gating off */ 397 396 /* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) ··· 923 922 924 923 return 0; 925 924 } 925 + #endif 926 926 927 927 static int vce_v4_0_set_powergating_state(void *handle, 928 928 enum amd_powergating_state state) ··· 937 935 */ 938 936 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 939 937 940 - if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) 941 - return 0; 942 - 943 938 if (state == AMD_PG_STATE_GATE) 944 - /* XXX do we need a vce_v4_0_stop()? */ 945 - return 0; 939 + return vce_v4_0_stop(adev); 946 940 else 947 941 return vce_v4_0_start(adev); 948 942 } 949 - #endif 950 943 951 944 static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, 952 945 struct amdgpu_ib *ib, uint32_t flags) ··· 1056 1059 .soft_reset = NULL /* vce_v4_0_soft_reset */, 1057 1060 .post_soft_reset = NULL /* vce_v4_0_post_soft_reset */, 1058 1061 .set_clockgating_state = vce_v4_0_set_clockgating_state, 1059 - .set_powergating_state = NULL /* vce_v4_0_set_powergating_state */, 1062 + .set_powergating_state = vce_v4_0_set_powergating_state, 1060 1063 }; 1061 1064 1062 1065 static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
+2 -2
drivers/gpu/drm/amd/amdkfd/kfd_device.c
··· 494 494 { 495 495 unsigned int size; 496 496 497 - kfd->mec_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd, 497 + kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, 498 498 KGD_ENGINE_MEC1); 499 - kfd->sdma_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd, 499 + kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, 500 500 KGD_ENGINE_SDMA1); 501 501 kfd->shared_resources = *gpu_resources; 502 502
+239 -63
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 364 364 struct amdgpu_device *adev = irq_params->adev; 365 365 struct amdgpu_crtc *acrtc; 366 366 struct dm_crtc_state *acrtc_state; 367 + unsigned long flags; 367 368 368 369 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); 369 370 ··· 380 379 * page-flip completion events that have been queued to us 381 380 * if a pageflip happened inside front-porch. 382 381 */ 383 - if (amdgpu_dm_vrr_active(acrtc_state)) 382 + if (amdgpu_dm_vrr_active(acrtc_state)) { 384 383 drm_crtc_handle_vblank(&acrtc->base); 384 + 385 + /* BTR processing for pre-DCE12 ASICs */ 386 + if (acrtc_state->stream && 387 + adev->family < AMDGPU_FAMILY_AI) { 388 + spin_lock_irqsave(&adev->ddev->event_lock, flags); 389 + mod_freesync_handle_v_update( 390 + adev->dm.freesync_module, 391 + acrtc_state->stream, 392 + &acrtc_state->vrr_params); 393 + 394 + dc_stream_adjust_vmin_vmax( 395 + adev->dm.dc, 396 + acrtc_state->stream, 397 + &acrtc_state->vrr_params.adjust); 398 + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 399 + } 400 + } 385 401 } 386 402 } 387 403 ··· 408 390 struct amdgpu_device *adev = irq_params->adev; 409 391 struct amdgpu_crtc *acrtc; 410 392 struct dm_crtc_state *acrtc_state; 393 + unsigned long flags; 411 394 412 395 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); 413 396 ··· 431 412 */ 432 413 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); 433 414 434 - if (acrtc_state->stream && 415 + if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI && 435 416 acrtc_state->vrr_params.supported && 436 417 acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { 418 + spin_lock_irqsave(&adev->ddev->event_lock, flags); 437 419 mod_freesync_handle_v_update( 438 420 adev->dm.freesync_module, 439 421 acrtc_state->stream, ··· 444 424 adev->dm.dc, 445 425 acrtc_state->stream, 446 426 &acrtc_state->vrr_params.adjust); 427 + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 447 428 } 448 429 } 449 430 } ··· 554 533 555 534 if (amdgpu_dc_feature_mask & DC_FBC_MASK) 556 535 init_data.flags.fbc_support = true; 536 + 537 + init_data.flags.power_down_display_on_boot = true; 557 538 558 539 /* Display Core create. */ 559 540 adev->dm.dc = dc_create(&init_data); ··· 3461 3438 dc_stream_retain(state->stream); 3462 3439 } 3463 3440 3441 + state->active_planes = cur->active_planes; 3442 + state->interrupts_enabled = cur->interrupts_enabled; 3464 3443 state->vrr_params = cur->vrr_params; 3465 3444 state->vrr_infopacket = cur->vrr_infopacket; 3466 3445 state->abm_level = cur->abm_level; ··· 3887 3862 { 3888 3863 } 3889 3864 3890 - static bool does_crtc_have_active_plane(struct drm_crtc_state *new_crtc_state) 3865 + static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state) 3866 + { 3867 + struct drm_device *dev = new_crtc_state->crtc->dev; 3868 + struct drm_plane *plane; 3869 + 3870 + drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) { 3871 + if (plane->type == DRM_PLANE_TYPE_CURSOR) 3872 + return true; 3873 + } 3874 + 3875 + return false; 3876 + } 3877 + 3878 + static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state) 3891 3879 { 3892 3880 struct drm_atomic_state *state = new_crtc_state->state; 3893 3881 struct drm_plane *plane; ··· 3929 3891 num_active += (new_plane_state->fb != NULL); 3930 3892 } 3931 3893 3932 - return num_active > 0; 3894 + return num_active; 3895 + } 3896 + 3897 + /* 3898 + * Sets whether interrupts should be enabled on a specific CRTC. 
3899 + * We require that the stream be enabled and that there exist active 3900 + * DC planes on the stream. 3901 + */ 3902 + static void 3903 + dm_update_crtc_interrupt_state(struct drm_crtc *crtc, 3904 + struct drm_crtc_state *new_crtc_state) 3905 + { 3906 + struct dm_crtc_state *dm_new_crtc_state = 3907 + to_dm_crtc_state(new_crtc_state); 3908 + 3909 + dm_new_crtc_state->active_planes = 0; 3910 + dm_new_crtc_state->interrupts_enabled = false; 3911 + 3912 + if (!dm_new_crtc_state->stream) 3913 + return; 3914 + 3915 + dm_new_crtc_state->active_planes = 3916 + count_crtc_active_planes(new_crtc_state); 3917 + 3918 + dm_new_crtc_state->interrupts_enabled = 3919 + dm_new_crtc_state->active_planes > 0; 3933 3920 } 3934 3921 3935 3922 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, ··· 3964 3901 struct dc *dc = adev->dm.dc; 3965 3902 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state); 3966 3903 int ret = -EINVAL; 3904 + 3905 + /* 3906 + * Update interrupt state for the CRTC. This needs to happen whenever 3907 + * the CRTC has changed or whenever any of its planes have changed. 3908 + * Atomic check satisfies both of these requirements since the CRTC 3909 + * is added to the state by DRM during drm_atomic_helper_check_planes. 3910 + */ 3911 + dm_update_crtc_interrupt_state(crtc, state); 3967 3912 3968 3913 if (unlikely(!dm_crtc_state->stream && 3969 3914 modeset_required(state, NULL, dm_crtc_state->stream))) { ··· 3983 3912 if (!dm_crtc_state->stream) 3984 3913 return 0; 3985 3914 3986 - /* We want at least one hardware plane enabled to use the stream. */ 3915 + /* 3916 + * We want at least one hardware plane enabled to use 3917 + * the stream with a cursor enabled. 3918 + */ 3987 3919 if (state->enable && state->active && 3988 - !does_crtc_have_active_plane(state)) 3920 + does_crtc_have_active_cursor(state) && 3921 + dm_crtc_state->active_planes == 0) 3989 3922 return -EINVAL; 3990 3923 3991 3924 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) ··· 4263 4188 DRM_FORMAT_ABGR2101010, 4264 4189 DRM_FORMAT_XBGR8888, 4265 4190 DRM_FORMAT_ABGR8888, 4191 + DRM_FORMAT_RGB565, 4266 4192 }; 4267 4193 4268 4194 static const uint32_t overlay_formats[] = { ··· 4272 4196 DRM_FORMAT_RGBA8888, 4273 4197 DRM_FORMAT_XBGR8888, 4274 4198 DRM_FORMAT_ABGR8888, 4199 + DRM_FORMAT_RGB565 4275 4200 }; 4276 4201 4277 4202 static const u32 cursor_formats[] = { ··· 5076 4999 struct dc_plane_state *surface, 5077 5000 u32 flip_timestamp_in_us) 5078 5001 { 5079 - struct mod_vrr_params vrr_params = new_crtc_state->vrr_params; 5002 + struct mod_vrr_params vrr_params; 5080 5003 struct dc_info_packet vrr_infopacket = {0}; 5004 + struct amdgpu_device *adev = dm->adev; 5005 + unsigned long flags; 5081 5006 5082 5007 if (!new_stream) 5083 5008 return; ··· 5092 5013 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 5093 5014 return; 5094 5015 5016 + spin_lock_irqsave(&adev->ddev->event_lock, flags); 5017 + vrr_params = new_crtc_state->vrr_params; 5018 + 5095 5019 if (surface) { 5096 5020 mod_freesync_handle_preflip( 5097 5021 dm->freesync_module, ··· 5102 5020 new_stream, 5103 5021 flip_timestamp_in_us, 5104 5022 &vrr_params); 5023 + 5024 + if (adev->family < AMDGPU_FAMILY_AI && 5025 + amdgpu_dm_vrr_active(new_crtc_state)) { 5026 + mod_freesync_handle_v_update(dm->freesync_module, 5027 + new_stream, &vrr_params); 5028 + } 5105 5029 } 5106 5030 5107 5031 mod_freesync_build_vrr_infopacket( ··· 5139 5051 new_crtc_state->base.crtc->base.id, 5140 5052 (int)new_crtc_state->base.vrr_enabled, 
5141 5053 (int)vrr_params.state); 5054 + 5055 + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 5142 5056 } 5143 5057 5144 5058 static void pre_update_freesync_state_on_stream( ··· 5148 5058 struct dm_crtc_state *new_crtc_state) 5149 5059 { 5150 5060 struct dc_stream_state *new_stream = new_crtc_state->stream; 5151 - struct mod_vrr_params vrr_params = new_crtc_state->vrr_params; 5061 + struct mod_vrr_params vrr_params; 5152 5062 struct mod_freesync_config config = new_crtc_state->freesync_config; 5063 + struct amdgpu_device *adev = dm->adev; 5064 + unsigned long flags; 5153 5065 5154 5066 if (!new_stream) 5155 5067 return; ··· 5162 5070 */ 5163 5071 if (!new_stream->timing.h_total || !new_stream->timing.v_total) 5164 5072 return; 5073 + 5074 + spin_lock_irqsave(&adev->ddev->event_lock, flags); 5075 + vrr_params = new_crtc_state->vrr_params; 5165 5076 5166 5077 if (new_crtc_state->vrr_supported && 5167 5078 config.min_refresh_in_uhz && ··· 5186 5091 sizeof(vrr_params.adjust)) != 0); 5187 5092 5188 5093 new_crtc_state->vrr_params = vrr_params; 5094 + spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 5189 5095 } 5190 5096 5191 5097 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, ··· 5217 5121 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", 5218 5122 __func__, new_state->base.crtc->base.id); 5219 5123 } 5124 + } 5125 + 5126 + static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) 5127 + { 5128 + struct drm_plane *plane; 5129 + struct drm_plane_state *old_plane_state, *new_plane_state; 5130 + int i; 5131 + 5132 + /* 5133 + * TODO: Make this per-stream so we don't issue redundant updates for 5134 + * commits with multiple streams. 5135 + */ 5136 + for_each_oldnew_plane_in_state(state, plane, old_plane_state, 5137 + new_plane_state, i) 5138 + if (plane->type == DRM_PLANE_TYPE_CURSOR) 5139 + handle_cursor_update(plane, old_plane_state); 5220 5140 } 5221 5141 5222 5142 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, ··· 5273 5161 dm_error("Failed to allocate update bundle\n"); 5274 5162 goto cleanup; 5275 5163 } 5164 + 5165 + /* 5166 + * Disable the cursor first if we're disabling all the planes. 5167 + * It'll remain on the screen after the planes are re-enabled 5168 + * if we don't. 5169 + */ 5170 + if (acrtc_state->active_planes == 0) 5171 + amdgpu_dm_commit_cursors(state); 5276 5172 5277 5173 /* update planes when needed */ 5278 5174 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { ··· 5325 5205 continue; 5326 5206 } 5327 5207 5208 + abo = gem_to_amdgpu_bo(fb->obj[0]); 5209 + 5210 + /* 5211 + * Wait for all fences on this FB. Do limited wait to avoid 5212 + * deadlock during GPU reset when this fence will not signal 5213 + * but we hold reservation lock for the BO. 
5214 + */ 5215 + r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true, 5216 + false, 5217 + msecs_to_jiffies(5000)); 5218 + if (unlikely(r <= 0)) 5219 + DRM_ERROR("Waiting for fences timed out or interrupted!"); 5220 + 5328 5221 /* 5329 5222 * TODO This might fail and hence better not used, wait 5330 5223 * explicitly on fences instead 5331 5224 * and in general should be called for 5332 5225 * blocking commit to as per framework helpers 5333 5226 */ 5334 - abo = gem_to_amdgpu_bo(fb->obj[0]); 5335 5227 r = amdgpu_bo_reserve(abo, true); 5336 - if (unlikely(r != 0)) { 5228 + if (unlikely(r != 0)) 5337 5229 DRM_ERROR("failed to reserve buffer before flip\n"); 5338 - WARN_ON(1); 5339 - } 5340 - 5341 - /* Wait for all fences on this FB */ 5342 - WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false, 5343 - MAX_SCHEDULE_TIMEOUT) < 0); 5344 5230 5345 5231 amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 5346 5232 ··· 5455 5329 } 5456 5330 } 5457 5331 5458 - if (planes_count) { 5332 + /* Update the planes if changed or disable if we don't have any. */ 5333 + if (planes_count || acrtc_state->active_planes == 0) { 5459 5334 if (new_pcrtc_state->mode_changed) { 5460 5335 bundle->stream_update.src = acrtc_state->stream->src; 5461 5336 bundle->stream_update.dst = acrtc_state->stream->dst; ··· 5479 5352 mutex_unlock(&dm->dc_lock); 5480 5353 } 5481 5354 5482 - for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) 5483 - if (plane->type == DRM_PLANE_TYPE_CURSOR) 5484 - handle_cursor_update(plane, old_plane_state); 5355 + /* 5356 + * Update cursor state *after* programming all the planes. 5357 + * This avoids redundant programming in the case where we're going 5358 + * to be disabling a single plane - those pipes are being disabled. 5359 + */ 5360 + if (acrtc_state->active_planes) 5361 + amdgpu_dm_commit_cursors(state); 5485 5362 5486 5363 cleanup: 5487 5364 kfree(bundle); 5365 + } 5366 + 5367 + /* 5368 + * Enable interrupts on CRTCs that are newly active, undergone 5369 + * a modeset, or have active planes again. 5370 + * 5371 + * Done in two passes, based on the for_modeset flag: 5372 + * Pass 1: For CRTCs going through modeset 5373 + * Pass 2: For CRTCs going from 0 to n active planes 5374 + * 5375 + * Interrupts can only be enabled after the planes are programmed, 5376 + * so this requires a two-pass approach since we don't want to 5377 + * just defer the interrupts until after commit planes every time. 
5378 + */ 5379 + static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev, 5380 + struct drm_atomic_state *state, 5381 + bool for_modeset) 5382 + { 5383 + struct amdgpu_device *adev = dev->dev_private; 5384 + struct drm_crtc *crtc; 5385 + struct drm_crtc_state *old_crtc_state, *new_crtc_state; 5386 + int i; 5387 + 5388 + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 5389 + new_crtc_state, i) { 5390 + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 5391 + struct dm_crtc_state *dm_new_crtc_state = 5392 + to_dm_crtc_state(new_crtc_state); 5393 + struct dm_crtc_state *dm_old_crtc_state = 5394 + to_dm_crtc_state(old_crtc_state); 5395 + bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state); 5396 + bool run_pass; 5397 + 5398 + run_pass = (for_modeset && modeset) || 5399 + (!for_modeset && !modeset && 5400 + !dm_old_crtc_state->interrupts_enabled); 5401 + 5402 + if (!run_pass) 5403 + continue; 5404 + 5405 + if (!dm_new_crtc_state->interrupts_enabled) 5406 + continue; 5407 + 5408 + manage_dm_interrupts(adev, acrtc, true); 5409 + 5410 + #ifdef CONFIG_DEBUG_FS 5411 + /* The stream has changed so CRC capture needs to be re-enabled. */ 5412 + if (dm_new_crtc_state->crc_enabled) { 5413 + dm_new_crtc_state->crc_enabled = false; 5414 + amdgpu_dm_crtc_set_crc_source(crtc, "auto"); 5415 + } 5416 + #endif 5417 + } 5488 5418 } 5489 5419 5490 5420 /* ··· 5568 5384 int i; 5569 5385 5570 5386 /* 5571 - * We evade vblanks and pflips on crtc that 5572 - * should be changed. We do it here to flush & disable 5573 - * interrupts before drm_swap_state is called in drm_atomic_helper_commit 5574 - * it will update crtc->dm_crtc_state->stream pointer which is used in 5575 - * the ISRs. 5387 + * We evade vblank and pflip interrupts on CRTCs that are undergoing 5388 + * a modeset, being disabled, or have no active planes. 5389 + * 5390 + * It's done in atomic commit rather than commit tail for now since 5391 + * some of these interrupt handlers access the current CRTC state and 5392 + * potentially the stream pointer itself. 5393 + * 5394 + * Since the atomic state is swapped within atomic commit and not within 5395 + * commit tail this would lead to the new state (that hasn't been committed yet) 5396 + * being accessed from within the handlers. 5397 + * 5398 + * TODO: Fix this so we can do this in commit tail and not have to block 5399 + * in atomic check. 5576 5400 */ 5577 5401 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 5578 5402 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 5579 5403 struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 5580 5404 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 5581 5405 5582 - if (drm_atomic_crtc_needs_modeset(new_crtc_state) 5583 - && dm_old_crtc_state->stream) { 5406 + if (dm_old_crtc_state->interrupts_enabled && 5407 + (!dm_new_crtc_state->interrupts_enabled || 5408 + drm_atomic_crtc_needs_modeset(new_crtc_state))) { 5584 5409 /* 5585 - * If the stream is removed and CRC capture was 5586 - * enabled on the CRTC the extra vblank reference 5587 - * needs to be dropped since CRC capture will be 5588 - * disabled. 5410 + * Drop the extra vblank reference added by CRC 5411 + * capture if applicable.
5589 5412 */ 5590 - if (!dm_new_crtc_state->stream 5591 - && dm_new_crtc_state->crc_enabled) { 5413 + if (dm_new_crtc_state->crc_enabled) 5592 5414 drm_crtc_vblank_put(crtc); 5415 + 5416 + /* 5417 + * Only keep CRC capture enabled if there's 5418 + * still a stream for the CRTC. 5419 + */ 5420 + if (!dm_new_crtc_state->stream) 5593 5421 dm_new_crtc_state->crc_enabled = false; 5594 - } 5595 5422 5596 5423 manage_dm_interrupts(adev, acrtc, false); 5597 5424 } ··· 5818 5623 mutex_unlock(&dm->dc_lock); 5819 5624 } 5820 5625 5821 - /* Update freesync state before amdgpu_dm_handle_vrr_transition(). */ 5822 - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 5823 - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 5824 - pre_update_freesync_state_on_stream(dm, dm_new_crtc_state); 5825 - } 5826 - 5626 + /* Count number of newly disabled CRTCs for dropping PM refs later. */ 5827 5627 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 5828 - new_crtc_state, i) { 5829 - /* 5830 - * loop to enable interrupts on newly arrived crtc 5831 - */ 5832 - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 5833 - bool modeset_needed; 5834 - 5628 + new_crtc_state, i) { 5835 5629 if (old_crtc_state->active && !new_crtc_state->active) 5836 5630 crtc_disable_count++; 5837 5631 5838 5632 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 5839 5633 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 5840 5634 5635 + /* Update freesync active state. */ 5636 + pre_update_freesync_state_on_stream(dm, dm_new_crtc_state); 5637 + 5841 5638 /* Handle vrr on->off / off->on transitions */ 5842 5639 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, 5843 5640 dm_new_crtc_state); 5844 - 5845 - modeset_needed = modeset_required( 5846 - new_crtc_state, 5847 - dm_new_crtc_state->stream, 5848 - dm_old_crtc_state->stream); 5849 - 5850 - if (dm_new_crtc_state->stream == NULL || !modeset_needed) 5851 - continue; 5852 - 5853 - manage_dm_interrupts(adev, acrtc, true); 5854 - 5855 - #ifdef CONFIG_DEBUG_FS 5856 - /* The stream has changed so CRC capture needs to re-enabled. */ 5857 - if (dm_new_crtc_state->crc_enabled) 5858 - amdgpu_dm_crtc_set_crc_source(crtc, "auto"); 5859 - #endif 5860 5641 } 5642 + 5643 + /* Enable interrupts for CRTCs going through a modeset. */ 5644 + amdgpu_dm_enable_crtc_interrupts(dev, state, true); 5861 5645 5862 5646 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) 5863 5647 if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) ··· 5851 5677 dm, crtc, wait_for_vblank); 5852 5678 } 5853 5679 5680 + /* Enable interrupts for CRTCs going from 0 to n active planes. */ 5681 + amdgpu_dm_enable_crtc_interrupts(dev, state, false); 5854 5682 5855 5683 /* 5856 5684 * send vblank event on all events not handled in flip and
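The interrupt rework above replaces the single "enable on modeset" loop with an explicit two-pass helper driven by the new active_planes/interrupts_enabled CRTC state. A sketch of the resulting ordering in the DM commit-tail path, using only names from this diff with the surrounding code elided:

	/* Pass 1: CRTCs that went through a modeset, before plane programming. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, true);

	/* ... amdgpu_dm_commit_planes() programs the planes per CRTC ... */

	/* Pass 2: CRTCs that merely went from 0 to n active planes, after the
	 * planes (and their cursors) have been programmed. */
	amdgpu_dm_enable_crtc_interrupts(dev, state, false);

Disabling takes the mirrored path in atomic commit: a CRTC whose interrupts were enabled but that is undergoing a modeset, or whose new state has interrupts_enabled cleared, gets manage_dm_interrupts(adev, acrtc, false) before the state swap.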
+3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 271 271 struct drm_crtc_state base; 272 272 struct dc_stream_state *stream; 273 273 274 + int active_planes; 275 + bool interrupts_enabled; 276 + 274 277 int crc_skip_count; 275 278 bool crc_enabled; 276 279
+55 -16
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 514 514 } 515 515 516 516 517 + static void read_edp_current_link_settings_on_detect(struct dc_link *link) 518 + { 519 + union lane_count_set lane_count_set = { {0} }; 520 + uint8_t link_bw_set; 521 + uint8_t link_rate_set; 522 + 523 + // Read DPCD 00101h to find out the number of lanes currently set 524 + core_link_read_dpcd(link, DP_LANE_COUNT_SET, 525 + &lane_count_set.raw, sizeof(lane_count_set)); 526 + link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET; 527 + 528 + // Read DPCD 00100h to find if standard link rates are set 529 + core_link_read_dpcd(link, DP_LINK_BW_SET, 530 + &link_bw_set, sizeof(link_bw_set)); 531 + 532 + if (link_bw_set == 0) { 533 + /* If standard link rates are not being used, 534 + * Read DPCD 00115h to find the link rate set used 535 + */ 536 + core_link_read_dpcd(link, DP_LINK_RATE_SET, 537 + &link_rate_set, sizeof(link_rate_set)); 538 + 539 + if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { 540 + link->cur_link_settings.link_rate = 541 + link->dpcd_caps.edp_supported_link_rates[link_rate_set]; 542 + link->cur_link_settings.link_rate_set = link_rate_set; 543 + link->cur_link_settings.use_link_rate_set = true; 544 + } 545 + } else { 546 + link->cur_link_settings.link_rate = link_bw_set; 547 + link->cur_link_settings.use_link_rate_set = false; 548 + } 549 + } 550 + 517 551 static bool detect_dp( 518 552 struct dc_link *link, 519 553 struct display_sink_capability *sink_caps, ··· 682 648 return false; 683 649 } 684 650 685 - if (link->connector_signal == SIGNAL_TYPE_EDP && 686 - link->local_sink) 687 - return true; 651 + if (link->connector_signal == SIGNAL_TYPE_EDP) { 652 + /* On detect, we want to make sure current link settings are 653 + * up to date, especially if link was powered on by GOP. 654 + */ 655 + read_edp_current_link_settings_on_detect(link); 656 + if (link->local_sink) 657 + return true; 658 + } 688 659 689 660 if (link->connector_signal == SIGNAL_TYPE_LVDS && 690 661 link->local_sink) ··· 1435 1396 /* get link settings for video mode timing */ 1436 1397 decide_link_settings(stream, &link_settings); 1437 1398 1438 - /* If link settings are different than current and link already enabled 1439 - * then need to disable before programming to new rate. 1440 - */ 1441 - if (link->link_status.link_active && 1442 - (link->cur_link_settings.lane_count != link_settings.lane_count || 1443 - link->cur_link_settings.link_rate != link_settings.link_rate)) { 1444 - dp_disable_link_phy(link, pipe_ctx->stream->signal); 1399 + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { 1400 + /* If link settings are different than current and link already enabled 1401 + * then need to disable before programming to new rate. 
1402 + */ 1403 + if (link->link_status.link_active && 1404 + (link->cur_link_settings.lane_count != link_settings.lane_count || 1405 + link->cur_link_settings.link_rate != link_settings.link_rate)) { 1406 + dp_disable_link_phy(link, pipe_ctx->stream->signal); 1407 + } 1408 + 1409 + /*in case it is not on*/ 1410 + link->dc->hwss.edp_power_control(link, true); 1411 + link->dc->hwss.edp_wait_for_hpd_ready(link, true); 1445 1412 } 1446 1413 1447 1414 pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = ··· 1493 1448 struct pipe_ctx *pipe_ctx) 1494 1449 { 1495 1450 enum dc_status status; 1496 - struct dc_stream_state *stream = pipe_ctx->stream; 1497 - struct dc_link *link = stream->link; 1498 - /*in case it is not on*/ 1499 - link->dc->hwss.edp_power_control(link, true); 1500 - link->dc->hwss.edp_wait_for_hpd_ready(link, true); 1501 1451 1502 1452 status = enable_link_dp(state, pipe_ctx); 1503 - 1504 1453 1505 1454 return status; 1506 1455 }
+22 -1
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 163 163 return stream; 164 164 } 165 165 166 + struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) 167 + { 168 + struct dc_stream_state *new_stream; 169 + 170 + new_stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL); 171 + if (!new_stream) 172 + return NULL; 173 + 174 + memcpy(new_stream, stream, sizeof(struct dc_stream_state)); 175 + 176 + if (new_stream->sink) 177 + dc_sink_retain(new_stream->sink); 178 + 179 + if (new_stream->out_transfer_func) 180 + dc_transfer_func_retain(new_stream->out_transfer_func); 181 + 182 + kref_init(&new_stream->refcount); 183 + 184 + return new_stream; 185 + } 186 + 166 187 /** 167 188 * dc_stream_get_status_from_state - Get stream status from given dc state 168 189 * @state: DC state to find the stream status in ··· 333 312 (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) || 334 313 !pipe_ctx->plane_state || 335 314 (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) || 336 - !pipe_ctx->plane_res.ipp) 315 + (!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp)) 337 316 continue; 338 317 339 318 if (!pipe_to_program) {
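The new dc_copy_stream() above hands callers an independent copy with its own refcount, taking extra references on the sink and output transfer function it still shares with the original. A minimal usage sketch, assuming the copy is released with the existing dc_stream_release():

	struct dc_stream_state *clone = dc_copy_stream(stream);

	if (clone) {
		/* ... modify the clone without touching the original ... */
		dc_stream_release(clone);	/* drops the kref taken by the copy */
	}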
+2 -1
drivers/gpu/drm/amd/display/dc/dc.h
··· 39 39 #include "inc/hw/dmcu.h" 40 40 #include "dml/display_mode_lib.h" 41 41 42 - #define DC_VER "3.2.26" 42 + #define DC_VER "3.2.27" 43 43 44 44 #define MAX_SURFACES 3 45 45 #define MAX_PLANES 6 ··· 204 204 bool optimize_edp_link_rate; 205 205 bool disable_fractional_pwm; 206 206 bool allow_seamless_boot_optimization; 207 + bool power_down_display_on_boot; 207 208 }; 208 209 209 210 enum visual_confirm {
+1
drivers/gpu/drm/amd/display/dc/dc_link.h
··· 120 120 /* MST record stream using this link */ 121 121 struct link_flags { 122 122 bool dp_keep_receiver_powered; 123 + bool dp_skip_DID2; 123 124 } wa_flags; 124 125 struct link_mst_stream_allocation_table mst_stream_alloc_table; 125 126
+2
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 307 307 */ 308 308 struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); 309 309 310 + struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream); 311 + 310 312 void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink); 311 313 312 314 void dc_stream_retain(struct dc_stream_state *dc_stream);
-18
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
··· 50 50 #define MCP_ABM_LEVEL_SET 0x65 51 51 #define MCP_ABM_PIPE_SET 0x66 52 52 #define MCP_BL_SET 0x67 53 - #define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */ 54 53 55 54 #define MCP_DISABLE_ABM_IMMEDIATELY 255 56 55 ··· 389 390 /* Unlock group 2 backlight registers */ 390 391 REG_UPDATE(BL_PWM_GRP1_REG_LOCK, 391 392 BL_PWM_GRP1_REG_LOCK, 0); 392 - 393 - /* Wait until microcontroller is ready to process interrupt */ 394 - REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); 395 - 396 - /* Set PWM fractional enable/disable */ 397 - value = (abm->ctx->dc->config.disable_fractional_pwm == false) ? 1 : 0; 398 - REG_WRITE(MASTER_COMM_DATA_REG1, value); 399 - 400 - /* Set command to enable or disable fractional PWM microcontroller */ 401 - REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, 402 - MCP_BL_SET_PWM_FRAC); 403 - 404 - /* Notify microcontroller of new command */ 405 - REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); 406 - 407 - /* Ensure command has been executed before continuing */ 408 - REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); 409 393 410 394 return true; 411 395 }
+33 -2
drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
··· 51 51 #define PSR_SET_WAITLOOP 0x31 52 52 #define MCP_INIT_DMCU 0x88 53 53 #define MCP_INIT_IRAM 0x89 54 + #define MCP_SYNC_PHY_LOCK 0x90 55 + #define MCP_SYNC_PHY_UNLOCK 0x91 56 + #define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */ 54 57 #define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L 55 58 56 59 static bool dce_dmcu_init(struct dmcu *dmcu) ··· 342 339 IRAM_RD_ADDR_AUTO_INC, 0); 343 340 } 344 341 342 + static void dcn10_dmcu_enable_fractional_pwm(struct dmcu *dmcu, 343 + uint32_t fractional_pwm) 344 + { 345 + struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); 346 + 347 + /* Wait until microcontroller is ready to process interrupt */ 348 + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); 349 + 350 + /* Set PWM fractional enable/disable */ 351 + REG_WRITE(MASTER_COMM_DATA_REG1, fractional_pwm); 352 + 353 + /* Set command to enable or disable fractional PWM microcontroller */ 354 + REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, 355 + MCP_BL_SET_PWM_FRAC); 356 + 357 + /* Notify microcontroller of new command */ 358 + REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); 359 + 360 + /* Ensure command has been executed before continuing */ 361 + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); 362 + } 363 + 345 364 static bool dcn10_dmcu_init(struct dmcu *dmcu) 346 365 { 347 366 struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); 367 + const struct dc_config *config = &dmcu->ctx->dc->config; 348 368 bool status = false; 349 369 350 370 /* Definition of DC_DMCU_SCRATCH ··· 405 379 if (dmcu->dmcu_state == DMCU_RUNNING) { 406 380 /* Retrieve and cache the DMCU firmware version. */ 407 381 dcn10_get_dmcu_version(dmcu); 382 + 383 + /* Initialize DMCU to use fractional PWM or not */ 384 + dcn10_dmcu_enable_fractional_pwm(dmcu, 385 + (config->disable_fractional_pwm == false) ? 1 : 0); 408 386 status = true; 409 - } else 387 + } else { 410 388 status = false; 389 + } 411 390 412 391 break; 413 392 case DMCU_RUNNING: ··· 721 690 return true; 722 691 } 723 692 724 - #endif 693 + #endif //(CONFIG_DRM_AMD_DC_DCN1_0) 725 694 726 695 static const struct dmcu_funcs dce_funcs = { 727 696 .dmcu_init = dce_dmcu_init,
+6 -21
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
··· 151 151 struct dc *dc = clk_mgr->ctx->dc; 152 152 struct dc_debug_options *debug = &dc->debug; 153 153 struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; 154 - struct pp_smu_display_requirement_rv *smu_req_cur = 155 - &dc->res_pool->pp_smu_req; 156 - struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; 157 154 struct pp_smu_funcs_rv *pp_smu = NULL; 158 155 bool send_request_to_increase = false; 159 156 bool send_request_to_lower = false; ··· 172 175 */ 173 176 if (pp_smu && pp_smu->set_display_count) 174 177 pp_smu->set_display_count(&pp_smu->pp_smu, display_count); 175 - 176 - smu_req.display_count = display_count; 177 178 } 178 179 179 180 if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz ··· 182 187 183 188 if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) { 184 189 clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz; 185 - 186 190 send_request_to_lower = true; 187 191 } 188 192 ··· 191 197 192 198 if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) { 193 199 clk_mgr->clks.fclk_khz = new_clocks->fclk_khz; 194 - smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000; 195 - 196 200 send_request_to_lower = true; 197 201 } 198 202 199 203 //DCF Clock 200 204 if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) { 201 205 clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz; 202 - smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000; 203 - 204 206 send_request_to_lower = true; 205 207 } 206 208 207 209 if (should_set_clock(safe_to_lower, 208 210 new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) { 209 211 clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; 210 - smu_req.min_deep_sleep_dcefclk_mhz = (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000; 211 - 212 212 send_request_to_lower = true; 213 213 } 214 214 ··· 215 227 pp_smu->set_hard_min_dcfclk_by_freq && 216 228 pp_smu->set_min_deep_sleep_dcfclk) { 217 229 218 - pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz); 219 - pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz); 220 - pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz); 230 + pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000); 231 + pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000); 232 + pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000); 221 233 } 222 234 } 223 235 ··· 227 239 || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) { 228 240 dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks); 229 241 clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; 230 - 231 242 send_request_to_lower = true; 232 243 } 233 244 ··· 236 249 pp_smu->set_hard_min_dcfclk_by_freq && 237 250 pp_smu->set_min_deep_sleep_dcfclk) { 238 251 239 - pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz); 240 - pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz); 241 - pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz); 252 + pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000); 253 + pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000); 254 + pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000); 242 255 } 243 256 } 244 - 245 - *smu_req_cur = smu_req; 246 257 } 247 258 static const struct clk_mgr_funcs dcn1_funcs = { 248 259 .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+33 -16
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
··· 283 283 hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns; 284 284 prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, 285 285 refclk_mhz, 0x1fffff); 286 - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); 286 + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, 287 + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); 287 288 288 289 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" 289 290 "HW register value = 0x%x\n", ··· 311 310 prog_wm_value = convert_and_clamp( 312 311 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, 313 312 refclk_mhz, 0x1fffff); 314 - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); 313 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, 314 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); 315 315 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" 316 316 "HW register value = 0x%x\n", 317 317 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); ··· 325 323 prog_wm_value = convert_and_clamp( 326 324 watermarks->a.cstate_pstate.cstate_exit_ns, 327 325 refclk_mhz, 0x1fffff); 328 - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); 326 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, 327 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); 329 328 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" 330 329 "HW register value = 0x%x\n", 331 330 watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); ··· 340 337 prog_wm_value = convert_and_clamp( 341 338 watermarks->a.cstate_pstate.pstate_change_ns, 342 339 refclk_mhz, 0x1fffff); 343 - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); 340 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, 341 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); 344 342 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" 345 343 "HW register value = 0x%x\n\n", 346 344 watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); ··· 352 348 hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns; 353 349 prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, 354 350 refclk_mhz, 0x1fffff); 355 - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); 351 + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, 352 + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); 356 353 357 354 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" 358 355 "HW register value = 0x%x\n", ··· 380 375 prog_wm_value = convert_and_clamp( 381 376 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, 382 377 refclk_mhz, 0x1fffff); 383 - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); 378 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, 379 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); 384 380 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" 385 381 "HW register value = 0x%x\n", 386 382 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); ··· 394 388 prog_wm_value = convert_and_clamp( 395 389 watermarks->b.cstate_pstate.cstate_exit_ns, 396 390 refclk_mhz, 0x1fffff); 397 - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); 391 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, 392 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); 398 393 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" 399 394 "HW register value = 0x%x\n", 400 395 watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); ··· 409 402 
prog_wm_value = convert_and_clamp( 410 403 watermarks->b.cstate_pstate.pstate_change_ns, 411 404 refclk_mhz, 0x1fffff); 412 - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); 405 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, 406 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); 413 407 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" 414 408 "HW register value = 0x%x\n\n", 415 409 watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); ··· 421 413 hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; 422 414 prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, 423 415 refclk_mhz, 0x1fffff); 424 - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); 416 + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, 417 + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); 425 418 426 419 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" 427 420 "HW register value = 0x%x\n", ··· 449 440 prog_wm_value = convert_and_clamp( 450 441 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, 451 442 refclk_mhz, 0x1fffff); 452 - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); 443 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, 444 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); 453 445 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" 454 446 "HW register value = 0x%x\n", 455 447 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); ··· 463 453 prog_wm_value = convert_and_clamp( 464 454 watermarks->c.cstate_pstate.cstate_exit_ns, 465 455 refclk_mhz, 0x1fffff); 466 - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); 456 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, 457 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); 467 458 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" 468 459 "HW register value = 0x%x\n", 469 460 watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); ··· 478 467 prog_wm_value = convert_and_clamp( 479 468 watermarks->c.cstate_pstate.pstate_change_ns, 480 469 refclk_mhz, 0x1fffff); 481 - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); 470 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, 471 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); 482 472 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" 483 473 "HW register value = 0x%x\n\n", 484 474 watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); ··· 490 478 hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; 491 479 prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, 492 480 refclk_mhz, 0x1fffff); 493 - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); 481 + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, 482 + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); 494 483 495 484 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" 496 485 "HW register value = 0x%x\n", ··· 518 505 prog_wm_value = convert_and_clamp( 519 506 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, 520 507 refclk_mhz, 0x1fffff); 521 - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); 508 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, 509 + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); 522 510 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" 523 511 "HW register value = 0x%x\n", 524 512 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, 
prog_wm_value); ··· 532 518 prog_wm_value = convert_and_clamp( 533 519 watermarks->d.cstate_pstate.cstate_exit_ns, 534 520 refclk_mhz, 0x1fffff); 535 - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); 521 + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, 522 + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); 536 523 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" 537 524 "HW register value = 0x%x\n", 538 525 watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); ··· 547 532 prog_wm_value = convert_and_clamp( 548 533 watermarks->d.cstate_pstate.pstate_change_ns, 549 534 refclk_mhz, 0x1fffff); 550 - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); 535 + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, 536 + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); 551 537 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" 552 538 "HW register value = 0x%x\n\n", 553 539 watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); ··· 883 867 .dcc_support_pixel_format = hubbub1_dcc_support_pixel_format, 884 868 .get_dcc_compression_cap = hubbub1_get_dcc_compression_cap, 885 869 .wm_read_state = hubbub1_wm_read_state, 870 + .program_watermarks = hubbub1_program_watermarks, 886 871 }; 887 872 888 873 void hubbub1_construct(struct hubbub *hubbub,
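Switching these watermark writes from REG_WRITE to REG_SET routes them through the generated per-field shift/mask tables (the HUBBUB_SF entries added in the header below) instead of raw 32-bit stores, so values are confined to the field width. A minimal sketch of the idea, with illustrative names rather than the real DC macro internals:

#include <stdint.h>

/* Illustrative only: the real REG_SET resolves shift/mask through the
 * dcn_hubbub_shift/mask structs; this just shows the masked-write shape. */
static inline uint32_t set_field(uint32_t init_val, uint32_t shift,
                                 uint32_t mask, uint32_t field_val)
{
        return (init_val & ~mask) | ((field_val << shift) & mask);
}

/* REG_SET(REG, 0, FIELD, v) then behaves roughly like:
 *   write32(REG, set_field(0, FIELD__SHIFT, FIELD_MASK, v));
 * out-of-range bits of v are masked off rather than written through. */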
+52 -11
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
··· 32 32 #define TO_DCN10_HUBBUB(hubbub)\ 33 33 container_of(hubbub, struct dcn10_hubbub, base) 34 34 35 - #define HUBHUB_REG_LIST_DCN()\ 35 + #define HUBBUB_REG_LIST_DCN_COMMON()\ 36 36 SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\ 37 - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\ 38 37 SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\ 39 38 SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\ 40 - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\ 41 39 SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\ 42 40 SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\ 43 - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\ 44 41 SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\ 45 42 SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\ 46 - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\ 47 43 SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\ 48 44 SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\ 49 45 SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\ ··· 49 53 SR(DCHUBBUB_TEST_DEBUG_INDEX), \ 50 54 SR(DCHUBBUB_TEST_DEBUG_DATA),\ 51 55 SR(DCHUBBUB_SOFT_RESET) 56 + 57 + #define HUBBUB_VM_REG_LIST() \ 58 + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\ 59 + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\ 60 + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\ 61 + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D) 52 62 53 63 #define HUBBUB_SR_WATERMARK_REG_LIST()\ 54 64 SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\ ··· 67 65 SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D) 68 66 69 67 #define HUBBUB_REG_LIST_DCN10(id)\ 70 - HUBHUB_REG_LIST_DCN(), \ 68 + HUBBUB_REG_LIST_DCN_COMMON(), \ 69 + HUBBUB_VM_REG_LIST(), \ 71 70 HUBBUB_SR_WATERMARK_REG_LIST(), \ 72 71 SR(DCHUBBUB_SDPIF_FB_TOP),\ 73 72 SR(DCHUBBUB_SDPIF_FB_BASE),\ ··· 125 122 #define HUBBUB_SF(reg_name, field_name, post_fix)\ 126 123 .field_name = reg_name ## __ ## field_name ## post_fix 127 124 128 - 129 - #define HUBBUB_MASK_SH_LIST_DCN(mask_sh)\ 125 + #define HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh)\ 130 126 HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ 131 127 HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \ 132 128 HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \ ··· 135 133 HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \ 136 134 HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \ 137 135 HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \ 138 - HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh) 136 + HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \ 137 + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \ 138 + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \ 139 + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \ 140 + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, mask_sh), \ 141 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \ 142 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \ 143 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \ 144 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh) 145 + 146 + #define HUBBUB_MASK_SH_LIST_STUTTER(mask_sh) \ 147 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \ 148 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \ 149 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \ 150 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \ 151 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \ 152 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \ 153 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \ 154 + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, mask_sh) 139 155 140 156 #define HUBBUB_MASK_SH_LIST_DCN10(mask_sh)\ 141 - HUBBUB_MASK_SH_LIST_DCN(mask_sh), \ 157 + HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ 158 + HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ 142 159 HUBBUB_SF(DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \ 143 160 HUBBUB_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \ 144 161 HUBBUB_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \ ··· 188 167 type FB_OFFSET;\ 189 168 type AGP_BOT;\ 190 169 type AGP_TOP;\ 191 - type AGP_BASE 170 + type AGP_BASE;\ 171 + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;\ 172 + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;\ 173 + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;\ 174 + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;\ 175 + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\ 176 + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\ 177 + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\ 178 + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D 179 + 180 + #define HUBBUB_STUTTER_REG_FIELD_LIST(type) \ 181 + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\ 182 + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;\ 183 + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;\ 184 + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;\ 185 + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;\ 186 + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;\ 187 + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\ 188 + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D 192 189 193 190 194 191 struct dcn_hubbub_shift { 195 192 DCN_HUBBUB_REG_FIELD_LIST(uint8_t); 193 + HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t); 196 194 }; 197 195 198 196 struct dcn_hubbub_mask { 199 197 DCN_HUBBUB_REG_FIELD_LIST(uint32_t); 198 + HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t); 200 199 }; 201 200 202 201 struct dc;
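The new HUBBUB_STUTTER_REG_FIELD_LIST reuses the same trick as DCN_HUBBUB_REG_FIELD_LIST: one macro-generated member list, instantiated once with uint8_t for the shift struct and once with uint32_t for the mask struct. A self-contained sketch of the pattern (field names are placeholders):

#include <stdint.h>

#define EXAMPLE_FIELD_LIST(type) \
        type FIELD_A;            \
        type FIELD_B

struct example_shift { EXAMPLE_FIELD_LIST(uint8_t); };  /* bit offsets */
struct example_mask  { EXAMPLE_FIELD_LIST(uint32_t); }; /* field masks */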
+5 -1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
··· 1178 1178 REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst); 1179 1179 } 1180 1180 1181 + void hubp1_init(struct hubp *hubp) 1182 + { 1183 + //do nothing 1184 + } 1181 1185 static const struct hubp_funcs dcn10_hubp_funcs = { 1182 1186 .hubp_program_surface_flip_and_addr = 1183 1187 hubp1_program_surface_flip_and_addr, ··· 1205 1201 .hubp_clear_underflow = hubp1_clear_underflow, 1206 1202 .hubp_disable_control = hubp1_disable_control, 1207 1203 .hubp_get_underflow_status = hubp1_get_underflow_status, 1208 - 1204 + .hubp_init = hubp1_init, 1209 1205 }; 1210 1206 1211 1207 /*****************************************/
+4
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
··· 34 34 #define HUBP_REG_LIST_DCN(id)\ 35 35 SRI(DCHUBP_CNTL, HUBP, id),\ 36 36 SRI(HUBPREQ_DEBUG_DB, HUBP, id),\ 37 + SRI(HUBPREQ_DEBUG, HUBP, id),\ 37 38 SRI(DCSURF_ADDR_CONFIG, HUBP, id),\ 38 39 SRI(DCSURF_TILING_CONFIG, HUBP, id),\ 39 40 SRI(DCSURF_SURFACE_PITCH, HUBPREQ, id),\ ··· 139 138 #define HUBP_COMMON_REG_VARIABLE_LIST \ 140 139 uint32_t DCHUBP_CNTL; \ 141 140 uint32_t HUBPREQ_DEBUG_DB; \ 141 + uint32_t HUBPREQ_DEBUG; \ 142 142 uint32_t DCSURF_ADDR_CONFIG; \ 143 143 uint32_t DCSURF_TILING_CONFIG; \ 144 144 uint32_t DCSURF_SURFACE_PITCH; \ ··· 750 748 751 749 void hubp1_vready_workaround(struct hubp *hubp, 752 750 struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); 751 + 752 + void hubp1_init(struct hubp *hubp); 753 753 754 754 #endif
+10 -3
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 1118 1118 * Otherwise, if taking control is not possible, we need to power 1119 1119 * everything down. 1120 1120 */ 1121 - if (dcb->funcs->is_accelerated_mode(dcb)) { 1121 + if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { 1122 1122 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1123 1123 struct hubp *hubp = dc->res_pool->hubps[i]; 1124 1124 struct dpp *dpp = dc->res_pool->dpps[i]; 1125 1125 1126 + hubp->funcs->hubp_init(hubp); 1126 1127 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; 1127 1128 plane_atomic_power_down(dc, dpp, hubp); 1128 1129 } 1130 + 1131 + apply_DEGVIDCN10_253_wa(dc); 1129 1132 } 1130 1133 1131 1134 for (i = 0; i < dc->res_pool->audio_count; i++) { ··· 2439 2436 struct dc *dc, 2440 2437 struct dc_state *context) 2441 2438 { 2439 + struct hubbub *hubbub = dc->res_pool->hubbub; 2440 + 2442 2441 if (dc->debug.sanity_checks) 2443 2442 dcn10_verify_allow_pstate_change_high(dc); 2444 2443 ··· 2454 2449 false); 2455 2450 } 2456 2451 2457 - hubbub1_program_watermarks(dc->res_pool->hubbub, 2452 + hubbub->funcs->program_watermarks(hubbub, 2458 2453 &context->bw_ctx.bw.dcn.watermarks, 2459 2454 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, 2460 2455 true); ··· 2471 2466 struct dc *dc, 2472 2467 struct dc_state *context) 2473 2468 { 2469 + struct hubbub *hubbub = dc->res_pool->hubbub; 2470 + 2474 2471 if (dc->debug.sanity_checks) 2475 2472 dcn10_verify_allow_pstate_change_high(dc); 2476 2473 ··· 2486 2479 true); 2487 2480 } 2488 2481 2489 - hubbub1_program_watermarks(dc->res_pool->hubbub, 2482 + hubbub->funcs->program_watermarks(hubbub, 2490 2483 &context->bw_ctx.bw.dcn.watermarks, 2491 2484 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, 2492 2485 true);
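Calling through hubbub->funcs->program_watermarks (and giving HUBP the hubp_init hook, stubbed as hubp1_init above) keeps the shared hw sequencer ASIC-agnostic: each resource pool constructor installs its own implementation behind the interface in dchubbub.h. A sketch of the resulting call shape; the NULL guard is an assumption, not taken from the patch:

/* Sketch: the sequencer only sees the hubbub interface. */
static void program_watermarks_example(struct dc *dc, struct dc_state *context)
{
        struct hubbub *hubbub = dc->res_pool->hubbub;

        if (hubbub->funcs->program_watermarks) /* hypothetical guard */
                hubbub->funcs->program_watermarks(hubbub,
                        &context->bw_ctx.bw.dcn.watermarks,
                        dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
                        true);
}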
-23
drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
··· 74 74 struct pp_smu_wm_set_range writer_wm_sets[MAX_WATERMARK_SETS]; 75 75 }; 76 76 77 - struct pp_smu_display_requirement_rv { 78 - /* PPSMC_MSG_SetDisplayCount: count 79 - * 0 triggers S0i2 optimization 80 - */ 81 - unsigned int display_count; 82 - 83 - /* PPSMC_MSG_SetHardMinFclkByFreq: mhz 84 - * FCLK will vary with DPM, but never below requested hard min 85 - */ 86 - unsigned int hard_min_fclk_mhz; 87 - 88 - /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz 89 - * fixed clock at requested freq, either from FCH bypass or DFS 90 - */ 91 - unsigned int hard_min_dcefclk_mhz; 92 - 93 - /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz 94 - * when DF is in cstate, dcf clock is further divided down 95 - * to just above given frequency 96 - */ 97 - unsigned int min_deep_sleep_dcefclk_mhz; 98 - }; 99 - 100 77 struct pp_smu_funcs_rv { 101 78 struct pp_smu pp_smu; 102 79
-1
drivers/gpu/drm/amd/display/dc/inc/core_types.h
··· 145 145 struct hubbub *hubbub; 146 146 struct mpc *mpc; 147 147 struct pp_smu_funcs *pp_smu; 148 - struct pp_smu_display_requirement_rv pp_smu_req; 149 148 struct dce_aux *engines[MAX_PIPES]; 150 149 struct dce_i2c_hw *hw_i2cs[MAX_PIPES]; 151 150 struct dce_i2c_sw *sw_i2cs[MAX_PIPES];
+6
drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
··· 77 77 void (*get_dchub_ref_freq)(struct hubbub *hubbub, 78 78 unsigned int dccg_ref_freq_inKhz, 79 79 unsigned int *dchub_ref_freq_inKhz); 80 + 81 + void (*program_watermarks)( 82 + struct hubbub *hubbub, 83 + struct dcn_watermark_set *watermarks, 84 + unsigned int refclk_mhz, 85 + bool safe_to_lower); 80 86 }; 81 87 82 88 struct hubbub {
+2
drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
··· 70 70 void (*get_psr_wait_loop)(struct dmcu *dmcu, 71 71 unsigned int *psr_wait_loop_number); 72 72 bool (*is_dmcu_initialized)(struct dmcu *dmcu); 73 + bool (*lock_phy)(struct dmcu *dmcu); 74 + bool (*unlock_phy)(struct dmcu *dmcu); 73 75 }; 74 76 75 77 #endif
+1
drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
··· 130 130 void (*hubp_clear_underflow)(struct hubp *hubp); 131 131 void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp); 132 132 unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); 133 + void (*hubp_init)(struct hubp *hubp); 133 134 134 135 }; 135 136
+2 -4
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
··· 437 437 inserted_frame_duration_in_us = last_render_time_in_us / 438 438 frames_to_insert; 439 439 440 - if (inserted_frame_duration_in_us < 441 - (1000000 / in_out_vrr->max_refresh_in_uhz)) 442 - inserted_frame_duration_in_us = 443 - (1000000 / in_out_vrr->max_refresh_in_uhz); 440 + if (inserted_frame_duration_in_us < in_out_vrr->min_duration_in_us) 441 + inserted_frame_duration_in_us = in_out_vrr->min_duration_in_us; 444 442 445 443 /* Cache the calculated variables */ 446 444 in_out_vrr->btr.inserted_duration_in_us =
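With the refresh rate stored in micro-hertz (as the field name suggests), the frame period in microseconds is 10^12 / rate_uhz, so the removed 1000000 / max_refresh_in_uhz expression truncates to 0 for any realistic rate and the clamp never fired; reusing the precomputed min_duration_in_us sidesteps the unit mismatch. A quick check of the arithmetic:

#include <stdint.h>

uint64_t rate_uhz  = 60000000ULL;                  /* 60 Hz in uHz         */
uint64_t period_us = 1000000000000ULL / rate_uhz;  /* 16666 us, correct    */
uint64_t old_form  = 1000000ULL / rate_uhz;        /* 0: clamp was a no-op */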
+8
drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
··· 2347 2347 #define mmHUBP0_DCHUBP_VMPG_CONFIG_BASE_IDX 2 2348 2348 #define mmHUBP0_HUBPREQ_DEBUG_DB 0x0569 2349 2349 #define mmHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2 2350 + #define mmHUBP0_HUBPREQ_DEBUG 0x056a 2351 + #define mmHUBP0_HUBPREQ_DEBUG_BASE_IDX 2 2350 2352 #define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x056e 2351 2353 #define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 2352 2354 #define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x056f ··· 2633 2631 #define mmHUBP1_DCHUBP_VMPG_CONFIG_BASE_IDX 2 2634 2632 #define mmHUBP1_HUBPREQ_DEBUG_DB 0x062d 2635 2633 #define mmHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2 2634 + #define mmHUBP1_HUBPREQ_DEBUG 0x062e 2635 + #define mmHUBP1_HUBPREQ_DEBUG_BASE_IDX 2 2636 2636 #define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x0632 2637 2637 #define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 2638 2638 #define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0633 ··· 2919 2915 #define mmHUBP2_DCHUBP_VMPG_CONFIG_BASE_IDX 2 2920 2916 #define mmHUBP2_HUBPREQ_DEBUG_DB 0x06f1 2921 2917 #define mmHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2 2918 + #define mmHUBP2_HUBPREQ_DEBUG 0x06f2 2919 + #define mmHUBP2_HUBPREQ_DEBUG_BASE_IDX 2 2922 2920 #define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06f6 2923 2921 #define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 2924 2922 #define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06f7 ··· 3205 3199 #define mmHUBP3_DCHUBP_VMPG_CONFIG_BASE_IDX 2 3206 3200 #define mmHUBP3_HUBPREQ_DEBUG_DB 0x07b5 3207 3201 #define mmHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2 3202 + #define mmHUBP3_HUBPREQ_DEBUG 0x07b6 3203 + #define mmHUBP3_HUBPREQ_DEBUG_BASE_IDX 2 3208 3204 #define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07ba 3209 3205 #define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2 3210 3206 #define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07bb
+1
drivers/gpu/drm/amd/include/atomfirmware.h
··· 718 718 ATOM_ENCODER_CAP_RECORD_HBR2_EN =0x02, // DP1.2 HBR2 setting is qualified and HBR2 can be enabled 719 719 ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN =0x04, // HDMI2.0 6Gbps enable or not. 720 720 ATOM_ENCODER_CAP_RECORD_HBR3_EN =0x08, // DP1.3 HBR3 is supported by board. 721 + ATOM_ENCODER_CAP_RECORD_USB_C_TYPE =0x100, // the DP connector is a USB-C type. 721 722 }; 722 723 723 724 struct atom_encoder_caps_record
-16
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
··· 85 85 KGD_POOL_FRAMEBUFFER = 3, 86 86 }; 87 87 88 - enum kgd_engine_type { 89 - KGD_ENGINE_PFP = 1, 90 - KGD_ENGINE_ME, 91 - KGD_ENGINE_CE, 92 - KGD_ENGINE_MEC1, 93 - KGD_ENGINE_MEC2, 94 - KGD_ENGINE_RLC, 95 - KGD_ENGINE_SDMA1, 96 - KGD_ENGINE_SDMA2, 97 - KGD_ENGINE_MAX 98 - }; 99 - 100 88 /** 101 89 * enum kfd_sched_policy 102 90 * ··· 218 230 * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that 219 231 * SDMA hqd slot. 220 232 * 221 - * @get_fw_version: Returns FW versions from the header 222 - * 223 233 * @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID. 224 234 * Only used for no cp scheduling mode 225 235 * ··· 297 311 struct kgd_dev *kgd, 298 312 uint8_t vmid); 299 313 300 - uint16_t (*get_fw_version)(struct kgd_dev *kgd, 301 - enum kgd_engine_type type); 302 314 void (*set_scratch_backing_va)(struct kgd_dev *kgd, 303 315 uint64_t va, uint32_t vmid); 304 316 int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
+97 -17
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
··· 35 35 #include "smu10_hwmgr.h" 36 36 #include "power_state.h" 37 37 #include "soc15_common.h" 38 + #include "smu10.h" 38 39 39 40 #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5 40 41 #define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */ ··· 205 204 return 0; 206 205 } 207 206 208 - static inline uint32_t convert_10k_to_mhz(uint32_t clock) 209 - { 210 - return (clock + 99) / 100; 211 - } 212 - 213 207 static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock) 214 208 { 215 209 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); 216 210 217 211 if (smu10_data->need_min_deep_sleep_dcefclk && 218 - smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) { 219 - smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock); 212 + smu10_data->deep_sleep_dcefclk != clock) { 213 + smu10_data->deep_sleep_dcefclk = clock; 220 214 smum_send_msg_to_smc_with_parameter(hwmgr, 221 215 PPSMC_MSG_SetMinDeepSleepDcefclk, 222 216 smu10_data->deep_sleep_dcefclk); ··· 224 228 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); 225 229 226 230 if (smu10_data->dcf_actual_hard_min_freq && 227 - smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) { 228 - smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock); 231 + smu10_data->dcf_actual_hard_min_freq != clock) { 232 + smu10_data->dcf_actual_hard_min_freq = clock; 229 233 smum_send_msg_to_smc_with_parameter(hwmgr, 230 234 PPSMC_MSG_SetHardMinDcefclkByFreq, 231 235 smu10_data->dcf_actual_hard_min_freq); ··· 238 242 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); 239 243 240 244 if (smu10_data->f_actual_hard_min_freq && 241 - smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) { 242 - smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock); 245 + smu10_data->f_actual_hard_min_freq != clock) { 246 + smu10_data->f_actual_hard_min_freq = clock; 243 247 smum_send_msg_to_smc_with_parameter(hwmgr, 244 248 PPSMC_MSG_SetHardMinFclkByFreq, 245 249 smu10_data->f_actual_hard_min_freq); ··· 568 572 enum amd_dpm_forced_level level) 569 573 { 570 574 struct smu10_hwmgr *data = hwmgr->backend; 571 - struct amdgpu_device *adev = hwmgr->adev; 572 575 uint32_t min_sclk = hwmgr->display_config->min_core_set_clock; 573 576 uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100; 574 577 ··· 575 580 pr_info("smu firmware version too old, can not set dpm level\n"); 576 581 return 0; 577 582 } 578 - 579 - /* Disable UMDPSTATE support on rv2 temporarily */ 580 - if ((adev->asic_type == CHIP_RAVEN) && 581 - (adev->rev_id >= 8)) 582 - return 0; 583 583 584 584 if (min_sclk < data->gfx_min_freq_limit) 585 585 min_sclk = data->gfx_min_freq_limit; ··· 1190 1200 } 1191 1201 } 1192 1202 1203 + static int conv_power_profile_to_pplib_workload(int power_profile) 1204 + { 1205 + int pplib_workload = 0; 1206 + 1207 + switch (power_profile) { 1208 + case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT: 1209 + pplib_workload = WORKLOAD_DEFAULT_BIT; 1210 + break; 1211 + case PP_SMC_POWER_PROFILE_FULLSCREEN3D: 1212 + pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; 1213 + break; 1214 + case PP_SMC_POWER_PROFILE_POWERSAVING: 1215 + pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT; 1216 + break; 1217 + case PP_SMC_POWER_PROFILE_VIDEO: 1218 + pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; 1219 + break; 1220 + case PP_SMC_POWER_PROFILE_VR: 1221 + pplib_workload = WORKLOAD_PPLIB_VR_BIT; 1222 + break; 1223 + case 
PP_SMC_POWER_PROFILE_COMPUTE: 1224 + pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; 1225 + break; 1226 + } 1227 + 1228 + return pplib_workload; 1229 + } 1230 + 1231 + static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) 1232 + { 1233 + uint32_t i, size = 0; 1234 + static const uint8_t 1235 + profile_mode_setting[6][4] = {{70, 60, 0, 0,}, 1236 + {70, 60, 1, 3,}, 1237 + {90, 60, 0, 0,}, 1238 + {70, 60, 0, 0,}, 1239 + {70, 90, 0, 0,}, 1240 + {30, 60, 0, 6,}, 1241 + }; 1242 + static const char *profile_name[6] = { 1243 + "BOOTUP_DEFAULT", 1244 + "3D_FULL_SCREEN", 1245 + "POWER_SAVING", 1246 + "VIDEO", 1247 + "VR", 1248 + "COMPUTE"}; 1249 + static const char *title[6] = {"NUM", 1250 + "MODE_NAME", 1251 + "BUSY_SET_POINT", 1252 + "FPS", 1253 + "USE_RLC_BUSY", 1254 + "MIN_ACTIVE_LEVEL"}; 1255 + 1256 + if (!buf) 1257 + return -EINVAL; 1258 + 1259 + size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0], 1260 + title[1], title[2], title[3], title[4], title[5]); 1261 + 1262 + for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++) 1263 + size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", 1264 + i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ", 1265 + profile_mode_setting[i][0], profile_mode_setting[i][1], 1266 + profile_mode_setting[i][2], profile_mode_setting[i][3]); 1267 + 1268 + return size; 1269 + } 1270 + 1271 + static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) 1272 + { 1273 + int workload_type = 0; 1274 + 1275 + if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) { 1276 + pr_err("Invalid power profile mode %ld\n", input[size]); 1277 + return -EINVAL; 1278 + } 1279 + hwmgr->power_profile_mode = input[size]; 1280 + 1281 + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 1282 + workload_type = 1283 + conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode); 1284 + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify, 1285 + 1 << workload_type); 1286 + 1287 + return 0; 1288 + } 1289 + 1290 + 1193 1291 static const struct pp_hwmgr_func smu10_hwmgr_funcs = { 1194 1292 .backend_init = smu10_hwmgr_backend_init, 1195 1293 .backend_fini = smu10_hwmgr_backend_fini, ··· 1319 1241 .powergate_sdma = smu10_powergate_sdma, 1320 1242 .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq, 1321 1243 .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq, 1244 + .get_power_profile_mode = smu10_get_power_profile_mode, 1245 + .set_power_profile_mode = smu10_set_power_profile_mode, 1322 1246 }; 1323 1247 1324 1248 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
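PPSMC_MSG_ActiveProcessNotify takes a workload bitmask rather than an index, hence the 1 << workload_type after mapping the profile enum onto the WORKLOAD_PPLIB_*_BIT positions (renumbered in smu10.h below to make room for the default and power-saving entries). A worked example with the new bit values:

/* VIDEO maps to WORKLOAD_PPLIB_VIDEO_BIT == 3, so the mask sent is 0x8. */
int bit = conv_power_profile_to_pplib_workload(PP_SMC_POWER_PROFILE_VIDEO);
uint32_t mask = 1u << bit; /* 0x8 */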
+12 -1
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
··· 3460 3460 return ; 3461 3461 3462 3462 data->vce_power_gated = bgate; 3463 - vega20_enable_disable_vce_dpm(hwmgr, !bgate); 3463 + if (bgate) { 3464 + vega20_enable_disable_vce_dpm(hwmgr, !bgate); 3465 + amdgpu_device_ip_set_powergating_state(hwmgr->adev, 3466 + AMD_IP_BLOCK_TYPE_VCE, 3467 + AMD_PG_STATE_GATE); 3468 + } else { 3469 + amdgpu_device_ip_set_powergating_state(hwmgr->adev, 3470 + AMD_IP_BLOCK_TYPE_VCE, 3471 + AMD_PG_STATE_UNGATE); 3472 + vega20_enable_disable_vce_dpm(hwmgr, !bgate); 3473 + } 3474 + 3464 3475 } 3465 3476 3466 3477 static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
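Splitting the VCE path makes the ordering explicit: DPM is disabled before the block is power-gated, and the block is powered up before DPM is re-enabled, so DPM never runs against an unpowered engine. The invariant, sketched with illustrative helper names:

/* Illustrative helpers: power on before DPM on, DPM off before power off. */
static void vce_gate_example(bool bgate)
{
        if (bgate) {
                dpm_disable();   /* stop DPM-driven clock requests first */
                power_gate();    /* then remove power */
        } else {
                power_ungate();  /* restore power first */
                dpm_enable();    /* then let DPM manage clocks again */
        }
}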
-1
drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
··· 85 85 #define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36 86 86 #define PPSMC_Message_Count 0x37 87 87 88 - 89 88 typedef uint16_t PPSMC_Result; 90 89 typedef int PPSMC_Msg; 91 90
+8 -6
drivers/gpu/drm/amd/powerplay/inc/smu10.h
··· 136 136 #define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT) 137 137 138 138 /* Workload bits */ 139 - #define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 140 - #define WORKLOAD_PPLIB_VIDEO_BIT 2 141 - #define WORKLOAD_PPLIB_VR_BIT 3 142 - #define WORKLOAD_PPLIB_COMPUTE_BIT 4 143 - #define WORKLOAD_PPLIB_CUSTOM_BIT 5 144 - #define WORKLOAD_PPLIB_COUNT 6 139 + #define WORKLOAD_DEFAULT_BIT 0 140 + #define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 141 + #define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 142 + #define WORKLOAD_PPLIB_VIDEO_BIT 3 143 + #define WORKLOAD_PPLIB_VR_BIT 4 144 + #define WORKLOAD_PPLIB_COMPUTE_BIT 5 145 + #define WORKLOAD_PPLIB_CUSTOM_BIT 6 146 + #define WORKLOAD_PPLIB_COUNT 7 145 147 146 148 typedef struct { 147 149 /* MP1_EXT_SCRATCH0 */
+7 -2
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
··· 1896 1896 static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, 1897 1897 uint32_t pstate) 1898 1898 { 1899 - /* send msg to SMU to set pstate */ 1900 - return 0; 1899 + int ret = 0; 1900 + mutex_lock(&(smu->mutex)); 1901 + ret = smu_send_smc_msg_with_param(smu, 1902 + SMU_MSG_SetXgmiMode, 1903 + pstate ? XGMI_STATE_D0 : XGMI_STATE_D3); 1904 + mutex_unlock(&(smu->mutex)); 1905 + return ret; 1901 1906 } 1902 1907 1903 1908 static const struct smu_funcs smu_v11_0_funcs = {
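The XGMI pstate setter is no longer a stub: it maps any nonzero pstate to XGMI_STATE_D0 and zero to XGMI_STATE_D3 (presumably full-power vs. low-power link states) and, like the other smu_v11_0 helpers, serializes the mailbox transaction on the context mutex:

/* Common shape for these setters (sketch): hold smu->mutex across the
 * message so concurrent callers cannot interleave mailbox traffic. */
mutex_lock(&smu->mutex);
ret = smu_send_smc_msg_with_param(smu, msg, param);
mutex_unlock(&smu->mutex);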