Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-next-2019-01-05' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"Happy New Year, just decloaking from leave to get some stuff from the
last week in before rc1:

core:
- two regression fixes for damage blob and atomic

i915 gvt:
- Some missed GVT fixes from the original pull

amdgpu:
- new PCI IDs
- SR-IOV fixes
- DC fixes
- Vega20 fixes"

* tag 'drm-next-2019-01-05' of git://anongit.freedesktop.org/drm/drm: (53 commits)
drm: Put damage blob when destroy plane state
drm: fix null pointer dereference on null state pointer
drm/amdgpu: Add new VegaM pci id
drm/ttm: Use drm_debug_printer for all ttm_bo_mem_space_debug output
drm/amdgpu: add Vega20 PSP ASD firmware loading
drm/amd/display: Fix MST dp_blank REG_WAIT timeout
drm/amd/display: validate extended dongle caps
drm/amd/display: Use div_u64 for flip timestamp ns to ms
drm/amdgpu/uvd: Change uvd ring name convention
drm/amd/powerplay: add Vega20 LCLK DPM level setting support
drm/amdgpu: print process info when job timeout
drm/amdgpu/nbio7.4: add hw bug workaround for vega20
drm/amdgpu/nbio6.1: add hw bug workaround for vega10/12
drm/amd/display: Optimize passive update planes.
drm/amd/display: verify lane status before exiting verify link cap
drm/amd/display: Fix bug with not updating VSP infoframe
drm/amd/display: Add retry to read ddc_clock pin
drm/amd/display: Don't skip link training for empty dongle
drm/amd/display: Wait edp HPD to high in detect_sink
drm/amd/display: fix surface update sequence
...

+631 -293
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 1428 1428 if (IS_ERR(fence)) 1429 1429 return PTR_ERR(fence); 1430 1430 1431 + if (!fence) 1432 + fence = dma_fence_get_stub(); 1433 + 1431 1434 switch (info->in.what) { 1432 1435 case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ: 1433 1436 r = drm_syncobj_create(&syncobj, 0, fence);
+6 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 3476 3476 mutex_lock(&adev->lock_reset); 3477 3477 atomic_inc(&adev->gpu_reset_counter); 3478 3478 adev->in_gpu_reset = 1; 3479 - /* Block kfd */ 3480 - amdgpu_amdkfd_pre_reset(adev); 3479 + /* Block kfd: SRIOV would do it separately */ 3480 + if (!amdgpu_sriov_vf(adev)) 3481 + amdgpu_amdkfd_pre_reset(adev); 3481 3482 } 3482 3483 3483 3484 static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) 3484 3485 { 3485 - /*unlock kfd */ 3486 - amdgpu_amdkfd_post_reset(adev); 3486 + /*unlock kfd: SRIOV would do it separately */ 3487 + if (!amdgpu_sriov_vf(adev)) 3488 + amdgpu_amdkfd_post_reset(adev); 3487 3489 amdgpu_vf_error_trans_all(adev); 3488 3490 adev->in_gpu_reset = 0; 3489 3491 mutex_unlock(&adev->lock_reset);
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 865 865 /* VEGAM */ 866 866 {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, 867 867 {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, 868 + {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, 868 869 /* Vega 10 */ 869 870 {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 870 871 {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+6
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 32 32 { 33 33 struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); 34 34 struct amdgpu_job *job = to_amdgpu_job(s_job); 35 + struct amdgpu_task_info ti; 36 + 37 + memset(&ti, 0, sizeof(struct amdgpu_task_info)); 35 38 36 39 if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { 37 40 DRM_ERROR("ring %s timeout, but soft recovered\n", ··· 42 39 return; 43 40 } 44 41 42 + amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); 45 43 DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n", 46 44 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), 47 45 ring->fence_drv.sync_seq); 46 + DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n", 47 + ti.process_name, ti.tgid, ti.task_name, ti.pid); 48 48 49 49 if (amdgpu_device_should_recover_gpu(ring->adev)) 50 50 amdgpu_device_gpu_recover(ring->adev, job);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 912 912 struct ttm_operation_ctx ctx = { false, false }; 913 913 int r, i; 914 914 915 - if (!bo->pin_count) { 915 + if (WARN_ON_ONCE(!bo->pin_count)) { 916 916 dev_warn(adev->dev, "%p unpin not necessary\n", bo); 917 917 return 0; 918 918 }
-8
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 155 155 return ret; 156 156 } 157 157 158 - bool psp_support_vmr_ring(struct psp_context *psp) 159 - { 160 - if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045) 161 - return true; 162 - else 163 - return false; 164 - } 165 - 166 158 static void psp_prep_tmr_cmd_buf(struct psp_context *psp, 167 159 struct psp_gfx_cmd_resp *cmd, 168 160 uint64_t tmr_mc, uint32_t size)
+9 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
··· 83 83 enum AMDGPU_UCODE_ID ucode_type); 84 84 bool (*smu_reload_quirk)(struct psp_context *psp); 85 85 int (*mode1_reset)(struct psp_context *psp); 86 - uint64_t (*xgmi_get_node_id)(struct psp_context *psp); 87 - uint64_t (*xgmi_get_hive_id)(struct psp_context *psp); 86 + int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id); 87 + int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id); 88 88 int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices, 89 89 struct psp_xgmi_topology_info *topology); 90 90 int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices, 91 91 struct psp_xgmi_topology_info *topology); 92 + bool (*support_vmr_ring)(struct psp_context *psp); 92 93 }; 93 94 94 95 struct psp_xgmi_context { ··· 193 192 ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0) 194 193 #define psp_smu_reload_quirk(psp) \ 195 194 ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false) 195 + #define psp_support_vmr_ring(psp) \ 196 + ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false) 196 197 #define psp_mode1_reset(psp) \ 197 198 ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false) 198 - #define psp_xgmi_get_node_id(psp) \ 199 - ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0) 200 - #define psp_xgmi_get_hive_id(psp) \ 201 - ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0) 199 + #define psp_xgmi_get_node_id(psp, node_id) \ 200 + ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL) 201 + #define psp_xgmi_get_hive_id(psp, hive_id) \ 202 + ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL) 202 203 #define psp_xgmi_get_topology_info(psp, num_device, topology) \ 203 204 ((psp)->funcs->xgmi_get_topology_info ? 
\ 204 205 (psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL) ··· 220 217 221 218 int psp_gpu_reset(struct amdgpu_device *adev); 222 219 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id); 223 - bool psp_support_vmr_ring(struct psp_context *psp); 224 - 225 220 extern const struct amdgpu_ip_block_version psp_v11_0_ip_block; 226 221 227 222 #endif
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
··· 29 29 #include <drm/drm_print.h> 30 30 31 31 /* max number of rings */ 32 - #define AMDGPU_MAX_RINGS 21 32 + #define AMDGPU_MAX_RINGS 23 33 33 #define AMDGPU_MAX_GFX_RINGS 1 34 34 #define AMDGPU_MAX_COMPUTE_RINGS 8 35 35 #define AMDGPU_MAX_VCE_RINGS 3
+18 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
··· 262 262 263 263 ring = &adev->vcn.ring_dec; 264 264 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, 265 - RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2)); 265 + RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); 266 266 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 267 267 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, 268 268 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); ··· 322 322 323 323 ring = &adev->vcn.ring_dec; 324 324 WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, 325 - RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2)); 325 + RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); 326 326 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 327 327 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, 328 328 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); ··· 396 396 397 397 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { 398 398 struct dpg_pause_state new_state; 399 + unsigned int fences = 0; 400 + unsigned int i; 401 + 402 + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 403 + fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]); 404 + } 405 + if (fences) 406 + new_state.fw_based = VCN_DPG_STATE__PAUSE; 407 + else 408 + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; 409 + 410 + if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg)) 411 + new_state.jpeg = VCN_DPG_STATE__PAUSE; 412 + else 413 + new_state.jpeg = VCN_DPG_STATE__UNPAUSE; 399 414 400 415 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) 401 416 new_state.fw_based = VCN_DPG_STATE__PAUSE; 402 - else 403 - new_state.fw_based = adev->vcn.pause_state.fw_based; 404 - 405 - if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) 417 + else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) 406 418 new_state.jpeg = VCN_DPG_STATE__PAUSE; 407 - else 408 - new_state.jpeg = adev->vcn.pause_state.jpeg; 409 419 410 420 amdgpu_vcn_pause_dpg_mode(adev, &new_state); 411 421 }
+13 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
··· 97 97 if (!adev->gmc.xgmi.supported) 98 98 return 0; 99 99 100 - adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp); 101 - adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp); 100 + ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id); 101 + if (ret) { 102 + dev_err(adev->dev, 103 + "XGMI: Failed to get node id\n"); 104 + return ret; 105 + } 106 + 107 + ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); 108 + if (ret) { 109 + dev_err(adev->dev, 110 + "XGMI: Failed to get hive id\n"); 111 + return ret; 112 + } 102 113 103 114 mutex_lock(&xgmi_mutex); 104 115 hive = amdgpu_get_xgmi_hive(adev);
+32 -23
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 718 718 } 719 719 } 720 720 721 + static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) 722 + { 723 + struct amdgpu_ring *ring; 724 + unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = 725 + {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP}; 726 + unsigned i; 727 + unsigned vmhub, inv_eng; 728 + 729 + for (i = 0; i < adev->num_rings; ++i) { 730 + ring = adev->rings[i]; 731 + vmhub = ring->funcs->vmhub; 732 + 733 + inv_eng = ffs(vm_inv_engs[vmhub]); 734 + if (!inv_eng) { 735 + dev_err(adev->dev, "no VM inv eng for ring %s\n", 736 + ring->name); 737 + return -EINVAL; 738 + } 739 + 740 + ring->vm_inv_eng = inv_eng - 1; 741 + change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub])); 742 + 743 + dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", 744 + ring->name, ring->vm_inv_eng, ring->funcs->vmhub); 745 + } 746 + 747 + return 0; 748 + } 749 + 721 750 static int gmc_v9_0_late_init(void *handle) 722 751 { 723 752 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 724 - /* 725 - * The latest engine allocation on gfx9 is: 726 - * Engine 0, 1: idle 727 - * Engine 2, 3: firmware 728 - * Engine 4~13: amdgpu ring, subject to change when ring number changes 729 - * Engine 14~15: idle 730 - * Engine 16: kfd tlb invalidation 731 - * Engine 17: Gart flushes 732 - */ 733 - unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 }; 734 - unsigned i; 735 753 int r; 736 754 737 755 if (!gmc_v9_0_keep_stolen_memory(adev)) 738 756 amdgpu_bo_late_init(adev); 739 757 740 - for(i = 0; i < adev->num_rings; ++i) { 741 - struct amdgpu_ring *ring = adev->rings[i]; 742 - unsigned vmhub = ring->funcs->vmhub; 743 - 744 - ring->vm_inv_eng = vm_inv_eng[vmhub]++; 745 - dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", 746 - ring->name, ring->vm_inv_eng, ring->funcs->vmhub); 747 - } 748 - 749 - /* Engine 16 is used for KFD and 17 for GART flushes */ 750 - for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) 751 - BUG_ON(vm_inv_eng[i] > 16); 758 + r = 
gmc_v9_0_allocate_vm_inv_eng(adev); 759 + if (r) 760 + return r; 752 761 753 762 if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) { 754 763 r = gmc_v9_0_ecc_available(adev);
+10
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
··· 24 24 #ifndef __GMC_V9_0_H__ 25 25 #define __GMC_V9_0_H__ 26 26 27 + /* 28 + * The latest engine allocation on gfx9 is: 29 + * Engine 2, 3: firmware 30 + * Engine 0, 1, 4~16: amdgpu ring, 31 + * subject to change when ring number changes 32 + * Engine 17: Gart flushes 33 + */ 34 + #define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 35 + #define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 36 + 27 37 extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; 28 38 extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; 29 39
+7
drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
··· 32 32 #define smnCPM_CONTROL 0x11180460 33 33 #define smnPCIE_CNTL2 0x11180070 34 34 #define smnPCIE_CONFIG_CNTL 0x11180044 35 + #define smnPCIE_CI_CNTL 0x11180080 35 36 36 37 static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev) 37 38 { ··· 271 270 272 271 if (def != data) 273 272 WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); 273 + 274 + def = data = RREG32_PCIE(smnPCIE_CI_CNTL); 275 + data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1); 276 + 277 + if (def != data) 278 + WREG32_PCIE(smnPCIE_CI_CNTL, data); 274 279 } 275 280 276 281 const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
+7
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
··· 31 31 32 32 #define smnCPM_CONTROL 0x11180460 33 33 #define smnPCIE_CNTL2 0x11180070 34 + #define smnPCIE_CI_CNTL 0x11180080 34 35 35 36 static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev) 36 37 { ··· 223 222 224 223 static void nbio_v7_4_init_registers(struct amdgpu_device *adev) 225 224 { 225 + uint32_t def, data; 226 226 227 + def = data = RREG32_PCIE(smnPCIE_CI_CNTL); 228 + data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1); 229 + 230 + if (def != data) 231 + WREG32_PCIE(smnPCIE_CI_CNTL, data); 227 232 } 228 233 229 234 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
+56 -22
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
··· 34 34 #include "nbio/nbio_7_4_offset.h" 35 35 36 36 MODULE_FIRMWARE("amdgpu/vega20_sos.bin"); 37 + MODULE_FIRMWARE("amdgpu/vega20_asd.bin"); 37 38 MODULE_FIRMWARE("amdgpu/vega20_ta.bin"); 38 39 39 40 /* address block */ ··· 101 100 char fw_name[30]; 102 101 int err = 0; 103 102 const struct psp_firmware_header_v1_0 *sos_hdr; 103 + const struct psp_firmware_header_v1_0 *asd_hdr; 104 104 const struct ta_firmware_header_v1_0 *ta_hdr; 105 105 106 106 DRM_DEBUG("\n"); ··· 134 132 adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + 135 133 le32_to_cpu(sos_hdr->sos_offset_bytes); 136 134 135 + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name); 136 + err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev); 137 + if (err) 138 + goto out1; 139 + 140 + err = amdgpu_ucode_validate(adev->psp.asd_fw); 141 + if (err) 142 + goto out1; 143 + 144 + asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; 145 + adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); 146 + adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); 147 + adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); 148 + adev->psp.asd_start_addr = (uint8_t *)asd_hdr + 149 + le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); 150 + 137 151 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 138 152 err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); 139 153 if (err) 140 - goto out; 154 + goto out2; 141 155 142 156 err = amdgpu_ucode_validate(adev->psp.ta_fw); 143 157 if (err) 144 - goto out; 158 + goto out2; 145 159 146 160 ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; 147 161 adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); ··· 166 148 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 167 149 168 150 return 0; 151 + 152 + out2: 153 + release_firmware(adev->psp.ta_fw); 154 + adev->psp.ta_fw = NULL; 155 + out1: 156 + 
release_firmware(adev->psp.asd_fw); 157 + adev->psp.asd_fw = NULL; 169 158 out: 170 - if (err) { 171 - dev_err(adev->dev, 172 - "psp v11.0: Failed to load firmware \"%s\"\n", 173 - fw_name); 174 - release_firmware(adev->psp.sos_fw); 175 - adev->psp.sos_fw = NULL; 176 - } 159 + dev_err(adev->dev, 160 + "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); 161 + release_firmware(adev->psp.sos_fw); 162 + adev->psp.sos_fw = NULL; 177 163 178 164 return err; 179 165 } ··· 313 291 return 0; 314 292 } 315 293 294 + static bool psp_v11_0_support_vmr_ring(struct psp_context *psp) 295 + { 296 + if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045) 297 + return true; 298 + return false; 299 + } 300 + 316 301 static int psp_v11_0_ring_create(struct psp_context *psp, 317 302 enum psp_ring_type ring_type) 318 303 { ··· 328 299 struct psp_ring *ring = &psp->km_ring; 329 300 struct amdgpu_device *adev = psp->adev; 330 301 331 - if (psp_support_vmr_ring(psp)) { 302 + if (psp_v11_0_support_vmr_ring(psp)) { 332 303 /* Write low address of the ring to C2PMSG_102 */ 333 304 psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); 334 305 WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg); ··· 380 351 struct amdgpu_device *adev = psp->adev; 381 352 382 353 /* Write the ring destroy command*/ 383 - if (psp_support_vmr_ring(psp)) 354 + if (psp_v11_0_support_vmr_ring(psp)) 384 355 WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, 385 356 GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); 386 357 else ··· 391 362 mdelay(20); 392 363 393 364 /* Wait for response flag (bit 31) */ 394 - if (psp_support_vmr_ring(psp)) 365 + if (psp_v11_0_support_vmr_ring(psp)) 395 366 ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), 396 367 0x80000000, 0x80000000, false); 397 368 else ··· 435 406 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4; 436 407 437 408 /* KM (GPCOM) prepare write pointer */ 438 - if (psp_support_vmr_ring(psp)) 409 + if (psp_v11_0_support_vmr_ring(psp)) 
439 410 psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); 440 411 else 441 412 psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); ··· 467 438 468 439 /* Update the write Pointer in DWORDs */ 469 440 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; 470 - if (psp_support_vmr_ring(psp)) { 441 + if (psp_v11_0_support_vmr_ring(psp)) { 471 442 WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg); 472 443 WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); 473 444 } else ··· 709 680 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); 710 681 } 711 682 712 - static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp) 683 + static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id) 713 684 { 714 685 struct ta_xgmi_shared_memory *xgmi_cmd; 715 686 int ret; ··· 722 693 /* Invoke xgmi ta to get hive id */ 723 694 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 724 695 if (ret) 725 - return 0; 726 - else 727 - return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; 696 + return ret; 697 + 698 + *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; 699 + 700 + return 0; 728 701 } 729 702 730 - static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp) 703 + static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id) 731 704 { 732 705 struct ta_xgmi_shared_memory *xgmi_cmd; 733 706 int ret; ··· 742 711 /* Invoke xgmi ta to get the node id */ 743 712 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); 744 713 if (ret) 745 - return 0; 746 - else 747 - return xgmi_cmd->xgmi_out_message.get_node_id.node_id; 714 + return ret; 715 + 716 + *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id; 717 + 718 + return 0; 748 719 } 749 720 750 721 static const struct psp_funcs psp_v11_0_funcs = { ··· 765 732 .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info, 766 733 .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id, 767 734 .xgmi_get_node_id = 
psp_v11_0_xgmi_get_node_id, 735 + .support_vmr_ring = psp_v11_0_support_vmr_ring, 768 736 }; 769 737 770 738 void psp_v11_0_set_psp_funcs(struct psp_context *psp)
+4 -1
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
··· 240 240 * are already been loaded. 241 241 */ 242 242 sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); 243 - if (sol_reg) 243 + if (sol_reg) { 244 + psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); 245 + printk("sos fw version = 0x%x.\n", psp->sos_fw_version); 244 246 return 0; 247 + } 245 248 246 249 /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ 247 250 ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+5 -4
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
··· 1458 1458 /*return fw_version >= 31;*/ 1459 1459 return false; 1460 1460 case CHIP_VEGA20: 1461 - /*return fw_version >= 115;*/ 1462 - return false; 1461 + return fw_version >= 123; 1463 1462 default: 1464 1463 return false; 1465 1464 } ··· 1705 1706 amdgpu_fence_process(&adev->sdma.instance[instance].ring); 1706 1707 break; 1707 1708 case 1: 1708 - /* XXX compute */ 1709 + if (adev->asic_type == CHIP_VEGA20) 1710 + amdgpu_fence_process(&adev->sdma.instance[instance].page); 1709 1711 break; 1710 1712 case 2: 1711 1713 /* XXX compute */ 1712 1714 break; 1713 1715 case 3: 1714 - amdgpu_fence_process(&adev->sdma.instance[instance].page); 1716 + if (adev->asic_type != CHIP_VEGA20) 1717 + amdgpu_fence_process(&adev->sdma.instance[instance].page); 1715 1718 break; 1716 1719 } 1717 1720 return 0;
+7 -2
drivers/gpu/drm/amd/amdgpu/soc15_common.h
··· 49 49 50 50 #define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \ 51 51 do { \ 52 + uint32_t old_ = 0; \ 52 53 uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ 53 54 uint32_t loop = adev->usec_timeout; \ 54 55 while ((tmp_ & (mask)) != (expected_value)) { \ 55 - udelay(2); \ 56 + if (old_ != tmp_) { \ 57 + loop = adev->usec_timeout; \ 58 + old_ = tmp_; \ 59 + } else \ 60 + udelay(1); \ 56 61 tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ 57 62 loop--; \ 58 63 if (!loop) { \ 59 - DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \ 64 + DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \ 60 65 inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \ 61 66 ret = -ETIMEDOUT; \ 62 67 break; \
+2 -2
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
··· 435 435 continue; 436 436 if (!amdgpu_sriov_vf(adev)) { 437 437 ring = &adev->uvd.inst[j].ring; 438 - sprintf(ring->name, "uvd<%d>", j); 438 + sprintf(ring->name, "uvd_%d", ring->me); 439 439 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); 440 440 if (r) 441 441 return r; ··· 443 443 444 444 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { 445 445 ring = &adev->uvd.inst[j].ring_enc[i]; 446 - sprintf(ring->name, "uvd_enc%d<%d>", i, j); 446 + sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i); 447 447 if (amdgpu_sriov_vf(adev)) { 448 448 ring->use_doorbell = true; 449 449
+20 -10
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
··· 214 214 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 215 215 struct amdgpu_ring *ring = &adev->vcn.ring_dec; 216 216 217 - if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) 217 + if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || 218 + RREG32_SOC15(VCN, 0, mmUVD_STATUS)) 218 219 vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE); 219 220 220 221 ring->sched.ready = false; ··· 1088 1087 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, 1089 1088 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); 1090 1089 1091 - /* initialize wptr */ 1090 + /* initialize JPEG wptr */ 1091 + ring = &adev->vcn.ring_jpeg; 1092 1092 ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); 1093 1093 1094 1094 /* copy patch commands to the jpeg ring */ ··· 1161 1159 static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev) 1162 1160 { 1163 1161 int ret_code = 0; 1162 + uint32_t tmp; 1164 1163 1165 1164 /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */ 1166 1165 SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1167 1166 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, 1168 1167 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); 1169 1168 1170 - if (!ret_code) { 1171 - int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; 1172 - /* wait for read ptr to be equal to write ptr */ 1173 - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); 1169 + /* wait for read ptr to be equal to write ptr */ 1170 + tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); 1171 + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); 1174 1172 1175 - SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1176 - UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, 1177 - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); 1178 - } 1173 + tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); 1174 + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code); 1175 + 1176 + tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); 1177 + SOC15_WAIT_ON_RREG(UVD, 0, 
mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); 1178 + 1179 + tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; 1180 + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); 1181 + 1182 + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1183 + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, 1184 + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); 1179 1185 1180 1186 /* disable dynamic power gating mode */ 1181 1187 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
+9 -9
drivers/gpu/drm/amd/amdgpu/vi.c
··· 87 87 u32 r; 88 88 89 89 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 90 - WREG32(mmPCIE_INDEX, reg); 91 - (void)RREG32(mmPCIE_INDEX); 92 - r = RREG32(mmPCIE_DATA); 90 + WREG32_NO_KIQ(mmPCIE_INDEX, reg); 91 + (void)RREG32_NO_KIQ(mmPCIE_INDEX); 92 + r = RREG32_NO_KIQ(mmPCIE_DATA); 93 93 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 94 94 return r; 95 95 } ··· 99 99 unsigned long flags; 100 100 101 101 spin_lock_irqsave(&adev->pcie_idx_lock, flags); 102 - WREG32(mmPCIE_INDEX, reg); 103 - (void)RREG32(mmPCIE_INDEX); 104 - WREG32(mmPCIE_DATA, v); 105 - (void)RREG32(mmPCIE_DATA); 102 + WREG32_NO_KIQ(mmPCIE_INDEX, reg); 103 + (void)RREG32_NO_KIQ(mmPCIE_INDEX); 104 + WREG32_NO_KIQ(mmPCIE_DATA, v); 105 + (void)RREG32_NO_KIQ(mmPCIE_DATA); 106 106 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 107 107 } 108 108 ··· 123 123 unsigned long flags; 124 124 125 125 spin_lock_irqsave(&adev->smc_idx_lock, flags); 126 - WREG32(mmSMC_IND_INDEX_11, (reg)); 127 - WREG32(mmSMC_IND_DATA_11, (v)); 126 + WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg)); 127 + WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v)); 128 128 spin_unlock_irqrestore(&adev->smc_idx_lock, flags); 129 129 } 130 130
+2 -2
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 1623 1623 return -EINVAL; 1624 1624 1625 1625 dmabuf = dma_buf_get(args->dmabuf_fd); 1626 - if (!dmabuf) 1627 - return -EINVAL; 1626 + if (IS_ERR(dmabuf)) 1627 + return PTR_ERR(dmabuf); 1628 1628 1629 1629 mutex_lock(&p->mutex); 1630 1630
+75 -18
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 331 331 struct common_irq_params *irq_params = interrupt_params; 332 332 struct amdgpu_device *adev = irq_params->adev; 333 333 struct amdgpu_crtc *acrtc; 334 + struct dm_crtc_state *acrtc_state; 334 335 335 336 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); 336 337 337 338 if (acrtc) { 338 339 drm_crtc_handle_vblank(&acrtc->base); 339 340 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); 341 + 342 + acrtc_state = to_dm_crtc_state(acrtc->base.state); 343 + 344 + if (acrtc_state->stream && 345 + acrtc_state->vrr_params.supported && 346 + acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) { 347 + mod_freesync_handle_v_update( 348 + adev->dm.freesync_module, 349 + acrtc_state->stream, 350 + &acrtc_state->vrr_params); 351 + 352 + dc_stream_adjust_vmin_vmax( 353 + adev->dm.dc, 354 + acrtc_state->stream, 355 + &acrtc_state->vrr_params.adjust); 356 + } 340 357 } 341 358 } 342 359 ··· 3026 3009 dc_stream_retain(state->stream); 3027 3010 } 3028 3011 3029 - state->adjust = cur->adjust; 3012 + state->vrr_params = cur->vrr_params; 3030 3013 state->vrr_infopacket = cur->vrr_infopacket; 3031 3014 state->abm_level = cur->abm_level; 3032 3015 state->vrr_supported = cur->vrr_supported; ··· 3645 3628 static int dm_plane_atomic_async_check(struct drm_plane *plane, 3646 3629 struct drm_plane_state *new_plane_state) 3647 3630 { 3631 + struct drm_plane_state *old_plane_state = 3632 + drm_atomic_get_old_plane_state(new_plane_state->state, plane); 3633 + 3648 3634 /* Only support async updates on cursor planes. */ 3649 3635 if (plane->type != DRM_PLANE_TYPE_CURSOR) 3636 + return -EINVAL; 3637 + 3638 + /* 3639 + * DRM calls prepare_fb and cleanup_fb on new_plane_state for 3640 + * async commits so don't allow fb changes. 
3641 + */ 3642 + if (old_plane_state->fb != new_plane_state->fb) 3650 3643 return -EINVAL; 3651 3644 3652 3645 return 0; ··· 4472 4445 static void update_freesync_state_on_stream( 4473 4446 struct amdgpu_display_manager *dm, 4474 4447 struct dm_crtc_state *new_crtc_state, 4475 - struct dc_stream_state *new_stream) 4448 + struct dc_stream_state *new_stream, 4449 + struct dc_plane_state *surface, 4450 + u32 flip_timestamp_in_us) 4476 4451 { 4477 - struct mod_vrr_params vrr = {0}; 4452 + struct mod_vrr_params vrr_params = new_crtc_state->vrr_params; 4478 4453 struct dc_info_packet vrr_infopacket = {0}; 4479 4454 struct mod_freesync_config config = new_crtc_state->freesync_config; 4480 4455 ··· 4503 4474 4504 4475 mod_freesync_build_vrr_params(dm->freesync_module, 4505 4476 new_stream, 4506 - &config, &vrr); 4477 + &config, &vrr_params); 4478 + 4479 + if (surface) { 4480 + mod_freesync_handle_preflip( 4481 + dm->freesync_module, 4482 + surface, 4483 + new_stream, 4484 + flip_timestamp_in_us, 4485 + &vrr_params); 4486 + } 4507 4487 4508 4488 mod_freesync_build_vrr_infopacket( 4509 4489 dm->freesync_module, 4510 4490 new_stream, 4511 - &vrr, 4491 + &vrr_params, 4512 4492 PACKET_TYPE_VRR, 4513 4493 TRANSFER_FUNC_UNKNOWN, 4514 4494 &vrr_infopacket); 4515 4495 4516 4496 new_crtc_state->freesync_timing_changed = 4517 - (memcmp(&new_crtc_state->adjust, 4518 - &vrr.adjust, 4519 - sizeof(vrr.adjust)) != 0); 4497 + (memcmp(&new_crtc_state->vrr_params.adjust, 4498 + &vrr_params.adjust, 4499 + sizeof(vrr_params.adjust)) != 0); 4520 4500 4521 4501 new_crtc_state->freesync_vrr_info_changed = 4522 4502 (memcmp(&new_crtc_state->vrr_infopacket, 4523 4503 &vrr_infopacket, 4524 4504 sizeof(vrr_infopacket)) != 0); 4525 4505 4526 - new_crtc_state->adjust = vrr.adjust; 4506 + new_crtc_state->vrr_params = vrr_params; 4527 4507 new_crtc_state->vrr_infopacket = vrr_infopacket; 4528 4508 4529 - new_stream->adjust = new_crtc_state->adjust; 4509 + new_stream->adjust = 
new_crtc_state->vrr_params.adjust; 4530 4510 new_stream->vrr_infopacket = vrr_infopacket; 4531 4511 4532 4512 if (new_crtc_state->freesync_vrr_info_changed) 4533 4513 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", 4534 4514 new_crtc_state->base.crtc->base.id, 4535 4515 (int)new_crtc_state->base.vrr_enabled, 4536 - (int)vrr.state); 4516 + (int)vrr_params.state); 4537 4517 4538 4518 if (new_crtc_state->freesync_timing_changed) 4539 4519 DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n", 4540 4520 new_crtc_state->base.crtc->base.id, 4541 - vrr.adjust.v_total_min, 4542 - vrr.adjust.v_total_max); 4521 + vrr_params.adjust.v_total_min, 4522 + vrr_params.adjust.v_total_max); 4543 4523 } 4544 4524 4545 4525 /* ··· 4562 4524 struct dc_state *state) 4563 4525 { 4564 4526 unsigned long flags; 4527 + uint64_t timestamp_ns; 4565 4528 uint32_t target_vblank; 4566 4529 int r, vpos, hpos; 4567 4530 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); ··· 4576 4537 struct dc_stream_update stream_update = {0}; 4577 4538 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); 4578 4539 struct dc_stream_status *stream_status; 4540 + struct dc_plane_state *surface; 4579 4541 4580 4542 4581 4543 /* Prepare wait for target vblank early - before the fence-waits */ ··· 4626 4586 addr.address.grph.addr.high_part = upper_32_bits(afb->address); 4627 4587 addr.flip_immediate = async_flip; 4628 4588 4589 + timestamp_ns = ktime_get_ns(); 4590 + addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000); 4591 + 4629 4592 4630 4593 if (acrtc->base.state->event) 4631 4594 prepare_flip_isr(acrtc); ··· 4642 4599 return; 4643 4600 } 4644 4601 4645 - surface_updates->surface = stream_status->plane_states[0]; 4646 - if (!surface_updates->surface) { 4602 + surface = stream_status->plane_states[0]; 4603 + surface_updates->surface = surface; 4604 + 4605 + if (!surface) { 4647 4606 DRM_ERROR("No surface for CRTC: id=%d\n", 4648 4607 acrtc->crtc_id); 4649 4608 return; ··· 4656 
4611 update_freesync_state_on_stream( 4657 4612 &adev->dm, 4658 4613 acrtc_state, 4659 - acrtc_state->stream); 4614 + acrtc_state->stream, 4615 + surface, 4616 + addr.flip_timestamp_in_us); 4660 4617 4661 4618 if (acrtc_state->freesync_timing_changed) 4662 4619 stream_update.adjust = ··· 4669 4622 &acrtc_state->stream->vrr_infopacket; 4670 4623 } 4671 4624 4625 + /* Update surface timing information. */ 4626 + surface->time.time_elapsed_in_us[surface->time.index] = 4627 + addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us; 4628 + surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us; 4629 + surface->time.index++; 4630 + if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) 4631 + surface->time.index = 0; 4632 + 4672 4633 mutex_lock(&adev->dm.dc_lock); 4634 + 4673 4635 dc_commit_updates_for_stream(adev->dm.dc, 4674 4636 surface_updates, 4675 4637 1, ··· 5370 5314 config.max_refresh_in_uhz = 5371 5315 aconnector->max_vfreq * 1000000; 5372 5316 config.vsif_supported = true; 5317 + config.btr = true; 5373 5318 } 5374 5319 5375 5320 new_crtc_state->freesync_config = config; ··· 5381 5324 { 5382 5325 new_crtc_state->vrr_supported = false; 5383 5326 5384 - memset(&new_crtc_state->adjust, 0, 5385 - sizeof(new_crtc_state->adjust)); 5327 + memset(&new_crtc_state->vrr_params, 0, 5328 + sizeof(new_crtc_state->vrr_params)); 5386 5329 memset(&new_crtc_state->vrr_infopacket, 0, 5387 5330 sizeof(new_crtc_state->vrr_infopacket)); 5388 5331 }
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 268 268 269 269 bool vrr_supported; 270 270 struct mod_freesync_config freesync_config; 271 - struct dc_crtc_timing_adjust adjust; 271 + struct mod_vrr_params vrr_params; 272 272 struct dc_info_packet vrr_infopacket; 273 273 274 274 int abm_level;
+15
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
··· 638 638 { 639 639 enum bp_result result = BP_RESULT_OK; 640 640 struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL; 641 + struct atom_smu_info_v3_3 *smu_info = NULL; 641 642 642 643 if (!ss_info) 643 644 return BP_RESULT_BADINPUT; ··· 650 649 DATA_TABLES(dce_info)); 651 650 if (!disp_cntl_tbl) 652 651 return BP_RESULT_BADBIOSTABLE; 652 + 653 653 654 654 ss_info->type.STEP_AND_DELAY_INFO = false; 655 655 ss_info->spread_percentage_divider = 1000; ··· 689 687 * copy it into dce_info 690 688 */ 691 689 result = BP_RESULT_UNSUPPORTED; 690 + break; 691 + case AS_SIGNAL_TYPE_XGMI: 692 + smu_info = GET_IMAGE(struct atom_smu_info_v3_3, 693 + DATA_TABLES(smu_info)); 694 + if (!smu_info) 695 + return BP_RESULT_BADBIOSTABLE; 696 + 697 + ss_info->spread_spectrum_percentage = 698 + smu_info->waflclk_ss_percentage; 699 + ss_info->spread_spectrum_range = 700 + smu_info->gpuclk_ss_rate_10hz * 10; 701 + if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE) 702 + ss_info->type.CENTER_MODE = true; 692 703 break; 693 704 default: 694 705 result = BP_RESULT_UNSUPPORTED;
+1
drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
··· 67 67 return true; 68 68 #endif 69 69 case DCE_VERSION_12_0: 70 + case DCE_VERSION_12_1: 70 71 *h = dal_cmd_tbl_helper_dce112_get_table2(); 71 72 return true; 72 73
+2 -8
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 151 151 return false; 152 152 } 153 153 154 - if (connectors_num == 0 && num_virtual_links == 0) { 155 - dm_error("DC: Number of connectors is zero!\n"); 156 - } 157 - 158 154 dm_output_to_console( 159 155 "DC: %s: connectors_num: physical:%d, virtual:%d\n", 160 156 __func__, ··· 1467 1471 1468 1472 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 1469 1473 stream_update->vrr_infopacket || 1470 - stream_update->vsc_infopacket) { 1474 + stream_update->vsc_infopacket || 1475 + stream_update->vsp_infopacket) { 1471 1476 resource_build_info_frame(pipe_ctx); 1472 1477 dc->hwss.update_info_frame(pipe_ctx); 1473 1478 } ··· 1569 1572 dc, pipe_ctx->stream, stream_status->plane_count, context); 1570 1573 } 1571 1574 } 1572 - 1573 - if (update_type == UPDATE_TYPE_FULL) 1574 - context_timing_trace(dc, &context->res_ctx); 1575 1575 1576 1576 // Update Type FAST, Surface updates 1577 1577 if (update_type == UPDATE_TYPE_FAST) {
+38 -10
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 215 215 return true; 216 216 } 217 217 218 + if (link->connector_signal == SIGNAL_TYPE_EDP) 219 + link->dc->hwss.edp_wait_for_hpd_ready(link, true); 220 + 218 221 /* todo: may need to lock gpio access */ 219 222 hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); 220 223 if (hpd_pin == NULL) ··· 342 339 { 343 340 enum gpio_result gpio_result; 344 341 uint32_t clock_pin = 0; 345 - 342 + uint8_t retry = 0; 346 343 struct ddc *ddc; 347 344 348 345 enum connector_id connector_id = ··· 371 368 return present; 372 369 } 373 370 374 - /* Read GPIO: DP sink is present if both clock and data pins are zero */ 375 - /* [anaumov] in DAL2, there was no check for GPIO failure */ 376 - 377 - gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); 378 - ASSERT(gpio_result == GPIO_RESULT_OK); 371 + /* 372 + * Read GPIO: DP sink is present if both clock and data pins are zero 373 + * 374 + * [W/A] plug-unplug DP cable, sometimes customer board has 375 + * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI 376 + * then monitor can't br light up. Add retry 3 times 377 + * But in real passive dongle, it need additional 3ms to detect 378 + */ 379 + do { 380 + gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); 381 + ASSERT(gpio_result == GPIO_RESULT_OK); 382 + if (clock_pin) 383 + udelay(1000); 384 + else 385 + break; 386 + } while (retry++ < 3); 379 387 380 388 present = (gpio_result == GPIO_RESULT_OK) && !clock_pin; 381 389 ··· 717 703 if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps))) 718 704 same_dpcd = false; 719 705 } 720 - /* Active dongle downstream unplug */ 706 + /* Active dongle plug in without display or downstream unplug*/ 721 707 if (link->type == dc_connection_active_dongle 722 708 && link->dpcd_caps.sink_count. 
723 709 bits.SINK_COUNT == 0) { 724 - if (prev_sink != NULL) 710 + if (prev_sink != NULL) { 711 + /* Downstream unplug */ 725 712 dc_sink_release(prev_sink); 713 + } else { 714 + /* Empty dongle plug in */ 715 + for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) { 716 + int fail_count = 0; 717 + 718 + dp_verify_link_cap(link, 719 + &link->reported_link_cap, 720 + &fail_count); 721 + 722 + if (fail_count == 0) 723 + break; 724 + } 725 + } 726 726 return true; 727 727 } 728 728 ··· 2650 2622 { 2651 2623 struct dc *core_dc = pipe_ctx->stream->ctx->dc; 2652 2624 2625 + core_dc->hwss.blank_stream(pipe_ctx); 2626 + 2653 2627 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 2654 2628 deallocate_mst_payload(pipe_ctx); 2655 - 2656 - core_dc->hwss.blank_stream(pipe_ctx); 2657 2629 2658 2630 core_dc->hwss.disable_stream(pipe_ctx, option); 2659 2631
+5 -1
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 83 83 dc_version = DCE_VERSION_11_22; 84 84 break; 85 85 case FAMILY_AI: 86 - dc_version = DCE_VERSION_12_0; 86 + if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev)) 87 + dc_version = DCE_VERSION_12_1; 88 + else 89 + dc_version = DCE_VERSION_12_0; 87 90 break; 88 91 #if defined(CONFIG_DRM_AMD_DC_DCN1_0) 89 92 case FAMILY_RV: ··· 139 136 num_virtual_links, dc); 140 137 break; 141 138 case DCE_VERSION_12_0: 139 + case DCE_VERSION_12_1: 142 140 res_pool = dce120_create_resource_pool( 143 141 num_virtual_links, dc); 144 142 break;
+2 -2
drivers/gpu/drm/amd/display/dc/dc_helper.c
··· 234 234 if (field_value == condition_value) { 235 235 if (i * delay_between_poll_us > 1000 && 236 236 !IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) 237 - dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n", 237 + DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n", 238 238 delay_between_poll_us * i / 1000, 239 239 func_name, line); 240 240 return reg_val; 241 241 } 242 242 } 243 243 244 - dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n", 244 + DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n", 245 245 delay_between_poll_us, time_out_num_tries, 246 246 func_name, line); 247 247
+1 -1
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
··· 192 192 /*swaped & float*/ 193 193 SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F, 194 194 /*grow graphics here if necessary */ 195 - SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888, 196 195 SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, 197 196 SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr = 198 197 SURFACE_PIXEL_FORMAT_VIDEO_BEGIN, ··· 199 200 SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr, 200 201 SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb, 201 202 SURFACE_PIXEL_FORMAT_SUBSAMPLE_END, 203 + SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888, 202 204 SURFACE_PIXEL_FORMAT_INVALID 203 205 204 206 /*grow 444 video here if necessary */
+7
drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
··· 676 676 { 677 677 struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); 678 678 struct dm_pp_power_level_change_request level_change_req; 679 + int unpatched_disp_clk = context->bw.dce.dispclk_khz; 680 + 681 + /*TODO: W/A for dal3 linux, investigate why this works */ 682 + if (!clk_mgr_dce->dfs_bypass_active) 683 + context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; 679 684 680 685 level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); 681 686 /* get max clock state from PPLIB */ ··· 695 690 clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; 696 691 } 697 692 dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); 693 + 694 + context->bw.dce.dispclk_khz = unpatched_disp_clk; 698 695 } 699 696 700 697 static void dce12_update_clocks(struct clk_mgr *clk_mgr,
+10 -1
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
··· 1267 1267 pipe_ctx->plane_res.scl_data.lb_params.depth, 1268 1268 &pipe_ctx->stream->bit_depth_params); 1269 1269 1270 - if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) 1270 + if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) { 1271 + /* 1272 + * The way 420 is packed, 2 channels carry Y component, 1 channel 1273 + * alternate between Cb and Cr, so both channels need the pixel 1274 + * value for Y 1275 + */ 1276 + if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) 1277 + color.color_r_cr = color.color_g_y; 1278 + 1271 1279 pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color( 1272 1280 pipe_ctx->stream_res.tg, 1273 1281 &color); 1282 + } 1274 1283 1275 1284 pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm, 1276 1285 &pipe_ctx->plane_res.scl_data);
+13 -2
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 2159 2159 color_space = stream->output_color_space; 2160 2160 color_space_to_black_color(dc, color_space, &black_color); 2161 2161 2162 + /* 2163 + * The way 420 is packed, 2 channels carry Y component, 1 channel 2164 + * alternate between Cb and Cr, so both channels need the pixel 2165 + * value for Y 2166 + */ 2167 + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) 2168 + black_color.color_r_cr = black_color.color_g_y; 2169 + 2170 + 2162 2171 if (stream_res->tg->funcs->set_blank_color) 2163 2172 stream_res->tg->funcs->set_blank_color( 2164 2173 stream_res->tg, ··· 2357 2348 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2358 2349 2359 2350 /* Skip inactive pipes and ones already updated */ 2360 - if (!pipe_ctx->stream || pipe_ctx->stream == stream) 2351 + if (!pipe_ctx->stream || pipe_ctx->stream == stream 2352 + || !pipe_ctx->plane_state) 2361 2353 continue; 2362 2354 2363 2355 pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); ··· 2372 2362 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2373 2363 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2374 2364 2375 - if (!pipe_ctx->stream || pipe_ctx->stream == stream) 2365 + if (!pipe_ctx->stream || pipe_ctx->stream == stream 2366 + || !pipe_ctx->plane_state) 2376 2367 continue; 2377 2368 2378 2369 dcn10_pipe_control_lock(dc, pipe_ctx, false);
+1
drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
··· 79 79 dal_hw_factory_dce110_init(factory); 80 80 return true; 81 81 case DCE_VERSION_12_0: 82 + case DCE_VERSION_12_1: 82 83 dal_hw_factory_dce120_init(factory); 83 84 return true; 84 85 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+1
drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
··· 76 76 dal_hw_translate_dce110_init(translate); 77 77 return true; 78 78 case DCE_VERSION_12_0: 79 + case DCE_VERSION_12_1: 79 80 dal_hw_translate_dce120_init(translate); 80 81 return true; 81 82 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+1
drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
··· 90 90 case DCE_VERSION_10_0: 91 91 return dal_i2caux_dce100_create(ctx); 92 92 case DCE_VERSION_12_0: 93 + case DCE_VERSION_12_1: 93 94 return dal_i2caux_dce120_create(ctx); 94 95 #if defined(CONFIG_DRM_AMD_DC_DCN1_0) 95 96 case DCN_VERSION_1_0:
+1
drivers/gpu/drm/amd/display/include/bios_parser_types.h
··· 41 41 AS_SIGNAL_TYPE_LVDS, 42 42 AS_SIGNAL_TYPE_DISPLAY_PORT, 43 43 AS_SIGNAL_TYPE_GPU_PLL, 44 + AS_SIGNAL_TYPE_XGMI, 44 45 AS_SIGNAL_TYPE_UNKNOWN 45 46 }; 46 47
+1
drivers/gpu/drm/amd/display/include/dal_types.h
··· 42 42 DCE_VERSION_11_2, 43 43 DCE_VERSION_11_22, 44 44 DCE_VERSION_12_0, 45 + DCE_VERSION_12_1, 45 46 DCE_VERSION_MAX, 46 47 DCN_VERSION_1_0, 47 48 #if defined(CONFIG_DRM_AMD_DC_DCN1_01)
+43
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
··· 49 49 #include "soc15_common.h" 50 50 #include "smuio/smuio_9_0_offset.h" 51 51 #include "smuio/smuio_9_0_sh_mask.h" 52 + #include "nbio/nbio_7_4_sh_mask.h" 53 + 54 + #define smnPCIE_LC_SPEED_CNTL 0x11140290 55 + #define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 52 56 53 57 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) 54 58 { ··· 2286 2282 break; 2287 2283 2288 2284 case PP_PCIE: 2285 + soft_min_level = mask ? (ffs(mask) - 1) : 0; 2286 + soft_max_level = mask ? (fls(mask) - 1) : 0; 2287 + if (soft_min_level >= NUM_LINK_LEVELS || 2288 + soft_max_level >= NUM_LINK_LEVELS) 2289 + return -EINVAL; 2290 + 2291 + ret = smum_send_msg_to_smc_with_parameter(hwmgr, 2292 + PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level); 2293 + PP_ASSERT_WITH_CODE(!ret, 2294 + "Failed to set min link dpm level!", 2295 + return ret); 2296 + 2289 2297 break; 2290 2298 2291 2299 default: ··· 2774 2758 data->od8_settings.od8_settings_array; 2775 2759 OverDriveTable_t *od_table = 2776 2760 &(data->smc_state_table.overdrive_table); 2761 + struct phm_ppt_v3_information *pptable_information = 2762 + (struct phm_ppt_v3_information *)hwmgr->pptable; 2763 + PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable; 2764 + struct amdgpu_device *adev = hwmgr->adev; 2777 2765 struct pp_clock_levels_with_latency clocks; 2778 2766 int i, now, size = 0; 2779 2767 int ret = 0; 2768 + uint32_t gen_speed, lane_width; 2780 2769 2781 2770 switch (type) { 2782 2771 case PP_SCLK: ··· 2819 2798 break; 2820 2799 2821 2800 case PP_PCIE: 2801 + gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 2802 + PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 2803 + >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 2804 + lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 2805 + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) 2806 + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; 2807 + for (i = 0; i < NUM_LINK_LEVELS; i++) 2808 + size += sprintf(buf + size, "%d: %s %s 
%dMhz %s\n", i, 2809 + (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," : 2810 + (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," : 2811 + (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," : 2812 + (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "", 2813 + (pptable->PcieLaneCount[i] == 1) ? "x1" : 2814 + (pptable->PcieLaneCount[i] == 2) ? "x2" : 2815 + (pptable->PcieLaneCount[i] == 3) ? "x4" : 2816 + (pptable->PcieLaneCount[i] == 4) ? "x8" : 2817 + (pptable->PcieLaneCount[i] == 5) ? "x12" : 2818 + (pptable->PcieLaneCount[i] == 6) ? "x16" : "", 2819 + pptable->LclkFreq[i], 2820 + (gen_speed == pptable->PcieGenSpeed[i]) && 2821 + (lane_width == pptable->PcieLaneCount[i]) ? 2822 + "*" : ""); 2822 2823 break; 2823 2824 2824 2825 case OD_SCLK:
+3
drivers/gpu/drm/drm_atomic_state_helper.c
··· 241 241 242 242 state->fence = NULL; 243 243 state->commit = NULL; 244 + state->fb_damage_clips = NULL; 244 245 } 245 246 EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state); 246 247 ··· 286 285 287 286 if (state->commit) 288 287 drm_crtc_commit_put(state->commit); 288 + 289 + drm_property_blob_put(state->fb_damage_clips); 289 290 } 290 291 EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state); 291 292
+2 -1
drivers/gpu/drm/drm_damage_helper.c
··· 178 178 state = drm_atomic_state_alloc(fb->dev); 179 179 if (!state) { 180 180 ret = -ENOMEM; 181 - goto out; 181 + goto out_drop_locks; 182 182 } 183 183 state->acquire_ctx = &ctx; 184 184 ··· 238 238 kfree(rects); 239 239 drm_atomic_state_put(state); 240 240 241 + out_drop_locks: 241 242 drm_modeset_drop_locks(&ctx); 242 243 drm_modeset_acquire_fini(&ctx); 243 244
+3 -3
drivers/gpu/drm/i915/gvt/cmd_parser.c
··· 1900 1900 1901 1901 {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, 1902 1902 1903 - {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL, 1903 + {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL, 1904 1904 D_BDW_PLUS, 0, 8, NULL}, 1905 1905 1906 - {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 1907 - ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait}, 1906 + {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, 1907 + D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait}, 1908 1908 1909 1909 {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS, 1910 1910 ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
+1 -1
drivers/gpu/drm/i915/gvt/gvt.c
··· 437 437 438 438 ret = intel_gvt_debugfs_init(gvt); 439 439 if (ret) 440 - gvt_err("debugfs registeration failed, go on.\n"); 440 + gvt_err("debugfs registration failed, go on.\n"); 441 441 442 442 gvt_dbg_core("gvt device initialization is done\n"); 443 443 dev_priv->gvt = gvt;
+4
drivers/gpu/drm/i915/gvt/gvt.h
··· 159 159 struct kmem_cache *workloads; 160 160 atomic_t running_workload_num; 161 161 struct i915_gem_context *shadow_ctx; 162 + union { 163 + u64 i915_context_pml4; 164 + u64 i915_context_pdps[GEN8_3LVL_PDPES]; 165 + }; 162 166 DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES); 163 167 DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); 164 168 void *ring_scan_buffer[I915_NUM_ENGINES];
+1
drivers/gpu/drm/i915/gvt/handlers.c
··· 475 475 _MMIO(0x7704), 476 476 _MMIO(0x7708), 477 477 _MMIO(0x770c), 478 + _MMIO(0x83a8), 478 479 _MMIO(0xb110), 479 480 GEN8_L3SQCREG4,//_MMIO(0xb118) 480 481 _MMIO(0xe100),
+1 -1
drivers/gpu/drm/i915/gvt/interrupt.c
··· 126 126 [FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C", 127 127 [AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C", 128 128 [AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C", 129 - [ERR_AND_DBG] = "South Error and Debug Interupts Combined", 129 + [ERR_AND_DBG] = "South Error and Debug Interrupts Combined", 130 130 [GMBUS] = "Gmbus", 131 131 [SDVO_B_HOTPLUG] = "SDVO B hotplug", 132 132 [CRT_HOTPLUG] = "CRT Hotplug",
+33
drivers/gpu/drm/i915/gvt/scheduler.c
··· 1079 1079 return ret; 1080 1080 } 1081 1081 1082 + static void 1083 + i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s) 1084 + { 1085 + struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt; 1086 + int i; 1087 + 1088 + if (i915_vm_is_48bit(&i915_ppgtt->vm)) 1089 + px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4; 1090 + else { 1091 + for (i = 0; i < GEN8_3LVL_PDPES; i++) 1092 + px_dma(i915_ppgtt->pdp.page_directory[i]) = 1093 + s->i915_context_pdps[i]; 1094 + } 1095 + } 1096 + 1082 1097 /** 1083 1098 * intel_vgpu_clean_submission - free submission-related resource for vGPU 1084 1099 * @vgpu: a vGPU ··· 1106 1091 struct intel_vgpu_submission *s = &vgpu->submission; 1107 1092 1108 1093 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); 1094 + i915_context_ppgtt_root_restore(s); 1109 1095 i915_gem_context_put(s->shadow_ctx); 1110 1096 kmem_cache_destroy(s->workloads); 1111 1097 } ··· 1132 1116 s->ops->reset(vgpu, engine_mask); 1133 1117 } 1134 1118 1119 + static void 1120 + i915_context_ppgtt_root_save(struct intel_vgpu_submission *s) 1121 + { 1122 + struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt; 1123 + int i; 1124 + 1125 + if (i915_vm_is_48bit(&i915_ppgtt->vm)) 1126 + s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4); 1127 + else { 1128 + for (i = 0; i < GEN8_3LVL_PDPES; i++) 1129 + s->i915_context_pdps[i] = 1130 + px_dma(i915_ppgtt->pdp.page_directory[i]); 1131 + } 1132 + } 1133 + 1135 1134 /** 1136 1135 * intel_vgpu_setup_submission - setup submission-related resource for vGPU 1137 1136 * @vgpu: a vGPU ··· 1168 1137 &vgpu->gvt->dev_priv->drm); 1169 1138 if (IS_ERR(s->shadow_ctx)) 1170 1139 return PTR_ERR(s->shadow_ctx); 1140 + 1141 + i915_context_ppgtt_root_save(s); 1171 1142 1172 1143 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); 1173 1144
+17 -16
drivers/gpu/drm/ttm/ttm_bo.c
··· 77 77 return 0; 78 78 } 79 79 80 - static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) 80 + static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p, 81 + int mem_type) 81 82 { 82 83 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 83 - struct drm_printer p = drm_debug_printer(TTM_PFX); 84 84 85 - pr_err(" has_type: %d\n", man->has_type); 86 - pr_err(" use_type: %d\n", man->use_type); 87 - pr_err(" flags: 0x%08X\n", man->flags); 88 - pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset); 89 - pr_err(" size: %llu\n", man->size); 90 - pr_err(" available_caching: 0x%08X\n", man->available_caching); 91 - pr_err(" default_caching: 0x%08X\n", man->default_caching); 85 + drm_printf(p, " has_type: %d\n", man->has_type); 86 + drm_printf(p, " use_type: %d\n", man->use_type); 87 + drm_printf(p, " flags: 0x%08X\n", man->flags); 88 + drm_printf(p, " gpu_offset: 0x%08llX\n", man->gpu_offset); 89 + drm_printf(p, " size: %llu\n", man->size); 90 + drm_printf(p, " available_caching: 0x%08X\n", man->available_caching); 91 + drm_printf(p, " default_caching: 0x%08X\n", man->default_caching); 92 92 if (mem_type != TTM_PL_SYSTEM) 93 - (*man->func->debug)(man, &p); 93 + (*man->func->debug)(man, p); 94 94 } 95 95 96 96 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, 97 97 struct ttm_placement *placement) 98 98 { 99 + struct drm_printer p = drm_debug_printer(TTM_PFX); 99 100 int i, ret, mem_type; 100 101 101 - pr_err("No space for %p (%lu pages, %luK, %luM)\n", 102 - bo, bo->mem.num_pages, bo->mem.size >> 10, 103 - bo->mem.size >> 20); 102 + drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n", 103 + bo, bo->mem.num_pages, bo->mem.size >> 10, 104 + bo->mem.size >> 20); 104 105 for (i = 0; i < placement->num_placement; i++) { 105 106 ret = ttm_mem_type_from_place(&placement->placement[i], 106 107 &mem_type); 107 108 if (ret) 108 109 return; 109 - pr_err(" placement[%d]=0x%08X (%d)\n", 110 - i, 
placement->placement[i].flags, mem_type); 111 - ttm_mem_type_debug(bo->bdev, mem_type); 110 + drm_printf(&p, " placement[%d]=0x%08X (%d)\n", 111 + i, placement->placement[i].flags, mem_type); 112 + ttm_mem_type_debug(bo->bdev, &p, mem_type); 112 113 } 113 114 } 114 115