Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-next-2020-12-24' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"Xmas eve pull request present.

Just some fixes that trickled in this past week: Mostly amdgpu fixes,
with a dma-buf/mips build fix and some misc komeda fixes.

dma-buf:
- fix build on mips

komeda:
- fix commit tail operation order
- NULL pointer fix
- out of bounds access fix

ttm:
- remove an unused function

amdgpu:
- Vangogh SMU fixes
- Arcturus gfx9 fixes
- Misc display fixes
- Sienna Cichlid SMU update
- Fix S3 display memory leak
- Fix regression caused by DP sub-connector support

amdkfd:
- Properly require pcie atomics for gfx10"

* tag 'drm-next-2020-12-24' of git://anongit.freedesktop.org/drm/drm: (31 commits)
drm/amd/display: Fix memory leaks in S3 resume
drm/amdgpu: Fix a copy-pasta comment
drm/amdgpu: only set DP subconnector type on DP and eDP connectors
drm/amd/pm: bump Sienna Cichlid smu_driver_if version to match latest pmfw
drm/amd/display: add getter routine to retrieve mpcc mux
drm/amd/display: always program DPPDTO unless not safe to lower
drm/amd/display: [FW Promotion] Release 0.0.47
drm/amd/display: updated wm table for Renoir
drm/amd/display: Acquire DSC during split stream for ODM only if top_pipe
drm/amd/display: Multi-display underflow observed
drm/amd/display: Remove unnecessary NULL check
drm/amd/display: Update RN/VGH active display count workaround
drm/amd/display: change SMU response timeout to 2s.
drm/amd/display: gradually ramp ABM intensity
drm/amd/display: To modify the condition in indicating branch device
drm/amd/display: Modify the hdcp device count check condition
drm/amd/display: Interfaces for hubp blank and soft reset
drm/amd/display: handler not correctly checked at remove_irq_handler
drm/amdgpu: check gfx pipe availability before toggling its interrupts
drm/amdgpu: remove unnecessary asic type check
...

+181 -131
+1
drivers/dma-buf/heaps/cma_heap.c
··· 20 20 #include <linux/module.h> 21 21 #include <linux/scatterlist.h> 22 22 #include <linux/slab.h> 23 + #include <linux/vmalloc.h> 23 24 24 25 25 26 struct cma_heap {
+6 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
··· 1414 1414 pm_runtime_put_autosuspend(connector->dev->dev); 1415 1415 } 1416 1416 1417 - drm_dp_set_subconnector_property(&amdgpu_connector->base, 1418 - ret, 1419 - amdgpu_dig_connector->dpcd, 1420 - amdgpu_dig_connector->downstream_ports); 1417 + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 1418 + connector->connector_type == DRM_MODE_CONNECTOR_eDP) 1419 + drm_dp_set_subconnector_property(&amdgpu_connector->base, 1420 + ret, 1421 + amdgpu_dig_connector->dpcd, 1422 + amdgpu_dig_connector->downstream_ports); 1421 1423 return ret; 1422 1424 } 1423 1425
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 5069 5069 * @pdev: pointer to PCI device 5070 5070 * 5071 5071 * Called when the error recovery driver tells us that its 5072 - * OK to resume normal operation. Use completion to allow 5073 - * halted scsi ops to resume. 5072 + * OK to resume normal operation. 5074 5073 */ 5075 5074 void amdgpu_pci_resume(struct pci_dev *pdev) 5076 5075 {
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 496 496 break; 497 497 } 498 498 499 - if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) { 499 + if (amdgpu_sriov_vf(adev) || 500 + !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) { 500 501 size = 0; 501 502 } else { 502 503 size = amdgpu_gmc_get_vbios_fb_size(adev);
+14 -7
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 1647 1647 } 1648 1648 1649 1649 /* No CPG in Arcturus */ 1650 - if (adev->asic_type != CHIP_ARCTURUS) { 1650 + if (adev->gfx.num_gfx_rings) { 1651 1651 r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name); 1652 1652 if (r) 1653 1653 return r; ··· 2633 2633 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, 2634 2634 bool enable) 2635 2635 { 2636 - u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0); 2636 + u32 tmp; 2637 + 2638 + /* don't toggle interrupts that are only applicable 2639 + * to me0 pipe0 on AISCs that have me0 removed */ 2640 + if (!adev->gfx.num_gfx_rings) 2641 + return; 2642 + 2643 + tmp= RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0); 2637 2644 2638 2645 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0); 2639 2646 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); ··· 3829 3822 gfx_v9_0_enable_gui_idle_interrupt(adev, false); 3830 3823 3831 3824 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 3832 - if (adev->asic_type != CHIP_ARCTURUS) { 3825 + if (adev->gfx.num_gfx_rings) { 3833 3826 /* legacy firmware loading */ 3834 3827 r = gfx_v9_0_cp_gfx_load_microcode(adev); 3835 3828 if (r) ··· 3845 3838 if (r) 3846 3839 return r; 3847 3840 3848 - if (adev->asic_type != CHIP_ARCTURUS) { 3841 + if (adev->gfx.num_gfx_rings) { 3849 3842 r = gfx_v9_0_cp_gfx_resume(adev); 3850 3843 if (r) 3851 3844 return r; ··· 3855 3848 if (r) 3856 3849 return r; 3857 3850 3858 - if (adev->asic_type != CHIP_ARCTURUS) { 3851 + if (adev->gfx.num_gfx_rings) { 3859 3852 ring = &adev->gfx.gfx_ring[0]; 3860 3853 r = amdgpu_ring_test_helper(ring); 3861 3854 if (r) ··· 3891 3884 3892 3885 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable) 3893 3886 { 3894 - if (adev->asic_type != CHIP_ARCTURUS) 3887 + if (adev->gfx.num_gfx_rings) 3895 3888 gfx_v9_0_cp_gfx_enable(adev, enable); 3896 3889 gfx_v9_0_cp_compute_enable(adev, enable); 3897 3890 } ··· 4032 4025 /* stop the rlc */ 
4033 4026 adev->gfx.rlc.funcs->stop(adev); 4034 4027 4035 - if (adev->asic_type != CHIP_ARCTURUS) 4028 + if (adev->gfx.num_gfx_rings) 4036 4029 /* Disable GFX parsing/prefetching */ 4037 4030 gfx_v9_0_cp_gfx_enable(adev, false); 4038 4031
+4 -7
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 1577 1577 gmc_v9_0_init_golden_registers(adev); 1578 1578 1579 1579 if (adev->mode_info.num_crtc) { 1580 - if (adev->asic_type != CHIP_ARCTURUS) { 1581 - /* Lockout access through VGA aperture*/ 1582 - WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); 1583 - 1584 - /* disable VGA render */ 1585 - WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); 1586 - } 1580 + /* Lockout access through VGA aperture*/ 1581 + WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); 1582 + /* disable VGA render */ 1583 + WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); 1587 1584 } 1588 1585 1589 1586 amdgpu_device_program_register_sequence(adev,
+6 -6
drivers/gpu/drm/amd/amdkfd/kfd_device.c
··· 422 422 .mqd_size_aligned = MQD_SIZE_ALIGNED, 423 423 .needs_iommu_device = false, 424 424 .supports_cwsr = true, 425 - .needs_pci_atomics = false, 425 + .needs_pci_atomics = true, 426 426 .num_sdma_engines = 2, 427 427 .num_xgmi_sdma_engines = 0, 428 428 .num_sdma_queues_per_engine = 8, ··· 440 440 .mqd_size_aligned = MQD_SIZE_ALIGNED, 441 441 .needs_iommu_device = false, 442 442 .supports_cwsr = true, 443 - .needs_pci_atomics = false, 443 + .needs_pci_atomics = true, 444 444 .num_sdma_engines = 2, 445 445 .num_xgmi_sdma_engines = 0, 446 446 .num_sdma_queues_per_engine = 8, ··· 458 458 .mqd_size_aligned = MQD_SIZE_ALIGNED, 459 459 .needs_iommu_device = false, 460 460 .supports_cwsr = true, 461 - .needs_pci_atomics = false, 461 + .needs_pci_atomics = true, 462 462 .num_sdma_engines = 2, 463 463 .num_xgmi_sdma_engines = 0, 464 464 .num_sdma_queues_per_engine = 8, ··· 476 476 .mqd_size_aligned = MQD_SIZE_ALIGNED, 477 477 .needs_iommu_device = false, 478 478 .supports_cwsr = true, 479 - .needs_pci_atomics = false, 479 + .needs_pci_atomics = true, 480 480 .num_sdma_engines = 4, 481 481 .num_xgmi_sdma_engines = 0, 482 482 .num_sdma_queues_per_engine = 8, ··· 494 494 .mqd_size_aligned = MQD_SIZE_ALIGNED, 495 495 .needs_iommu_device = false, 496 496 .supports_cwsr = true, 497 - .needs_pci_atomics = false, 497 + .needs_pci_atomics = true, 498 498 .num_sdma_engines = 2, 499 499 .num_xgmi_sdma_engines = 0, 500 500 .num_sdma_queues_per_engine = 8, ··· 530 530 .mqd_size_aligned = MQD_SIZE_ALIGNED, 531 531 .needs_iommu_device = false, 532 532 .supports_cwsr = true, 533 - .needs_pci_atomics = false, 533 + .needs_pci_atomics = true, 534 534 .num_sdma_engines = 2, 535 535 .num_xgmi_sdma_engines = 0, 536 536 .num_sdma_queues_per_engine = 8,
+3 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 2386 2386 2387 2387 drm_connector_update_edid_property(connector, 2388 2388 aconnector->edid); 2389 - drm_add_edid_modes(connector, aconnector->edid); 2389 + aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid); 2390 + drm_connector_list_update(connector); 2390 2391 2391 2392 if (aconnector->dc_link->aux_mode) 2392 2393 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, ··· 9368 9367 if (ret) 9369 9368 goto fail; 9370 9369 9371 - if (dm_old_crtc_state->dsc_force_changed && new_crtc_state) 9370 + if (dm_old_crtc_state->dsc_force_changed) 9372 9371 new_crtc_state->mode_changed = true; 9373 9372 } 9374 9373
+4 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
··· 165 165 handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, 166 166 list); 167 167 168 - if (ih == handler) { 168 + if (handler == NULL) 169 + continue; 170 + 171 + if (ih == handler->handler) { 169 172 /* Found our handler. Remove it from the list. */ 170 173 list_del(&handler->list); 171 174 handler_removed = true;
+14 -22
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
··· 75 75 for (i = 0; i < dc->link_count; i++) { 76 76 const struct dc_link *link = dc->links[i]; 77 77 78 - /* 79 - * Only notify active stream or virtual stream. 80 - * Need to notify virtual stream to work around 81 - * headless case. HPD does not fire when system is in 82 - * S0i2. 83 - */ 84 78 /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ 85 - if (link->connector_signal == SIGNAL_TYPE_VIRTUAL || 86 - link->link_enc->funcs->is_dig_enabled(link->link_enc)) 79 + if (link->link_enc->funcs->is_dig_enabled(link->link_enc)) 87 80 display_count++; 88 81 } 89 82 ··· 227 234 rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); 228 235 229 236 // always update dtos unless clock is lowered and not safe to lower 230 - if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) 231 - rn_update_clocks_update_dpp_dto( 232 - clk_mgr, 233 - context, 234 - clk_mgr_base->clks.actual_dppclk_khz, 235 - safe_to_lower); 237 + rn_update_clocks_update_dpp_dto( 238 + clk_mgr, 239 + context, 240 + clk_mgr_base->clks.actual_dppclk_khz, 241 + safe_to_lower); 236 242 } 237 243 238 244 if (update_dispclk && ··· 730 738 .wm_inst = WM_A, 731 739 .wm_type = WM_TYPE_PSTATE_CHG, 732 740 .pstate_latency_us = 11.72, 733 - .sr_exit_time_us = 9.09, 734 - .sr_enter_plus_exit_time_us = 10.14, 741 + .sr_exit_time_us = 11.90, 742 + .sr_enter_plus_exit_time_us = 12.80, 735 743 .valid = true, 736 744 }, 737 745 { 738 746 .wm_inst = WM_B, 739 747 .wm_type = WM_TYPE_PSTATE_CHG, 740 748 .pstate_latency_us = 11.72, 741 - .sr_exit_time_us = 11.12, 742 - .sr_enter_plus_exit_time_us = 12.48, 749 + .sr_exit_time_us = 13.18, 750 + .sr_enter_plus_exit_time_us = 14.30, 743 751 .valid = true, 744 752 }, 745 753 { 746 754 .wm_inst = WM_C, 747 755 .wm_type = WM_TYPE_PSTATE_CHG, 748 756 .pstate_latency_us = 11.72, 749 - .sr_exit_time_us = 11.12, 750 - .sr_enter_plus_exit_time_us = 12.48, 757 + .sr_exit_time_us = 13.18, 758 + 
.sr_enter_plus_exit_time_us = 14.30, 751 759 .valid = true, 752 760 }, 753 761 { 754 762 .wm_inst = WM_D, 755 763 .wm_type = WM_TYPE_PSTATE_CHG, 756 764 .pstate_latency_us = 11.72, 757 - .sr_exit_time_us = 11.12, 758 - .sr_enter_plus_exit_time_us = 12.48, 765 + .sr_exit_time_us = 13.18, 766 + .sr_enter_plus_exit_time_us = 14.30, 759 767 .valid = true, 760 768 }, 761 769 }
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
··· 99 99 /* Trigger the message transaction by writing the message ID */ 100 100 REG_WRITE(MP1_SMN_C2PMSG_67, msg_id); 101 101 102 - result = rn_smu_wait_for_response(clk_mgr, 10, 1000); 102 + result = rn_smu_wait_for_response(clk_mgr, 10, 200000); 103 103 104 104 ASSERT(result == VBIOSSMC_Result_OK || result == VBIOSSMC_Result_UnknownCmd); 105 105
+1 -8
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
··· 74 74 for (i = 0; i < dc->link_count; i++) { 75 75 const struct dc_link *link = dc->links[i]; 76 76 77 - /* 78 - * Only notify active stream or virtual stream. 79 - * Need to notify virtual stream to work around 80 - * headless case. HPD does not fire when system is in 81 - * S0i2. 82 - */ 83 77 /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ 84 - if (link->connector_signal == SIGNAL_TYPE_VIRTUAL || 85 - link->link_enc->funcs->is_dig_enabled(link->link_enc)) 78 + if (link->link_enc->funcs->is_dig_enabled(link->link_enc)) 86 79 display_count++; 87 80 } 88 81
-20
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 2625 2625 } 2626 2626 } 2627 2627 2628 - if (update_type != UPDATE_TYPE_FAST) { 2629 - // If changing VTG FP2: wait until back in vactive to program FP2 2630 - // Need to ensure that pipe unlock happens soon after to minimize race condition 2631 - for (i = 0; i < dc->res_pool->pipe_count; i++) { 2632 - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2633 - 2634 - if (pipe_ctx->top_pipe || pipe_ctx->stream != stream) 2635 - continue; 2636 - 2637 - if (!pipe_ctx->update_flags.bits.global_sync) 2638 - continue; 2639 - 2640 - pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); 2641 - pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); 2642 - 2643 - pipe_ctx->stream_res.tg->funcs->set_vtg_params( 2644 - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true); 2645 - } 2646 - } 2647 - 2648 2628 if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock) 2649 2629 dc->hwss.interdependent_update_lock(dc, context, false); 2650 2630 else
+18
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
··· 1241 1241 REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst); 1242 1242 } 1243 1243 1244 + bool hubp1_in_blank(struct hubp *hubp) 1245 + { 1246 + uint32_t in_blank; 1247 + struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); 1248 + 1249 + REG_GET(DCHUBP_CNTL, HUBP_IN_BLANK, &in_blank); 1250 + return in_blank ? true : false; 1251 + } 1252 + 1253 + void hubp1_soft_reset(struct hubp *hubp, bool reset) 1254 + { 1255 + struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); 1256 + 1257 + REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0); 1258 + } 1259 + 1244 1260 void hubp1_init(struct hubp *hubp) 1245 1261 { 1246 1262 //do nothing ··· 1288 1272 1289 1273 .dmdata_set_attributes = NULL, 1290 1274 .dmdata_load = NULL, 1275 + .hubp_soft_reset = hubp1_soft_reset, 1276 + .hubp_in_blank = hubp1_in_blank, 1291 1277 }; 1292 1278 1293 1279 /*****************************************/
+4
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
··· 260 260 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\ 261 261 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\ 262 262 HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\ 263 + HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_IN_BLANK, mask_sh),\ 263 264 HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\ 264 265 HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\ 265 266 HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\ ··· 456 455 type HUBP_VTG_SEL;\ 457 456 type HUBP_UNDERFLOW_STATUS;\ 458 457 type HUBP_UNDERFLOW_CLEAR;\ 458 + type HUBP_IN_BLANK;\ 459 459 type NUM_PIPES;\ 460 460 type NUM_BANKS;\ 461 461 type PIPE_INTERLEAVE;\ ··· 774 772 775 773 void hubp1_init(struct hubp *hubp); 776 774 void hubp1_read_state_common(struct hubp *hubp); 775 + bool hubp1_in_blank(struct hubp *hubp); 776 + void hubp1_soft_reset(struct hubp *hubp, bool reset); 777 777 778 778 #endif
+12
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
··· 467 467 REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0); 468 468 } 469 469 470 + unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id) 471 + { 472 + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); 473 + uint32_t val; 474 + 475 + if (opp_id < MAX_OPP && REG(MUX[opp_id])) 476 + REG_GET(MUX[opp_id], MPC_OUT_MUX, &val); 477 + 478 + return val; 479 + } 480 + 470 481 static const struct mpc_funcs dcn10_mpc_funcs = { 471 482 .read_mpcc_state = mpc1_read_mpcc_state, 472 483 .insert_plane = mpc1_insert_plane, ··· 494 483 .set_denorm_clamp = NULL, 495 484 .set_output_csc = NULL, 496 485 .set_output_gamma = NULL, 486 + .get_mpc_out_mux = mpc1_get_mpc_out_mux, 497 487 }; 498 488 499 489 void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
+1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
··· 200 200 201 201 void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock); 202 202 203 + unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id); 203 204 #endif
+2
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
··· 1595 1595 .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, 1596 1596 .hubp_init = hubp1_init, 1597 1597 .validate_dml_output = hubp2_validate_dml_output, 1598 + .hubp_in_blank = hubp1_in_blank, 1599 + .hubp_soft_reset = hubp1_soft_reset, 1598 1600 }; 1599 1601 1600 1602
+8 -2
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
··· 1586 1586 && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) 1587 1587 hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); 1588 1588 1589 - if (pipe_ctx->update_flags.bits.global_sync) { 1589 + /* Only update TG on top pipe */ 1590 + if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe 1591 + && !pipe_ctx->prev_odm_pipe) { 1592 + 1590 1593 pipe_ctx->stream_res.tg->funcs->program_global_sync( 1591 1594 pipe_ctx->stream_res.tg, 1592 1595 pipe_ctx->pipe_dlg_param.vready_offset, ··· 1597 1594 pipe_ctx->pipe_dlg_param.vupdate_offset, 1598 1595 pipe_ctx->pipe_dlg_param.vupdate_width); 1599 1596 1597 + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); 1598 + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); 1599 + 1600 1600 pipe_ctx->stream_res.tg->funcs->set_vtg_params( 1601 - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false); 1601 + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true); 1602 1602 1603 1603 if (hws->funcs.setup_vupdate_interrupt) 1604 1604 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
+1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
··· 556 556 .set_ocsc_default = mpc2_set_ocsc_default, 557 557 .set_output_gamma = mpc2_set_output_gamma, 558 558 .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut, 559 + .get_mpc_out_mux = mpc1_get_mpc_out_mux, 559 560 }; 560 561 561 562 void dcn20_mpc_construct(struct dcn20_mpc *mpc20,
+1 -1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
··· 1933 1933 next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; 1934 1934 else 1935 1935 next_odm_pipe->stream_res.opp = next_odm_pipe->top_pipe->stream_res.opp; 1936 - if (next_odm_pipe->stream->timing.flags.DSC == 1) { 1936 + if (next_odm_pipe->stream->timing.flags.DSC == 1 && !next_odm_pipe->top_pipe) { 1937 1937 dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx); 1938 1938 ASSERT(next_odm_pipe->stream_res.dsc); 1939 1939 if (next_odm_pipe->stream_res.dsc == NULL)
+2
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
··· 509 509 .hubp_clear_underflow = hubp2_clear_underflow, 510 510 .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, 511 511 .hubp_init = hubp3_init, 512 + .hubp_in_blank = hubp1_in_blank, 513 + .hubp_soft_reset = hubp1_soft_reset, 512 514 }; 513 515 514 516 bool hubp3_construct(
+1
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
··· 1428 1428 .program_3dlut = mpc3_program_3dlut, 1429 1429 .release_rmu = mpcc3_release_rmu, 1430 1430 .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut, 1431 + .get_mpc_out_mux = mpc1_get_mpc_out_mux, 1431 1432 1432 1433 }; 1433 1434
+2
drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
··· 188 188 void (*set_unbounded_requesting)( 189 189 struct hubp *hubp, 190 190 bool enable); 191 + bool (*hubp_in_blank)(struct hubp *hubp); 192 + void (*hubp_soft_reset)(struct hubp *hubp, bool reset); 191 193 192 194 }; 193 195
+4
drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
··· 359 359 360 360 int (*release_rmu)(struct mpc *mpc, int mpcc_id); 361 361 362 + unsigned int (*get_mpc_out_mux)( 363 + struct mpc *mpc, 364 + int opp_id); 365 + 362 366 }; 363 367 364 368 #endif
+2 -2
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
··· 47 47 48 48 /* Firmware versioning. */ 49 49 #ifdef DMUB_EXPOSE_VERSION 50 - #define DMUB_FW_VERSION_GIT_HASH 0xa18e25995 50 + #define DMUB_FW_VERSION_GIT_HASH 0xf51b86a 51 51 #define DMUB_FW_VERSION_MAJOR 0 52 52 #define DMUB_FW_VERSION_MINOR 0 53 - #define DMUB_FW_VERSION_REVISION 46 53 + #define DMUB_FW_VERSION_REVISION 47 54 54 #define DMUB_FW_VERSION_TEST 0 55 55 #define DMUB_FW_VERSION_VBIOS 0 56 56 #define DMUB_FW_VERSION_HOTFIX 0
+6 -2
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
··· 128 128 129 129 static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) 130 130 { 131 - /* device count must be greater than or equal to tracked hdcp displays */ 132 - return (get_device_count(hdcp) < get_active_display_count(hdcp)) ? 131 + /* Some MST display may choose to report the internal panel as an HDCP RX. 132 + * To update this condition with 1(because the immediate repeater's internal 133 + * panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). 134 + * Device count must be greater than or equal to tracked hdcp displays. 135 + */ 136 + return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ? 133 137 MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE : 134 138 MOD_HDCP_STATUS_SUCCESS; 135 139 }
+5 -2
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
··· 207 207 208 208 static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) 209 209 { 210 - /* device count must be greater than or equal to tracked hdcp displays */ 211 - return (get_device_count(hdcp) < get_active_display_count(hdcp)) ? 210 + /* Some MST display may choose to report the internal panel as an HDCP RX. */ 211 + /* To update this condition with 1(because the immediate repeater's internal */ 212 + /* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). */ 213 + /* Device count must be greater than or equal to tracked hdcp displays. */ 214 + return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ? 212 215 MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE : 213 216 MOD_HDCP_STATUS_SUCCESS; 214 217 }
+25 -10
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
··· 82 82 unsigned char deviation_gain; 83 83 unsigned char min_knee; 84 84 unsigned char max_knee; 85 + unsigned short blRampReduction; 86 + unsigned short blRampStart; 85 87 }; 86 88 87 89 static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = { 88 - // min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee 89 - {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0}, 90 - {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf}, 91 - {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0}, 92 - {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, 90 + // min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed 91 + {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0, 0xCCCC, 0xCCCC}, 92 + {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf, 0xCCCC, 0xCCCC}, 93 + {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0, 0xCCCC, 0xCCCC}, 94 + {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC}, 93 95 }; 94 96 95 97 static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = { 96 - // min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee 97 - {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, 98 - {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, 99 - {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, 100 - {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, 98 + // min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blStart blRed 99 + {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC}, 100 + {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC}, 101 + {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC}, 102 + {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC}, 101 103 }; 102 104 103 105 static const struct abm_parameters * const abm_settings[] = { ··· 664 662 { 665 663 struct 
iram_table_v_2_2 ram_table; 666 664 struct abm_config_table config; 665 + unsigned int set = params.set; 667 666 bool result = false; 668 667 uint32_t i, j = 0; 669 668 ··· 712 709 config.min_knee[i] = ram_table.min_knee[i]; 713 710 config.max_knee[i] = ram_table.max_knee[i]; 714 711 } 712 + 713 + if (params.backlight_ramping_override) { 714 + for (i = 0; i < NUM_AGGR_LEVEL; i++) { 715 + config.blRampReduction[i] = params.backlight_ramping_reduction; 716 + config.blRampStart[i] = params.backlight_ramping_start; 717 + } 718 + } else { 719 + for (i = 0; i < NUM_AGGR_LEVEL; i++) { 720 + config.blRampReduction[i] = abm_settings[set][i].blRampReduction; 721 + config.blRampStart[i] = abm_settings[set][i].blRampStart; 722 + } 723 + } 715 724 716 725 config.min_abm_backlight = ram_table.min_abm_backlight; 717 726
+1
drivers/gpu/drm/amd/display/modules/power/power_helpers.h
··· 39 39 struct dmcu_iram_parameters { 40 40 unsigned int *backlight_lut_array; 41 41 unsigned int backlight_lut_array_size; 42 + bool backlight_ramping_override; 42 43 unsigned int backlight_ramping_reduction; 43 44 unsigned int backlight_ramping_start; 44 45 unsigned int min_abm_backlight;
+1 -1
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
··· 30 30 #define SMU11_DRIVER_IF_VERSION_NV10 0x36 31 31 #define SMU11_DRIVER_IF_VERSION_NV12 0x36 32 32 #define SMU11_DRIVER_IF_VERSION_NV14 0x36 33 - #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3B 33 + #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D 34 34 #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xC 35 35 #define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02 36 36 #define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
+7 -2
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 724 724 725 725 static int vangogh_system_features_control(struct smu_context *smu, bool en) 726 726 { 727 - return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify, 728 - en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL); 727 + struct amdgpu_device *adev = smu->adev; 728 + 729 + if (adev->pm.fw_version >= 0x43f1700) 730 + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify, 731 + en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL); 732 + else 733 + return 0; 729 734 } 730 735 731 736 static const struct pptable_funcs vangogh_ppt_funcs = {
-1
drivers/gpu/drm/arm/display/komeda/komeda_dev.c
··· 152 152 ret = of_reserved_mem_device_init(dev); 153 153 if (ret && ret != -ENODEV) 154 154 return ret; 155 - ret = 0; 156 155 157 156 for_each_available_child_of_node(np, child) { 158 157 if (of_node_name_eq(child, "pipeline")) {
+2 -2
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
··· 81 81 82 82 drm_atomic_helper_commit_modeset_enables(dev, old_state); 83 83 84 - drm_atomic_helper_wait_for_flip_done(dev, old_state); 85 - 86 84 drm_atomic_helper_commit_hw_done(old_state); 85 + 86 + drm_atomic_helper_wait_for_flip_done(dev, old_state); 87 87 88 88 drm_atomic_helper_cleanup_planes(dev, old_state); 89 89 }
+2 -1
drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
··· 137 137 u32 comp_mask) 138 138 { 139 139 struct komeda_component *c = NULL; 140 + unsigned long comp_mask_local = (unsigned long)comp_mask; 140 141 int id; 141 142 142 - id = find_first_bit((unsigned long *)&comp_mask, 32); 143 + id = find_first_bit(&comp_mask_local, 32); 143 144 if (id < 32) 144 145 c = komeda_pipeline_get_component(pipe, id); 145 146
+2 -2
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
··· 704 704 cin->layer_alpha = dflow->layer_alpha; 705 705 706 706 old_st = komeda_component_get_old_state(&compiz->base, drm_st); 707 - WARN_ON(!old_st); 708 707 709 708 /* compare with old to check if this input has been changed */ 710 - if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin))) 709 + if (WARN_ON(!old_st) || 710 + memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin))) 711 711 c_st->changed_active_inputs |= BIT(idx); 712 712 713 713 komeda_component_add_input(c_st, &dflow->input, idx);
+14 -15
drivers/gpu/drm/ttm/ttm_pool.c
··· 239 239 return p; 240 240 } 241 241 242 - /* Count the number of pages available in a pool_type */ 243 - static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt) 244 - { 245 - unsigned int count = 0; 246 - struct page *p; 247 - 248 - spin_lock(&pt->lock); 249 - /* Only used for debugfs, the overhead doesn't matter */ 250 - list_for_each_entry(p, &pt->pages, lru) 251 - ++count; 252 - spin_unlock(&pt->lock); 253 - 254 - return count; 255 - } 256 - 257 242 /* Initialize and add a pool type to the global shrinker list */ 258 243 static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool, 259 244 enum ttm_caching caching, unsigned int order) ··· 528 543 EXPORT_SYMBOL(ttm_pool_fini); 529 544 530 545 #ifdef CONFIG_DEBUG_FS 546 + /* Count the number of pages available in a pool_type */ 547 + static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt) 548 + { 549 + unsigned int count = 0; 550 + struct page *p; 551 + 552 + spin_lock(&pt->lock); 553 + /* Only used for debugfs, the overhead doesn't matter */ 554 + list_for_each_entry(p, &pt->pages, lru) 555 + ++count; 556 + spin_unlock(&pt->lock); 557 + 558 + return count; 559 + } 531 560 532 561 /* Dump information about the different pool types */ 533 562 static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,