Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-2023-12-15' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"More regular fixes, amdgpu, i915, mediatek and nouveau are most of
them this week. Nothing too major, then a few misc bits and pieces in
core, panel and ivpu.

drm:
- fix uninit problems in crtc
- fix fd ownership check
- edid: add modes in fallback paths

panel:
- move LG panel into DSI yaml
- ltk050h3146w: set burst mode

mediatek:
- mtk_disp_gamma: Fix breakage due to merge issue
- fix kernel oops if no crtc is found
- Add spinlock for setting vblank event in atomic_begin
- Fix access violation in mtk_drm_crtc_dma_dev_get

i915:
- Fix selftest engine reset count storage for multi-tile
- Fix out-of-bounds reads for engine reset counts
- Fix ADL+ remapped stride with CCS
- Fix intel_atomic_setup_scalers() plane_state handling
- Fix ADL+ tiled plane stride when the POT stride is smaller than the original
- Fix eDP 1.4 rate select method link configuration

amdgpu:
- Fix suspend fix that got accidentally mangled last week
- Fix OD regression
- PSR fixes
- OLED Backlight regression fix
- JPEG 4.0.5 fix
- Misc display fixes
- SDMA 5.2 fix
- SDMA 2.4 regression fix
- GPUVM race fix

nouveau:
- fix gk20a instobj hierarchy
- fix headless iors inheritance regression

ivpu:
- fix WA initialisation"

* tag 'drm-fixes-2023-12-15' of git://anongit.freedesktop.org/drm/drm: (31 commits)
drm/nouveau/kms/nv50-: Don't allow inheritance of headless iors
drm/nouveau: Fixup gk20a instobj hierarchy
drm/amdgpu: warn when there are still mappings when a BO is destroyed v2
drm/amdgpu: fix tear down order in amdgpu_vm_pt_free
drm/amd: Fix a probing order problem on SDMA 2.4
drm/amdgpu/sdma5.2: add begin/end_use ring callbacks
drm/panel: ltk050h3146w: Set burst mode for ltk050h3148w
dt-bindings: panel-simple-dsi: move LG 5" HD TFT LCD panel into DSI yaml
drm/amd/display: Disable PSR-SU on Parade 0803 TCON again
drm/amd/display: Populate dtbclk from bounding box
drm/amd/display: Revert "Fix conversions between bytes and KB"
drm/amdgpu/jpeg: configure doorbell for each playback
drm/amd/display: Restore guard against default backlight value < 1 nit
drm/amd/display: fix hw rotated modes when PSR-SU is enabled
drm/amd/pm: fix pp_*clk_od typo
drm/amdgpu: fix buffer funcs setting order on suspend harder
drm/mediatek: Fix access violation in mtk_drm_crtc_dma_dev_get
drm/edid: also call add modes in EDID connector update fallback
drm/i915/edp: don't write to DP_LINK_BW_SET when using rate select
drm/i915: Fix ADL+ tiled plane stride when the POT stride is smaller than the original
...

+190 -83
+2
Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
··· 42 42 - lg,acx467akm-7 43 43 # LG Corporation 7" WXGA TFT LCD panel 44 44 - lg,ld070wx3-sl01 45 + # LG Corporation 5" HD TFT LCD panel 46 + - lg,lh500wx1-sd03 45 47 # One Stop Displays OSD101T2587-53TS 10.1" 1920x1200 panel 46 48 - osddisplays,osd101t2587-53ts 47 49 # Panasonic 10" WUXGA TFT LCD panel
-2
Documentation/devicetree/bindings/display/panel/panel-simple.yaml
··· 208 208 - lemaker,bl035-rgb-002 209 209 # LG 7" (800x480 pixels) TFT LCD panel 210 210 - lg,lb070wv8 211 - # LG Corporation 5" HD TFT LCD panel 212 - - lg,lh500wx1-sd03 213 211 # LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel 214 212 - lg,lp079qx1-sp0v 215 213 # LG 9.7" (2048x1536 pixels) TFT LCD panel
+9 -3
drivers/accel/ivpu/ivpu_hw_37xx.c
··· 53 53 54 54 #define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK) 55 55 56 - #define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \ 57 - (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \ 56 + #define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \ 58 57 (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, UFI_ERR))) 58 + 59 + #define BUTTRESS_ALL_IRQ_MASK (BUTTRESS_IRQ_MASK | \ 60 + (REG_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE))) 59 61 60 62 #define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK) 61 63 #define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1) ··· 76 74 vdev->wa.clear_runtime_mem = false; 77 75 vdev->wa.d3hot_after_power_off = true; 78 76 79 - if (ivpu_device_id(vdev) == PCI_DEVICE_ID_MTL && ivpu_revision(vdev) < 4) 77 + REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK); 78 + if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) { 79 + /* Writing 1s does not clear the interrupt status register */ 80 80 vdev->wa.interrupt_clear_with_0 = true; 81 + REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, 0x0); 82 + } 81 83 82 84 IVPU_PRINT_WA(punit_disabled); 83 85 IVPU_PRINT_WA(clear_runtime_mem);
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 4516 4516 4517 4517 amdgpu_ras_suspend(adev); 4518 4518 4519 - amdgpu_ttm_set_buffer_funcs_status(adev, false); 4520 - 4521 4519 amdgpu_device_ip_suspend_phase1(adev); 4522 4520 4523 4521 if (!adev->in_s0ix)
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 1343 1343 1344 1344 abo = ttm_to_amdgpu_bo(bo); 1345 1345 1346 + WARN_ON(abo->vm_bo); 1347 + 1346 1348 if (abo->kfd_bo) 1347 1349 amdgpu_amdkfd_release_notify(abo); 1348 1350
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
··· 642 642 643 643 if (!entry->bo) 644 644 return; 645 + 646 + entry->bo->vm_bo = NULL; 645 647 shadow = amdgpu_bo_shadowed(entry->bo); 646 648 if (shadow) { 647 649 ttm_bo_set_bulk_move(&shadow->tbo, NULL); 648 650 amdgpu_bo_unref(&shadow); 649 651 } 650 652 ttm_bo_set_bulk_move(&entry->bo->tbo, NULL); 651 - entry->bo->vm_bo = NULL; 652 653 653 654 spin_lock(&entry->vm->status_lock); 654 655 list_del(&entry->vm_status);
+8 -7
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
··· 155 155 struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec; 156 156 int r; 157 157 158 - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, 159 - (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); 160 - 161 - WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL, 162 - ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | 163 - VCN_JPEG_DB_CTRL__EN_MASK); 164 - 165 158 r = amdgpu_ring_test_helper(ring); 166 159 if (r) 167 160 return r; ··· 328 335 329 336 if (adev->pm.dpm_enabled) 330 337 amdgpu_dpm_enable_jpeg(adev, true); 338 + 339 + /* doorbell programming is done for every playback */ 340 + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, 341 + (adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0); 342 + 343 + WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL, 344 + ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT | 345 + VCN_JPEG_DB_CTRL__EN_MASK); 331 346 332 347 /* disable power gating */ 333 348 r = jpeg_v4_0_5_disable_static_power_gating(adev);
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
··· 813 813 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 814 814 int r; 815 815 816 + adev->sdma.num_instances = SDMA_MAX_INSTANCE; 817 + 816 818 r = sdma_v2_4_init_microcode(adev); 817 819 if (r) 818 820 return r; 819 - 820 - adev->sdma.num_instances = SDMA_MAX_INSTANCE; 821 821 822 822 sdma_v2_4_set_ring_funcs(adev); 823 823 sdma_v2_4_set_buffer_funcs(adev);
+28
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
··· 1643 1643 *flags |= AMD_CG_SUPPORT_SDMA_LS; 1644 1644 } 1645 1645 1646 + static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring) 1647 + { 1648 + struct amdgpu_device *adev = ring->adev; 1649 + 1650 + /* SDMA 5.2.3 (RMB) FW doesn't seem to properly 1651 + * disallow GFXOFF in some cases leading to 1652 + * hangs in SDMA. Disallow GFXOFF while SDMA is active. 1653 + * We can probably just limit this to 5.2.3, 1654 + * but it shouldn't hurt for other parts since 1655 + * this GFXOFF will be disallowed anyway when SDMA is 1656 + * active, this just makes it explicit. 1657 + */ 1658 + amdgpu_gfx_off_ctrl(adev, false); 1659 + } 1660 + 1661 + static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring) 1662 + { 1663 + struct amdgpu_device *adev = ring->adev; 1664 + 1665 + /* SDMA 5.2.3 (RMB) FW doesn't seem to properly 1666 + * disallow GFXOFF in some cases leading to 1667 + * hangs in SDMA. Allow GFXOFF when SDMA is complete. 1668 + */ 1669 + amdgpu_gfx_off_ctrl(adev, true); 1670 + } 1671 + 1646 1672 const struct amd_ip_funcs sdma_v5_2_ip_funcs = { 1647 1673 .name = "sdma_v5_2", 1648 1674 .early_init = sdma_v5_2_early_init, ··· 1716 1690 .test_ib = sdma_v5_2_ring_test_ib, 1717 1691 .insert_nop = sdma_v5_2_ring_insert_nop, 1718 1692 .pad_ib = sdma_v5_2_ring_pad_ib, 1693 + .begin_use = sdma_v5_2_ring_begin_use, 1694 + .end_use = sdma_v5_2_ring_end_use, 1719 1695 .emit_wreg = sdma_v5_2_ring_emit_wreg, 1720 1696 .emit_reg_wait = sdma_v5_2_ring_emit_reg_wait, 1721 1697 .emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
+3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 5182 5182 if (plane->type == DRM_PLANE_TYPE_CURSOR) 5183 5183 return; 5184 5184 5185 + if (new_plane_state->rotation != DRM_MODE_ROTATE_0) 5186 + goto ffu; 5187 + 5185 5188 num_clips = drm_plane_get_damage_clips_count(new_plane_state); 5186 5189 clips = drm_plane_get_damage_clips(new_plane_state); 5187 5190
+1
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
··· 465 465 struct fixed31_32 v_scale_ratio; 466 466 enum dc_rotation_angle rotation; 467 467 bool mirror; 468 + struct dc_stream_state *stream; 468 469 }; 469 470 470 471 /* IPP related types */
+10 -2
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
··· 1077 1077 if (src_y_offset < 0) 1078 1078 src_y_offset = 0; 1079 1079 /* Save necessary cursor info x, y position. w, h is saved in attribute func. */ 1080 - hubp->cur_rect.x = src_x_offset + param->viewport.x; 1081 - hubp->cur_rect.y = src_y_offset + param->viewport.y; 1080 + if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && 1081 + param->rotation != ROTATION_ANGLE_0) { 1082 + hubp->cur_rect.x = 0; 1083 + hubp->cur_rect.y = 0; 1084 + hubp->cur_rect.w = param->stream->timing.h_addressable; 1085 + hubp->cur_rect.h = param->stream->timing.v_addressable; 1086 + } else { 1087 + hubp->cur_rect.x = src_x_offset + param->viewport.x; 1088 + hubp->cur_rect.y = src_y_offset + param->viewport.y; 1089 + } 1082 1090 } 1083 1091 1084 1092 void hubp2_clk_cntl(struct hubp *hubp, bool enable)
+9 -5
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
··· 124 124 .phyclk_mhz = 600.0, 125 125 .phyclk_d18_mhz = 667.0, 126 126 .dscclk_mhz = 186.0, 127 - .dtbclk_mhz = 625.0, 127 + .dtbclk_mhz = 600.0, 128 128 }, 129 129 { 130 130 .state = 1, ··· 133 133 .phyclk_mhz = 810.0, 134 134 .phyclk_d18_mhz = 667.0, 135 135 .dscclk_mhz = 209.0, 136 - .dtbclk_mhz = 625.0, 136 + .dtbclk_mhz = 600.0, 137 137 }, 138 138 { 139 139 .state = 2, ··· 142 142 .phyclk_mhz = 810.0, 143 143 .phyclk_d18_mhz = 667.0, 144 144 .dscclk_mhz = 209.0, 145 - .dtbclk_mhz = 625.0, 145 + .dtbclk_mhz = 600.0, 146 146 }, 147 147 { 148 148 .state = 3, ··· 151 151 .phyclk_mhz = 810.0, 152 152 .phyclk_d18_mhz = 667.0, 153 153 .dscclk_mhz = 371.0, 154 - .dtbclk_mhz = 625.0, 154 + .dtbclk_mhz = 600.0, 155 155 }, 156 156 { 157 157 .state = 4, ··· 160 160 .phyclk_mhz = 810.0, 161 161 .phyclk_d18_mhz = 667.0, 162 162 .dscclk_mhz = 417.0, 163 - .dtbclk_mhz = 625.0, 163 + .dtbclk_mhz = 600.0, 164 164 }, 165 165 }, 166 166 .num_states = 5, ··· 348 348 clock_limits[i].socclk_mhz; 349 349 dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = 350 350 clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio; 351 + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz = 352 + clock_limits[i].dtbclk_mhz; 351 353 dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = 352 354 clk_table->num_entries; 353 355 dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels = ··· 361 359 dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels = 362 360 clk_table->num_entries; 363 361 dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels = 362 + clk_table->num_entries; 363 + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels = 364 364 clk_table->num_entries; 365 365 } 366 366 }
+8 -8
drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
··· 6329 6329 mode_lib->ms.NoOfDPPThisState, 6330 6330 mode_lib->ms.dpte_group_bytes, 6331 6331 s->HostVMInefficiencyFactor, 6332 - mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024, 6332 + mode_lib->ms.soc.hostvm_min_page_size_kbytes, 6333 6333 mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels); 6334 6334 6335 6335 s->NextMaxVStartup = s->MaxVStartupAllPlanes[j]; ··· 6542 6542 mode_lib->ms.cache_display_cfg.plane.HostVMEnable, 6543 6543 mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels, 6544 6544 mode_lib->ms.cache_display_cfg.plane.GPUVMEnable, 6545 - mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024, 6545 + mode_lib->ms.soc.hostvm_min_page_size_kbytes, 6546 6546 mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k], 6547 6547 mode_lib->ms.MetaRowBytes[j][k], 6548 6548 mode_lib->ms.DPTEBytesPerRow[j][k], ··· 7687 7687 CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; 7688 7688 CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels; 7689 7689 CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes; 7690 - CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; 7690 + CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes; 7691 7691 CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn; 7692 7692 CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode; 7693 7693 CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState; ··· 7957 7957 UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels; 7958 7958 UseMinimumDCFCLK_params->HostVMEnable = 
mode_lib->ms.cache_display_cfg.plane.HostVMEnable; 7959 7959 UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes; 7960 - UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; 7960 + UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes; 7961 7961 UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; 7962 7962 UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled; 7963 7963 UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal; ··· 8699 8699 CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; 8700 8700 CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels; 8701 8701 CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes; 8702 - CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; 8702 + CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes; 8703 8703 CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn; 8704 8704 CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode; 8705 8705 CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0]; ··· 8805 8805 mode_lib->ms.cache_display_cfg.hw.DPPPerSurface, 8806 8806 locals->dpte_group_bytes, 8807 8807 s->HostVMInefficiencyFactor, 8808 - mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024, 8808 + mode_lib->ms.soc.hostvm_min_page_size_kbytes, 8809 8809 mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels); 8810 8810 8811 8811 locals->TCalc = 
24.0 / locals->DCFCLKDeepSleep; ··· 8995 8995 CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable; 8996 8996 CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable; 8997 8997 CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels; 8998 - CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024; 8998 + CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes; 8999 8999 CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k]; 9000 9000 CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled; 9001 9001 CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k]; ··· 9240 9240 mode_lib->ms.cache_display_cfg.plane.HostVMEnable, 9241 9241 mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels, 9242 9242 mode_lib->ms.cache_display_cfg.plane.GPUVMEnable, 9243 - mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024, 9243 + mode_lib->ms.soc.hostvm_min_page_size_kbytes, 9244 9244 locals->PDEAndMetaPTEBytesFrame[k], 9245 9245 locals->MetaRowByte[k], 9246 9246 locals->PixelPTEBytesPerRow[k],
+3 -2
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
··· 423 423 } 424 424 425 425 for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels; i++) { 426 - p->in_states->state_array[i].dtbclk_mhz = 427 - dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz; 426 + if (dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz > 0) 427 + p->in_states->state_array[i].dtbclk_mhz = 428 + dml2->config.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz; 428 429 } 429 430 430 431 for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels; i++) {
+2 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
··· 3417 3417 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz, 3418 3418 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert, 3419 3419 .rotation = pipe_ctx->plane_state->rotation, 3420 - .mirror = pipe_ctx->plane_state->horizontal_mirror 3420 + .mirror = pipe_ctx->plane_state->horizontal_mirror, 3421 + .stream = pipe_ctx->stream, 3421 3422 }; 3422 3423 bool pipe_split_on = false; 3423 3424 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
+2
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
··· 839 839 ((dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x08) || 840 840 (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x07))) 841 841 isPSRSUSupported = false; 842 + else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03) 843 + isPSRSUSupported = false; 842 844 else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1) 843 845 isPSRSUSupported = true; 844 846 }
+2 -2
drivers/gpu/drm/amd/pm/amdgpu_pm.c
··· 2198 2198 } else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) { 2199 2199 if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE) 2200 2200 *states = ATTR_STATE_UNSUPPORTED; 2201 - } else if (DEVICE_ATTR_IS(pp_dpm_mclk_od)) { 2201 + } else if (DEVICE_ATTR_IS(pp_mclk_od)) { 2202 2202 if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP) 2203 2203 *states = ATTR_STATE_UNSUPPORTED; 2204 - } else if (DEVICE_ATTR_IS(pp_dpm_sclk_od)) { 2204 + } else if (DEVICE_ATTR_IS(pp_sclk_od)) { 2205 2205 if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP) 2206 2206 *states = ATTR_STATE_UNSUPPORTED; 2207 2207 } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
+1 -1
drivers/gpu/drm/drm_auth.c
··· 236 236 drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv) 237 237 { 238 238 if (file_priv->was_master && 239 - rcu_access_pointer(file_priv->pid) == task_pid(current)) 239 + rcu_access_pointer(file_priv->pid) == task_tgid(current)) 240 240 return 0; 241 241 242 242 if (!capable(CAP_SYS_ADMIN))
+4 -4
drivers/gpu/drm/drm_crtc.c
··· 715 715 struct drm_mode_set set; 716 716 uint32_t __user *set_connectors_ptr; 717 717 struct drm_modeset_acquire_ctx ctx; 718 - int ret; 719 - int i; 718 + int ret, i, num_connectors = 0; 720 719 721 720 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 722 721 return -EOPNOTSUPP; ··· 870 871 connector->name); 871 872 872 873 connector_set[i] = connector; 874 + num_connectors++; 873 875 } 874 876 } 875 877 ··· 879 879 set.y = crtc_req->y; 880 880 set.mode = mode; 881 881 set.connectors = connector_set; 882 - set.num_connectors = crtc_req->count_connectors; 882 + set.num_connectors = num_connectors; 883 883 set.fb = fb; 884 884 885 885 if (drm_drv_uses_atomic_modeset(dev)) ··· 892 892 drm_framebuffer_put(fb); 893 893 894 894 if (connector_set) { 895 - for (i = 0; i < crtc_req->count_connectors; i++) { 895 + for (i = 0; i < num_connectors; i++) { 896 896 if (connector_set[i]) 897 897 drm_connector_put(connector_set[i]); 898 898 }
+2 -1
drivers/gpu/drm/drm_edid.c
··· 2309 2309 2310 2310 override = drm_edid_override_get(connector); 2311 2311 if (override) { 2312 - num_modes = drm_edid_connector_update(connector, override); 2312 + if (drm_edid_connector_update(connector, override) == 0) 2313 + num_modes = drm_edid_connector_add_modes(connector); 2313 2314 2314 2315 drm_edid_free(override); 2315 2316
+16 -3
drivers/gpu/drm/i915/display/intel_fb.c
··· 1374 1374 struct drm_i915_private *i915 = to_i915(fb->base.dev); 1375 1375 unsigned int stride_tiles; 1376 1376 1377 - if (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) 1377 + if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) && 1378 + src_stride_tiles < dst_stride_tiles) 1378 1379 stride_tiles = src_stride_tiles; 1379 1380 else 1380 1381 stride_tiles = dst_stride_tiles; ··· 1502 1501 1503 1502 size += remap_info->size; 1504 1503 } else { 1505 - unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane, 1506 - remap_info->width); 1504 + unsigned int dst_stride; 1505 + 1506 + /* 1507 + * The hardware automagically calculates the CCS AUX surface 1508 + * stride from the main surface stride so can't really remap a 1509 + * smaller subset (unless we'd remap in whole AUX page units). 1510 + */ 1511 + if (intel_fb_needs_pot_stride_remap(fb) && 1512 + intel_fb_is_ccs_modifier(fb->base.modifier)) 1513 + dst_stride = remap_info->src_stride; 1514 + else 1515 + dst_stride = remap_info->width; 1516 + 1517 + dst_stride = plane_view_dst_stride_tiles(fb, color_plane, dst_stride); 1507 1518 1508 1519 assign_chk_ovf(i915, remap_info->dst_stride, dst_stride); 1509 1520 color_plane_info->mapping_stride = dst_stride *
+1 -1
drivers/gpu/drm/i915/display/skl_scaler.c
··· 504 504 { 505 505 struct drm_plane *plane = NULL; 506 506 struct intel_plane *intel_plane; 507 - struct intel_plane_state *plane_state = NULL; 508 507 struct intel_crtc_scaler_state *scaler_state = 509 508 &crtc_state->scaler_state; 510 509 struct drm_atomic_state *drm_state = crtc_state->uapi.state; ··· 535 536 536 537 /* walkthrough scaler_users bits and start assigning scalers */ 537 538 for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) { 539 + struct intel_plane_state *plane_state = NULL; 538 540 int *scaler_id; 539 541 const char *name; 540 542 int idx, ret;
+1 -1
drivers/gpu/drm/i915/gt/intel_reset.c
··· 1293 1293 if (msg) 1294 1294 drm_notice(&engine->i915->drm, 1295 1295 "Resetting %s for %s\n", engine->name, msg); 1296 - atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]); 1296 + i915_increase_reset_engine_count(&engine->i915->gpu_error, engine); 1297 1297 1298 1298 ret = intel_gt_reset_engine(engine); 1299 1299 if (ret) {
+3 -2
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
··· 5001 5001 if (match) { 5002 5002 intel_engine_set_hung_context(e, ce); 5003 5003 engine_mask |= e->mask; 5004 - atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]); 5004 + i915_increase_reset_engine_count(&i915->gpu_error, 5005 + e); 5005 5006 } 5006 5007 } 5007 5008 ··· 5014 5013 } else { 5015 5014 intel_engine_set_hung_context(ce->engine, ce); 5016 5015 engine_mask = ce->engine->mask; 5017 - atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]); 5016 + i915_increase_reset_engine_count(&i915->gpu_error, ce->engine); 5018 5017 } 5019 5018 5020 5019 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+10 -2
drivers/gpu/drm/i915/i915_gpu_error.h
··· 16 16 17 17 #include "display/intel_display_device.h" 18 18 #include "gt/intel_engine.h" 19 + #include "gt/intel_engine_types.h" 19 20 #include "gt/intel_gt_types.h" 20 21 #include "gt/uc/intel_uc_fw.h" 21 22 ··· 233 232 atomic_t reset_count; 234 233 235 234 /** Number of times an engine has been reset */ 236 - atomic_t reset_engine_count[I915_NUM_ENGINES]; 235 + atomic_t reset_engine_count[MAX_ENGINE_CLASS]; 237 236 }; 238 237 239 238 struct drm_i915_error_state_buf { ··· 256 255 static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, 257 256 const struct intel_engine_cs *engine) 258 257 { 259 - return atomic_read(&error->reset_engine_count[engine->uabi_class]); 258 + return atomic_read(&error->reset_engine_count[engine->class]); 259 + } 260 + 261 + static inline void 262 + i915_increase_reset_engine_count(struct i915_gpu_error *error, 263 + const struct intel_engine_cs *engine) 264 + { 265 + atomic_inc(&error->reset_engine_count[engine->class]); 260 266 } 261 267 262 268 #define CORE_DUMP_FLAG_NONE 0x0
+5 -4
drivers/gpu/drm/i915/selftests/igt_live_test.c
··· 37 37 } 38 38 39 39 for_each_engine(engine, gt, id) 40 - t->reset_engine[id] = 41 - i915_reset_engine_count(&i915->gpu_error, engine); 40 + t->reset_engine[i][id] = 41 + i915_reset_engine_count(&i915->gpu_error, 42 + engine); 42 43 } 43 44 44 45 t->reset_global = i915_reset_count(&i915->gpu_error); ··· 67 66 68 67 for_each_gt(gt, i915, i) { 69 68 for_each_engine(engine, gt, id) { 70 - if (t->reset_engine[id] == 69 + if (t->reset_engine[i][id] == 71 70 i915_reset_engine_count(&i915->gpu_error, engine)) 72 71 continue; 73 72 74 73 gt_err(gt, "%s(%s): engine '%s' was reset %d times!\n", 75 74 t->func, t->name, engine->name, 76 75 i915_reset_engine_count(&i915->gpu_error, engine) - 77 - t->reset_engine[id]); 76 + t->reset_engine[i][id]); 78 77 return -EIO; 79 78 } 80 79 }
+2 -1
drivers/gpu/drm/i915/selftests/igt_live_test.h
··· 7 7 #ifndef IGT_LIVE_TEST_H 8 8 #define IGT_LIVE_TEST_H 9 9 10 + #include "gt/intel_gt_defines.h" /* for I915_MAX_GT */ 10 11 #include "gt/intel_engine.h" /* for I915_NUM_ENGINES */ 11 12 12 13 struct drm_i915_private; ··· 18 17 const char *name; 19 18 20 19 unsigned int reset_global; 21 - unsigned int reset_engine[I915_NUM_ENGINES]; 20 + unsigned int reset_engine[I915_MAX_GT][I915_NUM_ENGINES]; 22 21 }; 23 22 24 23 /*
+1 -1
drivers/gpu/drm/mediatek/mtk_disp_gamma.c
··· 203 203 /* Disable RELAY mode to pass the processed image */ 204 204 cfg_val &= ~GAMMA_RELAY_MODE; 205 205 206 - cfg_val = readl(gamma->regs + DISP_GAMMA_CFG); 206 + writel(cfg_val, gamma->regs + DISP_GAMMA_CFG); 207 207 } 208 208 209 209 void mtk_gamma_config(struct device *dev, unsigned int w,
+13 -1
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
··· 788 788 crtc); 789 789 struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state); 790 790 struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 791 + unsigned long flags; 791 792 792 793 if (mtk_crtc->event && mtk_crtc_state->base.event) 793 794 DRM_ERROR("new event while there is still a pending event\n"); ··· 796 795 if (mtk_crtc_state->base.event) { 797 796 mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc); 798 797 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 798 + 799 + spin_lock_irqsave(&crtc->dev->event_lock, flags); 799 800 mtk_crtc->event = mtk_crtc_state->base.event; 801 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 802 + 800 803 mtk_crtc_state->base.event = NULL; 801 804 } 802 805 } ··· 926 921 927 922 struct device *mtk_drm_crtc_dma_dev_get(struct drm_crtc *crtc) 928 923 { 929 - struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); 924 + struct mtk_drm_crtc *mtk_crtc = NULL; 925 + 926 + if (!crtc) 927 + return NULL; 928 + 929 + mtk_crtc = to_mtk_crtc(crtc); 930 + if (!mtk_crtc) 931 + return NULL; 930 932 931 933 return mtk_crtc->dma_dev; 932 934 }
+4 -1
drivers/gpu/drm/mediatek/mtk_drm_drv.c
··· 443 443 struct mtk_drm_private *private = drm->dev_private; 444 444 struct mtk_drm_private *priv_n; 445 445 struct device *dma_dev = NULL; 446 + struct drm_crtc *crtc; 446 447 int ret, i, j; 447 448 448 449 if (drm_firmware_drivers_only()) ··· 520 519 } 521 520 522 521 /* Use OVL device for all DMA memory allocations */ 523 - dma_dev = mtk_drm_crtc_dma_dev_get(drm_crtc_from_index(drm, 0)); 522 + crtc = drm_crtc_from_index(drm, 0); 523 + if (crtc) 524 + dma_dev = mtk_drm_crtc_dma_dev_get(crtc); 524 525 if (!dma_dev) { 525 526 ret = -ENODEV; 526 527 dev_err(drm->dev, "Need at least one OVL device\n");
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
··· 385 385 386 386 /* Ensure an ior is hooked up to this outp already */ 387 387 ior = outp->func->inherit(outp); 388 - if (!ior) 388 + if (!ior || !ior->arm.head) 389 389 return -ENODEV; 390 390 391 391 /* With iors, there will be a separate output path for each type of connector - and all of
+9 -9
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
··· 49 49 #include <subdev/mmu.h> 50 50 51 51 struct gk20a_instobj { 52 - struct nvkm_memory memory; 52 + struct nvkm_instobj base; 53 53 struct nvkm_mm_node *mn; 54 54 struct gk20a_instmem *imem; 55 55 56 56 /* CPU mapping */ 57 57 u32 *vaddr; 58 58 }; 59 - #define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory) 59 + #define gk20a_instobj(p) container_of((p), struct gk20a_instobj, base.memory) 60 60 61 61 /* 62 62 * Used for objects allocated using the DMA API ··· 148 148 list_del(&obj->vaddr_node); 149 149 vunmap(obj->base.vaddr); 150 150 obj->base.vaddr = NULL; 151 - imem->vaddr_use -= nvkm_memory_size(&obj->base.memory); 151 + imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory); 152 152 nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use, 153 153 imem->vaddr_max); 154 154 } ··· 283 283 { 284 284 struct gk20a_instobj *node = gk20a_instobj(memory); 285 285 struct nvkm_vmm_map map = { 286 - .memory = &node->memory, 286 + .memory = &node->base.memory, 287 287 .offset = offset, 288 288 .mem = node->mn, 289 289 }; ··· 391 391 return -ENOMEM; 392 392 *_node = &node->base; 393 393 394 - nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory); 395 - node->base.memory.ptrs = &gk20a_instobj_ptrs; 394 + nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.base.memory); 395 + node->base.base.memory.ptrs = &gk20a_instobj_ptrs; 396 396 397 397 node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, 398 398 &node->handle, GFP_KERNEL, ··· 438 438 *_node = &node->base; 439 439 node->dma_addrs = (void *)(node->pages + npages); 440 440 441 - nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory); 442 - node->base.memory.ptrs = &gk20a_instobj_ptrs; 441 + nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.base.memory); 442 + node->base.base.memory.ptrs = &gk20a_instobj_ptrs; 443 443 444 444 /* Allocate backing memory */ 445 445 for (i = 0; i < npages; i++) { ··· 533 533 else 534 534 ret = 
gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT, 535 535 align, &node); 536 - *pmemory = node ? &node->memory : NULL; 536 + *pmemory = node ? &node->base.memory : NULL; 537 537 if (ret) 538 538 return ret; 539 539
+1 -1
drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
··· 326 326 static const struct ltk050h3146w_desc ltk050h3148w_data = { 327 327 .mode = &ltk050h3148w_mode, 328 328 .init = ltk050h3148w_init_sequence, 329 - .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE, 329 + .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_BURST, 330 330 }; 331 331 332 332 static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)