Merge tag 'drm-fixes-2026-01-23' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"Probably a good thing you decided to do an rc8 in this round. Nothing
stands out, but xe/amdgpu and mediatek all have a bunch of fixes, and
then there are a few other single patches. Hopefully next week is
calmer for release.

xe:
- Disallow bind-queue sharing across multiple VMs
- Fix xe userptr in the absence of CONFIG_DEVICE_PRIVATE
- Fix a missed page count update
- Fix a confused argument to alloc_workqueue()
- Kernel-doc fixes
- Disable a workaround on VFs
- Fix a job lock assert
- Update wedged.mode only after successful reset policy change
- Select CONFIG_DEVICE_PRIVATE when DRM_XE_GPUSVM is selected

amdgpu:
- fix color pipeline string leak
- GC 12 fix
- Misc error path fixes
- DC analog fix
- SMU 6 fixes
- TLB flush fix
- DC idle optimization fix

amdkfd:
- GC 11 cooperative launch fix

imagination:
- sync wait for logtype update completion to ensure FW trace
is available

bridge/synopsys:
- Fix error paths in dw_dp_bind

nouveau:
- Add and implement missing DCB connector types, and improve
unknown connector handling
- Set missing atomic function ops

intel:
- place 3D LUT at the correct point in the pipeline
- fix color pipeline string leak

vkms:
- fix color pipeline string leak

mediatek:
- Fix platform_get_irq() error checking
- HDMI DDC v2 driver fixes
- dpi: Find next bridge during probe
- mtk_gem: Partial refactor and use drm_gem_dma_object
- dt-bindings: Fix typo 'hardwares' to 'hardware'"

* tag 'drm-fixes-2026-01-23' of https://gitlab.freedesktop.org/drm/kernel: (38 commits)
Revert "drm/amd/display: pause the workload setting in dm"
drm/xe: Select CONFIG_DEVICE_PRIVATE when DRM_XE_GPUSVM is selected
drm, drm/xe: Fix xe userptr in the absence of CONFIG_DEVICE_PRIVATE
drm/i915/display: Fix color pipeline enum name leak
drm/vkms: Fix color pipeline enum name leak
drm/amd/display: Fix color pipeline enum name leak
drm/i915/color: Place 3D LUT after CSC in plane color pipeline
drm/nouveau/disp: Set drm_mode_config_funcs.atomic_(check|commit)
drm/nouveau: implement missing DCB connector types; gracefully handle unknown connectors
drm/nouveau: add missing DCB connector types
drm/amdgpu: fix type for wptr in ring backup
drm/amdgpu: Fix validating flush_gpu_tlb_pasid()
drm/amd/pm: Workaround SI powertune issue on Radeon 430 (v2)
drm/amd/pm: Don't clear SI SMC table when setting power limit
drm/amd/pm: Fix si_dpm mmCG_THERMAL_INT setting
drm/xe: Update wedged.mode only after successful reset policy change
drm/xe/migrate: fix job lock assert
drm/xe/uapi: disallow bind queue sharing
drm/amd/display: Only poll analog connectors
drm/amdgpu: fix error handling in ib_schedule()
...

+561 -405
+1 -1
Documentation/devicetree/bindings/display/mediatek/mediatek,dp.yaml
···
    - Jitao shi <jitao.shi@mediatek.com>
 
 description: |
-  MediaTek DP and eDP are different hardwares and there are some features
+  MediaTek DP and eDP are different hardware and there are some features
   which are not supported for eDP. For example, audio is not supported for
   eDP. Therefore, we need to use two different compatibles to describe them.
   In addition, We just need to enable the power domain of DP, so the clock
+1 -1
drivers/gpu/drm/Kconfig
···
 
 config DRM_GPUSVM
 	tristate
-	depends on DRM && DEVICE_PRIVATE
+	depends on DRM
 	select HMM_MIRROR
 	select MMU_NOTIFIER
 	help
+3 -1
drivers/gpu/drm/Makefile
···
 obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
 
 drm_gpusvm_helper-y := \
-	drm_gpusvm.o\
+	drm_gpusvm.o
+drm_gpusvm_helper-$(CONFIG_ZONE_DEVICE) += \
 	drm_pagemap.o
+
 obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
 
 obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
···
 }
 
 static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
-						   u64 start_wptr, u32 end_wptr)
+						   u64 start_wptr, u64 end_wptr)
 {
 	unsigned int first_idx = start_wptr & ring->buf_mask;
 	unsigned int last_idx = end_wptr & ring->buf_mask;
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
···
 
 	if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
 
-		if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid)
-			return 0;
+		if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid) {
+			r = 0;
+			goto error_unlock_reset;
+		}
 
 		if (adev->gmc.flush_tlb_needs_extra_type_2)
 			adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
···
 		if (job && job->vmid)
 			amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
 		amdgpu_ring_undo(ring);
-		return r;
+		goto free_fence;
 	}
 	*f = &af->base;
 	/* get a ref for the job */
+5 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
···
 	if (!entity)
 		return 0;
 
-	return drm_sched_job_init(&(*job)->base, entity, 1, owner,
-				  drm_client_id);
+	r = drm_sched_job_init(&(*job)->base, entity, 1, owner, drm_client_id);
+	if (!r)
+		return 0;
+
+	kfree((*job)->hw_vm_fence);
 
 err_fence:
 	kfree((*job)->hw_fence);
-12
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
···
 				  u32 sh_num, u32 instance, int xcc_id);
 static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);
 
-static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
 static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
 				     uint32_t val);
 static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
···
 	return r;
 }
 
-static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring,
-					   bool start,
-					   bool secure)
-{
-	uint32_t v = secure ? FRAME_TMZ : 0;
-
-	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
-}
-
 static void gfx_v12_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
 				     uint32_t reg_val_offs)
 {
···
 	.emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl,
 	.init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec,
 	.preempt_ib = gfx_v12_0_ring_preempt_ib,
-	.emit_frame_cntl = gfx_v12_0_ring_emit_frame_cntl,
 	.emit_wreg = gfx_v12_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
+1 -2
drivers/gpu/drm/amd/amdkfd/kfd_debug.h
···
 	     && dev->kfd->mec2_fw_version < 0x1b6) ||
 	    (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1)
 	     && dev->kfd->mec2_fw_version < 0x30) ||
-	    (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
-	     KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0)))
+	    kfd_dbg_has_cwsr_workaround(dev))
 		return false;
 
 	/* Assume debugging and cooperative launch supported otherwise. */
+3 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
···
 		goto cleanup;
 
 	list->type = ops[i]->base.id;
-	list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id);
 
 	i++;
 
···
 		goto cleanup;
 
 	drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+	list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[0]->base.id);
+
 	return 0;
 
 cleanup:
-11
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
···
 	struct vblank_control_work *vblank_work =
 		container_of(work, struct vblank_control_work, work);
 	struct amdgpu_display_manager *dm = vblank_work->dm;
-	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
-	int r;
 
 	mutex_lock(&dm->dc_lock);
 
···
 
 	if (dm->active_vblank_irq_count == 0) {
 		dc_post_update_surfaces_to_stream(dm->dc);
-
-		r = amdgpu_dpm_pause_power_profile(adev, true);
-		if (r)
-			dev_warn(adev->dev, "failed to set default power profile mode\n");
-
 		dc_allow_idle_optimizations(dm->dc, true);
-
-		r = amdgpu_dpm_pause_power_profile(adev, false);
-		if (r)
-			dev_warn(adev->dev, "failed to restore the power profile mode\n");
 	}
 
 	mutex_unlock(&dm->dc_lock);
+8 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
···
 		struct amdgpu_dm_connector *amdgpu_dm_connector;
 		const struct dc_link *dc_link;
 
-		use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD;
-
 		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
 			continue;
 
 		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+		/*
+		 * Analog connectors may be hot-plugged unlike other connector
+		 * types that don't support HPD. Only poll analog connectors.
+		 */
+		use_polling |=
+			amdgpu_dm_connector->dc_link &&
+			dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id);
 
 		dc_link = amdgpu_dm_connector->dc_link;
 
+9 -4
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
···
 static int
 dm_plane_init_colorops(struct drm_plane *plane)
 {
-	struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+	struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES] = {};
 	struct drm_device *dev = plane->dev;
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct dc *dc = adev->dm.dc;
 	int len = 0;
-	int ret;
+	int ret = 0;
+	int i;
 
 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
 		return 0;
···
 		if (ret) {
 			drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n",
 				plane->base.id, ret);
-			return ret;
+			goto out;
 		}
 		len++;
 
···
 		drm_plane_create_color_pipeline_property(plane, pipelines, len);
 	}
 
-	return 0;
+out:
+	for (i = 0; i < len; i++)
+		kfree(pipelines[i].name);
+
+	return ret;
 }
 #endif
 
+16 -15
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 2273 2273 if (scaling_factor == 0) 2274 2274 return -EINVAL; 2275 2275 2276 - memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); 2277 - 2278 2276 ret = si_calculate_adjusted_tdp_limits(adev, 2279 2277 false, /* ??? */ 2280 2278 adev->pm.dpm.tdp_adjustment, ··· 2280 2282 &near_tdp_limit); 2281 2283 if (ret) 2282 2284 return ret; 2285 + 2286 + if (adev->pdev->device == 0x6611 && adev->pdev->revision == 0x87) { 2287 + /* Workaround buggy powertune on Radeon 430 and 520. */ 2288 + tdp_limit = 32; 2289 + near_tdp_limit = 28; 2290 + } 2283 2291 2284 2292 smc_table->dpm2Params.TDPLimit = 2285 2293 cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000); ··· 2332 2328 2333 2329 if (ni_pi->enable_power_containment) { 2334 2330 SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable; 2335 - u32 scaling_factor = si_get_smc_power_scaling_factor(adev); 2336 2331 int ret; 2337 - 2338 - memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); 2339 - 2340 - smc_table->dpm2Params.NearTDPLimit = 2341 - cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000); 2342 - smc_table->dpm2Params.SafePowerLimit = 2343 - cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000); 2344 2332 2345 2333 ret = amdgpu_si_copy_bytes_to_smc(adev, 2346 2334 (si_pi->state_table_start + ··· 3469 3473 (adev->pdev->revision == 0x80) || 3470 3474 (adev->pdev->revision == 0x81) || 3471 3475 (adev->pdev->revision == 0x83) || 3472 - (adev->pdev->revision == 0x87) || 3476 + (adev->pdev->revision == 0x87 && 3477 + adev->pdev->device != 0x6611) || 3473 3478 (adev->pdev->device == 0x6604) || 3474 3479 (adev->pdev->device == 0x6605)) { 3475 3480 max_sclk = 75000; 3481 + } else if (adev->pdev->revision == 0x87 && 3482 + adev->pdev->device == 0x6611) { 3483 + /* Radeon 430 and 520 */ 3484 + max_sclk = 78000; 3476 3485 } 3477 3486 } 3478 3487 ··· 7601 7600 case AMDGPU_IRQ_STATE_DISABLE: 7602 7601 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7603 7602 cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK; 7604 - WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); 7603 + WREG32(mmCG_THERMAL_INT, cg_thermal_int); 7605 7604 break; 7606 7605 case AMDGPU_IRQ_STATE_ENABLE: 7607 7606 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7608 7607 cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK; 7609 - WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); 7608 + WREG32(mmCG_THERMAL_INT, cg_thermal_int); 7610 7609 break; 7611 7610 default: 7612 7611 break; ··· 7618 7617 case AMDGPU_IRQ_STATE_DISABLE: 7619 7618 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7620 7619 cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; 7621 - WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); 7620 + WREG32(mmCG_THERMAL_INT, cg_thermal_int); 7622 7621 break; 7623 7622 case AMDGPU_IRQ_STATE_ENABLE: 7624 7623 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7625 7624 cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; 7626 - WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); 7625 + WREG32(mmCG_THERMAL_INT, cg_thermal_int); 7627 7626 break; 7628 7627 default: 7629 7628 break;
+14 -6
drivers/gpu/drm/bridge/synopsys/dw-dp.c
···
 	}
 
 	ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-	if (ret)
+	if (ret) {
 		dev_err_probe(dev, ret, "Failed to attach bridge\n");
+		goto unregister_aux;
+	}
 
 	dw_dp_init_hw(dp);
 
 	ret = phy_init(dp->phy);
 	if (ret) {
 		dev_err_probe(dev, ret, "phy init failed\n");
-		return ERR_PTR(ret);
+		goto unregister_aux;
 	}
 
 	ret = devm_add_action_or_reset(dev, dw_dp_phy_exit, dp);
 	if (ret)
-		return ERR_PTR(ret);
+		goto unregister_aux;
 
 	dp->irq = platform_get_irq(pdev, 0);
-	if (dp->irq < 0)
-		return ERR_PTR(ret);
+	if (dp->irq < 0) {
+		ret = dp->irq;
+		goto unregister_aux;
+	}
 
 	ret = devm_request_threaded_irq(dev, dp->irq, NULL, dw_dp_irq,
 					IRQF_ONESHOT, dev_name(dev), dp);
 	if (ret) {
 		dev_err_probe(dev, ret, "failed to request irq\n");
-		return ERR_PTR(ret);
+		goto unregister_aux;
 	}
 
 	return dp;
+
+unregister_aux:
+	drm_dp_aux_unregister(&dp->aux);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(dw_dp_bind);
 
+22 -14
drivers/gpu/drm/i915/display/intel_color_pipeline.c
··· 34 34 return ret; 35 35 36 36 list->type = colorop->base.base.id; 37 - list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", colorop->base.base.id); 38 37 39 38 /* TODO: handle failures and clean up */ 39 + prev_op = &colorop->base; 40 + 41 + colorop = intel_colorop_create(INTEL_PLANE_CB_CSC); 42 + ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane, 43 + DRM_COLOROP_FLAG_ALLOW_BYPASS); 44 + if (ret) 45 + return ret; 46 + 47 + drm_colorop_set_next_property(prev_op, &colorop->base); 40 48 prev_op = &colorop->base; 41 49 42 50 if (DISPLAY_VER(display) >= 35 && ··· 63 55 prev_op = &colorop->base; 64 56 } 65 57 66 - colorop = intel_colorop_create(INTEL_PLANE_CB_CSC); 67 - ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane, 68 - DRM_COLOROP_FLAG_ALLOW_BYPASS); 69 - if (ret) 70 - return ret; 71 - 72 - drm_colorop_set_next_property(prev_op, &colorop->base); 73 - prev_op = &colorop->base; 74 - 75 58 colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT); 76 59 ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, 77 60 PLANE_GAMMA_SIZE, ··· 73 74 74 75 drm_colorop_set_next_property(prev_op, &colorop->base); 75 76 77 + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", list->type); 78 + 76 79 return 0; 77 80 } 78 81 ··· 82 81 { 83 82 struct drm_device *dev = plane->dev; 84 83 struct intel_display *display = to_intel_display(dev); 85 - struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES]; 84 + struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES] = {}; 86 85 int len = 0; 87 - int ret; 86 + int ret = 0; 87 + int i; 88 88 89 89 /* Currently expose pipeline only for HDR planes */ 90 90 if (!icl_is_hdr_plane(display, to_intel_plane(plane)->id)) ··· 94 92 /* Add pipeline consisting of transfer functions */ 95 93 ret = _intel_color_pipeline_plane_init(plane, &pipelines[len], pipe); 96 94 if (ret) 97 - return ret; 95 + goto out; 98 96 len++; 99 97 100 - return drm_plane_create_color_pipeline_property(plane, pipelines, len); 98 + ret = drm_plane_create_color_pipeline_property(plane, pipelines, len); 99 + 100 + for (i = 0; i < len; i++) 101 + kfree(pipelines[i].name); 102 + 103 + out: 104 + return ret; 101 105 }
+7 -1
drivers/gpu/drm/imagination/pvr_fw_trace.c
···
 	struct rogue_fwif_kccb_cmd cmd;
 	int idx;
 	int err;
+	int slot;
 
 	if (group_mask)
 		fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_TRACE | group_mask;
···
 	cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE;
 	cmd.kccb_flags = 0;
 
-	err = pvr_kccb_send_cmd(pvr_dev, &cmd, NULL);
+	err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot);
+	if (err)
+		goto err_drm_dev_exit;
 
+	err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, NULL);
+
+err_drm_dev_exit:
 	drm_dev_exit(idx);
 
 err_up_read:
+1 -1
drivers/gpu/drm/mediatek/Kconfig
···
 	depends on OF
 	depends on MTK_MMSYS
 	select DRM_CLIENT_SELECTION
-	select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION
+	select DRM_GEM_DMA_HELPER
 	select DRM_KMS_HELPER
 	select DRM_DISPLAY_HELPER
 	select DRM_BRIDGE_CONNECTOR
+9 -14
drivers/gpu/drm/mediatek/mtk_dpi.c
···
 			     enum drm_bridge_attach_flags flags)
 {
 	struct mtk_dpi *dpi = bridge_to_dpi(bridge);
-	int ret;
-
-	dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1);
-	if (IS_ERR(dpi->next_bridge)) {
-		ret = PTR_ERR(dpi->next_bridge);
-		if (ret == -EPROBE_DEFER)
-			return ret;
-
-		/* Old devicetree has only one endpoint */
-		dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0);
-		if (IS_ERR(dpi->next_bridge))
-			return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge),
-					     "Failed to get bridge\n");
-	}
 
 	return drm_bridge_attach(encoder, dpi->next_bridge,
 				 &dpi->bridge, flags);
···
 	dpi->irq = platform_get_irq(pdev, 0);
 	if (dpi->irq < 0)
 		return dpi->irq;
+
+	dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1);
+	if (IS_ERR(dpi->next_bridge) && PTR_ERR(dpi->next_bridge) == -ENODEV) {
+		/* Old devicetree has only one endpoint */
+		dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0);
+	}
+	if (IS_ERR(dpi->next_bridge))
+		return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge),
+				     "Failed to get bridge\n");
 
 	platform_set_drvdata(pdev, dpi);
 
+103 -161
drivers/gpu/drm/mediatek/mtk_gem.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 3 * Copyright (c) 2015 MediaTek Inc. 4 + * Copyright (c) 2025 Collabora Ltd. 5 + * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> 4 6 */ 5 7 6 8 #include <linux/dma-buf.h> ··· 20 18 21 19 static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 22 20 23 - static const struct vm_operations_struct vm_ops = { 24 - .open = drm_gem_vm_open, 25 - .close = drm_gem_vm_close, 26 - }; 21 + static void mtk_gem_free_object(struct drm_gem_object *obj) 22 + { 23 + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj); 24 + struct mtk_drm_private *priv = obj->dev->dev_private; 25 + 26 + if (dma_obj->sgt) 27 + drm_prime_gem_destroy(obj, dma_obj->sgt); 28 + else 29 + dma_free_wc(priv->dma_dev, dma_obj->base.size, 30 + dma_obj->vaddr, dma_obj->dma_addr); 31 + 32 + /* release file pointer to gem object. */ 33 + drm_gem_object_release(obj); 34 + 35 + kfree(dma_obj); 36 + } 37 + 38 + /* 39 + * Allocate a sg_table for this GEM object. 40 + * Note: Both the table's contents, and the sg_table itself must be freed by 41 + * the caller. 42 + * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error. 43 + */ 44 + static struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj) 45 + { 46 + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj); 47 + struct mtk_drm_private *priv = obj->dev->dev_private; 48 + struct sg_table *sgt; 49 + int ret; 50 + 51 + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 52 + if (!sgt) 53 + return ERR_PTR(-ENOMEM); 54 + 55 + ret = dma_get_sgtable(priv->dma_dev, sgt, dma_obj->vaddr, 56 + dma_obj->dma_addr, obj->size); 57 + if (ret) { 58 + DRM_ERROR("failed to allocate sgt, %d\n", ret); 59 + kfree(sgt); 60 + return ERR_PTR(ret); 61 + } 62 + 63 + return sgt; 64 + } 27 65 28 66 static const struct drm_gem_object_funcs mtk_gem_object_funcs = { 29 67 .free = mtk_gem_free_object, 68 + .print_info = drm_gem_dma_object_print_info, 30 69 .get_sg_table = mtk_gem_prime_get_sg_table, 31 - .vmap = mtk_gem_prime_vmap, 32 - .vunmap = mtk_gem_prime_vunmap, 70 + .vmap = drm_gem_dma_object_vmap, 33 71 .mmap = mtk_gem_object_mmap, 34 - .vm_ops = &vm_ops, 72 + .vm_ops = &drm_gem_dma_vm_ops, 35 73 }; 36 74 37 - static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev, 38 - unsigned long size) 75 + static struct drm_gem_dma_object *mtk_gem_init(struct drm_device *dev, 76 + unsigned long size, bool private) 39 77 { 40 - struct mtk_gem_obj *mtk_gem_obj; 78 + struct drm_gem_dma_object *dma_obj; 41 79 int ret; 42 80 43 81 size = round_up(size, PAGE_SIZE); ··· 85 43 if (size == 0) 86 44 return ERR_PTR(-EINVAL); 87 45 88 - mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL); 89 - if (!mtk_gem_obj) 46 + dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL); 47 + if (!dma_obj) 90 48 return ERR_PTR(-ENOMEM); 91 49 92 - mtk_gem_obj->base.funcs = &mtk_gem_object_funcs; 50 + dma_obj->base.funcs = &mtk_gem_object_funcs; 93 51 94 - ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size); 95 - if (ret < 0) { 52 + if (private) { 53 + ret = 0; 54 + drm_gem_private_object_init(dev, &dma_obj->base, size); 55 + } else { 56 + ret = drm_gem_object_init(dev, &dma_obj->base, size); 57 + } 58 + if (ret) { 96 59 DRM_ERROR("failed to initialize gem object\n"); 97 - kfree(mtk_gem_obj); 60 + kfree(dma_obj); 98 61 return ERR_PTR(ret); 99 62 } 100 63 101 - return mtk_gem_obj; 64 + return dma_obj; 102 65 } 103 66 104 - struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, 105 - size_t 
size, bool alloc_kmap) 67 + static struct drm_gem_dma_object *mtk_gem_create(struct drm_device *dev, size_t size) 106 68 { 107 69 struct mtk_drm_private *priv = dev->dev_private; 108 - struct mtk_gem_obj *mtk_gem; 70 + struct drm_gem_dma_object *dma_obj; 109 71 struct drm_gem_object *obj; 110 72 int ret; 111 73 112 - mtk_gem = mtk_gem_init(dev, size); 113 - if (IS_ERR(mtk_gem)) 114 - return ERR_CAST(mtk_gem); 74 + dma_obj = mtk_gem_init(dev, size, false); 75 + if (IS_ERR(dma_obj)) 76 + return ERR_CAST(dma_obj); 115 77 116 - obj = &mtk_gem->base; 78 + obj = &dma_obj->base; 117 79 118 - mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE; 119 - 120 - if (!alloc_kmap) 121 - mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; 122 - 123 - mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size, 124 - &mtk_gem->dma_addr, GFP_KERNEL, 125 - mtk_gem->dma_attrs); 126 - if (!mtk_gem->cookie) { 80 + dma_obj->vaddr = dma_alloc_wc(priv->dma_dev, obj->size, 81 + &dma_obj->dma_addr, 82 + GFP_KERNEL | __GFP_NOWARN); 83 + if (!dma_obj->vaddr) { 127 84 DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size); 128 85 ret = -ENOMEM; 129 86 goto err_gem_free; 130 87 } 131 88 132 - if (alloc_kmap) 133 - mtk_gem->kvaddr = mtk_gem->cookie; 134 - 135 - DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n", 136 - mtk_gem->cookie, &mtk_gem->dma_addr, 89 + DRM_DEBUG_DRIVER("vaddr = %p dma_addr = %pad size = %zu\n", 90 + dma_obj->vaddr, &dma_obj->dma_addr, 137 91 size); 138 92 139 - return mtk_gem; 93 + return dma_obj; 140 94 141 95 err_gem_free: 142 96 drm_gem_object_release(obj); 143 - kfree(mtk_gem); 97 + kfree(dma_obj); 144 98 return ERR_PTR(ret); 145 - } 146 - 147 - void mtk_gem_free_object(struct drm_gem_object *obj) 148 - { 149 - struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); 150 - struct mtk_drm_private *priv = obj->dev->dev_private; 151 - 152 - if (mtk_gem->sg) 153 - drm_prime_gem_destroy(obj, mtk_gem->sg); 154 - else 155 - dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie, 156 - mtk_gem->dma_addr, mtk_gem->dma_attrs); 157 - 158 - /* release file pointer to gem object. */ 159 - drm_gem_object_release(obj); 160 - 161 - kfree(mtk_gem); 162 99 } 163 100 164 101 int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, 165 102 struct drm_mode_create_dumb *args) 166 103 { 167 - struct mtk_gem_obj *mtk_gem; 104 + struct drm_gem_dma_object *dma_obj; 168 105 int ret; 169 106 170 107 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); ··· 156 135 args->size = args->pitch; 157 136 args->size *= args->height; 158 137 159 - mtk_gem = mtk_gem_create(dev, args->size, false); 160 - if (IS_ERR(mtk_gem)) 161 - return PTR_ERR(mtk_gem); 138 + dma_obj = mtk_gem_create(dev, args->size); 139 + if (IS_ERR(dma_obj)) 140 + return PTR_ERR(dma_obj); 162 141 163 142 /* 164 143 * allocate a id of idr table where the obj is registered 165 144 * and handle has the id what user can see. 166 145 */ 167 - ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle); 146 + ret = drm_gem_handle_create(file_priv, &dma_obj->base, &args->handle); 168 147 if (ret) 169 148 goto err_handle_create; 170 149 171 150 /* drop reference from allocate - handle holds it now. 
*/ 172 - drm_gem_object_put(&mtk_gem->base); 151 + drm_gem_object_put(&dma_obj->base); 173 152 174 153 return 0; 175 154 176 155 err_handle_create: 177 - mtk_gem_free_object(&mtk_gem->base); 156 + mtk_gem_free_object(&dma_obj->base); 178 157 return ret; 179 158 } 180 159 ··· 182 161 struct vm_area_struct *vma) 183 162 184 163 { 185 - int ret; 186 - struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); 164 + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj); 187 165 struct mtk_drm_private *priv = obj->dev->dev_private; 166 + int ret; 188 167 189 168 /* 190 169 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the 191 170 * whole buffer from the start. 192 171 */ 193 - vma->vm_pgoff = 0; 172 + vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); 194 173 195 174 /* 196 175 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear 197 176 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 198 177 */ 199 - vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); 178 + vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP); 179 + 200 180 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 201 181 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); 202 182 203 - ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie, 204 - mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs); 183 + ret = dma_mmap_wc(priv->dma_dev, vma, dma_obj->vaddr, 184 + dma_obj->dma_addr, obj->size); 185 + if (ret) 186 + drm_gem_vm_close(vma); 205 187 206 188 return ret; 207 189 } 208 190 209 - /* 210 - * Allocate a sg_table for this GEM object. 211 - * Note: Both the table's contents, and the sg_table itself must be freed by 212 - * the caller. 213 - * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error. 
214 - */ 215 - struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj) 216 - { 217 - struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); 218 - struct mtk_drm_private *priv = obj->dev->dev_private; 219 - struct sg_table *sgt; 220 - int ret; 221 - 222 - sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 223 - if (!sgt) 224 - return ERR_PTR(-ENOMEM); 225 - 226 - ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie, 227 - mtk_gem->dma_addr, obj->size, 228 - mtk_gem->dma_attrs); 229 - if (ret) { 230 - DRM_ERROR("failed to allocate sgt, %d\n", ret); 231 - kfree(sgt); 232 - return ERR_PTR(ret); 233 - } 234 - 235 - return sgt; 236 - } 237 - 238 191 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, 239 - struct dma_buf_attachment *attach, struct sg_table *sg) 192 + struct dma_buf_attachment *attach, struct sg_table *sgt) 240 193 { 241 - struct mtk_gem_obj *mtk_gem; 194 + struct drm_gem_dma_object *dma_obj; 242 195 243 196 /* check if the entries in the sg_table are contiguous */ 244 - if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { 197 + if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) { 245 198 DRM_ERROR("sg_table is not contiguous"); 246 199 return ERR_PTR(-EINVAL); 247 200 } 248 201 249 - mtk_gem = mtk_gem_init(dev, attach->dmabuf->size); 250 - if (IS_ERR(mtk_gem)) 251 - return ERR_CAST(mtk_gem); 202 + dma_obj = mtk_gem_init(dev, attach->dmabuf->size, true); 203 + if (IS_ERR(dma_obj)) 204 + return ERR_CAST(dma_obj); 252 205 253 - mtk_gem->dma_addr = sg_dma_address(sg->sgl); 254 - mtk_gem->sg = sg; 206 + dma_obj->dma_addr = sg_dma_address(sgt->sgl); 207 + dma_obj->sgt = sgt; 255 208 256 - return &mtk_gem->base; 257 - } 258 - 259 - int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) 260 - { 261 - struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); 262 - struct sg_table *sgt = NULL; 263 - unsigned int npages; 264 - 265 - if (mtk_gem->kvaddr) 266 - goto out; 267 - 268 - sgt = mtk_gem_prime_get_sg_table(obj); 269 - if (IS_ERR(sgt)) 270 - return PTR_ERR(sgt); 271 - 272 - npages = obj->size >> PAGE_SHIFT; 273 - mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); 274 - if (!mtk_gem->pages) { 275 - sg_free_table(sgt); 276 - kfree(sgt); 277 - return -ENOMEM; 278 - } 279 - 280 - drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages); 281 - 282 - mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, 283 - pgprot_writecombine(PAGE_KERNEL)); 284 - if (!mtk_gem->kvaddr) { 285 - sg_free_table(sgt); 286 - kfree(sgt); 287 - kfree(mtk_gem->pages); 288 - return -ENOMEM; 289 - } 290 - sg_free_table(sgt); 291 - kfree(sgt); 292 - 293 - out: 294 - iosys_map_set_vaddr(map, mtk_gem->kvaddr); 295 - 296 - return 0; 297 - } 298 - 299 - void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) 300 - { 301 - struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); 302 - void *vaddr = map->vaddr; 303 - 304 - if (!mtk_gem->pages) 305 - return; 306 - 307 - vunmap(vaddr); 308 - mtk_gem->kvaddr = NULL; 309 - kfree(mtk_gem->pages); 209 + return &dma_obj->base; 310 210 }
+1 -32
drivers/gpu/drm/mediatek/mtk_gem.h
···
 #define _MTK_GEM_H_
 
 #include <drm/drm_gem.h>
+#include <drm/drm_gem_dma_helper.h>
 
-/*
- * mtk drm buffer structure.
- *
- * @base: a gem object.
- *	- a new handle to this gem object would be created
- *	by drm_gem_handle_create().
- * @cookie: the return value of dma_alloc_attrs(), keep it for dma_free_attrs()
- * @kvaddr: kernel virtual address of gem buffer.
- * @dma_addr: dma address of gem buffer.
- * @dma_attrs: dma attributes of gem buffer.
- *
- * P.S. this object would be transferred to user as kms_bo.handle so
- * user can access the buffer through kms_bo.handle.
- */
-struct mtk_gem_obj {
-	struct drm_gem_object base;
-	void *cookie;
-	void *kvaddr;
-	dma_addr_t dma_addr;
-	unsigned long dma_attrs;
-	struct sg_table *sg;
-	struct page **pages;
-};
-
-#define to_mtk_gem_obj(x) container_of(x, struct mtk_gem_obj, base)
-
-void mtk_gem_free_object(struct drm_gem_object *gem);
-struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, size_t size,
-				   bool alloc_kmap);
 int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 			struct drm_mode_create_dumb *args);
-struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
 			struct dma_buf_attachment *attach, struct sg_table *sg);
-int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map);
-void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
 
 #endif
+1 -1
drivers/gpu/drm/mediatek/mtk_hdmi_common.c
···
 		return dev_err_probe(dev, ret, "Failed to get clocks\n");
 
 	hdmi->irq = platform_get_irq(pdev, 0);
-	if (!hdmi->irq)
+	if (hdmi->irq < 0)
 		return hdmi->irq;
 
 	hdmi->regs = device_node_to_regmap(dev->of_node);
+1 -1
drivers/gpu/drm/mediatek/mtk_hdmi_common.h
···
 	bool audio_enable;
 	bool powered;
 	bool enabled;
-	unsigned int irq;
+	int irq;
 	enum hdmi_hpd_state hpd;
 	hdmi_codec_plugged_cb plugged_cb;
 	struct device *codec_dev;
+33 -25
drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c
··· 66 66 return 0; 67 67 } 68 68 69 - static int mtk_ddc_wr_one(struct mtk_hdmi_ddc *ddc, u16 addr_id, 70 - u16 offset_id, u8 *wr_data) 69 + static int mtk_ddcm_write_hdmi(struct mtk_hdmi_ddc *ddc, u16 addr_id, 70 + u16 offset_id, u16 data_cnt, u8 *wr_data) 71 71 { 72 72 u32 val; 73 - int ret; 73 + int ret, i; 74 + 75 + /* Don't allow transfer with a size over than the transfer fifo size 76 + * (16 byte) 77 + */ 78 + if (data_cnt > 16) { 79 + dev_err(ddc->dev, "Invalid DDCM write request\n"); 80 + return -EINVAL; 81 + } 74 82 75 83 /* If down, rise bus for write operation */ 76 84 mtk_ddc_check_and_rise_low_bus(ddc); ··· 86 78 regmap_update_bits(ddc->regs, HPD_DDC_CTRL, HPD_DDC_DELAY_CNT, 87 79 FIELD_PREP(HPD_DDC_DELAY_CNT, DDC2_DLY_CNT)); 88 80 81 + /* In case there is no payload data, just do a single write for the 82 + * address only 83 + */ 89 84 if (wr_data) { 90 - regmap_write(ddc->regs, SI2C_CTRL, 91 - FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) | 92 - FIELD_PREP(SI2C_WDATA, *wr_data) | 93 - SI2C_WR); 85 + /* Fill transfer fifo with payload data */ 86 + for (i = 0; i < data_cnt; i++) { 87 + regmap_write(ddc->regs, SI2C_CTRL, 88 + FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) | 89 + FIELD_PREP(SI2C_WDATA, wr_data[i]) | 90 + SI2C_WR); 91 + } 94 92 } 95 - 96 93 regmap_write(ddc->regs, DDC_CTRL, 97 94 FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_WRITE) | 98 - FIELD_PREP(DDC_CTRL_DIN_CNT, wr_data == NULL ? 0 : 1) | 95 + FIELD_PREP(DDC_CTRL_DIN_CNT, wr_data == NULL ? 0 : data_cnt) | 99 96 FIELD_PREP(DDC_CTRL_OFFSET, offset_id) | 100 97 FIELD_PREP(DDC_CTRL_ADDR, addr_id)); 101 98 usleep_range(1000, 1250); ··· 109 96 !(val & DDC_I2C_IN_PROG), 500, 1000); 110 97 if (ret) { 111 98 dev_err(ddc->dev, "DDC I2C write timeout\n"); 99 + 100 + /* Abort transfer if it is still in progress */ 101 + regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD, 102 + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_ABORT_XFER)); 103 + 112 104 return ret; 113 105 } 114 106 ··· 197 179 500 * (temp_length + 5)); 198 180 if (ret) { 199 181 dev_err(ddc->dev, "Timeout waiting for DDC I2C\n"); 182 + 183 + /* Abort transfer if it is still in progress */ 184 + regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD, 185 + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_ABORT_XFER)); 186 + 200 187 return ret; 201 188 } 202 189 ··· 273 250 static int mtk_hdmi_ddc_fg_data_write(struct mtk_hdmi_ddc *ddc, u16 b_dev, 274 251 u8 data_addr, u16 data_cnt, u8 *pr_data) 275 252 { 276 - int i, ret; 277 - 278 253 regmap_set_bits(ddc->regs, HDCP2X_POL_CTRL, HDCP2X_DIS_POLL_EN); 279 - /* 280 - * In case there is no payload data, just do a single write for the 281 - * address only 282 - */ 283 - if (data_cnt == 0) 284 - return mtk_ddc_wr_one(ddc, b_dev, data_addr, NULL); 285 254 286 - i = 0; 287 - do { 288 - ret = mtk_ddc_wr_one(ddc, b_dev, data_addr + i, pr_data + i); 289 - if (ret) 290 - return ret; 291 - } while (++i < data_cnt); 292 - 293 - return 0; 255 + return mtk_ddcm_write_hdmi(ddc, b_dev, data_addr, data_cnt, pr_data); 294 256 } 295 257 296 258 static int mtk_hdmi_ddc_v2_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+4 -3
drivers/gpu/drm/mediatek/mtk_hdmi_v2.c
···
 	mtk_hdmi_v2_disable(hdmi);
 }
 
-static int mtk_hdmi_v2_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
-						 const struct drm_display_mode *mode,
-						 unsigned long long tmds_rate)
+static enum drm_mode_status
+mtk_hdmi_v2_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
+				      const struct drm_display_mode *mode,
+				      unsigned long long tmds_rate)
 {
 	if (mode->clock < MTK_HDMI_V2_CLOCK_MIN)
 		return MODE_CLOCK_LOW;
+4 -4
drivers/gpu/drm/mediatek/mtk_plane.c
···
 #include <drm/drm_fourcc.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_dma_helper.h>
 #include <drm/drm_print.h>
 #include <linux/align.h>
 
 #include "mtk_crtc.h"
 #include "mtk_ddp_comp.h"
 #include "mtk_drm_drv.h"
-#include "mtk_gem.h"
 #include "mtk_plane.h"
 
 static const u64 modifiers[] = {
···
 			   struct mtk_plane_state *mtk_plane_state)
 {
 	struct drm_framebuffer *fb = new_state->fb;
+	struct drm_gem_dma_object *dma_obj;
 	struct drm_gem_object *gem;
-	struct mtk_gem_obj *mtk_gem;
 	unsigned int pitch, format;
 	u64 modifier;
 	dma_addr_t addr;
···
 	int offset;
 
 	gem = fb->obj[0];
-	mtk_gem = to_mtk_gem_obj(gem);
-	addr = mtk_gem->dma_addr;
+	dma_obj = to_drm_gem_dma_obj(gem);
+	addr = dma_obj->dma_addr;
 	pitch = fb->pitches[0];
 	format = fb->format->format;
 	modifier = fb->modifier;
+74 -21
drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
··· 1 1 /* SPDX-License-Identifier: MIT */ 2 2 #ifndef __NVBIOS_CONN_H__ 3 3 #define __NVBIOS_CONN_H__ 4 + 5 + /* 6 + * An enumerator representing all of the possible VBIOS connector types defined 7 + * by Nvidia at 8 + * https://nvidia.github.io/open-gpu-doc/DCB/DCB-4.x-Specification.html. 9 + * 10 + * [1] Nvidia's documentation actually claims DCB_CONNECTOR_HDMI_0 is a "3-Pin 11 + * DIN Stereo Connector". This seems very likely to be a documentation typo 12 + * or some sort of funny historical baggage, because we've treated this 13 + * connector type as HDMI for years without issue. 14 + * TODO: Check with Nvidia what's actually happening here. 15 + */ 4 16 enum dcb_connector_type { 5 - DCB_CONNECTOR_VGA = 0x00, 6 - DCB_CONNECTOR_TV_0 = 0x10, 7 - DCB_CONNECTOR_TV_1 = 0x11, 8 - DCB_CONNECTOR_TV_3 = 0x13, 9 - DCB_CONNECTOR_DVI_I = 0x30, 10 - DCB_CONNECTOR_DVI_D = 0x31, 11 - DCB_CONNECTOR_DMS59_0 = 0x38, 12 - DCB_CONNECTOR_DMS59_1 = 0x39, 13 - DCB_CONNECTOR_LVDS = 0x40, 14 - DCB_CONNECTOR_LVDS_SPWG = 0x41, 15 - DCB_CONNECTOR_DP = 0x46, 16 - DCB_CONNECTOR_eDP = 0x47, 17 - DCB_CONNECTOR_mDP = 0x48, 18 - DCB_CONNECTOR_HDMI_0 = 0x60, 19 - DCB_CONNECTOR_HDMI_1 = 0x61, 20 - DCB_CONNECTOR_HDMI_C = 0x63, 21 - DCB_CONNECTOR_DMS59_DP0 = 0x64, 22 - DCB_CONNECTOR_DMS59_DP1 = 0x65, 23 - DCB_CONNECTOR_WFD = 0x70, 24 - DCB_CONNECTOR_USB_C = 0x71, 25 - DCB_CONNECTOR_NONE = 0xff 17 + /* Analog outputs */ 18 + DCB_CONNECTOR_VGA = 0x00, // VGA 15-pin connector 19 + DCB_CONNECTOR_DVI_A = 0x01, // DVI-A 20 + DCB_CONNECTOR_POD_VGA = 0x02, // Pod - VGA 15-pin connector 21 + DCB_CONNECTOR_TV_0 = 0x10, // TV - Composite Out 22 + DCB_CONNECTOR_TV_1 = 0x11, // TV - S-Video Out 23 + DCB_CONNECTOR_TV_2 = 0x12, // TV - S-Video Breakout - Composite 24 + DCB_CONNECTOR_TV_3 = 0x13, // HDTV Component - YPrPb 25 + DCB_CONNECTOR_TV_SCART = 0x14, // TV - SCART Connector 26 + DCB_CONNECTOR_TV_SCART_D = 0x16, // TV - Composite SCART over D-connector 27 + DCB_CONNECTOR_TV_DTERM = 0x17, // HDTV - D-connector (EIAJ4120) 28 + DCB_CONNECTOR_POD_TV_3 = 0x18, // Pod - HDTV - YPrPb 29 + DCB_CONNECTOR_POD_TV_1 = 0x19, // Pod - S-Video 30 + DCB_CONNECTOR_POD_TV_0 = 0x1a, // Pod - Composite 31 + 32 + /* DVI digital outputs */ 33 + DCB_CONNECTOR_DVI_I_TV_1 = 0x20, // DVI-I-TV-S-Video 34 + DCB_CONNECTOR_DVI_I_TV_0 = 0x21, // DVI-I-TV-Composite 35 + DCB_CONNECTOR_DVI_I_TV_2 = 0x22, // DVI-I-TV-S-Video Breakout-Composite 36 + DCB_CONNECTOR_DVI_I = 0x30, // DVI-I 37 + DCB_CONNECTOR_DVI_D = 0x31, // DVI-D 38 + DCB_CONNECTOR_DVI_ADC = 0x32, // Apple Display Connector (ADC) 39 + DCB_CONNECTOR_DMS59_0 = 0x38, // LFH-DVI-I-1 40 + DCB_CONNECTOR_DMS59_1 = 0x39, // LFH-DVI-I-2 41 + DCB_CONNECTOR_BNC = 0x3c, // BNC Connector [for SDI?] 42 + 43 + /* LVDS / TMDS digital outputs */ 44 + DCB_CONNECTOR_LVDS = 0x40, // LVDS-SPWG-Attached [is this name correct?] 
45 + DCB_CONNECTOR_LVDS_SPWG = 0x41, // LVDS-OEM-Attached (non-removable) 46 + DCB_CONNECTOR_LVDS_REM = 0x42, // LVDS-SPWG-Detached [following naming above] 47 + DCB_CONNECTOR_LVDS_SPWG_REM = 0x43, // LVDS-OEM-Detached (removable) 48 + DCB_CONNECTOR_TMDS = 0x45, // TMDS-OEM-Attached (non-removable) 49 + 50 + /* DP digital outputs */ 51 + DCB_CONNECTOR_DP = 0x46, // DisplayPort External Connector 52 + DCB_CONNECTOR_eDP = 0x47, // DisplayPort Internal Connector 53 + DCB_CONNECTOR_mDP = 0x48, // DisplayPort (Mini) External Connector 54 + 55 + /* Dock outputs (not used) */ 56 + DCB_CONNECTOR_DOCK_VGA_0 = 0x50, // VGA 15-pin if not docked 57 + DCB_CONNECTOR_DOCK_VGA_1 = 0x51, // VGA 15-pin if docked 58 + DCB_CONNECTOR_DOCK_DVI_I_0 = 0x52, // DVI-I if not docked 59 + DCB_CONNECTOR_DOCK_DVI_I_1 = 0x53, // DVI-I if docked 60 + DCB_CONNECTOR_DOCK_DVI_D_0 = 0x54, // DVI-D if not docked 61 + DCB_CONNECTOR_DOCK_DVI_D_1 = 0x55, // DVI-D if docked 62 + DCB_CONNECTOR_DOCK_DP_0 = 0x56, // DisplayPort if not docked 63 + DCB_CONNECTOR_DOCK_DP_1 = 0x57, // DisplayPort if docked 64 + DCB_CONNECTOR_DOCK_mDP_0 = 0x58, // DisplayPort (Mini) if not docked 65 + DCB_CONNECTOR_DOCK_mDP_1 = 0x59, // DisplayPort (Mini) if docked 66 + 67 + /* HDMI? digital outputs */ 68 + DCB_CONNECTOR_HDMI_0 = 0x60, // HDMI? See [1] in top-level enum comment above 69 + DCB_CONNECTOR_HDMI_1 = 0x61, // HDMI-A connector 70 + DCB_CONNECTOR_SPDIF = 0x62, // Audio S/PDIF connector 71 + DCB_CONNECTOR_HDMI_C = 0x63, // HDMI-C (Mini) connector 72 + 73 + /* Misc. digital outputs */ 74 + DCB_CONNECTOR_DMS59_DP0 = 0x64, // LFH-DP-1 75 + DCB_CONNECTOR_DMS59_DP1 = 0x65, // LFH-DP-2 76 + DCB_CONNECTOR_WFD = 0x70, // Virtual connector for Wifi Display (WFD) 77 + DCB_CONNECTOR_USB_C = 0x71, // [DP over USB-C; not present in docs] 78 + DCB_CONNECTOR_NONE = 0xff // Skip Entry 26 79 }; 27 80 28 81 struct nvbios_connT {
+2
drivers/gpu/drm/nouveau/nouveau_display.c
···
 
 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
 	.fb_create = nouveau_user_framebuffer_create,
+	.atomic_commit = drm_atomic_helper_commit,
+	.atomic_check = drm_atomic_helper_check,
 };
 
 
+53 -20
drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
··· 191 191 spin_lock(&disp->client.lock); 192 192 if (!conn->object.func) { 193 193 switch (conn->info.type) { 194 - case DCB_CONNECTOR_VGA : args->v0.type = NVIF_CONN_V0_VGA; break; 195 - case DCB_CONNECTOR_TV_0 : 196 - case DCB_CONNECTOR_TV_1 : 197 - case DCB_CONNECTOR_TV_3 : args->v0.type = NVIF_CONN_V0_TV; break; 198 - case DCB_CONNECTOR_DMS59_0 : 199 - case DCB_CONNECTOR_DMS59_1 : 200 - case DCB_CONNECTOR_DVI_I : args->v0.type = NVIF_CONN_V0_DVI_I; break; 201 - case DCB_CONNECTOR_DVI_D : args->v0.type = NVIF_CONN_V0_DVI_D; break; 202 - case DCB_CONNECTOR_LVDS : args->v0.type = NVIF_CONN_V0_LVDS; break; 203 - case DCB_CONNECTOR_LVDS_SPWG: args->v0.type = NVIF_CONN_V0_LVDS_SPWG; break; 204 - case DCB_CONNECTOR_DMS59_DP0: 205 - case DCB_CONNECTOR_DMS59_DP1: 206 - case DCB_CONNECTOR_DP : 207 - case DCB_CONNECTOR_mDP : 208 - case DCB_CONNECTOR_USB_C : args->v0.type = NVIF_CONN_V0_DP; break; 209 - case DCB_CONNECTOR_eDP : args->v0.type = NVIF_CONN_V0_EDP; break; 210 - case DCB_CONNECTOR_HDMI_0 : 211 - case DCB_CONNECTOR_HDMI_1 : 212 - case DCB_CONNECTOR_HDMI_C : args->v0.type = NVIF_CONN_V0_HDMI; break; 194 + /* VGA */ 195 + case DCB_CONNECTOR_DVI_A : 196 + case DCB_CONNECTOR_POD_VGA : 197 + case DCB_CONNECTOR_VGA : args->v0.type = NVIF_CONN_V0_VGA; break; 198 + 199 + /* TV */ 200 + case DCB_CONNECTOR_TV_0 : 201 + case DCB_CONNECTOR_TV_1 : 202 + case DCB_CONNECTOR_TV_2 : 203 + case DCB_CONNECTOR_TV_SCART : 204 + case DCB_CONNECTOR_TV_SCART_D : 205 + case DCB_CONNECTOR_TV_DTERM : 206 + case DCB_CONNECTOR_POD_TV_3 : 207 + case DCB_CONNECTOR_POD_TV_1 : 208 + case DCB_CONNECTOR_POD_TV_0 : 209 + case DCB_CONNECTOR_TV_3 : args->v0.type = NVIF_CONN_V0_TV; break; 210 + 211 + /* DVI */ 212 + case DCB_CONNECTOR_DVI_I_TV_1 : 213 + case DCB_CONNECTOR_DVI_I_TV_0 : 214 + case DCB_CONNECTOR_DVI_I_TV_2 : 215 + case DCB_CONNECTOR_DVI_ADC : 216 + case DCB_CONNECTOR_DMS59_0 : 217 + case DCB_CONNECTOR_DMS59_1 : 218 + case DCB_CONNECTOR_DVI_I : args->v0.type = NVIF_CONN_V0_DVI_I; break; 219 + case DCB_CONNECTOR_TMDS : 220 + case DCB_CONNECTOR_DVI_D : args->v0.type = NVIF_CONN_V0_DVI_D; break; 221 + 222 + /* LVDS */ 223 + case DCB_CONNECTOR_LVDS : args->v0.type = NVIF_CONN_V0_LVDS; break; 224 + case DCB_CONNECTOR_LVDS_SPWG : args->v0.type = NVIF_CONN_V0_LVDS_SPWG; break; 225 + 226 + /* DP */ 227 + case DCB_CONNECTOR_DMS59_DP0 : 228 + case DCB_CONNECTOR_DMS59_DP1 : 229 + case DCB_CONNECTOR_DP : 230 + case DCB_CONNECTOR_mDP : 231 + case DCB_CONNECTOR_USB_C : args->v0.type = NVIF_CONN_V0_DP; break; 232 + case DCB_CONNECTOR_eDP : args->v0.type = NVIF_CONN_V0_EDP; break; 233 + 234 + /* HDMI */ 235 + case DCB_CONNECTOR_HDMI_0 : 236 + case DCB_CONNECTOR_HDMI_1 : 237 + case DCB_CONNECTOR_HDMI_C : args->v0.type = NVIF_CONN_V0_HDMI; break; 238 + 239 + /* 240 + * Dock & unused outputs. 241 + * BNC, SPDIF, WFD, and detached LVDS go here. 242 + */ 213 243 default: 214 - WARN_ON(1); 244 + nvkm_warn(&disp->engine.subdev, 245 + "unimplemented connector type 0x%02x\n", 246 + conn->info.type); 247 + args->v0.type = NVIF_CONN_V0_VGA; 215 248 ret = -EINVAL; 216 249 break; 217 250 }
+8 -7
drivers/gpu/drm/vkms/vkms_colorop.c
···
 		goto cleanup;
 
 	list->type = ops[i]->base.id;
-	list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id);
 
 	i++;
 
···
 
 	drm_colorop_set_next_property(ops[i - 1], ops[i]);
 
+	list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[0]->base.id);
+
 	return 0;
 
 cleanup:
···
 
 int vkms_initialize_colorops(struct drm_plane *plane)
 {
-	struct drm_prop_enum_list pipeline;
-	int ret;
+	struct drm_prop_enum_list pipeline = {};
+	int ret = 0;
 
 	/* Add color pipeline */
 	ret = vkms_initialize_color_pipeline(plane, &pipeline);
 	if (ret)
-		return ret;
+		goto out;
 
 	/* Create COLOR_PIPELINE property and attach */
 	ret = drm_plane_create_color_pipeline_property(plane, &pipeline, 1);
-	if (ret)
-		return ret;
 
-	return 0;
+	kfree(pipeline.name);
+out:
+	return ret;
 }
+3 -2
drivers/gpu/drm/xe/Kconfig
···
 	select DRM_TTM
 	select DRM_TTM_HELPER
 	select DRM_EXEC
-	select DRM_GPUSVM if !UML && DEVICE_PRIVATE
+	select DRM_GPUSVM if !UML
 	select DRM_GPUVM
 	select DRM_SCHED
 	select MMU_NOTIFIER
···
 	bool "Enable CPU to GPU address mirroring"
 	depends on DRM_XE
 	depends on !UML
-	depends on DEVICE_PRIVATE
+	depends on ZONE_DEVICE
 	default y
+	select DEVICE_PRIVATE
 	select DRM_GPUSVM
 	help
 	  Enable this option if you want support for CPU to GPU address
+7 -2
drivers/gpu/drm/xe/xe_bo.c
···
 			  unsigned long *scanned)
 {
 	struct xe_device *xe = ttm_to_xe_device(bo->bdev);
+	struct ttm_tt *tt = bo->ttm;
 	long lret;
 
 	/* Fake move to system, without copying data. */
···
 			     .writeback = false,
 			     .allow_move = false});
 
-	if (lret > 0)
+	if (lret > 0) {
 		xe_ttm_tt_account_subtract(xe, bo->ttm);
+		update_global_total_pages(bo->bdev, -(long)tt->num_pages);
+	}
 
 	return lret;
 }
···
 	if (needs_rpm)
 		xe_pm_runtime_put(xe);
 
-	if (lret > 0)
+	if (lret > 0) {
 		xe_ttm_tt_account_subtract(xe, tt);
+		update_global_total_pages(bo->bdev, -(long)tt->num_pages);
+	}
 
 out_unref:
 	xe_bo_put(xe_bo);
+57 -15
drivers/gpu/drm/xe/xe_debugfs.c
··· 256 256 return simple_read_from_buffer(ubuf, size, pos, buf, len); 257 257 } 258 258 259 + static int __wedged_mode_set_reset_policy(struct xe_gt *gt, enum xe_wedged_mode mode) 260 + { 261 + bool enable_engine_reset; 262 + int ret; 263 + 264 + enable_engine_reset = (mode != XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET); 265 + ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads, 266 + enable_engine_reset); 267 + if (ret) 268 + xe_gt_err(gt, "Failed to update GuC ADS scheduler policy (%pe)\n", ERR_PTR(ret)); 269 + 270 + return ret; 271 + } 272 + 273 + static int wedged_mode_set_reset_policy(struct xe_device *xe, enum xe_wedged_mode mode) 274 + { 275 + struct xe_gt *gt; 276 + int ret; 277 + u8 id; 278 + 279 + guard(xe_pm_runtime)(xe); 280 + for_each_gt(gt, xe, id) { 281 + ret = __wedged_mode_set_reset_policy(gt, mode); 282 + if (ret) { 283 + if (id > 0) { 284 + xe->wedged.inconsistent_reset = true; 285 + drm_err(&xe->drm, "Inconsistent reset policy state between GTs\n"); 286 + } 287 + return ret; 288 + } 289 + } 290 + 291 + xe->wedged.inconsistent_reset = false; 292 + 293 + return 0; 294 + } 295 + 296 + static bool wedged_mode_needs_policy_update(struct xe_device *xe, enum xe_wedged_mode mode) 297 + { 298 + if (xe->wedged.inconsistent_reset) 299 + return true; 300 + 301 + if (xe->wedged.mode == mode) 302 + return false; 303 + 304 + if (xe->wedged.mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET || 305 + mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET) 306 + return true; 307 + 308 + return false; 309 + } 310 + 259 311 static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf, 260 312 size_t size, loff_t *pos) 261 313 { 262 314 struct xe_device *xe = file_inode(f)->i_private; 263 - struct xe_gt *gt; 264 315 u32 wedged_mode; 265 316 ssize_t ret; 266 - u8 id; 267 317 268 318 ret = kstrtouint_from_user(ubuf, size, 0, &wedged_mode); 269 319 if (ret) ··· 322 272 if (wedged_mode > 2) 323 273 return -EINVAL; 324 274 325 - if (xe->wedged.mode == wedged_mode) 326 - return size; 275 + if (wedged_mode_needs_policy_update(xe, wedged_mode)) { 276 + ret = wedged_mode_set_reset_policy(xe, wedged_mode); 277 + if (ret) 278 + return ret; 279 + } 327 280 328 281 xe->wedged.mode = wedged_mode; 329 - 330 - xe_pm_runtime_get(xe); 331 - for_each_gt(gt, xe, id) { 332 - ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads); 333 - if (ret) { 334 - xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n"); 335 - xe_pm_runtime_put(xe); 336 - return -EIO; 337 - } 338 - } 339 - xe_pm_runtime_put(xe); 340 282 341 283 return size; 342 284 }
+18
drivers/gpu/drm/xe/xe_device_types.h
···
 struct xe_pxp;
 struct xe_vram_region;
 
+/**
+ * enum xe_wedged_mode - possible wedged modes
+ * @XE_WEDGED_MODE_NEVER: Device will never be declared wedged.
+ * @XE_WEDGED_MODE_UPON_CRITICAL_ERROR: Device will be declared wedged only
+ * when critical error occurs like GT reset failure or firmware failure.
+ * This is the default mode.
+ * @XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET: Device will be declared wedged on
+ * any hang. In this mode, engine resets are disabled to avoid automatic
+ * recovery attempts. This mode is primarily intended for debugging hangs.
+ */
+enum xe_wedged_mode {
+	XE_WEDGED_MODE_NEVER = 0,
+	XE_WEDGED_MODE_UPON_CRITICAL_ERROR = 1,
+	XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET = 2,
+};
+
 #define XE_BO_INVALID_OFFSET LONG_MAX
 
 #define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100)
···
 		int mode;
 		/** @wedged.method: Recovery method to be sent in the drm device wedged uevent */
 		unsigned long method;
+		/** @wedged.inconsistent_reset: Inconsistent reset policy state between GTs */
+		bool inconsistent_reset;
 	} wedged;
 
 	/** @bo_device: Struct to control async free of BOs */
+31 -1
drivers/gpu/drm/xe/xe_exec_queue.c
··· 328 328 * @xe: Xe device. 329 329 * @tile: tile which bind exec queue belongs to. 330 330 * @flags: exec queue creation flags 331 + * @user_vm: The user VM which this exec queue belongs to 331 332 * @extensions: exec queue creation extensions 332 333 * 333 334 * Normalize bind exec queue creation. Bind exec queue is tied to migration VM ··· 342 341 */ 343 342 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, 344 343 struct xe_tile *tile, 344 + struct xe_vm *user_vm, 345 345 u32 flags, u64 extensions) 346 346 { 347 347 struct xe_gt *gt = tile->primary_gt; ··· 379 377 xe_exec_queue_put(q); 380 378 return ERR_PTR(err); 381 379 } 380 + 381 + if (user_vm) 382 + q->user_vm = xe_vm_get(user_vm); 382 383 } 383 384 384 385 return q; ··· 410 405 list_for_each_entry_safe(eq, next, &q->multi_gt_list, 411 406 multi_gt_link) 412 407 xe_exec_queue_put(eq); 408 + } 409 + 410 + if (q->user_vm) { 411 + xe_vm_put(q->user_vm); 412 + q->user_vm = NULL; 413 413 } 414 414 415 415 q->ops->destroy(q); ··· 752 742 XE_IOCTL_DBG(xe, eci[0].engine_instance != 0)) 753 743 return -EINVAL; 754 744 745 + vm = xe_vm_lookup(xef, args->vm_id); 746 + if (XE_IOCTL_DBG(xe, !vm)) 747 + return -ENOENT; 748 + 749 + err = down_read_interruptible(&vm->lock); 750 + if (err) { 751 + xe_vm_put(vm); 752 + return err; 753 + } 754 + 755 + if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { 756 + up_read(&vm->lock); 757 + xe_vm_put(vm); 758 + return -ENOENT; 759 + } 760 + 755 761 for_each_tile(tile, xe, id) { 756 762 struct xe_exec_queue *new; 757 763 ··· 775 749 if (id) 776 750 flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD; 777 751 778 - new = xe_exec_queue_create_bind(xe, tile, flags, 752 + new = xe_exec_queue_create_bind(xe, tile, vm, flags, 779 753 args->extensions); 780 754 if (IS_ERR(new)) { 755 + up_read(&vm->lock); 756 + xe_vm_put(vm); 781 757 err = PTR_ERR(new); 782 758 if (q) 783 759 goto put_exec_queue; ··· 791 763 list_add_tail(&new->multi_gt_list, 792 764 &q->multi_gt_link); 793 765 } 766 + up_read(&vm->lock); 767 + xe_vm_put(vm); 794 768 } else { 795 769 logical_mask = calc_validate_logical_mask(xe, eci, 796 770 args->width,
+1
drivers/gpu/drm/xe/xe_exec_queue.h
···
 					   u32 flags, u64 extensions);
 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
 						struct xe_tile *tile,
+						struct xe_vm *user_vm,
 						u32 flags, u64 extensions);
 
 void xe_exec_queue_fini(struct xe_exec_queue *q);
+6
drivers/gpu/drm/xe/xe_exec_queue_types.h
···
 	struct kref refcount;
 	/** @vm: VM (address space) for this exec queue */
 	struct xe_vm *vm;
+	/**
+	 * @user_vm: User VM (address space) for this exec queue (bind queues
+	 * only)
+	 */
+	struct xe_vm *user_vm;
+
 	/** @class: class of this exec queue */
 	enum xe_engine_class class;
 	/**
+1 -1
drivers/gpu/drm/xe/xe_ggtt.c
···
 	else
 		ggtt->pt_ops = &xelp_pt_ops;
 
-	ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
+	ggtt->wq = alloc_workqueue("xe-ggtt-wq", WQ_MEM_RECLAIM, 0);
 	if (!ggtt->wq)
 		return -ENOMEM;
 
+2 -2
drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
···
 };
 
 /**
- * xe_gt_sriov_vf_migration - VF migration data.
+ * struct xe_gt_sriov_vf_migration - VF migration data.
  */
 struct xe_gt_sriov_vf_migration {
-	/** @migration: VF migration recovery worker */
+	/** @worker: VF migration recovery worker */
 	struct work_struct worker;
 	/** @lock: Protects recovery_queued, teardown */
 	spinlock_t lock;
+8 -6
drivers/gpu/drm/xe/xe_guc_ads.c
···
 /**
  * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy
  * @ads: Additional data structures object
+ * @enable_engine_reset: true to enable engine resets, false otherwise
  *
- * This function update the GuC's engine reset policy based on wedged.mode.
+ * This function update the GuC's engine reset policy.
  *
  * Return: 0 on success, and negative error code otherwise.
  */
-int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
+int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads,
+					     bool enable_engine_reset)
 {
 	struct guc_policies *policies;
 	struct xe_guc *guc = ads_to_guc(ads);
-	struct xe_device *xe = ads_to_xe(ads);
 	CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies));
 
 	if (!xe_guc_buf_is_valid(buf))
···
 	policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time);
 	policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items);
 	policies->is_valid = 1;
-	if (xe->wedged.mode == 2)
-		policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
-	else
+
+	if (enable_engine_reset)
 		policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;
+	else
+		policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
 
 	return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf));
 }
+4 -1
drivers/gpu/drm/xe/xe_guc_ads.h
···
 #ifndef _XE_GUC_ADS_H_
 #define _XE_GUC_ADS_H_
 
+#include <linux/types.h>
+
 struct xe_guc_ads;
 
 int xe_guc_ads_init(struct xe_guc_ads *ads);
···
 void xe_guc_ads_populate(struct xe_guc_ads *ads);
 void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads);
 void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads);
-int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads);
+int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads,
+					     bool enable_engine_reset);
 
 #endif
+3 -1
drivers/gpu/drm/xe/xe_late_bind_fw_types.h
···
 #define XE_LB_MAX_PAYLOAD_SIZE SZ_4K
 
 /**
- * xe_late_bind_fw_id - enum to determine late binding fw index
+ * enum xe_late_bind_fw_id - enum to determine late binding fw index
  */
 enum xe_late_bind_fw_id {
+	/** @XE_LB_FW_FAN_CONTROL: Fan control */
 	XE_LB_FW_FAN_CONTROL = 0,
+	/** @XE_LB_FW_MAX_ID: Number of IDs */
 	XE_LB_FW_MAX_ID
 };
 
+3
drivers/gpu/drm/xe/xe_lrc.c
···
 {
 	u32 *cmd = batch;
 
+	if (IS_SRIOV_VF(gt_to_xe(lrc->gt)))
+		return 0;
+
 	if (xe_gt_WARN_ON(lrc->gt, max_len < 12))
 		return -ENOSPC;
 
+2 -2
drivers/gpu/drm/xe/xe_migrate.c
···
 	if (is_migrate)
 		mutex_lock(&m->job_mutex);
 	else
-		xe_vm_assert_held(q->vm); /* User queues VM's should be locked */
+		xe_vm_assert_held(q->user_vm); /* User queues VM's should be locked */
 }
 
 /**
···
 	if (is_migrate)
 		mutex_unlock(&m->job_mutex);
 	else
-		xe_vm_assert_held(q->vm); /* User queues VM's should be locked */
+		xe_vm_assert_held(q->user_vm); /* User queues VM's should be locked */
 }
 
 #if IS_ENABLED(CONFIG_PROVE_LOCKING)
+1 -1
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
···
 	flags = EXEC_QUEUE_FLAG_KERNEL |
 		EXEC_QUEUE_FLAG_PERMANENT |
 		EXEC_QUEUE_FLAG_MIGRATE;
-	q = xe_exec_queue_create_bind(xe, tile, flags, 0);
+	q = xe_exec_queue_create_bind(xe, tile, NULL, flags, 0);
 	if (IS_ERR(q)) {
 		err = PTR_ERR(q);
 		goto err_ret;
+6 -1
drivers/gpu/drm/xe/xe_vm.c
···
 		if (!vm->pt_root[id])
 			continue;
 
-		q = xe_exec_queue_create_bind(xe, tile, create_flags, 0);
+		q = xe_exec_queue_create_bind(xe, tile, vm, create_flags, 0);
 		if (IS_ERR(q)) {
 			err = PTR_ERR(q);
 			goto err_close;
···
 			err = -EINVAL;
 			goto put_exec_queue;
 		}
+	}
+
+	if (XE_IOCTL_DBG(xe, q && vm != q->user_vm)) {
+		err = -EINVAL;
+		goto put_exec_queue;
 	}
 
 	/* Ensure all UNMAPs visible */
+1 -1
drivers/gpu/drm/xe/xe_vm.h
···
 }
 
 /**
- * xe_vm_set_validation_exec() - Accessor to read the drm_exec object
+ * xe_vm_validation_exec() - Accessor to read the drm_exec object
  * @vm: The vm we want to register a drm_exec object with.
  *
  * Return: The drm_exec object used to lock the vm's resv. The value
+17 -2
include/drm/drm_pagemap.h
···
 				     struct dma_fence *pre_migrate_fence);
 };
 
+#if IS_ENABLED(CONFIG_ZONE_DEVICE)
+
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
+
+#else
+
+static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
+{
+	return NULL;
+}
+
+#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
+
 /**
  * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
  *
···
 	struct dma_fence *pre_migrate_fence;
 };
 
+#if IS_ENABLED(CONFIG_ZONE_DEVICE)
+
 int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
 				  struct mm_struct *mm,
 				  unsigned long start, unsigned long end,
···
 int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
 
 const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);
-
-struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
 
 void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
 			     struct device *dev, struct mm_struct *mm,
···
 			       unsigned long start, unsigned long end,
 			       struct mm_struct *mm,
 			       unsigned long timeslice_ms);
+
+#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
 
 #endif