Merge tag 'drm-fixes-2026-01-23' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"Probably a good thing you decided to do an rc8 in this round. Nothing
stands out, but xe/amdgpu and mediatek all have a bunch of fixes, and
then there are a few other single patches. Hopefully next week is
calmer for release.

xe:
- Disallow bind-queue sharing across multiple VMs
- Fix xe userptr in the absence of CONFIG_DEVICE_PRIVATE
- Fix a missed page count update
- Fix a confused argument to alloc_workqueue()
- Kernel-doc fixes
- Disable a workaround on VFs
- Fix a job lock assert
- Update wedged.mode only after successful reset policy change
- Select CONFIG_DEVICE_PRIVATE when DRM_XE_GPUSVM is selected

amdgpu:
- fix color pipeline string leak
- GC 12 fix
- Misc error path fixes
- DC analog fix
- SMU 6 fixes
- TLB flush fix
- DC idle optimization fix

amdkfd:
- GC 11 cooperative launch fix

imagination:
- sync wait for logtype update completion to ensure FW trace
is available

bridge/synopsys:
- Fix error paths in dw_dp_bind

nouveau:
- Add and implement missing DCB connector types, and improve
unknown connector handling
- Set missing atomic function ops

intel:
- place 3D LUT at the correct point in the pipeline
- fix color pipeline string leak

vkms:
- fix color pipeline string leak

mediatek:
- Fix platform_get_irq() error checking
- HDMI DDC v2 driver fixes
- dpi: Find next bridge during probe
- mtk_gem: Partial refactor and use drm_gem_dma_object
- dt-bindings: Fix typo 'hardwares' to 'hardware'"

* tag 'drm-fixes-2026-01-23' of https://gitlab.freedesktop.org/drm/kernel: (38 commits)
Revert "drm/amd/display: pause the workload setting in dm"
drm/xe: Select CONFIG_DEVICE_PRIVATE when DRM_XE_GPUSVM is selected
drm, drm/xe: Fix xe userptr in the absence of CONFIG_DEVICE_PRIVATE
drm/i915/display: Fix color pipeline enum name leak
drm/vkms: Fix color pipeline enum name leak
drm/amd/display: Fix color pipeline enum name leak
drm/i915/color: Place 3D LUT after CSC in plane color pipeline
drm/nouveau/disp: Set drm_mode_config_funcs.atomic_(check|commit)
drm/nouveau: implement missing DCB connector types; gracefully handle unknown connectors
drm/nouveau: add missing DCB connector types
drm/amdgpu: fix type for wptr in ring backup
drm/amdgpu: Fix validating flush_gpu_tlb_pasid()
drm/amd/pm: Workaround SI powertune issue on Radeon 430 (v2)
drm/amd/pm: Don't clear SI SMC table when setting power limit
drm/amd/pm: Fix si_dpm mmCG_THERMAL_INT setting
drm/xe: Update wedged.mode only after successful reset policy change
drm/xe/migrate: fix job lock assert
drm/xe/uapi: disallow bind queue sharing
drm/amd/display: Only poll analog connectors
drm/amdgpu: fix error handling in ib_schedule()
...

+561 -405
+1 -1
Documentation/devicetree/bindings/display/mediatek/mediatek,dp.yaml
··· 11 - Jitao shi <jitao.shi@mediatek.com> 12 13 description: | 14 - MediaTek DP and eDP are different hardwares and there are some features 15 which are not supported for eDP. For example, audio is not supported for 16 eDP. Therefore, we need to use two different compatibles to describe them. 17 In addition, We just need to enable the power domain of DP, so the clock
··· 11 - Jitao shi <jitao.shi@mediatek.com> 12 13 description: | 14 + MediaTek DP and eDP are different hardware and there are some features 15 which are not supported for eDP. For example, audio is not supported for 16 eDP. Therefore, we need to use two different compatibles to describe them. 17 In addition, We just need to enable the power domain of DP, so the clock
+1 -1
drivers/gpu/drm/Kconfig
··· 210 211 config DRM_GPUSVM 212 tristate 213 - depends on DRM && DEVICE_PRIVATE 214 select HMM_MIRROR 215 select MMU_NOTIFIER 216 help
··· 210 211 config DRM_GPUSVM 212 tristate 213 + depends on DRM 214 select HMM_MIRROR 215 select MMU_NOTIFIER 216 help
+3 -1
drivers/gpu/drm/Makefile
··· 108 obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o 109 110 drm_gpusvm_helper-y := \ 111 - drm_gpusvm.o\ 112 drm_pagemap.o 113 obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o 114 115 obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
··· 108 obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o 109 110 drm_gpusvm_helper-y := \ 111 + drm_gpusvm.o 112 + drm_gpusvm_helper-$(CONFIG_ZONE_DEVICE) += \ 113 drm_pagemap.o 114 + 115 obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o 116 117 obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
··· 763 } 764 765 static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring, 766 - u64 start_wptr, u32 end_wptr) 767 { 768 unsigned int first_idx = start_wptr & ring->buf_mask; 769 unsigned int last_idx = end_wptr & ring->buf_mask;
··· 763 } 764 765 static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring, 766 + u64 start_wptr, u64 end_wptr) 767 { 768 unsigned int first_idx = start_wptr & ring->buf_mask; 769 unsigned int last_idx = end_wptr & ring->buf_mask;
+4 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 733 734 if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) { 735 736 - if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid) 737 - return 0; 738 739 if (adev->gmc.flush_tlb_needs_extra_type_2) 740 adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
··· 733 734 if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) { 735 736 + if (!adev->gmc.gmc_funcs->flush_gpu_tlb_pasid) { 737 + r = 0; 738 + goto error_unlock_reset; 739 + } 740 741 if (adev->gmc.flush_tlb_needs_extra_type_2) 742 adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 302 if (job && job->vmid) 303 amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid); 304 amdgpu_ring_undo(ring); 305 - return r; 306 } 307 *f = &af->base; 308 /* get a ref for the job */
··· 302 if (job && job->vmid) 303 amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid); 304 amdgpu_ring_undo(ring); 305 + goto free_fence; 306 } 307 *f = &af->base; 308 /* get a ref for the job */
+5 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 217 if (!entity) 218 return 0; 219 220 - return drm_sched_job_init(&(*job)->base, entity, 1, owner, 221 - drm_client_id); 222 223 err_fence: 224 kfree((*job)->hw_fence);
··· 217 if (!entity) 218 return 0; 219 220 + r = drm_sched_job_init(&(*job)->base, entity, 1, owner, drm_client_id); 221 + if (!r) 222 + return 0; 223 + 224 + kfree((*job)->hw_vm_fence); 225 226 err_fence: 227 kfree((*job)->hw_fence);
-12
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
··· 278 u32 sh_num, u32 instance, int xcc_id); 279 static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); 280 281 - static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure); 282 static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 283 uint32_t val); 284 static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev); ··· 4633 return r; 4634 } 4635 4636 - static void gfx_v12_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, 4637 - bool start, 4638 - bool secure) 4639 - { 4640 - uint32_t v = secure ? FRAME_TMZ : 0; 4641 - 4642 - amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); 4643 - amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1)); 4644 - } 4645 - 4646 static void gfx_v12_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 4647 uint32_t reg_val_offs) 4648 { ··· 5509 .emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl, 5510 .init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec, 5511 .preempt_ib = gfx_v12_0_ring_preempt_ib, 5512 - .emit_frame_cntl = gfx_v12_0_ring_emit_frame_cntl, 5513 .emit_wreg = gfx_v12_0_ring_emit_wreg, 5514 .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 5515 .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
··· 278 u32 sh_num, u32 instance, int xcc_id); 279 static u32 gfx_v12_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev); 280 281 static void gfx_v12_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, 282 uint32_t val); 283 static int gfx_v12_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev); ··· 4634 return r; 4635 } 4636 4637 static void gfx_v12_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, 4638 uint32_t reg_val_offs) 4639 { ··· 5520 .emit_cntxcntl = gfx_v12_0_ring_emit_cntxcntl, 5521 .init_cond_exec = gfx_v12_0_ring_emit_init_cond_exec, 5522 .preempt_ib = gfx_v12_0_ring_preempt_ib, 5523 .emit_wreg = gfx_v12_0_ring_emit_wreg, 5524 .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, 5525 .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
+1 -2
drivers/gpu/drm/amd/amdkfd/kfd_debug.h
··· 120 && dev->kfd->mec2_fw_version < 0x1b6) || 121 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) 122 && dev->kfd->mec2_fw_version < 0x30) || 123 - (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) && 124 - KFD_GC_VERSION(dev) < IP_VERSION(12, 0, 0))) 125 return false; 126 127 /* Assume debugging and cooperative launch supported otherwise. */
··· 120 && dev->kfd->mec2_fw_version < 0x1b6) || 121 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) 122 && dev->kfd->mec2_fw_version < 0x30) || 123 + kfd_dbg_has_cwsr_workaround(dev)) 124 return false; 125 126 /* Assume debugging and cooperative launch supported otherwise. */
+3 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
··· 79 goto cleanup; 80 81 list->type = ops[i]->base.id; 82 - list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id); 83 84 i++; 85 ··· 196 goto cleanup; 197 198 drm_colorop_set_next_property(ops[i-1], ops[i]); 199 return 0; 200 201 cleanup:
··· 79 goto cleanup; 80 81 list->type = ops[i]->base.id; 82 83 i++; 84 ··· 197 goto cleanup; 198 199 drm_colorop_set_next_property(ops[i-1], ops[i]); 200 + 201 + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[0]->base.id); 202 + 203 return 0; 204 205 cleanup:
-11
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
··· 248 struct vblank_control_work *vblank_work = 249 container_of(work, struct vblank_control_work, work); 250 struct amdgpu_display_manager *dm = vblank_work->dm; 251 - struct amdgpu_device *adev = drm_to_adev(dm->ddev); 252 - int r; 253 254 mutex_lock(&dm->dc_lock); 255 ··· 277 278 if (dm->active_vblank_irq_count == 0) { 279 dc_post_update_surfaces_to_stream(dm->dc); 280 - 281 - r = amdgpu_dpm_pause_power_profile(adev, true); 282 - if (r) 283 - dev_warn(adev->dev, "failed to set default power profile mode\n"); 284 - 285 dc_allow_idle_optimizations(dm->dc, true); 286 - 287 - r = amdgpu_dpm_pause_power_profile(adev, false); 288 - if (r) 289 - dev_warn(adev->dev, "failed to restore the power profile mode\n"); 290 } 291 292 mutex_unlock(&dm->dc_lock);
··· 248 struct vblank_control_work *vblank_work = 249 container_of(work, struct vblank_control_work, work); 250 struct amdgpu_display_manager *dm = vblank_work->dm; 251 252 mutex_lock(&dm->dc_lock); 253 ··· 279 280 if (dm->active_vblank_irq_count == 0) { 281 dc_post_update_surfaces_to_stream(dm->dc); 282 dc_allow_idle_optimizations(dm->dc, true); 283 } 284 285 mutex_unlock(&dm->dc_lock);
+8 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
··· 915 struct amdgpu_dm_connector *amdgpu_dm_connector; 916 const struct dc_link *dc_link; 917 918 - use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD; 919 - 920 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 921 continue; 922 923 amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 924 925 dc_link = amdgpu_dm_connector->dc_link; 926
··· 915 struct amdgpu_dm_connector *amdgpu_dm_connector; 916 const struct dc_link *dc_link; 917 918 if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) 919 continue; 920 921 amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 922 + 923 + /* 924 + * Analog connectors may be hot-plugged unlike other connector 925 + * types that don't support HPD. Only poll analog connectors. 926 + */ 927 + use_polling |= 928 + amdgpu_dm_connector->dc_link && 929 + dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id); 930 931 dc_link = amdgpu_dm_connector->dc_link; 932
+9 -4
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
··· 1790 static int 1791 dm_plane_init_colorops(struct drm_plane *plane) 1792 { 1793 - struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES]; 1794 struct drm_device *dev = plane->dev; 1795 struct amdgpu_device *adev = drm_to_adev(dev); 1796 struct dc *dc = adev->dm.dc; 1797 int len = 0; 1798 - int ret; 1799 1800 if (plane->type == DRM_PLANE_TYPE_CURSOR) 1801 return 0; ··· 1807 if (ret) { 1808 drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n", 1809 plane->base.id, ret); 1810 - return ret; 1811 } 1812 len++; 1813 ··· 1815 drm_plane_create_color_pipeline_property(plane, pipelines, len); 1816 } 1817 1818 - return 0; 1819 } 1820 #endif 1821
··· 1790 static int 1791 dm_plane_init_colorops(struct drm_plane *plane) 1792 { 1793 + struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES] = {}; 1794 struct drm_device *dev = plane->dev; 1795 struct amdgpu_device *adev = drm_to_adev(dev); 1796 struct dc *dc = adev->dm.dc; 1797 int len = 0; 1798 + int ret = 0; 1799 + int i; 1800 1801 if (plane->type == DRM_PLANE_TYPE_CURSOR) 1802 return 0; ··· 1806 if (ret) { 1807 drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n", 1808 plane->base.id, ret); 1809 + goto out; 1810 } 1811 len++; 1812 ··· 1814 drm_plane_create_color_pipeline_property(plane, pipelines, len); 1815 } 1816 1817 + out: 1818 + for (i = 0; i < len; i++) 1819 + kfree(pipelines[i].name); 1820 + 1821 + return ret; 1822 } 1823 #endif 1824
+16 -15
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 2273 if (scaling_factor == 0) 2274 return -EINVAL; 2275 2276 - memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); 2277 - 2278 ret = si_calculate_adjusted_tdp_limits(adev, 2279 false, /* ??? */ 2280 adev->pm.dpm.tdp_adjustment, ··· 2280 &near_tdp_limit); 2281 if (ret) 2282 return ret; 2283 2284 smc_table->dpm2Params.TDPLimit = 2285 cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000); ··· 2332 2333 if (ni_pi->enable_power_containment) { 2334 SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable; 2335 - u32 scaling_factor = si_get_smc_power_scaling_factor(adev); 2336 int ret; 2337 - 2338 - memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); 2339 - 2340 - smc_table->dpm2Params.NearTDPLimit = 2341 - cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000); 2342 - smc_table->dpm2Params.SafePowerLimit = 2343 - cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000); 2344 2345 ret = amdgpu_si_copy_bytes_to_smc(adev, 2346 (si_pi->state_table_start + ··· 3469 (adev->pdev->revision == 0x80) || 3470 (adev->pdev->revision == 0x81) || 3471 (adev->pdev->revision == 0x83) || 3472 - (adev->pdev->revision == 0x87) || 3473 (adev->pdev->device == 0x6604) || 3474 (adev->pdev->device == 0x6605)) { 3475 max_sclk = 75000; 3476 } 3477 } 3478 ··· 7601 case AMDGPU_IRQ_STATE_DISABLE: 7602 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7603 cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK; 7604 - WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); 7605 break; 7606 case AMDGPU_IRQ_STATE_ENABLE: 7607 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7608 cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK; 7609 - WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); 7610 break; 7611 default: 7612 break; ··· 7618 case AMDGPU_IRQ_STATE_DISABLE: 7619 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7620 cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; 7621 - WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); 7622 break; 7623 case AMDGPU_IRQ_STATE_ENABLE: 7624 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7625 cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; 7626 - WREG32_SMC(mmCG_THERMAL_INT, cg_thermal_int); 7627 break; 7628 default: 7629 break;
··· 2273 if (scaling_factor == 0) 2274 return -EINVAL; 2275 2276 ret = si_calculate_adjusted_tdp_limits(adev, 2277 false, /* ??? */ 2278 adev->pm.dpm.tdp_adjustment, ··· 2282 &near_tdp_limit); 2283 if (ret) 2284 return ret; 2285 + 2286 + if (adev->pdev->device == 0x6611 && adev->pdev->revision == 0x87) { 2287 + /* Workaround buggy powertune on Radeon 430 and 520. */ 2288 + tdp_limit = 32; 2289 + near_tdp_limit = 28; 2290 + } 2291 2292 smc_table->dpm2Params.TDPLimit = 2293 cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000); ··· 2328 2329 if (ni_pi->enable_power_containment) { 2330 SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable; 2331 int ret; 2332 2333 ret = amdgpu_si_copy_bytes_to_smc(adev, 2334 (si_pi->state_table_start + ··· 3473 (adev->pdev->revision == 0x80) || 3474 (adev->pdev->revision == 0x81) || 3475 (adev->pdev->revision == 0x83) || 3476 + (adev->pdev->revision == 0x87 && 3477 + adev->pdev->device != 0x6611) || 3478 (adev->pdev->device == 0x6604) || 3479 (adev->pdev->device == 0x6605)) { 3480 max_sclk = 75000; 3481 + } else if (adev->pdev->revision == 0x87 && 3482 + adev->pdev->device == 0x6611) { 3483 + /* Radeon 430 and 520 */ 3484 + max_sclk = 78000; 3485 } 3486 } 3487 ··· 7600 case AMDGPU_IRQ_STATE_DISABLE: 7601 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7602 cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK; 7603 + WREG32(mmCG_THERMAL_INT, cg_thermal_int); 7604 break; 7605 case AMDGPU_IRQ_STATE_ENABLE: 7606 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7607 cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK; 7608 + WREG32(mmCG_THERMAL_INT, cg_thermal_int); 7609 break; 7610 default: 7611 break; ··· 7617 case AMDGPU_IRQ_STATE_DISABLE: 7618 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7619 cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; 7620 + WREG32(mmCG_THERMAL_INT, cg_thermal_int); 7621 break; 7622 case AMDGPU_IRQ_STATE_ENABLE: 7623 cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT); 7624 cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK; 7625 + WREG32(mmCG_THERMAL_INT, cg_thermal_int); 7626 break; 7627 default: 7628 break;
+14 -6
drivers/gpu/drm/bridge/synopsys/dw-dp.c
··· 2062 } 2063 2064 ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); 2065 - if (ret) 2066 dev_err_probe(dev, ret, "Failed to attach bridge\n"); 2067 2068 dw_dp_init_hw(dp); 2069 2070 ret = phy_init(dp->phy); 2071 if (ret) { 2072 dev_err_probe(dev, ret, "phy init failed\n"); 2073 - return ERR_PTR(ret); 2074 } 2075 2076 ret = devm_add_action_or_reset(dev, dw_dp_phy_exit, dp); 2077 if (ret) 2078 - return ERR_PTR(ret); 2079 2080 dp->irq = platform_get_irq(pdev, 0); 2081 - if (dp->irq < 0) 2082 - return ERR_PTR(ret); 2083 2084 ret = devm_request_threaded_irq(dev, dp->irq, NULL, dw_dp_irq, 2085 IRQF_ONESHOT, dev_name(dev), dp); 2086 if (ret) { 2087 dev_err_probe(dev, ret, "failed to request irq\n"); 2088 - return ERR_PTR(ret); 2089 } 2090 2091 return dp; 2092 } 2093 EXPORT_SYMBOL_GPL(dw_dp_bind); 2094
··· 2062 } 2063 2064 ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); 2065 + if (ret) { 2066 dev_err_probe(dev, ret, "Failed to attach bridge\n"); 2067 + goto unregister_aux; 2068 + } 2069 2070 dw_dp_init_hw(dp); 2071 2072 ret = phy_init(dp->phy); 2073 if (ret) { 2074 dev_err_probe(dev, ret, "phy init failed\n"); 2075 + goto unregister_aux; 2076 } 2077 2078 ret = devm_add_action_or_reset(dev, dw_dp_phy_exit, dp); 2079 if (ret) 2080 + goto unregister_aux; 2081 2082 dp->irq = platform_get_irq(pdev, 0); 2083 + if (dp->irq < 0) { 2084 + ret = dp->irq; 2085 + goto unregister_aux; 2086 + } 2087 2088 ret = devm_request_threaded_irq(dev, dp->irq, NULL, dw_dp_irq, 2089 IRQF_ONESHOT, dev_name(dev), dp); 2090 if (ret) { 2091 dev_err_probe(dev, ret, "failed to request irq\n"); 2092 + goto unregister_aux; 2093 } 2094 2095 return dp; 2096 + 2097 + unregister_aux: 2098 + drm_dp_aux_unregister(&dp->aux); 2099 + return ERR_PTR(ret); 2100 } 2101 EXPORT_SYMBOL_GPL(dw_dp_bind); 2102
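
The dw_dp_bind() fix above replaces the early error returns with jumps to a shared unwind label, so the DP AUX channel registered earlier in the function is unregistered on every failure path instead of leaking. A minimal sketch of that unwind shape, with hypothetical names (example_bind(), acquire_resource(), next_step() and release_resource() are illustrative only, not part of the driver; includes omitted):

static int example_bind(struct device *dev)
{
	int ret;

	ret = acquire_resource(dev);	/* side effect that must be undone on failure */
	if (ret)
		return ret;		/* nothing acquired yet, a plain return is fine */

	ret = next_step(dev);
	if (ret)
		goto release;		/* every later failure unwinds the side effect */

	return 0;

release:
	release_resource(dev);
	return ret;
}
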
+22 -14
drivers/gpu/drm/i915/display/intel_color_pipeline.c
··· 34 return ret; 35 36 list->type = colorop->base.base.id; 37 - list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", colorop->base.base.id); 38 39 /* TODO: handle failures and clean up */ 40 prev_op = &colorop->base; 41 42 if (DISPLAY_VER(display) >= 35 && ··· 63 prev_op = &colorop->base; 64 } 65 66 - colorop = intel_colorop_create(INTEL_PLANE_CB_CSC); 67 - ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane, 68 - DRM_COLOROP_FLAG_ALLOW_BYPASS); 69 - if (ret) 70 - return ret; 71 - 72 - drm_colorop_set_next_property(prev_op, &colorop->base); 73 - prev_op = &colorop->base; 74 - 75 colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT); 76 ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, 77 PLANE_GAMMA_SIZE, ··· 73 74 drm_colorop_set_next_property(prev_op, &colorop->base); 75 76 return 0; 77 } 78 ··· 82 { 83 struct drm_device *dev = plane->dev; 84 struct intel_display *display = to_intel_display(dev); 85 - struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES]; 86 int len = 0; 87 - int ret; 88 89 /* Currently expose pipeline only for HDR planes */ 90 if (!icl_is_hdr_plane(display, to_intel_plane(plane)->id)) ··· 94 /* Add pipeline consisting of transfer functions */ 95 ret = _intel_color_pipeline_plane_init(plane, &pipelines[len], pipe); 96 if (ret) 97 - return ret; 98 len++; 99 100 - return drm_plane_create_color_pipeline_property(plane, pipelines, len); 101 }
··· 34 return ret; 35 36 list->type = colorop->base.base.id; 37 38 /* TODO: handle failures and clean up */ 39 + prev_op = &colorop->base; 40 + 41 + colorop = intel_colorop_create(INTEL_PLANE_CB_CSC); 42 + ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane, 43 + DRM_COLOROP_FLAG_ALLOW_BYPASS); 44 + if (ret) 45 + return ret; 46 + 47 + drm_colorop_set_next_property(prev_op, &colorop->base); 48 prev_op = &colorop->base; 49 50 if (DISPLAY_VER(display) >= 35 && ··· 55 prev_op = &colorop->base; 56 } 57 58 colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT); 59 ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane, 60 PLANE_GAMMA_SIZE, ··· 74 75 drm_colorop_set_next_property(prev_op, &colorop->base); 76 77 + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", list->type); 78 + 79 return 0; 80 } 81 ··· 81 { 82 struct drm_device *dev = plane->dev; 83 struct intel_display *display = to_intel_display(dev); 84 + struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES] = {}; 85 int len = 0; 86 + int ret = 0; 87 + int i; 88 89 /* Currently expose pipeline only for HDR planes */ 90 if (!icl_is_hdr_plane(display, to_intel_plane(plane)->id)) ··· 92 /* Add pipeline consisting of transfer functions */ 93 ret = _intel_color_pipeline_plane_init(plane, &pipelines[len], pipe); 94 if (ret) 95 + goto out; 96 len++; 97 98 + ret = drm_plane_create_color_pipeline_property(plane, pipelines, len); 99 + 100 + for (i = 0; i < len; i++) 101 + kfree(pipelines[i].name); 102 + 103 + out: 104 + return ret; 105 }
+7 -1
drivers/gpu/drm/imagination/pvr_fw_trace.c
··· 137 struct rogue_fwif_kccb_cmd cmd; 138 int idx; 139 int err; 140 141 if (group_mask) 142 fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_TRACE | group_mask; ··· 155 cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE; 156 cmd.kccb_flags = 0; 157 158 - err = pvr_kccb_send_cmd(pvr_dev, &cmd, NULL); 159 160 drm_dev_exit(idx); 161 162 err_up_read:
··· 137 struct rogue_fwif_kccb_cmd cmd; 138 int idx; 139 int err; 140 + int slot; 141 142 if (group_mask) 143 fw_trace->tracebuf_ctrl->log_type = ROGUE_FWIF_LOG_TYPE_TRACE | group_mask; ··· 154 cmd.cmd_type = ROGUE_FWIF_KCCB_CMD_LOGTYPE_UPDATE; 155 cmd.kccb_flags = 0; 156 157 + err = pvr_kccb_send_cmd(pvr_dev, &cmd, &slot); 158 + if (err) 159 + goto err_drm_dev_exit; 160 161 + err = pvr_kccb_wait_for_completion(pvr_dev, slot, HZ, NULL); 162 + 163 + err_drm_dev_exit: 164 drm_dev_exit(idx); 165 166 err_up_read:
+1 -1
drivers/gpu/drm/mediatek/Kconfig
··· 8 depends on OF 9 depends on MTK_MMSYS 10 select DRM_CLIENT_SELECTION 11 - select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION 12 select DRM_KMS_HELPER 13 select DRM_DISPLAY_HELPER 14 select DRM_BRIDGE_CONNECTOR
··· 8 depends on OF 9 depends on MTK_MMSYS 10 select DRM_CLIENT_SELECTION 11 + select DRM_GEM_DMA_HELPER 12 select DRM_KMS_HELPER 13 select DRM_DISPLAY_HELPER 14 select DRM_BRIDGE_CONNECTOR
+9 -14
drivers/gpu/drm/mediatek/mtk_dpi.c
··· 836 enum drm_bridge_attach_flags flags) 837 { 838 struct mtk_dpi *dpi = bridge_to_dpi(bridge); 839 - int ret; 840 - 841 - dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1); 842 - if (IS_ERR(dpi->next_bridge)) { 843 - ret = PTR_ERR(dpi->next_bridge); 844 - if (ret == -EPROBE_DEFER) 845 - return ret; 846 - 847 - /* Old devicetree has only one endpoint */ 848 - dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0); 849 - if (IS_ERR(dpi->next_bridge)) 850 - return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge), 851 - "Failed to get bridge\n"); 852 - } 853 854 return drm_bridge_attach(encoder, dpi->next_bridge, 855 &dpi->bridge, flags); ··· 1304 dpi->irq = platform_get_irq(pdev, 0); 1305 if (dpi->irq < 0) 1306 return dpi->irq; 1307 1308 platform_set_drvdata(pdev, dpi); 1309
··· 836 enum drm_bridge_attach_flags flags) 837 { 838 struct mtk_dpi *dpi = bridge_to_dpi(bridge); 839 840 return drm_bridge_attach(encoder, dpi->next_bridge, 841 &dpi->bridge, flags); ··· 1318 dpi->irq = platform_get_irq(pdev, 0); 1319 if (dpi->irq < 0) 1320 return dpi->irq; 1321 + 1322 + dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 1, -1); 1323 + if (IS_ERR(dpi->next_bridge) && PTR_ERR(dpi->next_bridge) == -ENODEV) { 1324 + /* Old devicetree has only one endpoint */ 1325 + dpi->next_bridge = devm_drm_of_get_bridge(dpi->dev, dpi->dev->of_node, 0, 0); 1326 + } 1327 + if (IS_ERR(dpi->next_bridge)) 1328 + return dev_err_probe(dpi->dev, PTR_ERR(dpi->next_bridge), 1329 + "Failed to get bridge\n"); 1330 1331 platform_set_drvdata(pdev, dpi); 1332
+103 -161
drivers/gpu/drm/mediatek/mtk_gem.c
··· 1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2015 MediaTek Inc. 4 */ 5 6 #include <linux/dma-buf.h> ··· 20 21 static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 22 23 - static const struct vm_operations_struct vm_ops = { 24 - .open = drm_gem_vm_open, 25 - .close = drm_gem_vm_close, 26 - }; 27 28 static const struct drm_gem_object_funcs mtk_gem_object_funcs = { 29 .free = mtk_gem_free_object, 30 .get_sg_table = mtk_gem_prime_get_sg_table, 31 - .vmap = mtk_gem_prime_vmap, 32 - .vunmap = mtk_gem_prime_vunmap, 33 .mmap = mtk_gem_object_mmap, 34 - .vm_ops = &vm_ops, 35 }; 36 37 - static struct mtk_gem_obj *mtk_gem_init(struct drm_device *dev, 38 - unsigned long size) 39 { 40 - struct mtk_gem_obj *mtk_gem_obj; 41 int ret; 42 43 size = round_up(size, PAGE_SIZE); ··· 85 if (size == 0) 86 return ERR_PTR(-EINVAL); 87 88 - mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL); 89 - if (!mtk_gem_obj) 90 return ERR_PTR(-ENOMEM); 91 92 - mtk_gem_obj->base.funcs = &mtk_gem_object_funcs; 93 94 - ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size); 95 - if (ret < 0) { 96 DRM_ERROR("failed to initialize gem object\n"); 97 - kfree(mtk_gem_obj); 98 return ERR_PTR(ret); 99 } 100 101 - return mtk_gem_obj; 102 } 103 104 - struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, 105 - size_t size, bool alloc_kmap) 106 { 107 struct mtk_drm_private *priv = dev->dev_private; 108 - struct mtk_gem_obj *mtk_gem; 109 struct drm_gem_object *obj; 110 int ret; 111 112 - mtk_gem = mtk_gem_init(dev, size); 113 - if (IS_ERR(mtk_gem)) 114 - return ERR_CAST(mtk_gem); 115 116 - obj = &mtk_gem->base; 117 118 - mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE; 119 - 120 - if (!alloc_kmap) 121 - mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; 122 - 123 - mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size, 124 - &mtk_gem->dma_addr, GFP_KERNEL, 125 - mtk_gem->dma_attrs); 126 - if (!mtk_gem->cookie) { 127 DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size); 128 ret = -ENOMEM; 129 goto err_gem_free; 130 } 131 132 - if (alloc_kmap) 133 - mtk_gem->kvaddr = mtk_gem->cookie; 134 - 135 - DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n", 136 - mtk_gem->cookie, &mtk_gem->dma_addr, 137 size); 138 139 - return mtk_gem; 140 141 err_gem_free: 142 drm_gem_object_release(obj); 143 - kfree(mtk_gem); 144 return ERR_PTR(ret); 145 - } 146 - 147 - void mtk_gem_free_object(struct drm_gem_object *obj) 148 - { 149 - struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); 150 - struct mtk_drm_private *priv = obj->dev->dev_private; 151 - 152 - if (mtk_gem->sg) 153 - drm_prime_gem_destroy(obj, mtk_gem->sg); 154 - else 155 - dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie, 156 - mtk_gem->dma_addr, mtk_gem->dma_attrs); 157 - 158 - /* release file pointer to gem object. */ 159 - drm_gem_object_release(obj); 160 - 161 - kfree(mtk_gem); 162 } 163 164 int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, 165 struct drm_mode_create_dumb *args) 166 { 167 - struct mtk_gem_obj *mtk_gem; 168 int ret; 169 170 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); ··· 156 args->size = args->pitch; 157 args->size *= args->height; 158 159 - mtk_gem = mtk_gem_create(dev, args->size, false); 160 - if (IS_ERR(mtk_gem)) 161 - return PTR_ERR(mtk_gem); 162 163 /* 164 * allocate a id of idr table where the obj is registered 165 * and handle has the id what user can see. 
166 */ 167 - ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle); 168 if (ret) 169 goto err_handle_create; 170 171 /* drop reference from allocate - handle holds it now. */ 172 - drm_gem_object_put(&mtk_gem->base); 173 174 return 0; 175 176 err_handle_create: 177 - mtk_gem_free_object(&mtk_gem->base); 178 return ret; 179 } 180 ··· 182 struct vm_area_struct *vma) 183 184 { 185 - int ret; 186 - struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); 187 struct mtk_drm_private *priv = obj->dev->dev_private; 188 189 /* 190 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the 191 * whole buffer from the start. 192 */ 193 - vma->vm_pgoff = 0; 194 195 /* 196 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear 197 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 198 */ 199 - vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); 200 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 201 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); 202 203 - ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie, 204 - mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs); 205 206 return ret; 207 } 208 209 - /* 210 - * Allocate a sg_table for this GEM object. 211 - * Note: Both the table's contents, and the sg_table itself must be freed by 212 - * the caller. 213 - * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error. 214 - */ 215 - struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj) 216 - { 217 - struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); 218 - struct mtk_drm_private *priv = obj->dev->dev_private; 219 - struct sg_table *sgt; 220 - int ret; 221 - 222 - sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 223 - if (!sgt) 224 - return ERR_PTR(-ENOMEM); 225 - 226 - ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie, 227 - mtk_gem->dma_addr, obj->size, 228 - mtk_gem->dma_attrs); 229 - if (ret) { 230 - DRM_ERROR("failed to allocate sgt, %d\n", ret); 231 - kfree(sgt); 232 - return ERR_PTR(ret); 233 - } 234 - 235 - return sgt; 236 - } 237 - 238 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, 239 - struct dma_buf_attachment *attach, struct sg_table *sg) 240 { 241 - struct mtk_gem_obj *mtk_gem; 242 243 /* check if the entries in the sg_table are contiguous */ 244 - if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) { 245 DRM_ERROR("sg_table is not contiguous"); 246 return ERR_PTR(-EINVAL); 247 } 248 249 - mtk_gem = mtk_gem_init(dev, attach->dmabuf->size); 250 - if (IS_ERR(mtk_gem)) 251 - return ERR_CAST(mtk_gem); 252 253 - mtk_gem->dma_addr = sg_dma_address(sg->sgl); 254 - mtk_gem->sg = sg; 255 256 - return &mtk_gem->base; 257 - } 258 - 259 - int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) 260 - { 261 - struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); 262 - struct sg_table *sgt = NULL; 263 - unsigned int npages; 264 - 265 - if (mtk_gem->kvaddr) 266 - goto out; 267 - 268 - sgt = mtk_gem_prime_get_sg_table(obj); 269 - if (IS_ERR(sgt)) 270 - return PTR_ERR(sgt); 271 - 272 - npages = obj->size >> PAGE_SHIFT; 273 - mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); 274 - if (!mtk_gem->pages) { 275 - sg_free_table(sgt); 276 - kfree(sgt); 277 - return -ENOMEM; 278 - } 279 - 280 - drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages); 281 - 282 - mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, 283 - pgprot_writecombine(PAGE_KERNEL)); 284 - if (!mtk_gem->kvaddr) { 285 - 
sg_free_table(sgt); 286 - kfree(sgt); 287 - kfree(mtk_gem->pages); 288 - return -ENOMEM; 289 - } 290 - sg_free_table(sgt); 291 - kfree(sgt); 292 - 293 - out: 294 - iosys_map_set_vaddr(map, mtk_gem->kvaddr); 295 - 296 - return 0; 297 - } 298 - 299 - void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) 300 - { 301 - struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj); 302 - void *vaddr = map->vaddr; 303 - 304 - if (!mtk_gem->pages) 305 - return; 306 - 307 - vunmap(vaddr); 308 - mtk_gem->kvaddr = NULL; 309 - kfree(mtk_gem->pages); 310 }
··· 1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2015 MediaTek Inc. 4 + * Copyright (c) 2025 Collabora Ltd. 5 + * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> 6 */ 7 8 #include <linux/dma-buf.h> ··· 18 19 static int mtk_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 20 21 + static void mtk_gem_free_object(struct drm_gem_object *obj) 22 + { 23 + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj); 24 + struct mtk_drm_private *priv = obj->dev->dev_private; 25 + 26 + if (dma_obj->sgt) 27 + drm_prime_gem_destroy(obj, dma_obj->sgt); 28 + else 29 + dma_free_wc(priv->dma_dev, dma_obj->base.size, 30 + dma_obj->vaddr, dma_obj->dma_addr); 31 + 32 + /* release file pointer to gem object. */ 33 + drm_gem_object_release(obj); 34 + 35 + kfree(dma_obj); 36 + } 37 + 38 + /* 39 + * Allocate a sg_table for this GEM object. 40 + * Note: Both the table's contents, and the sg_table itself must be freed by 41 + * the caller. 42 + * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error. 43 + */ 44 + static struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj) 45 + { 46 + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj); 47 + struct mtk_drm_private *priv = obj->dev->dev_private; 48 + struct sg_table *sgt; 49 + int ret; 50 + 51 + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 52 + if (!sgt) 53 + return ERR_PTR(-ENOMEM); 54 + 55 + ret = dma_get_sgtable(priv->dma_dev, sgt, dma_obj->vaddr, 56 + dma_obj->dma_addr, obj->size); 57 + if (ret) { 58 + DRM_ERROR("failed to allocate sgt, %d\n", ret); 59 + kfree(sgt); 60 + return ERR_PTR(ret); 61 + } 62 + 63 + return sgt; 64 + } 65 66 static const struct drm_gem_object_funcs mtk_gem_object_funcs = { 67 .free = mtk_gem_free_object, 68 + .print_info = drm_gem_dma_object_print_info, 69 .get_sg_table = mtk_gem_prime_get_sg_table, 70 + .vmap = drm_gem_dma_object_vmap, 71 .mmap = mtk_gem_object_mmap, 72 + .vm_ops = &drm_gem_dma_vm_ops, 73 }; 74 75 + static struct drm_gem_dma_object *mtk_gem_init(struct drm_device *dev, 76 + unsigned long size, bool private) 77 { 78 + struct drm_gem_dma_object *dma_obj; 79 int ret; 80 81 size = round_up(size, PAGE_SIZE); ··· 43 if (size == 0) 44 return ERR_PTR(-EINVAL); 45 46 + dma_obj = kzalloc(sizeof(*dma_obj), GFP_KERNEL); 47 + if (!dma_obj) 48 return ERR_PTR(-ENOMEM); 49 50 + dma_obj->base.funcs = &mtk_gem_object_funcs; 51 52 + if (private) { 53 + ret = 0; 54 + drm_gem_private_object_init(dev, &dma_obj->base, size); 55 + } else { 56 + ret = drm_gem_object_init(dev, &dma_obj->base, size); 57 + } 58 + if (ret) { 59 DRM_ERROR("failed to initialize gem object\n"); 60 + kfree(dma_obj); 61 return ERR_PTR(ret); 62 } 63 64 + return dma_obj; 65 } 66 67 + static struct drm_gem_dma_object *mtk_gem_create(struct drm_device *dev, size_t size) 68 { 69 struct mtk_drm_private *priv = dev->dev_private; 70 + struct drm_gem_dma_object *dma_obj; 71 struct drm_gem_object *obj; 72 int ret; 73 74 + dma_obj = mtk_gem_init(dev, size, false); 75 + if (IS_ERR(dma_obj)) 76 + return ERR_CAST(dma_obj); 77 78 + obj = &dma_obj->base; 79 80 + dma_obj->vaddr = dma_alloc_wc(priv->dma_dev, obj->size, 81 + &dma_obj->dma_addr, 82 + GFP_KERNEL | __GFP_NOWARN); 83 + if (!dma_obj->vaddr) { 84 DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size); 85 ret = -ENOMEM; 86 goto err_gem_free; 87 } 88 89 + DRM_DEBUG_DRIVER("vaddr = %p dma_addr = %pad size = %zu\n", 90 + dma_obj->vaddr, &dma_obj->dma_addr, 91 size); 92 93 + return dma_obj; 94 95 err_gem_free: 96 
drm_gem_object_release(obj); 97 + kfree(dma_obj); 98 return ERR_PTR(ret); 99 } 100 101 int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, 102 struct drm_mode_create_dumb *args) 103 { 104 + struct drm_gem_dma_object *dma_obj; 105 int ret; 106 107 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); ··· 135 args->size = args->pitch; 136 args->size *= args->height; 137 138 + dma_obj = mtk_gem_create(dev, args->size); 139 + if (IS_ERR(dma_obj)) 140 + return PTR_ERR(dma_obj); 141 142 /* 143 * allocate a id of idr table where the obj is registered 144 * and handle has the id what user can see. 145 */ 146 + ret = drm_gem_handle_create(file_priv, &dma_obj->base, &args->handle); 147 if (ret) 148 goto err_handle_create; 149 150 /* drop reference from allocate - handle holds it now. */ 151 + drm_gem_object_put(&dma_obj->base); 152 153 return 0; 154 155 err_handle_create: 156 + mtk_gem_free_object(&dma_obj->base); 157 return ret; 158 } 159 ··· 161 struct vm_area_struct *vma) 162 163 { 164 + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj); 165 struct mtk_drm_private *priv = obj->dev->dev_private; 166 + int ret; 167 168 /* 169 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the 170 * whole buffer from the start. 171 */ 172 + vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); 173 174 /* 175 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear 176 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 177 */ 178 + vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP); 179 + 180 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 181 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); 182 183 + ret = dma_mmap_wc(priv->dma_dev, vma, dma_obj->vaddr, 184 + dma_obj->dma_addr, obj->size); 185 + if (ret) 186 + drm_gem_vm_close(vma); 187 188 return ret; 189 } 190 191 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, 192 + struct dma_buf_attachment *attach, struct sg_table *sgt) 193 { 194 + struct drm_gem_dma_object *dma_obj; 195 196 /* check if the entries in the sg_table are contiguous */ 197 + if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) { 198 DRM_ERROR("sg_table is not contiguous"); 199 return ERR_PTR(-EINVAL); 200 } 201 202 + dma_obj = mtk_gem_init(dev, attach->dmabuf->size, true); 203 + if (IS_ERR(dma_obj)) 204 + return ERR_CAST(dma_obj); 205 206 + dma_obj->dma_addr = sg_dma_address(sgt->sgl); 207 + dma_obj->sgt = sgt; 208 209 + return &dma_obj->base; 210 }
+1 -32
drivers/gpu/drm/mediatek/mtk_gem.h
··· 7 #define _MTK_GEM_H_ 8 9 #include <drm/drm_gem.h> 10 11 - /* 12 - * mtk drm buffer structure. 13 - * 14 - * @base: a gem object. 15 - * - a new handle to this gem object would be created 16 - * by drm_gem_handle_create(). 17 - * @cookie: the return value of dma_alloc_attrs(), keep it for dma_free_attrs() 18 - * @kvaddr: kernel virtual address of gem buffer. 19 - * @dma_addr: dma address of gem buffer. 20 - * @dma_attrs: dma attributes of gem buffer. 21 - * 22 - * P.S. this object would be transferred to user as kms_bo.handle so 23 - * user can access the buffer through kms_bo.handle. 24 - */ 25 - struct mtk_gem_obj { 26 - struct drm_gem_object base; 27 - void *cookie; 28 - void *kvaddr; 29 - dma_addr_t dma_addr; 30 - unsigned long dma_attrs; 31 - struct sg_table *sg; 32 - struct page **pages; 33 - }; 34 - 35 - #define to_mtk_gem_obj(x) container_of(x, struct mtk_gem_obj, base) 36 - 37 - void mtk_gem_free_object(struct drm_gem_object *gem); 38 - struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, size_t size, 39 - bool alloc_kmap); 40 int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, 41 struct drm_mode_create_dumb *args); 42 - struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj); 43 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, 44 struct dma_buf_attachment *attach, struct sg_table *sg); 45 - int mtk_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); 46 - void mtk_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map); 47 48 #endif
··· 7 #define _MTK_GEM_H_ 8 9 #include <drm/drm_gem.h> 10 + #include <drm/drm_gem_dma_helper.h> 11 12 int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, 13 struct drm_mode_create_dumb *args); 14 struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, 15 struct dma_buf_attachment *attach, struct sg_table *sg); 16 17 #endif
+1 -1
drivers/gpu/drm/mediatek/mtk_hdmi_common.c
··· 303 return dev_err_probe(dev, ret, "Failed to get clocks\n"); 304 305 hdmi->irq = platform_get_irq(pdev, 0); 306 - if (!hdmi->irq) 307 return hdmi->irq; 308 309 hdmi->regs = device_node_to_regmap(dev->of_node);
··· 303 return dev_err_probe(dev, ret, "Failed to get clocks\n"); 304 305 hdmi->irq = platform_get_irq(pdev, 0); 306 + if (hdmi->irq < 0) 307 return hdmi->irq; 308 309 hdmi->regs = device_node_to_regmap(dev->of_node);
+1 -1
drivers/gpu/drm/mediatek/mtk_hdmi_common.h
··· 168 bool audio_enable; 169 bool powered; 170 bool enabled; 171 - unsigned int irq; 172 enum hdmi_hpd_state hpd; 173 hdmi_codec_plugged_cb plugged_cb; 174 struct device *codec_dev;
··· 168 bool audio_enable; 169 bool powered; 170 bool enabled; 171 + int irq; 172 enum hdmi_hpd_state hpd; 173 hdmi_codec_plugged_cb plugged_cb; 174 struct device *codec_dev;
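
Both MediaTek IRQ hunks above restore the standard platform_get_irq() contract: it returns the IRQ number on success and a negative errno on failure, so the result has to be stored in a signed int and checked with '< 0' rather than '!irq'. A minimal probe-path sketch of that pattern, assuming a hypothetical foo_probe()/foo_irq_handler() pair (includes and the rest of the probe omitted):

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);	/* negative errno on failure */
	if (irq < 0)
		return irq;			/* also propagates -EPROBE_DEFER */

	ret = devm_request_threaded_irq(dev, irq, NULL, foo_irq_handler,
					IRQF_ONESHOT, dev_name(dev), NULL);
	if (ret)
		return ret;

	return 0;
}
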
+33 -25
drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c
··· 66 return 0; 67 } 68 69 - static int mtk_ddc_wr_one(struct mtk_hdmi_ddc *ddc, u16 addr_id, 70 - u16 offset_id, u8 *wr_data) 71 { 72 u32 val; 73 - int ret; 74 75 /* If down, rise bus for write operation */ 76 mtk_ddc_check_and_rise_low_bus(ddc); ··· 86 regmap_update_bits(ddc->regs, HPD_DDC_CTRL, HPD_DDC_DELAY_CNT, 87 FIELD_PREP(HPD_DDC_DELAY_CNT, DDC2_DLY_CNT)); 88 89 if (wr_data) { 90 - regmap_write(ddc->regs, SI2C_CTRL, 91 - FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) | 92 - FIELD_PREP(SI2C_WDATA, *wr_data) | 93 - SI2C_WR); 94 } 95 - 96 regmap_write(ddc->regs, DDC_CTRL, 97 FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_WRITE) | 98 - FIELD_PREP(DDC_CTRL_DIN_CNT, wr_data == NULL ? 0 : 1) | 99 FIELD_PREP(DDC_CTRL_OFFSET, offset_id) | 100 FIELD_PREP(DDC_CTRL_ADDR, addr_id)); 101 usleep_range(1000, 1250); ··· 109 !(val & DDC_I2C_IN_PROG), 500, 1000); 110 if (ret) { 111 dev_err(ddc->dev, "DDC I2C write timeout\n"); 112 return ret; 113 } 114 ··· 197 500 * (temp_length + 5)); 198 if (ret) { 199 dev_err(ddc->dev, "Timeout waiting for DDC I2C\n"); 200 return ret; 201 } 202 ··· 273 static int mtk_hdmi_ddc_fg_data_write(struct mtk_hdmi_ddc *ddc, u16 b_dev, 274 u8 data_addr, u16 data_cnt, u8 *pr_data) 275 { 276 - int i, ret; 277 - 278 regmap_set_bits(ddc->regs, HDCP2X_POL_CTRL, HDCP2X_DIS_POLL_EN); 279 - /* 280 - * In case there is no payload data, just do a single write for the 281 - * address only 282 - */ 283 - if (data_cnt == 0) 284 - return mtk_ddc_wr_one(ddc, b_dev, data_addr, NULL); 285 286 - i = 0; 287 - do { 288 - ret = mtk_ddc_wr_one(ddc, b_dev, data_addr + i, pr_data + i); 289 - if (ret) 290 - return ret; 291 - } while (++i < data_cnt); 292 - 293 - return 0; 294 } 295 296 static int mtk_hdmi_ddc_v2_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
··· 66 return 0; 67 } 68 69 + static int mtk_ddcm_write_hdmi(struct mtk_hdmi_ddc *ddc, u16 addr_id, 70 + u16 offset_id, u16 data_cnt, u8 *wr_data) 71 { 72 u32 val; 73 + int ret, i; 74 + 75 + /* Don't allow transfer with a size over than the transfer fifo size 76 + * (16 byte) 77 + */ 78 + if (data_cnt > 16) { 79 + dev_err(ddc->dev, "Invalid DDCM write request\n"); 80 + return -EINVAL; 81 + } 82 83 /* If down, rise bus for write operation */ 84 mtk_ddc_check_and_rise_low_bus(ddc); ··· 78 regmap_update_bits(ddc->regs, HPD_DDC_CTRL, HPD_DDC_DELAY_CNT, 79 FIELD_PREP(HPD_DDC_DELAY_CNT, DDC2_DLY_CNT)); 80 81 + /* In case there is no payload data, just do a single write for the 82 + * address only 83 + */ 84 if (wr_data) { 85 + /* Fill transfer fifo with payload data */ 86 + for (i = 0; i < data_cnt; i++) { 87 + regmap_write(ddc->regs, SI2C_CTRL, 88 + FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) | 89 + FIELD_PREP(SI2C_WDATA, wr_data[i]) | 90 + SI2C_WR); 91 + } 92 } 93 regmap_write(ddc->regs, DDC_CTRL, 94 FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_WRITE) | 95 + FIELD_PREP(DDC_CTRL_DIN_CNT, wr_data == NULL ? 0 : data_cnt) | 96 FIELD_PREP(DDC_CTRL_OFFSET, offset_id) | 97 FIELD_PREP(DDC_CTRL_ADDR, addr_id)); 98 usleep_range(1000, 1250); ··· 96 !(val & DDC_I2C_IN_PROG), 500, 1000); 97 if (ret) { 98 dev_err(ddc->dev, "DDC I2C write timeout\n"); 99 + 100 + /* Abort transfer if it is still in progress */ 101 + regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD, 102 + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_ABORT_XFER)); 103 + 104 return ret; 105 } 106 ··· 179 500 * (temp_length + 5)); 180 if (ret) { 181 dev_err(ddc->dev, "Timeout waiting for DDC I2C\n"); 182 + 183 + /* Abort transfer if it is still in progress */ 184 + regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD, 185 + FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_ABORT_XFER)); 186 + 187 return ret; 188 } 189 ··· 250 static int mtk_hdmi_ddc_fg_data_write(struct mtk_hdmi_ddc *ddc, u16 b_dev, 251 u8 data_addr, u16 data_cnt, u8 *pr_data) 252 { 253 regmap_set_bits(ddc->regs, HDCP2X_POL_CTRL, HDCP2X_DIS_POLL_EN); 254 255 + return mtk_ddcm_write_hdmi(ddc, b_dev, data_addr, data_cnt, pr_data); 256 } 257 258 static int mtk_hdmi_ddc_v2_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+4 -3
drivers/gpu/drm/mediatek/mtk_hdmi_v2.c
··· 1120 mtk_hdmi_v2_disable(hdmi); 1121 } 1122 1123 - static int mtk_hdmi_v2_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge, 1124 - const struct drm_display_mode *mode, 1125 - unsigned long long tmds_rate) 1126 { 1127 if (mode->clock < MTK_HDMI_V2_CLOCK_MIN) 1128 return MODE_CLOCK_LOW;
··· 1120 mtk_hdmi_v2_disable(hdmi); 1121 } 1122 1123 + static enum drm_mode_status 1124 + mtk_hdmi_v2_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge, 1125 + const struct drm_display_mode *mode, 1126 + unsigned long long tmds_rate) 1127 { 1128 if (mode->clock < MTK_HDMI_V2_CLOCK_MIN) 1129 return MODE_CLOCK_LOW;
+4 -4
drivers/gpu/drm/mediatek/mtk_plane.c
··· 11 #include <drm/drm_fourcc.h> 12 #include <drm/drm_framebuffer.h> 13 #include <drm/drm_gem_atomic_helper.h> 14 #include <drm/drm_print.h> 15 #include <linux/align.h> 16 17 #include "mtk_crtc.h" 18 #include "mtk_ddp_comp.h" 19 #include "mtk_drm_drv.h" 20 - #include "mtk_gem.h" 21 #include "mtk_plane.h" 22 23 static const u64 modifiers[] = { ··· 114 struct mtk_plane_state *mtk_plane_state) 115 { 116 struct drm_framebuffer *fb = new_state->fb; 117 struct drm_gem_object *gem; 118 - struct mtk_gem_obj *mtk_gem; 119 unsigned int pitch, format; 120 u64 modifier; 121 dma_addr_t addr; ··· 124 int offset; 125 126 gem = fb->obj[0]; 127 - mtk_gem = to_mtk_gem_obj(gem); 128 - addr = mtk_gem->dma_addr; 129 pitch = fb->pitches[0]; 130 format = fb->format->format; 131 modifier = fb->modifier;
··· 11 #include <drm/drm_fourcc.h> 12 #include <drm/drm_framebuffer.h> 13 #include <drm/drm_gem_atomic_helper.h> 14 + #include <drm/drm_gem_dma_helper.h> 15 #include <drm/drm_print.h> 16 #include <linux/align.h> 17 18 #include "mtk_crtc.h" 19 #include "mtk_ddp_comp.h" 20 #include "mtk_drm_drv.h" 21 #include "mtk_plane.h" 22 23 static const u64 modifiers[] = { ··· 114 struct mtk_plane_state *mtk_plane_state) 115 { 116 struct drm_framebuffer *fb = new_state->fb; 117 + struct drm_gem_dma_object *dma_obj; 118 struct drm_gem_object *gem; 119 unsigned int pitch, format; 120 u64 modifier; 121 dma_addr_t addr; ··· 124 int offset; 125 126 gem = fb->obj[0]; 127 + dma_obj = to_drm_gem_dma_obj(gem); 128 + addr = dma_obj->dma_addr; 129 pitch = fb->pitches[0]; 130 format = fb->format->format; 131 modifier = fb->modifier;
+74 -21
drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
··· 1 /* SPDX-License-Identifier: MIT */ 2 #ifndef __NVBIOS_CONN_H__ 3 #define __NVBIOS_CONN_H__ 4 enum dcb_connector_type { 5 - DCB_CONNECTOR_VGA = 0x00, 6 - DCB_CONNECTOR_TV_0 = 0x10, 7 - DCB_CONNECTOR_TV_1 = 0x11, 8 - DCB_CONNECTOR_TV_3 = 0x13, 9 - DCB_CONNECTOR_DVI_I = 0x30, 10 - DCB_CONNECTOR_DVI_D = 0x31, 11 - DCB_CONNECTOR_DMS59_0 = 0x38, 12 - DCB_CONNECTOR_DMS59_1 = 0x39, 13 - DCB_CONNECTOR_LVDS = 0x40, 14 - DCB_CONNECTOR_LVDS_SPWG = 0x41, 15 - DCB_CONNECTOR_DP = 0x46, 16 - DCB_CONNECTOR_eDP = 0x47, 17 - DCB_CONNECTOR_mDP = 0x48, 18 - DCB_CONNECTOR_HDMI_0 = 0x60, 19 - DCB_CONNECTOR_HDMI_1 = 0x61, 20 - DCB_CONNECTOR_HDMI_C = 0x63, 21 - DCB_CONNECTOR_DMS59_DP0 = 0x64, 22 - DCB_CONNECTOR_DMS59_DP1 = 0x65, 23 - DCB_CONNECTOR_WFD = 0x70, 24 - DCB_CONNECTOR_USB_C = 0x71, 25 - DCB_CONNECTOR_NONE = 0xff 26 }; 27 28 struct nvbios_connT {
··· 1 /* SPDX-License-Identifier: MIT */ 2 #ifndef __NVBIOS_CONN_H__ 3 #define __NVBIOS_CONN_H__ 4 + 5 + /* 6 + * An enumerator representing all of the possible VBIOS connector types defined 7 + * by Nvidia at 8 + * https://nvidia.github.io/open-gpu-doc/DCB/DCB-4.x-Specification.html. 9 + * 10 + * [1] Nvidia's documentation actually claims DCB_CONNECTOR_HDMI_0 is a "3-Pin 11 + * DIN Stereo Connector". This seems very likely to be a documentation typo 12 + * or some sort of funny historical baggage, because we've treated this 13 + * connector type as HDMI for years without issue. 14 + * TODO: Check with Nvidia what's actually happening here. 15 + */ 16 enum dcb_connector_type { 17 + /* Analog outputs */ 18 + DCB_CONNECTOR_VGA = 0x00, // VGA 15-pin connector 19 + DCB_CONNECTOR_DVI_A = 0x01, // DVI-A 20 + DCB_CONNECTOR_POD_VGA = 0x02, // Pod - VGA 15-pin connector 21 + DCB_CONNECTOR_TV_0 = 0x10, // TV - Composite Out 22 + DCB_CONNECTOR_TV_1 = 0x11, // TV - S-Video Out 23 + DCB_CONNECTOR_TV_2 = 0x12, // TV - S-Video Breakout - Composite 24 + DCB_CONNECTOR_TV_3 = 0x13, // HDTV Component - YPrPb 25 + DCB_CONNECTOR_TV_SCART = 0x14, // TV - SCART Connector 26 + DCB_CONNECTOR_TV_SCART_D = 0x16, // TV - Composite SCART over D-connector 27 + DCB_CONNECTOR_TV_DTERM = 0x17, // HDTV - D-connector (EIAJ4120) 28 + DCB_CONNECTOR_POD_TV_3 = 0x18, // Pod - HDTV - YPrPb 29 + DCB_CONNECTOR_POD_TV_1 = 0x19, // Pod - S-Video 30 + DCB_CONNECTOR_POD_TV_0 = 0x1a, // Pod - Composite 31 + 32 + /* DVI digital outputs */ 33 + DCB_CONNECTOR_DVI_I_TV_1 = 0x20, // DVI-I-TV-S-Video 34 + DCB_CONNECTOR_DVI_I_TV_0 = 0x21, // DVI-I-TV-Composite 35 + DCB_CONNECTOR_DVI_I_TV_2 = 0x22, // DVI-I-TV-S-Video Breakout-Composite 36 + DCB_CONNECTOR_DVI_I = 0x30, // DVI-I 37 + DCB_CONNECTOR_DVI_D = 0x31, // DVI-D 38 + DCB_CONNECTOR_DVI_ADC = 0x32, // Apple Display Connector (ADC) 39 + DCB_CONNECTOR_DMS59_0 = 0x38, // LFH-DVI-I-1 40 + DCB_CONNECTOR_DMS59_1 = 0x39, // LFH-DVI-I-2 41 + DCB_CONNECTOR_BNC = 0x3c, // BNC Connector [for SDI?] 42 + 43 + /* LVDS / TMDS digital outputs */ 44 + DCB_CONNECTOR_LVDS = 0x40, // LVDS-SPWG-Attached [is this name correct?] 45 + DCB_CONNECTOR_LVDS_SPWG = 0x41, // LVDS-OEM-Attached (non-removable) 46 + DCB_CONNECTOR_LVDS_REM = 0x42, // LVDS-SPWG-Detached [following naming above] 47 + DCB_CONNECTOR_LVDS_SPWG_REM = 0x43, // LVDS-OEM-Detached (removable) 48 + DCB_CONNECTOR_TMDS = 0x45, // TMDS-OEM-Attached (non-removable) 49 + 50 + /* DP digital outputs */ 51 + DCB_CONNECTOR_DP = 0x46, // DisplayPort External Connector 52 + DCB_CONNECTOR_eDP = 0x47, // DisplayPort Internal Connector 53 + DCB_CONNECTOR_mDP = 0x48, // DisplayPort (Mini) External Connector 54 + 55 + /* Dock outputs (not used) */ 56 + DCB_CONNECTOR_DOCK_VGA_0 = 0x50, // VGA 15-pin if not docked 57 + DCB_CONNECTOR_DOCK_VGA_1 = 0x51, // VGA 15-pin if docked 58 + DCB_CONNECTOR_DOCK_DVI_I_0 = 0x52, // DVI-I if not docked 59 + DCB_CONNECTOR_DOCK_DVI_I_1 = 0x53, // DVI-I if docked 60 + DCB_CONNECTOR_DOCK_DVI_D_0 = 0x54, // DVI-D if not docked 61 + DCB_CONNECTOR_DOCK_DVI_D_1 = 0x55, // DVI-D if docked 62 + DCB_CONNECTOR_DOCK_DP_0 = 0x56, // DisplayPort if not docked 63 + DCB_CONNECTOR_DOCK_DP_1 = 0x57, // DisplayPort if docked 64 + DCB_CONNECTOR_DOCK_mDP_0 = 0x58, // DisplayPort (Mini) if not docked 65 + DCB_CONNECTOR_DOCK_mDP_1 = 0x59, // DisplayPort (Mini) if docked 66 + 67 + /* HDMI? digital outputs */ 68 + DCB_CONNECTOR_HDMI_0 = 0x60, // HDMI? 
See [1] in top-level enum comment above 69 + DCB_CONNECTOR_HDMI_1 = 0x61, // HDMI-A connector 70 + DCB_CONNECTOR_SPDIF = 0x62, // Audio S/PDIF connector 71 + DCB_CONNECTOR_HDMI_C = 0x63, // HDMI-C (Mini) connector 72 + 73 + /* Misc. digital outputs */ 74 + DCB_CONNECTOR_DMS59_DP0 = 0x64, // LFH-DP-1 75 + DCB_CONNECTOR_DMS59_DP1 = 0x65, // LFH-DP-2 76 + DCB_CONNECTOR_WFD = 0x70, // Virtual connector for Wifi Display (WFD) 77 + DCB_CONNECTOR_USB_C = 0x71, // [DP over USB-C; not present in docs] 78 + DCB_CONNECTOR_NONE = 0xff // Skip Entry 79 }; 80 81 struct nvbios_connT {
+2
drivers/gpu/drm/nouveau/nouveau_display.c
··· 352 353 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { 354 .fb_create = nouveau_user_framebuffer_create, 355 }; 356 357
··· 352 353 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { 354 .fb_create = nouveau_user_framebuffer_create, 355 + .atomic_commit = drm_atomic_helper_commit, 356 + .atomic_check = drm_atomic_helper_check, 357 }; 358 359
+53 -20
drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
··· 191 spin_lock(&disp->client.lock); 192 if (!conn->object.func) { 193 switch (conn->info.type) { 194 - case DCB_CONNECTOR_VGA : args->v0.type = NVIF_CONN_V0_VGA; break; 195 - case DCB_CONNECTOR_TV_0 : 196 - case DCB_CONNECTOR_TV_1 : 197 - case DCB_CONNECTOR_TV_3 : args->v0.type = NVIF_CONN_V0_TV; break; 198 - case DCB_CONNECTOR_DMS59_0 : 199 - case DCB_CONNECTOR_DMS59_1 : 200 - case DCB_CONNECTOR_DVI_I : args->v0.type = NVIF_CONN_V0_DVI_I; break; 201 - case DCB_CONNECTOR_DVI_D : args->v0.type = NVIF_CONN_V0_DVI_D; break; 202 - case DCB_CONNECTOR_LVDS : args->v0.type = NVIF_CONN_V0_LVDS; break; 203 - case DCB_CONNECTOR_LVDS_SPWG: args->v0.type = NVIF_CONN_V0_LVDS_SPWG; break; 204 - case DCB_CONNECTOR_DMS59_DP0: 205 - case DCB_CONNECTOR_DMS59_DP1: 206 - case DCB_CONNECTOR_DP : 207 - case DCB_CONNECTOR_mDP : 208 - case DCB_CONNECTOR_USB_C : args->v0.type = NVIF_CONN_V0_DP; break; 209 - case DCB_CONNECTOR_eDP : args->v0.type = NVIF_CONN_V0_EDP; break; 210 - case DCB_CONNECTOR_HDMI_0 : 211 - case DCB_CONNECTOR_HDMI_1 : 212 - case DCB_CONNECTOR_HDMI_C : args->v0.type = NVIF_CONN_V0_HDMI; break; 213 default: 214 - WARN_ON(1); 215 ret = -EINVAL; 216 break; 217 }
··· 191 spin_lock(&disp->client.lock); 192 if (!conn->object.func) { 193 switch (conn->info.type) { 194 + /* VGA */ 195 + case DCB_CONNECTOR_DVI_A : 196 + case DCB_CONNECTOR_POD_VGA : 197 + case DCB_CONNECTOR_VGA : args->v0.type = NVIF_CONN_V0_VGA; break; 198 + 199 + /* TV */ 200 + case DCB_CONNECTOR_TV_0 : 201 + case DCB_CONNECTOR_TV_1 : 202 + case DCB_CONNECTOR_TV_2 : 203 + case DCB_CONNECTOR_TV_SCART : 204 + case DCB_CONNECTOR_TV_SCART_D : 205 + case DCB_CONNECTOR_TV_DTERM : 206 + case DCB_CONNECTOR_POD_TV_3 : 207 + case DCB_CONNECTOR_POD_TV_1 : 208 + case DCB_CONNECTOR_POD_TV_0 : 209 + case DCB_CONNECTOR_TV_3 : args->v0.type = NVIF_CONN_V0_TV; break; 210 + 211 + /* DVI */ 212 + case DCB_CONNECTOR_DVI_I_TV_1 : 213 + case DCB_CONNECTOR_DVI_I_TV_0 : 214 + case DCB_CONNECTOR_DVI_I_TV_2 : 215 + case DCB_CONNECTOR_DVI_ADC : 216 + case DCB_CONNECTOR_DMS59_0 : 217 + case DCB_CONNECTOR_DMS59_1 : 218 + case DCB_CONNECTOR_DVI_I : args->v0.type = NVIF_CONN_V0_DVI_I; break; 219 + case DCB_CONNECTOR_TMDS : 220 + case DCB_CONNECTOR_DVI_D : args->v0.type = NVIF_CONN_V0_DVI_D; break; 221 + 222 + /* LVDS */ 223 + case DCB_CONNECTOR_LVDS : args->v0.type = NVIF_CONN_V0_LVDS; break; 224 + case DCB_CONNECTOR_LVDS_SPWG : args->v0.type = NVIF_CONN_V0_LVDS_SPWG; break; 225 + 226 + /* DP */ 227 + case DCB_CONNECTOR_DMS59_DP0 : 228 + case DCB_CONNECTOR_DMS59_DP1 : 229 + case DCB_CONNECTOR_DP : 230 + case DCB_CONNECTOR_mDP : 231 + case DCB_CONNECTOR_USB_C : args->v0.type = NVIF_CONN_V0_DP; break; 232 + case DCB_CONNECTOR_eDP : args->v0.type = NVIF_CONN_V0_EDP; break; 233 + 234 + /* HDMI */ 235 + case DCB_CONNECTOR_HDMI_0 : 236 + case DCB_CONNECTOR_HDMI_1 : 237 + case DCB_CONNECTOR_HDMI_C : args->v0.type = NVIF_CONN_V0_HDMI; break; 238 + 239 + /* 240 + * Dock & unused outputs. 241 + * BNC, SPDIF, WFD, and detached LVDS go here. 242 + */ 243 default: 244 + nvkm_warn(&disp->engine.subdev, 245 + "unimplemented connector type 0x%02x\n", 246 + conn->info.type); 247 + args->v0.type = NVIF_CONN_V0_VGA; 248 ret = -EINVAL; 249 break; 250 }
+8 -7
drivers/gpu/drm/vkms/vkms_colorop.c
··· 37 goto cleanup; 38 39 list->type = ops[i]->base.id; 40 - list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id); 41 42 i++; 43 ··· 87 88 drm_colorop_set_next_property(ops[i - 1], ops[i]); 89 90 return 0; 91 92 cleanup: ··· 104 105 int vkms_initialize_colorops(struct drm_plane *plane) 106 { 107 - struct drm_prop_enum_list pipeline; 108 - int ret; 109 110 /* Add color pipeline */ 111 ret = vkms_initialize_color_pipeline(plane, &pipeline); 112 if (ret) 113 - return ret; 114 115 /* Create COLOR_PIPELINE property and attach */ 116 ret = drm_plane_create_color_pipeline_property(plane, &pipeline, 1); 117 - if (ret) 118 - return ret; 119 120 - return 0; 121 }
··· 37 goto cleanup; 38 39 list->type = ops[i]->base.id; 40 41 i++; 42 ··· 88 89 drm_colorop_set_next_property(ops[i - 1], ops[i]); 90 91 + list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[0]->base.id); 92 + 93 return 0; 94 95 cleanup: ··· 103 104 int vkms_initialize_colorops(struct drm_plane *plane) 105 { 106 + struct drm_prop_enum_list pipeline = {}; 107 + int ret = 0; 108 109 /* Add color pipeline */ 110 ret = vkms_initialize_color_pipeline(plane, &pipeline); 111 if (ret) 112 + goto out; 113 114 /* Create COLOR_PIPELINE property and attach */ 115 ret = drm_plane_create_color_pipeline_property(plane, &pipeline, 1); 116 117 + kfree(pipeline.name); 118 + out: 119 + return ret; 120 }
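The leak fix relies on the ownership rule for enum names: the DRM core copies the name into the property when it is created, so the kasprintf()'d buffer in the drm_prop_enum_list stays owned by the caller and has to be freed once the property exists. A minimal sketch of that pattern, assuming a hypothetical wrapper around the drm_plane_create_color_pipeline_property() call used in this file:

static int example_attach_pipeline(struct drm_plane *plane,
				   struct drm_colorop *first_op)
{
	struct drm_prop_enum_list entry = {};
	int ret;

	entry.type = first_op->base.id;
	entry.name = kasprintf(GFP_KERNEL, "Color Pipeline %d", entry.type);
	if (!entry.name)
		return -ENOMEM;

	ret = drm_plane_create_color_pipeline_property(plane, &entry, 1);
	kfree(entry.name);	/* the core keeps its own copy of the enum name */
	return ret;
}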
+3 -2
drivers/gpu/drm/xe/Kconfig
··· 39 select DRM_TTM 40 select DRM_TTM_HELPER 41 select DRM_EXEC 42 - select DRM_GPUSVM if !UML && DEVICE_PRIVATE 43 select DRM_GPUVM 44 select DRM_SCHED 45 select MMU_NOTIFIER ··· 80 bool "Enable CPU to GPU address mirroring" 81 depends on DRM_XE 82 depends on !UML 83 - depends on DEVICE_PRIVATE 84 default y 85 select DRM_GPUSVM 86 help 87 Enable this option if you want support for CPU to GPU address
··· 39 select DRM_TTM 40 select DRM_TTM_HELPER 41 select DRM_EXEC 42 + select DRM_GPUSVM if !UML 43 select DRM_GPUVM 44 select DRM_SCHED 45 select MMU_NOTIFIER ··· 80 bool "Enable CPU to GPU address mirroring" 81 depends on DRM_XE 82 depends on !UML 83 + depends on ZONE_DEVICE 84 default y 85 + select DEVICE_PRIVATE 86 select DRM_GPUSVM 87 help 88 Enable this option if you want support for CPU to GPU address
+7 -2
drivers/gpu/drm/xe/xe_bo.c
··· 1055 unsigned long *scanned) 1056 { 1057 struct xe_device *xe = ttm_to_xe_device(bo->bdev); 1058 long lret; 1059 1060 /* Fake move to system, without copying data. */ ··· 1080 .writeback = false, 1081 .allow_move = false}); 1082 1083 - if (lret > 0) 1084 xe_ttm_tt_account_subtract(xe, bo->ttm); 1085 1086 return lret; 1087 } ··· 1169 if (needs_rpm) 1170 xe_pm_runtime_put(xe); 1171 1172 - if (lret > 0) 1173 xe_ttm_tt_account_subtract(xe, tt); 1174 1175 out_unref: 1176 xe_bo_put(xe_bo);
··· 1055 unsigned long *scanned) 1056 { 1057 struct xe_device *xe = ttm_to_xe_device(bo->bdev); 1058 + struct ttm_tt *tt = bo->ttm; 1059 long lret; 1060 1061 /* Fake move to system, without copying data. */ ··· 1079 .writeback = false, 1080 .allow_move = false}); 1081 1082 + if (lret > 0) { 1083 xe_ttm_tt_account_subtract(xe, bo->ttm); 1084 + update_global_total_pages(bo->bdev, -(long)tt->num_pages); 1085 + } 1086 1087 return lret; 1088 } ··· 1166 if (needs_rpm) 1167 xe_pm_runtime_put(xe); 1168 1169 + if (lret > 0) { 1170 xe_ttm_tt_account_subtract(xe, tt); 1171 + update_global_total_pages(bo->bdev, -(long)tt->num_pages); 1172 + } 1173 1174 out_unref: 1175 xe_bo_put(xe_bo);
+57 -15
drivers/gpu/drm/xe/xe_debugfs.c
··· 256 return simple_read_from_buffer(ubuf, size, pos, buf, len); 257 } 258 259 static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf, 260 size_t size, loff_t *pos) 261 { 262 struct xe_device *xe = file_inode(f)->i_private; 263 - struct xe_gt *gt; 264 u32 wedged_mode; 265 ssize_t ret; 266 - u8 id; 267 268 ret = kstrtouint_from_user(ubuf, size, 0, &wedged_mode); 269 if (ret) ··· 322 if (wedged_mode > 2) 323 return -EINVAL; 324 325 - if (xe->wedged.mode == wedged_mode) 326 - return size; 327 328 xe->wedged.mode = wedged_mode; 329 - 330 - xe_pm_runtime_get(xe); 331 - for_each_gt(gt, xe, id) { 332 - ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads); 333 - if (ret) { 334 - xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n"); 335 - xe_pm_runtime_put(xe); 336 - return -EIO; 337 - } 338 - } 339 - xe_pm_runtime_put(xe); 340 341 return size; 342 }
··· 256 return simple_read_from_buffer(ubuf, size, pos, buf, len); 257 } 258 259 + static int __wedged_mode_set_reset_policy(struct xe_gt *gt, enum xe_wedged_mode mode) 260 + { 261 + bool enable_engine_reset; 262 + int ret; 263 + 264 + enable_engine_reset = (mode != XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET); 265 + ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads, 266 + enable_engine_reset); 267 + if (ret) 268 + xe_gt_err(gt, "Failed to update GuC ADS scheduler policy (%pe)\n", ERR_PTR(ret)); 269 + 270 + return ret; 271 + } 272 + 273 + static int wedged_mode_set_reset_policy(struct xe_device *xe, enum xe_wedged_mode mode) 274 + { 275 + struct xe_gt *gt; 276 + int ret; 277 + u8 id; 278 + 279 + guard(xe_pm_runtime)(xe); 280 + for_each_gt(gt, xe, id) { 281 + ret = __wedged_mode_set_reset_policy(gt, mode); 282 + if (ret) { 283 + if (id > 0) { 284 + xe->wedged.inconsistent_reset = true; 285 + drm_err(&xe->drm, "Inconsistent reset policy state between GTs\n"); 286 + } 287 + return ret; 288 + } 289 + } 290 + 291 + xe->wedged.inconsistent_reset = false; 292 + 293 + return 0; 294 + } 295 + 296 + static bool wedged_mode_needs_policy_update(struct xe_device *xe, enum xe_wedged_mode mode) 297 + { 298 + if (xe->wedged.inconsistent_reset) 299 + return true; 300 + 301 + if (xe->wedged.mode == mode) 302 + return false; 303 + 304 + if (xe->wedged.mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET || 305 + mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET) 306 + return true; 307 + 308 + return false; 309 + } 310 + 311 static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf, 312 size_t size, loff_t *pos) 313 { 314 struct xe_device *xe = file_inode(f)->i_private; 315 u32 wedged_mode; 316 ssize_t ret; 317 318 ret = kstrtouint_from_user(ubuf, size, 0, &wedged_mode); 319 if (ret) ··· 272 if (wedged_mode > 2) 273 return -EINVAL; 274 275 + if (wedged_mode_needs_policy_update(xe, wedged_mode)) { 276 + ret = wedged_mode_set_reset_policy(xe, wedged_mode); 277 + if (ret) 278 + return ret; 279 + } 280 281 xe->wedged.mode = wedged_mode; 282 283 return size; 284 }
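guard(xe_pm_runtime)(xe) above is the scope-based cleanup idiom from <linux/cleanup.h>: the runtime-PM reference is dropped automatically on every return path of wedged_mode_set_reset_policy(), which is what keeps the early error return simple. A sketch of the kind of DEFINE_GUARD() declaration it relies on; the driver's actual definition may differ, and example_with_rpm()/some_condition() are made up:

#include <linux/cleanup.h>

DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
	     xe_pm_runtime_get(_T), xe_pm_runtime_put(_T));

static int example_with_rpm(struct xe_device *xe)
{
	guard(xe_pm_runtime)(xe);	/* xe_pm_runtime_put() runs on all returns */

	if (!some_condition(xe))	/* hypothetical check */
		return -EBUSY;

	return 0;
}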
+18
drivers/gpu/drm/xe/xe_device_types.h
··· 44 struct xe_pxp; 45 struct xe_vram_region; 46 47 #define XE_BO_INVALID_OFFSET LONG_MAX 48 49 #define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100) ··· 603 int mode; 604 /** @wedged.method: Recovery method to be sent in the drm device wedged uevent */ 605 unsigned long method; 606 } wedged; 607 608 /** @bo_device: Struct to control async free of BOs */
··· 44 struct xe_pxp; 45 struct xe_vram_region; 46 47 + /** 48 + * enum xe_wedged_mode - possible wedged modes 49 + * @XE_WEDGED_MODE_NEVER: Device will never be declared wedged. 50 + * @XE_WEDGED_MODE_UPON_CRITICAL_ERROR: Device will be declared wedged only 51 + * when critical error occurs like GT reset failure or firmware failure. 52 + * This is the default mode. 53 + * @XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET: Device will be declared wedged on 54 + * any hang. In this mode, engine resets are disabled to avoid automatic 55 + * recovery attempts. This mode is primarily intended for debugging hangs. 56 + */ 57 + enum xe_wedged_mode { 58 + XE_WEDGED_MODE_NEVER = 0, 59 + XE_WEDGED_MODE_UPON_CRITICAL_ERROR = 1, 60 + XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET = 2, 61 + }; 62 + 63 #define XE_BO_INVALID_OFFSET LONG_MAX 64 65 #define GRAPHICS_VER(xe) ((xe)->info.graphics_verx100 / 100) ··· 587 int mode; 588 /** @wedged.method: Recovery method to be sent in the drm device wedged uevent */ 589 unsigned long method; 590 + /** @wedged.inconsistent_reset: Inconsistent reset policy state between GTs */ 591 + bool inconsistent_reset; 592 } wedged; 593 594 /** @bo_device: Struct to control async free of BOs */
+31 -1
drivers/gpu/drm/xe/xe_exec_queue.c
··· 328 * @xe: Xe device. 329 * @tile: tile which bind exec queue belongs to. 330 * @flags: exec queue creation flags 331 * @extensions: exec queue creation extensions 332 * 333 * Normalize bind exec queue creation. Bind exec queue is tied to migration VM ··· 342 */ 343 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, 344 struct xe_tile *tile, 345 u32 flags, u64 extensions) 346 { 347 struct xe_gt *gt = tile->primary_gt; ··· 379 xe_exec_queue_put(q); 380 return ERR_PTR(err); 381 } 382 } 383 384 return q; ··· 410 list_for_each_entry_safe(eq, next, &q->multi_gt_list, 411 multi_gt_link) 412 xe_exec_queue_put(eq); 413 } 414 415 q->ops->destroy(q); ··· 752 XE_IOCTL_DBG(xe, eci[0].engine_instance != 0)) 753 return -EINVAL; 754 755 for_each_tile(tile, xe, id) { 756 struct xe_exec_queue *new; 757 ··· 775 if (id) 776 flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD; 777 778 - new = xe_exec_queue_create_bind(xe, tile, flags, 779 args->extensions); 780 if (IS_ERR(new)) { 781 err = PTR_ERR(new); 782 if (q) 783 goto put_exec_queue; ··· 791 list_add_tail(&new->multi_gt_list, 792 &q->multi_gt_link); 793 } 794 } else { 795 logical_mask = calc_validate_logical_mask(xe, eci, 796 args->width,
··· 328 * @xe: Xe device. 329 * @tile: tile which bind exec queue belongs to. 330 * @flags: exec queue creation flags 331 + * @user_vm: The user VM which this exec queue belongs to 332 * @extensions: exec queue creation extensions 333 * 334 * Normalize bind exec queue creation. Bind exec queue is tied to migration VM ··· 341 */ 342 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, 343 struct xe_tile *tile, 344 + struct xe_vm *user_vm, 345 u32 flags, u64 extensions) 346 { 347 struct xe_gt *gt = tile->primary_gt; ··· 377 xe_exec_queue_put(q); 378 return ERR_PTR(err); 379 } 380 + 381 + if (user_vm) 382 + q->user_vm = xe_vm_get(user_vm); 383 } 384 385 return q; ··· 405 list_for_each_entry_safe(eq, next, &q->multi_gt_list, 406 multi_gt_link) 407 xe_exec_queue_put(eq); 408 + } 409 + 410 + if (q->user_vm) { 411 + xe_vm_put(q->user_vm); 412 + q->user_vm = NULL; 413 } 414 415 q->ops->destroy(q); ··· 742 XE_IOCTL_DBG(xe, eci[0].engine_instance != 0)) 743 return -EINVAL; 744 745 + vm = xe_vm_lookup(xef, args->vm_id); 746 + if (XE_IOCTL_DBG(xe, !vm)) 747 + return -ENOENT; 748 + 749 + err = down_read_interruptible(&vm->lock); 750 + if (err) { 751 + xe_vm_put(vm); 752 + return err; 753 + } 754 + 755 + if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { 756 + up_read(&vm->lock); 757 + xe_vm_put(vm); 758 + return -ENOENT; 759 + } 760 + 761 for_each_tile(tile, xe, id) { 762 struct xe_exec_queue *new; 763 ··· 749 if (id) 750 flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD; 751 752 + new = xe_exec_queue_create_bind(xe, tile, vm, flags, 753 args->extensions); 754 if (IS_ERR(new)) { 755 + up_read(&vm->lock); 756 + xe_vm_put(vm); 757 err = PTR_ERR(new); 758 if (q) 759 goto put_exec_queue; ··· 763 list_add_tail(&new->multi_gt_list, 764 &q->multi_gt_link); 765 } 766 + up_read(&vm->lock); 767 + xe_vm_put(vm); 768 } else { 769 logical_mask = calc_validate_logical_mask(xe, eci, 770 args->width,
+1
drivers/gpu/drm/xe/xe_exec_queue.h
··· 28 u32 flags, u64 extensions); 29 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, 30 struct xe_tile *tile, 31 u32 flags, u64 extensions); 32 33 void xe_exec_queue_fini(struct xe_exec_queue *q);
··· 28 u32 flags, u64 extensions); 29 struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe, 30 struct xe_tile *tile, 31 + struct xe_vm *user_vm, 32 u32 flags, u64 extensions); 33 34 void xe_exec_queue_fini(struct xe_exec_queue *q);
+6
drivers/gpu/drm/xe/xe_exec_queue_types.h
··· 54 struct kref refcount; 55 /** @vm: VM (address space) for this exec queue */ 56 struct xe_vm *vm; 57 /** @class: class of this exec queue */ 58 enum xe_engine_class class; 59 /**
··· 54 struct kref refcount; 55 /** @vm: VM (address space) for this exec queue */ 56 struct xe_vm *vm; 57 + /** 58 + * @user_vm: User VM (address space) for this exec queue (bind queues 59 + * only) 60 + */ 61 + struct xe_vm *user_vm; 62 + 63 /** @class: class of this exec queue */ 64 enum xe_engine_class class; 65 /**
+1 -1
drivers/gpu/drm/xe/xe_ggtt.c
··· 322 else 323 ggtt->pt_ops = &xelp_pt_ops; 324 325 - ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM); 326 if (!ggtt->wq) 327 return -ENOMEM; 328
··· 322 else 323 ggtt->pt_ops = &xelp_pt_ops; 324 325 + ggtt->wq = alloc_workqueue("xe-ggtt-wq", WQ_MEM_RECLAIM, 0); 326 if (!ggtt->wq) 327 return -ENOMEM; 328
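The original call had the second and third arguments swapped: alloc_workqueue() takes the name format first, the WQ_* flags second, and max_active third (0 selecting the default limit), so WQ_MEM_RECLAIM was being passed as max_active and the flags ended up as 0. Corrected usage, with an illustrative workqueue name:

#include <linux/workqueue.h>

static struct workqueue_struct *example_create_wq(void)
{
	/* name, flags, max_active: 0 means "use the default concurrency limit" */
	return alloc_workqueue("example-wq", WQ_MEM_RECLAIM, 0);
}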
+2 -2
drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
··· 41 }; 42 43 /** 44 - * xe_gt_sriov_vf_migration - VF migration data. 45 */ 46 struct xe_gt_sriov_vf_migration { 47 - /** @migration: VF migration recovery worker */ 48 struct work_struct worker; 49 /** @lock: Protects recovery_queued, teardown */ 50 spinlock_t lock;
··· 41 }; 42 43 /** 44 + * struct xe_gt_sriov_vf_migration - VF migration data. 45 */ 46 struct xe_gt_sriov_vf_migration { 47 + /** @worker: VF migration recovery worker */ 48 struct work_struct worker; 49 /** @lock: Protects recovery_queued, teardown */ 50 spinlock_t lock;
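This and the xe_late_bind_fw_types.h change further down are the kernel-doc fixes from the summary: the one-line description must start with the struct/enum keyword, and every @tag must name an actual member (here the field is worker, not migration). A small conforming example with a hypothetical struct:

#include <linux/workqueue.h>
#include <linux/spinlock.h>

/**
 * struct example_recovery - example of a kernel-doc conforming comment
 */
struct example_recovery {
	/** @worker: deferred recovery work (the @tag must match the field name) */
	struct work_struct worker;
	/** @lock: protects @queued */
	spinlock_t lock;
	/** @queued: true while recovery work is pending */
	bool queued;
};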
+8 -6
drivers/gpu/drm/xe/xe_guc_ads.c
··· 983 /** 984 * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy 985 * @ads: Additional data structures object 986 * 987 - * This function update the GuC's engine reset policy based on wedged.mode. 988 * 989 * Return: 0 on success, and negative error code otherwise. 990 */ 991 - int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads) 992 { 993 struct guc_policies *policies; 994 struct xe_guc *guc = ads_to_guc(ads); 995 - struct xe_device *xe = ads_to_xe(ads); 996 CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies)); 997 998 if (!xe_guc_buf_is_valid(buf)) ··· 1005 policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time); 1006 policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items); 1007 policies->is_valid = 1; 1008 - if (xe->wedged.mode == 2) 1009 - policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET; 1010 - else 1011 policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET; 1012 1013 return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf)); 1014 }
··· 983 /** 984 * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy 985 * @ads: Additional data structures object 986 + * @enable_engine_reset: true to enable engine resets, false otherwise 987 * 988 + * This function update the GuC's engine reset policy. 989 * 990 * Return: 0 on success, and negative error code otherwise. 991 */ 992 + int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads, 993 + bool enable_engine_reset) 994 { 995 struct guc_policies *policies; 996 struct xe_guc *guc = ads_to_guc(ads); 997 CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies)); 998 999 if (!xe_guc_buf_is_valid(buf)) ··· 1004 policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time); 1005 policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items); 1006 policies->is_valid = 1; 1007 + 1008 + if (enable_engine_reset) 1009 policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET; 1010 + else 1011 + policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET; 1012 1013 return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf)); 1014 }
+4 -1
drivers/gpu/drm/xe/xe_guc_ads.h
··· 6 #ifndef _XE_GUC_ADS_H_ 7 #define _XE_GUC_ADS_H_ 8 9 struct xe_guc_ads; 10 11 int xe_guc_ads_init(struct xe_guc_ads *ads); ··· 15 void xe_guc_ads_populate(struct xe_guc_ads *ads); 16 void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads); 17 void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads); 18 - int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads); 19 20 #endif
··· 6 #ifndef _XE_GUC_ADS_H_ 7 #define _XE_GUC_ADS_H_ 8 9 + #include <linux/types.h> 10 + 11 struct xe_guc_ads; 12 13 int xe_guc_ads_init(struct xe_guc_ads *ads); ··· 13 void xe_guc_ads_populate(struct xe_guc_ads *ads); 14 void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads); 15 void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads); 16 + int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads, 17 + bool enable_engine_reset); 18 19 #endif
+3 -1
drivers/gpu/drm/xe/xe_late_bind_fw_types.h
··· 15 #define XE_LB_MAX_PAYLOAD_SIZE SZ_4K 16 17 /** 18 - * xe_late_bind_fw_id - enum to determine late binding fw index 19 */ 20 enum xe_late_bind_fw_id { 21 XE_LB_FW_FAN_CONTROL = 0, 22 XE_LB_FW_MAX_ID 23 }; 24
··· 15 #define XE_LB_MAX_PAYLOAD_SIZE SZ_4K 16 17 /** 18 + * enum xe_late_bind_fw_id - enum to determine late binding fw index 19 */ 20 enum xe_late_bind_fw_id { 21 + /** @XE_LB_FW_FAN_CONTROL: Fan control */ 22 XE_LB_FW_FAN_CONTROL = 0, 23 + /** @XE_LB_FW_MAX_ID: Number of IDs */ 24 XE_LB_FW_MAX_ID 25 }; 26
+3
drivers/gpu/drm/xe/xe_lrc.c
··· 1050 { 1051 u32 *cmd = batch; 1052 1053 if (xe_gt_WARN_ON(lrc->gt, max_len < 12)) 1054 return -ENOSPC; 1055
··· 1050 { 1051 u32 *cmd = batch; 1052 1053 + if (IS_SRIOV_VF(gt_to_xe(lrc->gt))) 1054 + return 0; 1055 + 1056 if (xe_gt_WARN_ON(lrc->gt, max_len < 12)) 1057 return -ENOSPC; 1058
+2 -2
drivers/gpu/drm/xe/xe_migrate.c
··· 2445 if (is_migrate) 2446 mutex_lock(&m->job_mutex); 2447 else 2448 - xe_vm_assert_held(q->vm); /* User queues VM's should be locked */ 2449 } 2450 2451 /** ··· 2463 if (is_migrate) 2464 mutex_unlock(&m->job_mutex); 2465 else 2466 - xe_vm_assert_held(q->vm); /* User queues VM's should be locked */ 2467 } 2468 2469 #if IS_ENABLED(CONFIG_PROVE_LOCKING)
··· 2445 if (is_migrate) 2446 mutex_lock(&m->job_mutex); 2447 else 2448 + xe_vm_assert_held(q->user_vm); /* User queues VM's should be locked */ 2449 } 2450 2451 /** ··· 2463 if (is_migrate) 2464 mutex_unlock(&m->job_mutex); 2465 else 2466 + xe_vm_assert_held(q->user_vm); /* User queues VM's should be locked */ 2467 } 2468 2469 #if IS_ENABLED(CONFIG_PROVE_LOCKING)
+1 -1
drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
··· 346 flags = EXEC_QUEUE_FLAG_KERNEL | 347 EXEC_QUEUE_FLAG_PERMANENT | 348 EXEC_QUEUE_FLAG_MIGRATE; 349 - q = xe_exec_queue_create_bind(xe, tile, flags, 0); 350 if (IS_ERR(q)) { 351 err = PTR_ERR(q); 352 goto err_ret;
··· 346 flags = EXEC_QUEUE_FLAG_KERNEL | 347 EXEC_QUEUE_FLAG_PERMANENT | 348 EXEC_QUEUE_FLAG_MIGRATE; 349 + q = xe_exec_queue_create_bind(xe, tile, NULL, flags, 0); 350 if (IS_ERR(q)) { 351 err = PTR_ERR(q); 352 goto err_ret;
+6 -1
drivers/gpu/drm/xe/xe_vm.c
··· 1617 if (!vm->pt_root[id]) 1618 continue; 1619 1620 - q = xe_exec_queue_create_bind(xe, tile, create_flags, 0); 1621 if (IS_ERR(q)) { 1622 err = PTR_ERR(q); 1623 goto err_close; ··· 3576 err = -EINVAL; 3577 goto put_exec_queue; 3578 } 3579 } 3580 3581 /* Ensure all UNMAPs visible */
··· 1617 if (!vm->pt_root[id]) 1618 continue; 1619 1620 + q = xe_exec_queue_create_bind(xe, tile, vm, create_flags, 0); 1621 if (IS_ERR(q)) { 1622 err = PTR_ERR(q); 1623 goto err_close; ··· 3576 err = -EINVAL; 3577 goto put_exec_queue; 3578 } 3579 + } 3580 + 3581 + if (XE_IOCTL_DBG(xe, q && vm != q->user_vm)) { 3582 + err = -EINVAL; 3583 + goto put_exec_queue; 3584 } 3585 3586 /* Ensure all UNMAPs visible */
+1 -1
drivers/gpu/drm/xe/xe_vm.h
··· 379 } 380 381 /** 382 - * xe_vm_set_validation_exec() - Accessor to read the drm_exec object 383 * @vm: The vm we want to register a drm_exec object with. 384 * 385 * Return: The drm_exec object used to lock the vm's resv. The value
··· 379 } 380 381 /** 382 + * xe_vm_validation_exec() - Accessor to read the drm_exec object 383 * @vm: The vm we want to register a drm_exec object with. 384 * 385 * Return: The drm_exec object used to lock the vm's resv. The value
+17 -2
include/drm/drm_pagemap.h
··· 209 struct dma_fence *pre_migrate_fence); 210 }; 211 212 /** 213 * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation 214 * ··· 246 struct dma_fence *pre_migrate_fence; 247 }; 248 249 int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation, 250 struct mm_struct *mm, 251 unsigned long start, unsigned long end, ··· 257 int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation); 258 259 const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void); 260 - 261 - struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page); 262 263 void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation, 264 struct device *dev, struct mm_struct *mm, ··· 268 unsigned long start, unsigned long end, 269 struct mm_struct *mm, 270 unsigned long timeslice_ms); 271 272 #endif
··· 209 struct dma_fence *pre_migrate_fence); 210 }; 211 212 + #if IS_ENABLED(CONFIG_ZONE_DEVICE) 213 + 214 + struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page); 215 + 216 + #else 217 + 218 + static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page) 219 + { 220 + return NULL; 221 + } 222 + 223 + #endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */ 224 + 225 /** 226 * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation 227 * ··· 233 struct dma_fence *pre_migrate_fence; 234 }; 235 236 + #if IS_ENABLED(CONFIG_ZONE_DEVICE) 237 + 238 int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation, 239 struct mm_struct *mm, 240 unsigned long start, unsigned long end, ··· 242 int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation); 243 244 const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void); 245 246 void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation, 247 struct device *dev, struct mm_struct *mm, ··· 255 unsigned long start, unsigned long end, 256 struct mm_struct *mm, 257 unsigned long timeslice_ms); 258 + 259 + #endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */ 260 261 #endif
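The header now gates the device-memory API on CONFIG_ZONE_DEVICE, and for drm_pagemap_page_to_dpagemap() it uses the usual stub idiom: a real declaration when the option is enabled, a static inline no-op otherwise, so callers compile unchanged and can simply test for NULL. The same pattern with a hypothetical helper:

/* Hypothetical helper showing the same stubbing pattern. */
struct example_ctx;

#if IS_ENABLED(CONFIG_ZONE_DEVICE)
struct example_ctx *example_page_to_ctx(struct page *page);
#else
static inline struct example_ctx *example_page_to_ctx(struct page *page)
{
	return NULL;	/* callers treat NULL as "no device-private backing" */
}
#endif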