Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-2024-11-02' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"Regular fixes pull, nothing too out of the ordinary, the mediatek
fixes came in a batch that I might have preferred a bit earlier but
all seem fine, otherwise regular xe/amdgpu and a few misc ones.

xe:
- Fix missing HPD interrupt enabling, bringing one PM refactor with it
- Workaround LNL GGTT invalidation not being visible to GuC
- Avoid getting jobs stuck without a protecting timeout

ivpu:
- Fix firewall IRQ handling

panthor:
- Fix firmware initialization wrt page sizes
- Fix handling and reporting of dead job groups

sched:
- Guarantee forward progress via WQ_MEM_RECLAIM

tests:
- Fix memory leak in drm_display_mode_from_cea_vic()

amdgpu:
- DCN 3.5 fix
- Vangogh SMU KASAN fix
- SMU 13 profile reporting fix

mediatek:
- Fix degradation problem of alpha blending
- Fix color format MACROs in OVL
- Fix get efuse issue for MT8188 DPTX
- Fix potential NULL dereference in mtk_crtc_destroy()
- Correct dpi power-domains property
- Add split subschema property constraints"

* tag 'drm-fixes-2024-11-02' of https://gitlab.freedesktop.org/drm/kernel: (27 commits)
drm/xe: Don't short circuit TDR on jobs not started
drm/xe: Add mmio read before GGTT invalidate
drm/tests: hdmi: Fix memory leaks in drm_display_mode_from_cea_vic()
drm/connector: hdmi: Fix memory leak in drm_display_mode_from_cea_vic()
drm/tests: helpers: Add helper for drm_display_mode_from_cea_vic()
drm/panthor: Report group as timedout when we fail to properly suspend
drm/panthor: Fail job creation when the group is dead
drm/panthor: Fix firmware initialization on systems with a page size > 4k
accel/ivpu: Fix NOC firewall interrupt handling
drm/xe/display: Add missing HPD interrupt enabling during non-d3cold RPM resume
drm/xe/display: Separate the d3cold and non-d3cold runtime PM handling
drm/xe: Remove runtime argument from display s/r functions
drm/amdgpu/smu13: fix profile reporting
drm/amd/pm: Vangogh: Fix kernel memory out of bounds write
Revert "drm/amd/display: update DML2 policy EnhancedPrefetchScheduleAccelerationFinal DCN35"
drm/sched: Mark scheduler work queues with WQ_MEM_RECLAIM
drm/tegra: Fix NULL vs IS_ERR() check in probe()
dt-bindings: display: mediatek: split: add subschema property constraints
dt-bindings: display: mediatek: dpi: correct power-domains property
drm/mediatek: Fix potential NULL dereference in mtk_crtc_destroy()
...

+412 -121
+10 -14
Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml
··· 63 63 - const: sleep 64 64 65 65 power-domains: 66 + description: | 67 + The MediaTek DPI module is typically associated with one of the 68 + following multimedia power domains: 69 + POWER_DOMAIN_DISPLAY 70 + POWER_DOMAIN_VDOSYS 71 + POWER_DOMAIN_MM 72 + The specific power domain used varies depending on the SoC design. 73 + 74 + It is recommended to explicitly add the appropriate power domain 75 + property to the DPI node in the device tree. 66 76 maxItems: 1 67 77 68 78 port: ··· 88 78 - clocks 89 79 - clock-names 90 80 - port 91 - 92 - allOf: 93 - - if: 94 - not: 95 - properties: 96 - compatible: 97 - contains: 98 - enum: 99 - - mediatek,mt6795-dpi 100 - - mediatek,mt8173-dpi 101 - - mediatek,mt8186-dpi 102 - then: 103 - properties: 104 - power-domains: false 105 81 106 82 additionalProperties: false 107 83
+19
Documentation/devicetree/bindings/display/mediatek/mediatek,split.yaml
··· 38 38 description: A phandle and PM domain specifier as defined by bindings of 39 39 the power controller specified by phandle. See 40 40 Documentation/devicetree/bindings/power/power-domain.yaml for details. 41 + maxItems: 1 41 42 42 43 mediatek,gce-client-reg: 43 44 description: ··· 58 57 clocks: 59 58 items: 60 59 - description: SPLIT Clock 60 + - description: Used for interfacing with the HDMI RX signal source. 61 + - description: Paired with receiving HDMI RX metadata. 62 + minItems: 1 61 63 62 64 required: 63 65 - compatible ··· 76 72 const: mediatek,mt8195-mdp3-split 77 73 78 74 then: 75 + properties: 76 + clocks: 77 + minItems: 3 78 + 79 79 required: 80 80 - mediatek,gce-client-reg 81 + 82 + - if: 83 + properties: 84 + compatible: 85 + contains: 86 + const: mediatek,mt8173-disp-split 87 + 88 + then: 89 + properties: 90 + clocks: 91 + maxItems: 1 81 92 82 93 additionalProperties: false 83 94
+9
drivers/accel/ivpu/ivpu_debugfs.c
··· 108 108 return 0; 109 109 } 110 110 111 + static int firewall_irq_counter_show(struct seq_file *s, void *v) 112 + { 113 + struct ivpu_device *vdev = seq_to_ivpu(s); 114 + 115 + seq_printf(s, "%d\n", atomic_read(&vdev->hw->firewall_irq_counter)); 116 + return 0; 117 + } 118 + 111 119 static const struct drm_debugfs_info vdev_debugfs_list[] = { 112 120 {"bo_list", bo_list_show, 0}, 113 121 {"fw_name", fw_name_show, 0}, ··· 124 116 {"last_bootmode", last_bootmode_show, 0}, 125 117 {"reset_counter", reset_counter_show, 0}, 126 118 {"reset_pending", reset_pending_show, 0}, 119 + {"firewall_irq_counter", firewall_irq_counter_show, 0}, 127 120 }; 128 121 129 122 static ssize_t
+1
drivers/accel/ivpu/ivpu_hw.c
··· 249 249 platform_init(vdev); 250 250 wa_init(vdev); 251 251 timeouts_init(vdev); 252 + atomic_set(&vdev->hw->firewall_irq_counter, 0); 252 253 253 254 return 0; 254 255 }
+1
drivers/accel/ivpu/ivpu_hw.h
··· 52 52 int dma_bits; 53 53 ktime_t d0i3_entry_host_ts; 54 54 u64 d0i3_entry_vpu_ts; 55 + atomic_t firewall_irq_counter; 55 56 }; 56 57 57 58 int ivpu_hw_init(struct ivpu_device *vdev);
+4 -1
drivers/accel/ivpu/ivpu_hw_ip.c
··· 1062 1062 1063 1063 static void irq_noc_firewall_handler(struct ivpu_device *vdev) 1064 1064 { 1065 - ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ"); 1065 + atomic_inc(&vdev->hw->firewall_irq_counter); 1066 + 1067 + ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n", 1068 + atomic_read(&vdev->hw->firewall_irq_counter)); 1066 1069 } 1067 1070 1068 1071 /* Handler for IRQs from NPU core */
+1
drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
··· 303 303 if (project == dml_project_dcn35 || 304 304 project == dml_project_dcn351) { 305 305 policy->DCCProgrammingAssumesScanDirectionUnknownFinal = false; 306 + policy->EnhancedPrefetchScheduleAccelerationFinal = 0; 306 307 policy->AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter_if_possible; /*new*/ 307 308 policy->UseOnlyMaxPrefetchModes = 1; 308 309 }
+3 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 242 242 goto err0_out; 243 243 smu_table->metrics_time = 0; 244 244 245 - smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2)); 245 + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2); 246 + smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3)); 247 + smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4)); 246 248 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); 247 249 if (!smu_table->gpu_metrics_table) 248 250 goto err1_out;
+3 -3
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 2485 2485 DpmActivityMonitorCoeffInt_t *activity_monitor = 2486 2486 &(activity_monitor_external.DpmActivityMonitorCoeffInt); 2487 2487 int workload_type, ret = 0; 2488 - u32 workload_mask; 2488 + u32 workload_mask, selected_workload_mask; 2489 2489 2490 2490 smu->power_profile_mode = input[size]; 2491 2491 ··· 2552 2552 if (workload_type < 0) 2553 2553 return -EINVAL; 2554 2554 2555 - workload_mask = 1 << workload_type; 2555 + selected_workload_mask = workload_mask = 1 << workload_type; 2556 2556 2557 2557 /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */ 2558 2558 if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && ··· 2572 2572 workload_mask, 2573 2573 NULL); 2574 2574 if (!ret) 2575 - smu->workload_mask = workload_mask; 2575 + smu->workload_mask = selected_workload_mask; 2576 2576 2577 2577 return ret; 2578 2578 }
+2 -2
drivers/gpu/drm/mediatek/mtk_crtc.c
··· 127 127 128 128 mtk_mutex_put(mtk_crtc->mutex); 129 129 #if IS_REACHABLE(CONFIG_MTK_CMDQ) 130 - cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle); 131 - 132 130 if (mtk_crtc->cmdq_client.chan) { 131 + cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle); 133 132 mbox_free_channel(mtk_crtc->cmdq_client.chan); 134 133 mtk_crtc->cmdq_client.chan = NULL; 135 134 } ··· 912 913 BIT(pipe), 913 914 mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes), 914 915 mtk_ddp_comp_supported_rotations(comp), 916 + mtk_ddp_comp_get_blend_modes(comp), 915 917 mtk_ddp_comp_get_formats(comp), 916 918 mtk_ddp_comp_get_num_formats(comp), i); 917 919 if (ret)
+2
drivers/gpu/drm/mediatek/mtk_ddp_comp.c
··· 363 363 .layer_config = mtk_ovl_layer_config, 364 364 .bgclr_in_on = mtk_ovl_bgclr_in_on, 365 365 .bgclr_in_off = mtk_ovl_bgclr_in_off, 366 + .get_blend_modes = mtk_ovl_get_blend_modes, 366 367 .get_formats = mtk_ovl_get_formats, 367 368 .get_num_formats = mtk_ovl_get_num_formats, 368 369 }; ··· 417 416 .disconnect = mtk_ovl_adaptor_disconnect, 418 417 .add = mtk_ovl_adaptor_add_comp, 419 418 .remove = mtk_ovl_adaptor_remove_comp, 419 + .get_blend_modes = mtk_ovl_adaptor_get_blend_modes, 420 420 .get_formats = mtk_ovl_adaptor_get_formats, 421 421 .get_num_formats = mtk_ovl_adaptor_get_num_formats, 422 422 .mode_valid = mtk_ovl_adaptor_mode_valid,
+10
drivers/gpu/drm/mediatek/mtk_ddp_comp.h
··· 80 80 void (*ctm_set)(struct device *dev, 81 81 struct drm_crtc_state *state); 82 82 struct device * (*dma_dev_get)(struct device *dev); 83 + u32 (*get_blend_modes)(struct device *dev); 83 84 const u32 *(*get_formats)(struct device *dev); 84 85 size_t (*get_num_formats)(struct device *dev); 85 86 void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next); ··· 265 264 if (comp->funcs && comp->funcs->dma_dev_get) 266 265 return comp->funcs->dma_dev_get(comp->dev); 267 266 return comp->dev; 267 + } 268 + 269 + static inline 270 + u32 mtk_ddp_comp_get_blend_modes(struct mtk_ddp_comp *comp) 271 + { 272 + if (comp->funcs && comp->funcs->get_blend_modes) 273 + return comp->funcs->get_blend_modes(comp->dev); 274 + 275 + return 0; 268 276 } 269 277 270 278 static inline
+2
drivers/gpu/drm/mediatek/mtk_disp_drv.h
··· 103 103 void mtk_ovl_unregister_vblank_cb(struct device *dev); 104 104 void mtk_ovl_enable_vblank(struct device *dev); 105 105 void mtk_ovl_disable_vblank(struct device *dev); 106 + u32 mtk_ovl_get_blend_modes(struct device *dev); 106 107 const u32 *mtk_ovl_get_formats(struct device *dev); 107 108 size_t mtk_ovl_get_num_formats(struct device *dev); 108 109 ··· 132 131 void mtk_ovl_adaptor_stop(struct device *dev); 133 132 unsigned int mtk_ovl_adaptor_layer_nr(struct device *dev); 134 133 struct device *mtk_ovl_adaptor_dma_dev_get(struct device *dev); 134 + u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev); 135 135 const u32 *mtk_ovl_adaptor_get_formats(struct device *dev); 136 136 size_t mtk_ovl_adaptor_get_num_formats(struct device *dev); 137 137 enum drm_mode_status mtk_ovl_adaptor_mode_valid(struct device *dev,
+55 -19
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
··· 65 65 #define OVL_CON_CLRFMT_RGB (1 << 12) 66 66 #define OVL_CON_CLRFMT_ARGB8888 (2 << 12) 67 67 #define OVL_CON_CLRFMT_RGBA8888 (3 << 12) 68 - #define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP) 69 - #define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP) 68 + #define OVL_CON_CLRFMT_ABGR8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP) 69 + #define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP) 70 70 #define OVL_CON_CLRFMT_UYVY (4 << 12) 71 71 #define OVL_CON_CLRFMT_YUYV (5 << 12) 72 72 #define OVL_CON_MTX_YUV_TO_RGB (6 << 16) ··· 146 146 bool fmt_rgb565_is_0; 147 147 bool smi_id_en; 148 148 bool supports_afbc; 149 + const u32 blend_modes; 149 150 const u32 *formats; 150 151 size_t num_formats; 151 152 bool supports_clrfmt_ext; ··· 213 212 struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); 214 213 215 214 writel_relaxed(0x0, ovl->regs + DISP_REG_OVL_INTEN); 215 + } 216 + 217 + u32 mtk_ovl_get_blend_modes(struct device *dev) 218 + { 219 + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); 220 + 221 + return ovl->data->blend_modes; 216 222 } 217 223 218 224 const u32 *mtk_ovl_get_formats(struct device *dev) ··· 394 386 DISP_REG_OVL_RDMA_CTRL(idx)); 395 387 } 396 388 397 - static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt, 398 - unsigned int blend_mode) 389 + static unsigned int mtk_ovl_fmt_convert(struct mtk_disp_ovl *ovl, 390 + struct mtk_plane_state *state) 399 391 { 400 - /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX" 401 - * is defined in mediatek HW data sheet. 402 - * The alphabet order in XXX is no relation to data 403 - * arrangement in memory. 392 + unsigned int fmt = state->pending.format; 393 + unsigned int blend_mode = DRM_MODE_BLEND_COVERAGE; 394 + 395 + /* 396 + * For the platforms where OVL_CON_CLRFMT_MAN is defined in the hardware data sheet 397 + * and supports premultiplied color formats, such as OVL_CON_CLRFMT_PARGB8888. 
398 + * 399 + * Check blend_modes in the driver data to see if premultiplied mode is supported. 400 + * If not, use coverage mode instead to set it to the supported color formats. 401 + * 402 + * Current DRM assumption is that alpha is default premultiplied, so the bitmask of 403 + * blend_modes must include BIT(DRM_MODE_BLEND_PREMULTI). Otherwise, mtk_plane_init() 404 + * will get an error return from drm_plane_create_blend_mode_property() and 405 + * state->base.pixel_blend_mode should not be used. 404 406 */ 407 + if (ovl->data->blend_modes & BIT(DRM_MODE_BLEND_PREMULTI)) 408 + blend_mode = state->base.pixel_blend_mode; 409 + 405 410 switch (fmt) { 406 411 default: 407 412 case DRM_FORMAT_RGB565: ··· 492 471 return; 493 472 } 494 473 495 - con = ovl_fmt_convert(ovl, fmt, blend_mode); 474 + con = mtk_ovl_fmt_convert(ovl, state); 496 475 if (state->base.fb) { 497 - con |= OVL_CON_AEN; 498 476 con |= state->base.alpha & OVL_CON_ALPHA; 499 - } 500 477 501 - /* CONST_BLD must be enabled for XRGB formats although the alpha channel 502 - * can be ignored, or OVL will still read the value from memory. 503 - * For RGB888 related formats, whether CONST_BLD is enabled or not won't 504 - * affect the result. Therefore we use !has_alpha as the condition. 505 - */ 506 - if ((state->base.fb && !state->base.fb->format->has_alpha) || 507 - blend_mode == DRM_MODE_BLEND_PIXEL_NONE) 508 - ignore_pixel_alpha = OVL_CONST_BLEND; 478 + /* 479 + * For blend_modes supported SoCs, always enable alpha blending. 480 + * For blend_modes unsupported SoCs, enable alpha blending when has_alpha is set. 481 + */ 482 + if (blend_mode || state->base.fb->format->has_alpha) 483 + con |= OVL_CON_AEN; 484 + 485 + /* 486 + * Although the alpha channel can be ignored, CONST_BLD must be enabled 487 + * for XRGB format, otherwise OVL will still read the value from memory. 488 + * For RGB888 related formats, whether CONST_BLD is enabled or not won't 489 + * affect the result. 
Therefore we use !has_alpha as the condition. 490 + */ 491 + if (blend_mode == DRM_MODE_BLEND_PIXEL_NONE || !state->base.fb->format->has_alpha) 492 + ignore_pixel_alpha = OVL_CONST_BLEND; 493 + } 509 494 510 495 if (pending->rotation & DRM_MODE_REFLECT_Y) { 511 496 con |= OVL_CON_VIRT_FLIP; ··· 690 663 .layer_nr = 4, 691 664 .fmt_rgb565_is_0 = true, 692 665 .smi_id_en = true, 666 + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | 667 + BIT(DRM_MODE_BLEND_COVERAGE) | 668 + BIT(DRM_MODE_BLEND_PIXEL_NONE), 693 669 .formats = mt8173_formats, 694 670 .num_formats = ARRAY_SIZE(mt8173_formats), 695 671 }; ··· 703 673 .layer_nr = 2, 704 674 .fmt_rgb565_is_0 = true, 705 675 .smi_id_en = true, 676 + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | 677 + BIT(DRM_MODE_BLEND_COVERAGE) | 678 + BIT(DRM_MODE_BLEND_PIXEL_NONE), 706 679 .formats = mt8173_formats, 707 680 .num_formats = ARRAY_SIZE(mt8173_formats), 708 681 }; ··· 717 684 .fmt_rgb565_is_0 = true, 718 685 .smi_id_en = true, 719 686 .supports_afbc = true, 687 + .blend_modes = BIT(DRM_MODE_BLEND_PREMULTI) | 688 + BIT(DRM_MODE_BLEND_COVERAGE) | 689 + BIT(DRM_MODE_BLEND_PIXEL_NONE), 720 690 .formats = mt8195_formats, 721 691 .num_formats = ARRAY_SIZE(mt8195_formats), 722 692 .supports_clrfmt_ext = true,
+7
drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
··· 400 400 mtk_ethdr_disable_vblank(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); 401 401 } 402 402 403 + u32 mtk_ovl_adaptor_get_blend_modes(struct device *dev) 404 + { 405 + struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev); 406 + 407 + return mtk_ethdr_get_blend_modes(ovl_adaptor->ovl_adaptor_comp[OVL_ADAPTOR_ETHDR0]); 408 + } 409 + 403 410 const u32 *mtk_ovl_adaptor_get_formats(struct device *dev) 404 411 { 405 412 struct mtk_disp_ovl_adaptor *ovl_adaptor = dev_get_drvdata(dev);
+84 -1
drivers/gpu/drm/mediatek/mtk_dp.c
··· 145 145 u16 audio_m_div2_bit; 146 146 }; 147 147 148 + static const struct mtk_dp_efuse_fmt mt8188_dp_efuse_fmt[MTK_DP_CAL_MAX] = { 149 + [MTK_DP_CAL_GLB_BIAS_TRIM] = { 150 + .idx = 0, 151 + .shift = 10, 152 + .mask = 0x1f, 153 + .min_val = 1, 154 + .max_val = 0x1e, 155 + .default_val = 0xf, 156 + }, 157 + [MTK_DP_CAL_CLKTX_IMPSE] = { 158 + .idx = 0, 159 + .shift = 15, 160 + .mask = 0xf, 161 + .min_val = 1, 162 + .max_val = 0xe, 163 + .default_val = 0x8, 164 + }, 165 + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_0] = { 166 + .idx = 1, 167 + .shift = 0, 168 + .mask = 0xf, 169 + .min_val = 1, 170 + .max_val = 0xe, 171 + .default_val = 0x8, 172 + }, 173 + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_1] = { 174 + .idx = 1, 175 + .shift = 8, 176 + .mask = 0xf, 177 + .min_val = 1, 178 + .max_val = 0xe, 179 + .default_val = 0x8, 180 + }, 181 + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_2] = { 182 + .idx = 1, 183 + .shift = 16, 184 + .mask = 0xf, 185 + .min_val = 1, 186 + .max_val = 0xe, 187 + .default_val = 0x8, 188 + }, 189 + [MTK_DP_CAL_LN_TX_IMPSEL_PMOS_3] = { 190 + .idx = 1, 191 + .shift = 24, 192 + .mask = 0xf, 193 + .min_val = 1, 194 + .max_val = 0xe, 195 + .default_val = 0x8, 196 + }, 197 + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_0] = { 198 + .idx = 1, 199 + .shift = 4, 200 + .mask = 0xf, 201 + .min_val = 1, 202 + .max_val = 0xe, 203 + .default_val = 0x8, 204 + }, 205 + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_1] = { 206 + .idx = 1, 207 + .shift = 12, 208 + .mask = 0xf, 209 + .min_val = 1, 210 + .max_val = 0xe, 211 + .default_val = 0x8, 212 + }, 213 + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_2] = { 214 + .idx = 1, 215 + .shift = 20, 216 + .mask = 0xf, 217 + .min_val = 1, 218 + .max_val = 0xe, 219 + .default_val = 0x8, 220 + }, 221 + [MTK_DP_CAL_LN_TX_IMPSEL_NMOS_3] = { 222 + .idx = 1, 223 + .shift = 28, 224 + .mask = 0xf, 225 + .min_val = 1, 226 + .max_val = 0xe, 227 + .default_val = 0x8, 228 + }, 229 + }; 230 + 148 231 static const struct mtk_dp_efuse_fmt mt8195_edp_efuse_fmt[MTK_DP_CAL_MAX] = { 149 232 [MTK_DP_CAL_GLB_BIAS_TRIM] = 
{ 150 233 .idx = 3, ··· 2854 2771 static const struct mtk_dp_data mt8188_dp_data = { 2855 2772 .bridge_type = DRM_MODE_CONNECTOR_DisplayPort, 2856 2773 .smc_cmd = MTK_DP_SIP_ATF_VIDEO_UNMUTE, 2857 - .efuse_fmt = mt8195_dp_efuse_fmt, 2774 + .efuse_fmt = mt8188_dp_efuse_fmt, 2858 2775 .audio_supported = true, 2859 2776 .audio_pkt_in_hblank_area = true, 2860 2777 .audio_m_div2_bit = MT8188_AUDIO_M_CODE_MULT_DIV_SEL_DP_ENC0_P0_DIV_2,
+7
drivers/gpu/drm/mediatek/mtk_ethdr.c
··· 145 145 return IRQ_HANDLED; 146 146 } 147 147 148 + u32 mtk_ethdr_get_blend_modes(struct device *dev) 149 + { 150 + return BIT(DRM_MODE_BLEND_PREMULTI) | 151 + BIT(DRM_MODE_BLEND_COVERAGE) | 152 + BIT(DRM_MODE_BLEND_PIXEL_NONE); 153 + } 154 + 148 155 void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, 149 156 struct mtk_plane_state *state, 150 157 struct cmdq_pkt *cmdq_pkt)
+1
drivers/gpu/drm/mediatek/mtk_ethdr.h
··· 13 13 void mtk_ethdr_config(struct device *dev, unsigned int w, 14 14 unsigned int h, unsigned int vrefresh, 15 15 unsigned int bpc, struct cmdq_pkt *cmdq_pkt); 16 + u32 mtk_ethdr_get_blend_modes(struct device *dev); 16 17 void mtk_ethdr_layer_config(struct device *dev, unsigned int idx, 17 18 struct mtk_plane_state *state, 18 19 struct cmdq_pkt *cmdq_pkt);
+7 -8
drivers/gpu/drm/mediatek/mtk_plane.c
··· 320 320 321 321 int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, 322 322 unsigned long possible_crtcs, enum drm_plane_type type, 323 - unsigned int supported_rotations, const u32 *formats, 324 - size_t num_formats, unsigned int plane_idx) 323 + unsigned int supported_rotations, const u32 blend_modes, 324 + const u32 *formats, size_t num_formats, unsigned int plane_idx) 325 325 { 326 326 int err; 327 327 ··· 366 366 if (err) 367 367 DRM_ERROR("failed to create property: alpha\n"); 368 368 369 - err = drm_plane_create_blend_mode_property(plane, 370 - BIT(DRM_MODE_BLEND_PREMULTI) | 371 - BIT(DRM_MODE_BLEND_COVERAGE) | 372 - BIT(DRM_MODE_BLEND_PIXEL_NONE)); 373 - if (err) 374 - DRM_ERROR("failed to create property: blend_mode\n"); 369 + if (blend_modes) { 370 + err = drm_plane_create_blend_mode_property(plane, blend_modes); 371 + if (err) 372 + DRM_ERROR("failed to create property: blend_mode\n"); 373 + } 375 374 376 375 drm_plane_helper_add(plane, &mtk_plane_helper_funcs); 377 376
+2 -2
drivers/gpu/drm/mediatek/mtk_plane.h
··· 48 48 49 49 int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, 50 50 unsigned long possible_crtcs, enum drm_plane_type type, 51 - unsigned int supported_rotations, const u32 *formats, 52 - size_t num_formats, unsigned int plane_idx); 51 + unsigned int supported_rotations, const u32 blend_modes, 52 + const u32 *formats, size_t num_formats, unsigned int plane_idx); 53 53 #endif
+2 -2
drivers/gpu/drm/panthor/panthor_fw.c
··· 487 487 struct panthor_fw_binary_iter *iter, 488 488 u32 ehdr) 489 489 { 490 + ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm); 490 491 struct panthor_fw_binary_section_entry_hdr hdr; 491 492 struct panthor_fw_section *section; 492 493 u32 section_size; ··· 516 515 return -EINVAL; 517 516 } 518 517 519 - if ((hdr.va.start & ~PAGE_MASK) != 0 || 520 - (hdr.va.end & ~PAGE_MASK) != 0) { 518 + if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) { 521 519 drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n", 522 520 hdr.va.start, hdr.va.end); 523 521 return -EINVAL;
+8 -3
drivers/gpu/drm/panthor/panthor_gem.c
··· 44 44 to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm))) 45 45 goto out_free_bo; 46 46 47 - ret = panthor_vm_unmap_range(vm, bo->va_node.start, 48 - panthor_kernel_bo_size(bo)); 47 + ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size); 49 48 if (ret) 50 49 goto out_free_bo; 51 50 ··· 94 95 } 95 96 96 97 bo = to_panthor_bo(&obj->base); 97 - size = obj->base.size; 98 98 kbo->obj = &obj->base; 99 99 bo->flags = bo_flags; 100 100 101 + /* The system and GPU MMU page size might differ, which becomes a 102 + * problem for FW sections that need to be mapped at explicit address 103 + * since our PAGE_SIZE alignment might cover a VA range that's 104 + * expected to be used for another section. 105 + * Make sure we never map more than we need. 106 + */ 107 + size = ALIGN(size, panthor_vm_page_size(vm)); 101 108 ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node); 102 109 if (ret) 103 110 goto err_put_obj;
+13 -3
drivers/gpu/drm/panthor/panthor_mmu.c
··· 826 826 mutex_unlock(&ptdev->mmu->as.slots_lock); 827 827 } 828 828 829 + u32 panthor_vm_page_size(struct panthor_vm *vm) 830 + { 831 + const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops); 832 + u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1; 833 + 834 + return 1u << pg_shift; 835 + } 836 + 829 837 static void panthor_vm_stop(struct panthor_vm *vm) 830 838 { 831 839 drm_sched_stop(&vm->sched, NULL); ··· 1033 1025 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size, 1034 1026 struct drm_mm_node *va_node) 1035 1027 { 1028 + ssize_t vm_pgsz = panthor_vm_page_size(vm); 1036 1029 int ret; 1037 1030 1038 - if (!size || (size & ~PAGE_MASK)) 1031 + if (!size || !IS_ALIGNED(size, vm_pgsz)) 1039 1032 return -EINVAL; 1040 1033 1041 - if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK)) 1034 + if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz)) 1042 1035 return -EINVAL; 1043 1036 1044 1037 mutex_lock(&vm->mm_lock); ··· 2375 2366 const struct drm_panthor_vm_bind_op *op, 2376 2367 struct panthor_vm_op_ctx *op_ctx) 2377 2368 { 2369 + ssize_t vm_pgsz = panthor_vm_page_size(vm); 2378 2370 struct drm_gem_object *gem; 2379 2371 int ret; 2380 2372 2381 2373 /* Aligned on page size. */ 2382 - if ((op->va | op->size) & ~PAGE_MASK) 2374 + if (!IS_ALIGNED(op->va | op->size, vm_pgsz)) 2383 2375 return -EINVAL; 2384 2376 2385 2377 switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
+1
drivers/gpu/drm/panthor/panthor_mmu.h
··· 30 30 31 31 int panthor_vm_active(struct panthor_vm *vm); 32 32 void panthor_vm_idle(struct panthor_vm *vm); 33 + u32 panthor_vm_page_size(struct panthor_vm *vm); 33 34 int panthor_vm_as(struct panthor_vm *vm); 34 35 int panthor_vm_flush_all(struct panthor_vm *vm); 35 36
+16 -4
drivers/gpu/drm/panthor/panthor_sched.c
··· 589 589 * @timedout: True when a timeout occurred on any of the queues owned by 590 590 * this group. 591 591 * 592 - * Timeouts can be reported by drm_sched or by the FW. In any case, any 593 - * timeout situation is unrecoverable, and the group becomes useless. 594 - * We simply wait for all references to be dropped so we can release the 595 - * group object. 592 + * Timeouts can be reported by drm_sched or by the FW. If a reset is required, 593 + * and the group can't be suspended, this also leads to a timeout. In any case, 594 + * any timeout situation is unrecoverable, and the group becomes useless. We 595 + * simply wait for all references to be dropped so we can release the group 596 + * object. 596 597 */ 597 598 bool timedout; 598 599 ··· 2641 2640 csgs_upd_ctx_init(&upd_ctx); 2642 2641 while (slot_mask) { 2643 2642 u32 csg_id = ffs(slot_mask) - 1; 2643 + struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; 2644 + 2645 + /* We consider group suspension failures as fatal and flag the 2646 + * group as unusable by setting timedout=true. 2647 + */ 2648 + csg_slot->group->timedout = true; 2644 2649 2645 2650 csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id, 2646 2651 CSG_STATE_TERMINATE, ··· 3412 3405 3413 3406 job->group = group_from_handle(gpool, group_handle); 3414 3407 if (!job->group) { 3408 + ret = -EINVAL; 3409 + goto err_put_job; 3410 + } 3411 + 3412 + if (!group_can_run(job->group)) { 3415 3413 ret = -EINVAL; 3416 3414 goto err_put_job; 3417 3415 }
+3 -2
drivers/gpu/drm/scheduler/sched_main.c
··· 1276 1276 sched->own_submit_wq = false; 1277 1277 } else { 1278 1278 #ifdef CONFIG_LOCKDEP 1279 - sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0, 1279 + sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 1280 + WQ_MEM_RECLAIM, 1280 1281 &drm_sched_lockdep_map); 1281 1282 #else 1282 - sched->submit_wq = alloc_ordered_workqueue(name, 0); 1283 + sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 1283 1284 #endif 1284 1285 if (!sched->submit_wq) 1285 1286 return -ENOMEM;
+2 -2
drivers/gpu/drm/tegra/drm.c
··· 1153 1153 1154 1154 if (host1x_drm_wants_iommu(dev) && device_iommu_mapped(dma_dev)) { 1155 1155 tegra->domain = iommu_paging_domain_alloc(dma_dev); 1156 - if (!tegra->domain) { 1157 - err = -ENOMEM; 1156 + if (IS_ERR(tegra->domain)) { 1157 + err = PTR_ERR(tegra->domain); 1158 1158 goto free; 1159 1159 } 1160 1160
+12 -12
drivers/gpu/drm/tests/drm_connector_test.c
··· 996 996 unsigned long long rate; 997 997 struct drm_device *drm = &priv->drm; 998 998 999 - mode = drm_display_mode_from_cea_vic(drm, 16); 999 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); 1000 1000 KUNIT_ASSERT_NOT_NULL(test, mode); 1001 1001 1002 1002 KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); ··· 1017 1017 unsigned long long rate; 1018 1018 struct drm_device *drm = &priv->drm; 1019 1019 1020 - mode = drm_display_mode_from_cea_vic(drm, 16); 1020 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); 1021 1021 KUNIT_ASSERT_NOT_NULL(test, mode); 1022 1022 1023 1023 KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); ··· 1038 1038 unsigned long long rate; 1039 1039 struct drm_device *drm = &priv->drm; 1040 1040 1041 - mode = drm_display_mode_from_cea_vic(drm, 1); 1041 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); 1042 1042 KUNIT_ASSERT_NOT_NULL(test, mode); 1043 1043 1044 1044 rate = drm_hdmi_compute_mode_clock(mode, 10, HDMI_COLORSPACE_RGB); ··· 1056 1056 unsigned long long rate; 1057 1057 struct drm_device *drm = &priv->drm; 1058 1058 1059 - mode = drm_display_mode_from_cea_vic(drm, 16); 1059 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); 1060 1060 KUNIT_ASSERT_NOT_NULL(test, mode); 1061 1061 1062 1062 KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); ··· 1077 1077 unsigned long long rate; 1078 1078 struct drm_device *drm = &priv->drm; 1079 1079 1080 - mode = drm_display_mode_from_cea_vic(drm, 1); 1080 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); 1081 1081 KUNIT_ASSERT_NOT_NULL(test, mode); 1082 1082 1083 1083 rate = drm_hdmi_compute_mode_clock(mode, 12, HDMI_COLORSPACE_RGB); ··· 1095 1095 unsigned long long rate; 1096 1096 struct drm_device *drm = &priv->drm; 1097 1097 1098 - mode = drm_display_mode_from_cea_vic(drm, 6); 1098 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 6); 1099 1099 KUNIT_ASSERT_NOT_NULL(test, mode); 1100 1100 1101 1101 
KUNIT_ASSERT_TRUE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); ··· 1118 1118 unsigned long long rate; 1119 1119 unsigned int vic = *(unsigned int *)test->param_value; 1120 1120 1121 - mode = drm_display_mode_from_cea_vic(drm, vic); 1121 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); 1122 1122 KUNIT_ASSERT_NOT_NULL(test, mode); 1123 1123 1124 1124 KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); ··· 1155 1155 drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0]; 1156 1156 unsigned long long rate; 1157 1157 1158 - mode = drm_display_mode_from_cea_vic(drm, vic); 1158 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); 1159 1159 KUNIT_ASSERT_NOT_NULL(test, mode); 1160 1160 1161 1161 KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); ··· 1180 1180 drm_hdmi_compute_mode_clock_yuv420_vic_valid_tests[0]; 1181 1181 unsigned long long rate; 1182 1182 1183 - mode = drm_display_mode_from_cea_vic(drm, vic); 1183 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, vic); 1184 1184 KUNIT_ASSERT_NOT_NULL(test, mode); 1185 1185 1186 1186 KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); ··· 1203 1203 struct drm_device *drm = &priv->drm; 1204 1204 unsigned long long rate; 1205 1205 1206 - mode = drm_display_mode_from_cea_vic(drm, 16); 1206 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); 1207 1207 KUNIT_ASSERT_NOT_NULL(test, mode); 1208 1208 1209 1209 KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); ··· 1225 1225 struct drm_device *drm = &priv->drm; 1226 1226 unsigned long long rate; 1227 1227 1228 - mode = drm_display_mode_from_cea_vic(drm, 16); 1228 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 16); 1229 1229 KUNIT_ASSERT_NOT_NULL(test, mode); 1230 1230 1231 1231 KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK); ··· 1247 1247 struct drm_device *drm = &priv->drm; 1248 1248 unsigned long long rate; 1249 1249 1250 - mode = drm_display_mode_from_cea_vic(drm, 16); 1250 + mode = 
drm_kunit_display_mode_from_cea_vic(test, drm, 16); 1251 1251 KUNIT_ASSERT_NOT_NULL(test, mode); 1252 1252 1253 1253 KUNIT_ASSERT_FALSE(test, mode->flags & DRM_MODE_FLAG_DBLCLK);
+4 -4
drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
··· 441 441 ctx = drm_kunit_helper_acquire_ctx_alloc(test); 442 442 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 443 443 444 - mode = drm_display_mode_from_cea_vic(drm, 1); 444 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); 445 445 KUNIT_ASSERT_NOT_NULL(test, mode); 446 446 447 447 drm = &priv->drm; ··· 555 555 ctx = drm_kunit_helper_acquire_ctx_alloc(test); 556 556 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 557 557 558 - mode = drm_display_mode_from_cea_vic(drm, 1); 558 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); 559 559 KUNIT_ASSERT_NOT_NULL(test, mode); 560 560 561 561 drm = &priv->drm; ··· 671 671 ctx = drm_kunit_helper_acquire_ctx_alloc(test); 672 672 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 673 673 674 - mode = drm_display_mode_from_cea_vic(drm, 1); 674 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); 675 675 KUNIT_ASSERT_NOT_NULL(test, mode); 676 676 677 677 drm = &priv->drm; ··· 1263 1263 ctx = drm_kunit_helper_acquire_ctx_alloc(test); 1264 1264 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx); 1265 1265 1266 - mode = drm_display_mode_from_cea_vic(drm, 1); 1266 + mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1); 1267 1267 KUNIT_ASSERT_NOT_NULL(test, mode); 1268 1268 1269 1269 /*
+42
drivers/gpu/drm/tests/drm_kunit_helpers.c
··· 3 3 #include <drm/drm_atomic.h> 4 4 #include <drm/drm_atomic_helper.h> 5 5 #include <drm/drm_drv.h> 6 + #include <drm/drm_edid.h> 6 7 #include <drm/drm_fourcc.h> 7 8 #include <drm/drm_kunit_helpers.h> 8 9 #include <drm/drm_managed.h> ··· 311 310 return crtc; 312 311 } 313 312 EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc); 313 + 314 + static void kunit_action_drm_mode_destroy(void *ptr) 315 + { 316 + struct drm_display_mode *mode = ptr; 317 + 318 + drm_mode_destroy(NULL, mode); 319 + } 320 + 321 + /** 322 + * drm_kunit_display_mode_from_cea_vic() - return a mode for CEA VIC 323 + for a KUnit test 324 + * @test: The test context object 325 + * @dev: DRM device 326 + * @video_code: CEA VIC of the mode 327 + * 328 + * Creates a new mode matching the specified CEA VIC for a KUnit test. 329 + * 330 + * Resources will be cleaned up automatically. 331 + * 332 + * Returns: A new drm_display_mode on success or NULL on failure 333 + */ 334 + struct drm_display_mode * 335 + drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev, 336 + u8 video_code) 337 + { 338 + struct drm_display_mode *mode; 339 + int ret; 340 + 341 + mode = drm_display_mode_from_cea_vic(dev, video_code); 342 + if (!mode) 343 + return NULL; 344 + 345 + ret = kunit_add_action_or_reset(test, 346 + kunit_action_drm_mode_destroy, 347 + mode); 348 + if (ret) 349 + return NULL; 350 + 351 + return mode; 352 + } 353 + EXPORT_SYMBOL_GPL(drm_kunit_display_mode_from_cea_vic); 314 354 315 355 MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>"); 316 356 MODULE_DESCRIPTION("KUnit test suite helper functions");
+46 -25
drivers/gpu/drm/xe/display/xe_display.c
··· 309 309 } 310 310 311 311 /* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */ 312 - void xe_display_pm_runtime_suspend(struct xe_device *xe) 313 - { 314 - if (!xe->info.probe_display) 315 - return; 316 - 317 - if (xe->d3cold.allowed) 318 - xe_display_pm_suspend(xe, true); 319 - 320 - intel_hpd_poll_enable(xe); 321 - } 322 - 323 - void xe_display_pm_suspend(struct xe_device *xe, bool runtime) 312 + static void __xe_display_pm_suspend(struct xe_device *xe, bool runtime) 324 313 { 325 314 struct intel_display *display = &xe->display; 326 315 bool s2idle = suspend_to_idle(); ··· 342 353 intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold); 343 354 344 355 intel_dmc_suspend(xe); 356 + 357 + if (runtime && has_display(xe)) 358 + intel_hpd_poll_enable(xe); 359 + } 360 + 361 + void xe_display_pm_suspend(struct xe_device *xe) 362 + { 363 + __xe_display_pm_suspend(xe, false); 364 + } 365 + 366 + void xe_display_pm_runtime_suspend(struct xe_device *xe) 367 + { 368 + if (!xe->info.probe_display) 369 + return; 370 + 371 + if (xe->d3cold.allowed) { 372 + __xe_display_pm_suspend(xe, true); 373 + return; 374 + } 375 + 376 + intel_hpd_poll_enable(xe); 345 377 } 346 378 347 379 void xe_display_pm_suspend_late(struct xe_device *xe) ··· 376 366 intel_display_power_suspend_late(xe); 377 367 } 378 368 379 - void xe_display_pm_runtime_resume(struct xe_device *xe) 380 - { 381 - if (!xe->info.probe_display) 382 - return; 383 - 384 - intel_hpd_poll_disable(xe); 385 - 386 - if (xe->d3cold.allowed) 387 - xe_display_pm_resume(xe, true); 388 - } 389 - 390 369 void xe_display_pm_resume_early(struct xe_device *xe) 391 370 { 392 371 if (!xe->info.probe_display) ··· 386 387 intel_power_domains_resume(xe); 387 388 } 388 389 389 - void xe_display_pm_resume(struct xe_device *xe, bool runtime) 390 + static void __xe_display_pm_resume(struct xe_device *xe, bool runtime) 390 391 { 391 392 struct intel_display *display = &xe->display; 392 393 ··· 410 411 
intel_display_driver_resume(xe); 411 412 drm_kms_helper_poll_enable(&xe->drm); 412 413 intel_display_driver_enable_user_access(xe); 413 - intel_hpd_poll_disable(xe); 414 414 } 415 + 416 + if (has_display(xe)) 417 + intel_hpd_poll_disable(xe); 415 418 416 419 intel_opregion_resume(display); 417 420 ··· 421 420 422 421 intel_power_domains_enable(xe); 423 422 } 423 + 424 + void xe_display_pm_resume(struct xe_device *xe) 425 + { 426 + __xe_display_pm_resume(xe, false); 427 + } 428 + 429 + void xe_display_pm_runtime_resume(struct xe_device *xe) 430 + { 431 + if (!xe->info.probe_display) 432 + return; 433 + 434 + if (xe->d3cold.allowed) { 435 + __xe_display_pm_resume(xe, true); 436 + return; 437 + } 438 + 439 + intel_hpd_init(xe); 440 + intel_hpd_poll_disable(xe); 441 + } 442 + 424 443 425 444 static void display_device_remove(struct drm_device *dev, void *arg) 426 445 {
+4 -4
drivers/gpu/drm/xe/display/xe_display.h
··· 34 34 void xe_display_irq_reset(struct xe_device *xe); 35 35 void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt); 36 36 37 - void xe_display_pm_suspend(struct xe_device *xe, bool runtime); 37 + void xe_display_pm_suspend(struct xe_device *xe); 38 38 void xe_display_pm_suspend_late(struct xe_device *xe); 39 39 void xe_display_pm_resume_early(struct xe_device *xe); 40 - void xe_display_pm_resume(struct xe_device *xe, bool runtime); 40 + void xe_display_pm_resume(struct xe_device *xe); 41 41 void xe_display_pm_runtime_suspend(struct xe_device *xe); 42 42 void xe_display_pm_runtime_resume(struct xe_device *xe); 43 43 ··· 65 65 static inline void xe_display_irq_reset(struct xe_device *xe) {} 66 66 static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {} 67 67 68 - static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {} 68 + static inline void xe_display_pm_suspend(struct xe_device *xe) {} 69 69 static inline void xe_display_pm_suspend_late(struct xe_device *xe) {} 70 70 static inline void xe_display_pm_resume_early(struct xe_device *xe) {} 71 - static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {} 71 + static inline void xe_display_pm_resume(struct xe_device *xe) {} 72 72 static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {} 73 73 static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {} 74 74
+10
drivers/gpu/drm/xe/xe_ggtt.c
··· 397 397 398 398 static void xe_ggtt_invalidate(struct xe_ggtt *ggtt) 399 399 { 400 + struct xe_device *xe = tile_to_xe(ggtt->tile); 401 + 402 + /* 403 + * XXX: Barrier for GGTT pages. Unsure exactly why this required but 404 + * without this LNL is having issues with the GuC reading scratch page 405 + * vs. correct GGTT page. Not particularly a hot code path so blindly 406 + * do a mmio read here which results in GuC reading correct GGTT page. 407 + */ 408 + xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG); 409 + 400 410 /* Each GT in a tile has its own TLB to cache GGTT lookups */ 401 411 ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt); 402 412 ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
+12 -6
drivers/gpu/drm/xe/xe_guc_submit.c
··· 916 916 static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job) 917 917 { 918 918 struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q)); 919 - u32 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]); 920 - u32 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); 919 + u32 ctx_timestamp, ctx_job_timestamp; 921 920 u32 timeout_ms = q->sched_props.job_timeout_ms; 922 921 u32 diff; 923 922 u64 running_time_ms; 923 + 924 + if (!xe_sched_job_started(job)) { 925 + xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, not started", 926 + xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job), 927 + q->guc->id); 928 + 929 + return xe_sched_invalidate_job(job, 2); 930 + } 931 + 932 + ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]); 933 + ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); 924 934 925 935 /* 926 936 * Counter wraps at ~223s at the usual 19.2MHz, be paranoid catch ··· 1058 1048 skip_timeout_check = exec_queue_reset(q) || 1059 1049 exec_queue_killed_or_banned_or_wedged(q) || 1060 1050 exec_queue_destroyed(q); 1061 - 1062 - /* Job hasn't started, can't be timed out */ 1063 - if (!skip_timeout_check && !xe_sched_job_started(job)) 1064 - goto rearm; 1065 1051 1066 1052 /* 1067 1053 * XXX: Sampling timeout doesn't work in wedged mode as we have to
+3 -3
drivers/gpu/drm/xe/xe_pm.c
··· 123 123 for_each_gt(gt, xe, id) 124 124 xe_gt_suspend_prepare(gt); 125 125 126 - xe_display_pm_suspend(xe, false); 126 + xe_display_pm_suspend(xe); 127 127 128 128 /* FIXME: Super racey... */ 129 129 err = xe_bo_evict_all(xe); ··· 133 133 for_each_gt(gt, xe, id) { 134 134 err = xe_gt_suspend(gt); 135 135 if (err) { 136 - xe_display_pm_resume(xe, false); 136 + xe_display_pm_resume(xe); 137 137 goto err; 138 138 } 139 139 } ··· 187 187 for_each_gt(gt, xe, id) 188 188 xe_gt_resume(gt); 189 189 190 - xe_display_pm_resume(xe, false); 190 + xe_display_pm_resume(xe); 191 191 192 192 err = xe_bo_restore_user(xe); 193 193 if (err)
+4
include/drm/drm_kunit_helpers.h
··· 120 120 const struct drm_crtc_funcs *funcs, 121 121 const struct drm_crtc_helper_funcs *helper_funcs); 122 122 123 + struct drm_display_mode * 124 + drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev, 125 + u8 video_code); 126 + 123 127 #endif // DRM_KUNIT_HELPERS_H_