Merge tag 'drm-fixes-2026-02-06' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"The usual xe/amdgpu selection, and a couple of misc changes for
gma500, mgag200 and bridge. There is a nouveau revert, and also a set
of changes that fix a regression since we moved to 570 firmware.
Suspend/resume was broken on a bunch of GPUs. The fix looks big, but
it's mostly just refactoring to pass an extra bit down the nouveau
abstractions to the firmware command.

amdgpu:
- MES 11 old firmware compatibility fix
- ASPM fix
- DC LUT fixes

amdkfd:
- Fix possible double deletion of validate list

xe:
- Fix topology query pointer advance
- A couple of kerneldoc fixes
- Disable D3Cold for BMG only on specific platforms
- Fix CFI violation in debugfs access

nouveau:
- Revert adding atomic commit functions as it regresses pre-nv50
- Fix suspend/resume bugs exposed by enabling 570 firmware

gma500:
- Revert a regression caused by vblank changes

mgag200:
- Replace a busy loop with a sleeping polling loop; the busy loop was
  blocking one CPU for 300 ms roughly every 20 minutes

bridge:
- imx8mp-hdmi-pai: Use runtime PM to fix a bug in channel ordering"

* tag 'drm-fixes-2026-02-06' of https://gitlab.freedesktop.org/drm/kernel:
drm/xe/guc: Fix CFI violation in debugfs access.
drm/bridge: imx8mp-hdmi-pai: enable PM runtime
drm/xe/pm: Disable D3Cold for BMG only on specific platforms
drm/xe: Fix kerneldoc for xe_tlb_inval_job_alloc_dep
drm/xe: Fix kerneldoc for xe_gt_tlb_inval_init_early
drm/xe: Fix kerneldoc for xe_migrate_exec_queue
drm/xe/query: Fix topology query pointer advance
drm/mgag200: fix mgag200_bmc_stop_scanout()
nouveau/gsp: fix suspend/resume regression on r570 firmware
nouveau: add a third state to the fini handler.
nouveau/gsp: use rpc sequence numbers properly.
drm/amdgpu: Fix double deletion of validate_list
drm/amd/display: remove assert around dpp_base replacement
drm/amd/display: extend delta clamping logic to CM3 LUT helper
drm/amd/display: fix wrong color value mapping on MCM shaper LUT
Revert "drm/amd: Check if ASPM is enabled from PCIe subsystem"
drm/amd: Set minimum version for set_hw_resource_1 on gfx11 to 0x52
Revert "drm/gma500: use drm_crtc_vblank_crtc()"
Revert "drm/nouveau/disp: Set drm_mode_config_funcs.atomic_(check|commit)"

+290 -189
+9 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
···
 
 	/* Make sure restore workers don't access the BO any more */
 	mutex_lock(&process_info->lock);
-	list_del(&mem->validate_list);
+	if (!list_empty(&mem->validate_list))
+		list_del_init(&mem->validate_list);
 	mutex_unlock(&process_info->lock);
-
-	/* Cleanup user pages and MMU notifiers */
-	if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
-		amdgpu_hmm_unregister(mem->bo);
-		mutex_lock(&process_info->notifier_lock);
-		amdgpu_hmm_range_free(mem->range);
-		mutex_unlock(&process_info->notifier_lock);
-	}
 
 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
 	if (unlikely(ret))
 		return ret;
+
+	/* Cleanup user pages and MMU notifiers */
+	if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+		amdgpu_hmm_unregister(mem->bo);
+		amdgpu_hmm_range_free(mem->range);
+		mem->range = NULL;
+	}
 
 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
 					    process_info->eviction_fence);
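The crash fixed here comes from unlinking a list entry twice: plain list_del() leaves the entry's pointers dangling, so a second removal corrupts the list, while list_del_init() re-points the entry at itself so a later list_empty() check can skip it. A minimal userspace sketch of that semantic difference, using a stripped-down reimplementation of the kernel's circular list (the real helpers live in <linux/list.h>):

  #include <stdio.h>
  #include <stdbool.h>

  /* Minimal userspace model of the kernel's circular doubly-linked list. */
  struct list_head { struct list_head *next, *prev; };

  static void list_init(struct list_head *h) { h->next = h->prev = h; }
  static bool list_empty(const struct list_head *h) { return h->next == h; }

  static void list_add(struct list_head *e, struct list_head *h)
  {
      e->next = h->next;
      e->prev = h;
      h->next->prev = e;
      h->next = e;
  }

  /* list_del() leaves e->next/e->prev stale; deleting twice corrupts. */
  static void list_del(struct list_head *e)
  {
      e->prev->next = e->next;
      e->next->prev = e->prev;
  }

  /* list_del_init() re-initialises e, so list_empty(e) is true afterwards. */
  static void list_del_init(struct list_head *e)
  {
      list_del(e);
      list_init(e);
  }

  int main(void)
  {
      struct list_head head, entry;

      list_init(&head);
      list_add(&entry, &head);

      /* The fixed pattern: guard with list_empty(), unlink with _init. */
      if (!list_empty(&entry))
          list_del_init(&entry);
      if (!list_empty(&entry))    /* a second removal is now a no-op */
          list_del_init(&entry);

      printf("entry removed once, second removal safely skipped\n");
      return 0;
  }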
-3
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
···
 		return -ENODEV;
 	}
 
-	if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev))
-		amdgpu_aspm = 0;
-
 	if (amdgpu_virtual_display ||
 	    amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK))
 		supports_atomic = true;
+1 -1
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
···
 	if (r)
 		goto failure;
 
-	if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x50) {
+	if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x52) {
 		r = mes_v11_0_set_hw_resources_1(&adev->mes);
 		if (r) {
 			DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
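The fix simply raises the minimum scheduler-firmware version allowed to take the set_hw_resources_1 path, so older MES 11 firmware skips it. A tiny illustration of the masked-version-gate pattern (the mask value below is a hypothetical stand-in, not the real AMDGPU_MES_VERSION_MASK):

  #include <stdio.h>

  /* Hypothetical stand-ins; the real mask lives in the amdgpu headers. */
  #define MES_VERSION_MASK         0x000000ff
  #define MES_MIN_VERSION_HW_RES_1 0x52

  /* The feature is used only when the masked version meets the minimum. */
  static int supports_hw_resource_1(unsigned sched_version)
  {
      return (sched_version & MES_VERSION_MASK) >= MES_MIN_VERSION_HW_RES_1;
  }

  int main(void)
  {
      unsigned old_fw = 0x00000150;  /* masked version 0x50: too old */
      unsigned new_fw = 0x00000152;  /* masked version 0x52: ok */

      printf("0x50 firmware: %d\n", supports_hw_resource_1(old_fw));
      printf("0x52 firmware: %d\n", supports_hw_resource_1(new_fw));
      return 0;
  }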
+29 -8
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
···
 #define NUMBER_REGIONS 32
 #define NUMBER_SW_SEGMENTS 16
 
-bool cm3_helper_translate_curve_to_hw_format(
-		const struct dc_transfer_func *output_tf,
-		struct pwl_params *lut_params, bool fixpoint)
+#define DC_LOGGER \
+	ctx->logger
+
+bool cm3_helper_translate_curve_to_hw_format(struct dc_context *ctx,
+		const struct dc_transfer_func *output_tf,
+		struct pwl_params *lut_params, bool fixpoint)
 {
 	struct curve_points3 *corner_points;
 	struct pwl_result_data *rgb_resulted;
···
 		if (seg_distr[k] != -1)
 			hw_points += (1 << seg_distr[k]);
 	}
+
+	// DCN3+ have 257 pts in lieu of no separate slope registers
+	// Prior HW had 256 base+slope pairs
+	// Shaper LUT (i.e. fixpoint == true) is still 256 bases and 256 deltas
+	hw_points = fixpoint ? (hw_points - 1) : hw_points;
 
 	j = 0;
 	for (k = 0; k < (region_end - region_start); k++) {
···
 	corner_points[1].green.slope = dc_fixpt_zero;
 	corner_points[1].blue.slope = dc_fixpt_zero;
 
-	// DCN3+ have 257 pts in lieu of no separate slope registers
-	// Prior HW had 256 base+slope pairs
 	lut_params->hw_points_num = hw_points + 1;
 
 	k = 0;
···
 	if (fixpoint == true) {
 		i = 1;
 		while (i != hw_points + 2) {
+			uint32_t red_clamp;
+			uint32_t green_clamp;
+			uint32_t blue_clamp;
+
 			if (i >= hw_points) {
 				if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
 					rgb_plus_1->red = dc_fixpt_add(rgb->red,
···
 						rgb_minus_1->delta_blue);
 			}
 
-			rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red);
-			rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green);
-			rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue);
+			rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
+			rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
+			rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
+
+			red_clamp = dc_fixpt_clamp_u0d14(rgb->delta_red);
+			green_clamp = dc_fixpt_clamp_u0d14(rgb->delta_green);
+			blue_clamp = dc_fixpt_clamp_u0d14(rgb->delta_blue);
+
+			if (red_clamp >> 10 || green_clamp >> 10 || blue_clamp >> 10)
+				DC_LOG_ERROR("Losing delta precision while programming shaper LUT.");
+
+			rgb->delta_red_reg = red_clamp & 0x3ff;
+			rgb->delta_green_reg = green_clamp & 0x3ff;
+			rgb->delta_blue_reg = blue_clamp & 0x3ff;
 			rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red);
 			rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green);
 			rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
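The shaper-LUT fix recomputes the deltas, clamps them in the wider u0.14 format, and only then truncates to the 10-bit register field, logging when the truncation would drop set bits. A userspace sketch of that precision check with plain integers standing in for dc_fixpt values (the 14- and 10-bit widths follow the diff; everything else is illustrative):

  #include <stdio.h>

  /* Clamp a raw fixed-point fraction into an unsigned 14-bit field. */
  static unsigned clamp_u0d14(unsigned v)
  {
      return v > 0x3fff ? 0x3fff : v;
  }

  /* Truncate a clamped u0.14 delta to the 10-bit register field,
   * reporting when the upper bits (which the register cannot hold)
   * are non-zero, i.e. precision would silently be lost. */
  static unsigned delta_to_reg(unsigned delta, int *lost)
  {
      unsigned clamped = clamp_u0d14(delta);

      if (clamped >> 10)
          *lost = 1;   /* would be DC_LOG_ERROR() in the driver */
      return clamped & 0x3ff;
  }

  int main(void)
  {
      int lost = 0;
      unsigned small = delta_to_reg(0x00ff, &lost);  /* fits in 10 bits */
      unsigned big = delta_to_reg(0x1fff, &lost);    /* top bits dropped */

      printf("small -> 0x%03x, big -> 0x%03x, precision lost: %d\n",
             small, big, lost);
      return 0;
  }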
+1 -1
drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h
···
 		const struct pwl_params *params,
 		const struct dcn3_xfer_func_reg *reg);
 
-bool cm3_helper_translate_curve_to_hw_format(
+bool cm3_helper_translate_curve_to_hw_format(struct dc_context *ctx,
 		const struct dc_transfer_func *output_tf,
 		struct pwl_params *lut_params, bool fixpoint);
 
+5 -4
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
···
 	if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
 		blend_lut = &plane_state->blend_tf.pwl;
 	else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
-		result = cm3_helper_translate_curve_to_hw_format(
+		result = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
 				&plane_state->blend_tf, &dpp_base->regamma_params, false);
 		if (!result)
 			return result;
···
 	if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL)
 		params = &plane_state->in_transfer_func.pwl;
 	else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS &&
-		 cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func,
-							 &dpp_base->degamma_params, false))
+		 cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
+							 &plane_state->in_transfer_func,
+							 &dpp_base->degamma_params, false))
 		params = &dpp_base->degamma_params;
 
 	result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
···
 		params = &stream->out_transfer_func.pwl;
 	else if (pipe_ctx->stream->out_transfer_func.type ==
 		 TF_TYPE_DISTRIBUTED_POINTS &&
-		 cm3_helper_translate_curve_to_hw_format(
+		 cm3_helper_translate_curve_to_hw_format(stream->ctx,
 			 &stream->out_transfer_func,
 			 &mpc->blender_params, false))
 		params = &mpc->blender_params;
+10 -8
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
···
 	if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
 		lut_params = &plane_state->blend_tf.pwl;
 	else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
-		result = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
-				&dpp_base->regamma_params, false);
+		result = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
+				&plane_state->blend_tf,
+				&dpp_base->regamma_params, false);
 		if (!result)
 			return result;
 
···
 		lut_params = &plane_state->in_shaper_func.pwl;
 	else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
 		// TODO: dpp_base replace
-		ASSERT(false);
-		cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
-				&dpp_base->shaper_params, true);
+		cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
+				&plane_state->in_shaper_func,
+				&dpp_base->shaper_params, true);
 		lut_params = &dpp_base->shaper_params;
 	}
 
···
 	if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL)
 		params = &plane_state->in_transfer_func.pwl;
 	else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS &&
-		 cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func,
-				&dpp_base->degamma_params, false))
+		 cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
+				&plane_state->in_transfer_func,
+				&dpp_base->degamma_params, false))
 		params = &dpp_base->degamma_params;
 
 	dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params);
···
 		params = &stream->out_transfer_func.pwl;
 	else if (pipe_ctx->stream->out_transfer_func.type ==
 		 TF_TYPE_DISTRIBUTED_POINTS &&
-		 cm3_helper_translate_curve_to_hw_format(
+		 cm3_helper_translate_curve_to_hw_format(stream->ctx,
 			 &stream->out_transfer_func,
 			 &mpc->blender_params, false))
 		params = &mpc->blender_params;
+9 -7
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
···
 	if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
 		m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
 	else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
-		rval = cm3_helper_translate_curve_to_hw_format(
+		rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
 				mcm_luts.lut1d_func,
 				&dpp_base->regamma_params, false);
 		m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
···
 		m_lut_params.pwl = &mcm_luts.shaper->pwl;
 	else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
 		ASSERT(false);
-		rval = cm3_helper_translate_curve_to_hw_format(
+		rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
 				mcm_luts.shaper,
 				&dpp_base->regamma_params, true);
 		m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
···
 	if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
 		lut_params = &plane_state->blend_tf.pwl;
 	else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
-		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf,
-				&dpp_base->regamma_params, false);
+		rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
+				&plane_state->blend_tf,
+				&dpp_base->regamma_params, false);
 		lut_params = rval ? &dpp_base->regamma_params : NULL;
 	}
 	result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
···
 		lut_params = &plane_state->in_shaper_func.pwl;
 	else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
 		// TODO: dpp_base replace
-		rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func,
-				&dpp_base->shaper_params, true);
+		rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
+				&plane_state->in_shaper_func,
+				&dpp_base->shaper_params, true);
 		lut_params = rval ? &dpp_base->shaper_params : NULL;
 	}
 	result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
···
 		params = &stream->out_transfer_func.pwl;
 	else if (pipe_ctx->stream->out_transfer_func.type ==
 		 TF_TYPE_DISTRIBUTED_POINTS &&
-		 cm3_helper_translate_curve_to_hw_format(
+		 cm3_helper_translate_curve_to_hw_format(stream->ctx,
 			 &stream->out_transfer_func,
 			 &mpc->blender_params, false))
 		params = &mpc->blender_params;
+15
drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c
···
 #include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <drm/bridge/dw_hdmi.h>
 #include <sound/asoundef.h>
···
 
 struct imx8mp_hdmi_pai {
 	struct regmap *regmap;
+	struct device *dev;
 };
 
 static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel,
···
 	const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi);
 	struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio;
 	int val;
+
+	if (pm_runtime_resume_and_get(hdmi_pai->dev) < 0)
+		return;
 
 	/* PAI set control extended */
 	val = WTMK_HIGH(3) | WTMK_LOW(3);
···
 
 	/* Stop PAI */
 	regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0);
+
+	pm_runtime_put_sync(hdmi_pai->dev);
 }
 
 static const struct regmap_config imx8mp_hdmi_pai_regmap_config = {
···
 	struct imx8mp_hdmi_pai *hdmi_pai;
 	struct resource *res;
 	void __iomem *base;
+	int ret;
 
 	hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL);
 	if (!hdmi_pai)
···
 	plat_data->enable_audio = imx8mp_hdmi_pai_enable;
 	plat_data->disable_audio = imx8mp_hdmi_pai_disable;
 	plat_data->priv_audio = hdmi_pai;
+
+	hdmi_pai->dev = dev;
+	ret = devm_pm_runtime_enable(dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable PM runtime: %d\n", ret);
+		return ret;
+	}
 
 	return 0;
 }
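The bridge fix brackets hardware access between a resume_and_get and a put, so the power domain is guaranteed to be on while the PAI registers are programmed. A userspace model of that usage-count contract (the kernel's real helpers are pm_runtime_resume_and_get()/pm_runtime_put_sync(); this only mimics the counting):

  #include <stdio.h>

  /* Toy model of a runtime-PM usage counter: the device may only be
   * powered down when the count is zero. */
  struct toy_dev {
      int usage;
      int powered;
  };

  static int toy_resume_and_get(struct toy_dev *d)
  {
      d->usage++;
      d->powered = 1;   /* resume happens on the 0 -> 1 transition */
      return 0;
  }

  static void toy_put_sync(struct toy_dev *d)
  {
      if (--d->usage == 0)
          d->powered = 0;   /* last user gone: allow suspend */
  }

  static void toy_enable(struct toy_dev *d)
  {
      if (toy_resume_and_get(d) < 0)
          return;
      printf("programming registers (powered=%d)\n", d->powered);
      /* ... register writes would go here ... */
  }

  static void toy_disable(struct toy_dev *d)
  {
      printf("stopping device\n");
      toy_put_sync(d);   /* balances the get in toy_enable() */
  }

  int main(void)
  {
      struct toy_dev dev = { 0, 0 };

      toy_enable(&dev);
      toy_disable(&dev);
      printf("usage=%d powered=%d\n", dev.usage, dev.powered);
      return 0;
  }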
+13 -23
drivers/gpu/drm/gma500/psb_irq.c
···
 void gma_irq_preinstall(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-	struct drm_crtc *crtc;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
···
 	PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
 	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
 
-	drm_for_each_crtc(crtc, dev) {
-		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
-
-		if (vblank->enabled) {
-			u32 mask = drm_crtc_index(crtc) ? _PSB_VSYNC_PIPEB_FLAG :
-							  _PSB_VSYNC_PIPEA_FLAG;
-			dev_priv->vdc_irq_mask |= mask;
-		}
-	}
+	if (dev->vblank[0].enabled)
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+	if (dev->vblank[1].enabled)
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
 
 	/* Revisit this area - want per device masks ? */
 	if (dev_priv->ops->hotplug)
···
 void gma_irq_postinstall(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-	struct drm_crtc *crtc;
 	unsigned long irqflags;
+	unsigned int i;
 
 	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
···
 	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-	drm_for_each_crtc(crtc, dev) {
-		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
-
-		if (vblank->enabled)
-			gma_enable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
+	for (i = 0; i < dev->num_crtcs; ++i) {
+		if (dev->vblank[i].enabled)
+			gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
 		else
-			gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
+			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
 	}
 
 	if (dev_priv->ops->hotplug_enable)
···
 {
 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
-	struct drm_crtc *crtc;
 	unsigned long irqflags;
+	unsigned int i;
 
 	if (!dev_priv->irq_enabled)
 		return;
···
 
 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-	drm_for_each_crtc(crtc, dev) {
-		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
-
-		if (vblank->enabled)
-			gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
+	for (i = 0; i < dev->num_crtcs; ++i) {
+		if (dev->vblank[i].enabled)
+			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
 	}
 
 	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+12 -19
drivers/gpu/drm/mgag200/mgag200_bmc.c
···
 // SPDX-License-Identifier: GPL-2.0-only
 
 #include <linux/delay.h>
+#include <linux/iopoll.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
···
 void mgag200_bmc_stop_scanout(struct mga_device *mdev)
 {
 	u8 tmp;
-	int iter_max;
+	int ret;
 
 	/*
 	 * 1 - The first step is to inform the BMC of an upcoming mode
···
 
 	/*
 	 * 3a- The third step is to verify if there is an active scan.
-	 * We are waiting for a 0 on remhsyncsts <XSPAREREG<0>).
+	 * We are waiting for a 0 on remhsyncsts (XSPAREREG<0>).
 	 */
-	iter_max = 300;
-	while (!(tmp & 0x1) && iter_max) {
-		WREG8(DAC_INDEX, MGA1064_SPAREREG);
-		tmp = RREG8(DAC_DATA);
-		udelay(1000);
-		iter_max--;
-	}
+	ret = read_poll_timeout(RREG_DAC, tmp, !(tmp & 0x1),
+				1000, 300000, false,
+				MGA1064_SPAREREG);
+	if (ret == -ETIMEDOUT)
+		return;
 
 	/*
-	 * 3b- This step occurs only if the remove is actually
+	 * 3b- This step occurs only if the remote BMC is actually
 	 * scanning. We are waiting for the end of the frame which is
 	 * a 1 on remvsyncsts (XSPAREREG<1>)
 	 */
-	if (iter_max) {
-		iter_max = 300;
-		while ((tmp & 0x2) && iter_max) {
-			WREG8(DAC_INDEX, MGA1064_SPAREREG);
-			tmp = RREG8(DAC_DATA);
-			udelay(1000);
-			iter_max--;
-		}
-	}
+	(void)read_poll_timeout(RREG_DAC, tmp, (tmp & 0x2),
+				1000, 300000, false,
+				MGA1064_SPAREREG);
 }
 
 void mgag200_bmc_start_scanout(struct mga_device *mdev)
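read_poll_timeout() sleeps between reads instead of spinning in udelay(), which is what stops this path from pinning a CPU for 300 ms. A self-contained approximation of its loop structure in portable C, polling a status read with a sleep interval and a deadline (the real macro in <linux/iopoll.h> additionally takes the read op and its arguments as macro parameters):

  #include <stdio.h>
  #include <time.h>
  #include <errno.h>

  /* Poll read_fn() every sleep_us microseconds until cond(val) holds or
   * timeout_us elapses; mirrors the shape of the kernel's read_poll_timeout. */
  static int poll_timeout(unsigned (*read_fn)(void), int (*cond)(unsigned),
                          long sleep_us, long timeout_us, unsigned *val)
  {
      struct timespec ts = { 0, sleep_us * 1000L };
      long waited = 0;

      for (;;) {
          *val = read_fn();
          if (cond(*val))
              return 0;
          if (waited >= timeout_us)
              return -ETIMEDOUT;
          nanosleep(&ts, NULL);   /* sleep, don't spin */
          waited += sleep_us;
      }
  }

  static unsigned fake_reads;
  static unsigned fake_status(void) { return ++fake_reads >= 5 ? 0 : 1; }
  static int bit0_clear(unsigned v) { return !(v & 0x1); }

  int main(void)
  {
      unsigned v;
      int ret = poll_timeout(fake_status, bit0_clear, 1000, 300000, &v);

      printf("ret=%d after %u reads (0 means condition met)\n",
             ret, fake_reads);
      return 0;
  }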
+6
drivers/gpu/drm/mgag200/mgag200_drv.h
···
 #define DAC_INDEX 0x3c00
 #define DAC_DATA 0x3c0a
 
+#define RREG_DAC(reg) \
+	({ \
+		WREG8(DAC_INDEX, reg); \
+		RREG8(DAC_DATA); \
+	}) \
+
 #define WREG_DAC(reg, v) \
 	do { \
 		WREG8(DAC_INDEX, reg); \
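RREG_DAC packs the index write plus the data read into one expression so it can be passed as the read op to read_poll_timeout(). It relies on the GCC/Clang statement-expression extension, where the last expression in a ({ ... }) block becomes the block's value. A small standalone demonstration (the registers and macro names below are hypothetical; compiles with GCC or Clang only):

  #include <stdio.h>

  /* Fake index/data register pair standing in for DAC_INDEX/DAC_DATA. */
  static unsigned char regs[256];
  static unsigned char cur_index;

  #define WREG8(addr, v) ((addr) ? (regs[cur_index] = (v)) : (cur_index = (v)))
  #define RREG8(addr)    ((addr) ? regs[cur_index] : cur_index)

  /* Statement expression: select the register, then yield its value.
   * The value of the last expression in ({ ... }) is the macro's result. */
  #define RREG_IDX(reg) \
      ({ \
          WREG8(0, reg); \
          RREG8(1); \
      })

  int main(void)
  {
      WREG8(0, 0x1e);   /* select register 0x1e */
      WREG8(1, 0xab);   /* write its data */

      printf("reg 0x1e = 0x%02x\n", RREG_IDX(0x1e));
      return 0;
  }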
+1 -1
drivers/gpu/drm/nouveau/include/nvif/client.h
···
 
 int nvif_client_ctor(struct nvif_client *parent, const char *name, struct nvif_client *);
 void nvif_client_dtor(struct nvif_client *);
-int nvif_client_suspend(struct nvif_client *);
+int nvif_client_suspend(struct nvif_client *, bool);
 int nvif_client_resume(struct nvif_client *);
 
 /*XXX*/
+1 -1
drivers/gpu/drm/nouveau/include/nvif/driver.h
···
 	const char *name;
 	int (*init)(const char *name, u64 device, const char *cfg,
 		    const char *dbg, void **priv);
-	int (*suspend)(void *priv);
+	int (*suspend)(void *priv, bool runtime);
 	int (*resume)(void *priv);
 	int (*ioctl)(void *priv, void *data, u32 size, void **hack);
 	void __iomem *(*map)(void *priv, u64 handle, u32 size);
+2 -1
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
···
 #ifndef __NVKM_DEVICE_H__
 #define __NVKM_DEVICE_H__
 #include <core/oclass.h>
+#include <core/suspend_state.h>
 #include <core/intr.h>
 enum nvkm_subdev_type;
 
···
 	void *(*dtor)(struct nvkm_device *);
 	int (*preinit)(struct nvkm_device *);
 	int (*init)(struct nvkm_device *);
-	void (*fini)(struct nvkm_device *, bool suspend);
+	void (*fini)(struct nvkm_device *, enum nvkm_suspend_state suspend);
 	int (*irq)(struct nvkm_device *);
 	resource_size_t (*resource_addr)(struct nvkm_device *, enum nvkm_bar_id);
 	resource_size_t (*resource_size)(struct nvkm_device *, enum nvkm_bar_id);
+1 -1
drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
···
 	int (*oneinit)(struct nvkm_engine *);
 	int (*info)(struct nvkm_engine *, u64 mthd, u64 *data);
 	int (*init)(struct nvkm_engine *);
-	int (*fini)(struct nvkm_engine *, bool suspend);
+	int (*fini)(struct nvkm_engine *, enum nvkm_suspend_state suspend);
 	int (*reset)(struct nvkm_engine *);
 	int (*nonstall)(struct nvkm_engine *);
 	void (*intr)(struct nvkm_engine *);
+3 -2
drivers/gpu/drm/nouveau/include/nvkm/core/object.h
···
 #ifndef __NVKM_OBJECT_H__
 #define __NVKM_OBJECT_H__
 #include <core/oclass.h>
+#include <core/suspend_state.h>
 struct nvkm_event;
 struct nvkm_gpuobj;
 struct nvkm_uevent;
···
 struct nvkm_object_func {
 	void *(*dtor)(struct nvkm_object *);
 	int (*init)(struct nvkm_object *);
-	int (*fini)(struct nvkm_object *, bool suspend);
+	int (*fini)(struct nvkm_object *, enum nvkm_suspend_state suspend);
 	int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size);
 	int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **);
 	int (*map)(struct nvkm_object *, void *argv, u32 argc,
···
 void nvkm_object_del(struct nvkm_object **);
 void *nvkm_object_dtor(struct nvkm_object *);
 int nvkm_object_init(struct nvkm_object *);
-int nvkm_object_fini(struct nvkm_object *, bool suspend);
+int nvkm_object_fini(struct nvkm_object *, enum nvkm_suspend_state);
 int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size);
 int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **);
 int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc,
+1 -1
drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
···
 struct nvkm_oproxy_func {
 	void (*dtor[2])(struct nvkm_oproxy *);
 	int (*init[2])(struct nvkm_oproxy *);
-	int (*fini[2])(struct nvkm_oproxy *, bool suspend);
+	int (*fini[2])(struct nvkm_oproxy *, enum nvkm_suspend_state suspend);
 };
 
 void nvkm_oproxy_ctor(const struct nvkm_oproxy_func *,
+2 -2
drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
···
 	int (*oneinit)(struct nvkm_subdev *);
 	int (*info)(struct nvkm_subdev *, u64 mthd, u64 *data);
 	int (*init)(struct nvkm_subdev *);
-	int (*fini)(struct nvkm_subdev *, bool suspend);
+	int (*fini)(struct nvkm_subdev *, enum nvkm_suspend_state suspend);
 	void (*intr)(struct nvkm_subdev *);
 };
···
 int nvkm_subdev_preinit(struct nvkm_subdev *);
 int nvkm_subdev_oneinit(struct nvkm_subdev *);
 int nvkm_subdev_init(struct nvkm_subdev *);
-int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend);
+int nvkm_subdev_fini(struct nvkm_subdev *, enum nvkm_suspend_state suspend);
 int nvkm_subdev_info(struct nvkm_subdev *, u64, u64 *);
 void nvkm_subdev_intr(struct nvkm_subdev *);
 
+11
drivers/gpu/drm/nouveau/include/nvkm/core/suspend_state.h
···
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_SUSPEND_STATE_H__
+#define __NVKM_SUSPEND_STATE_H__
+
+enum nvkm_suspend_state {
+	NVKM_POWEROFF,
+	NVKM_SUSPEND,
+	NVKM_RUNTIME_SUSPEND,
+};
+
+#endif
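This new header is the core of the refactor: the old bool suspend flag becomes a three-way state, so GSP code can tell runtime suspend apart from system suspend and plain poweroff. A compact standalone illustration of how callers map the old bool plus a runtime flag onto the enum, and how consumers that only care about "suspending at all" compare against the poweroff state (the names mirror the nouveau diff, but this is just a sketch of the pattern):

  #include <stdio.h>

  enum suspend_state {
      POWEROFF,          /* plain fini/teardown */
      SUSPEND,           /* system suspend (S3/S4) */
      RUNTIME_SUSPEND,   /* runtime PM, e.g. GC-OFF */
  };

  /* Old entry points took (bool suspend); the shim maps bool + runtime
   * flag onto the richer enum, as nvkm_client_suspend() now does. */
  static enum suspend_state to_state(int suspend, int runtime)
  {
      if (!suspend)
          return POWEROFF;
      return runtime ? RUNTIME_SUSPEND : SUSPEND;
  }

  static void fini(enum suspend_state state)
  {
      /* Consumers that only care whether state must be preserved keep
       * the old boolean behaviour via a != POWEROFF comparison. */
      int preserving = (state != POWEROFF);

      /* Only the runtime path tells the firmware it is entering GC-OFF. */
      int gcoff = (state == RUNTIME_SUSPEND);

      printf("state=%d preserving=%d gcoff=%d\n", state, preserving, gcoff);
  }

  int main(void)
  {
      fini(to_state(0, 0));   /* module unload / poweroff */
      fini(to_state(1, 0));   /* system suspend */
      fini(to_state(1, 1));   /* runtime suspend */
      return 0;
  }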
+6
drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
···
 * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the
 * caller after the GSP RPC command is issued.
 *
+ * NVKM_GSP_RPC_REPLY_NOSEQ - If specified, exactly like NOWAIT
+ * but don't emit RPC sequence number.
+ *
 * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP
 * RPC message after the GSP RPC command is issued.
 *
···
 */
enum nvkm_gsp_rpc_reply_policy {
	NVKM_GSP_RPC_REPLY_NOWAIT = 0,
+	NVKM_GSP_RPC_REPLY_NOSEQ,
	NVKM_GSP_RPC_REPLY_RECV,
	NVKM_GSP_RPC_REPLY_POLL,
};
···
 
	/* The size of the registry RPC */
	size_t registry_rpc_size;
+
+	u32 rpc_seq;
 
 #ifdef CONFIG_DEBUG_FS
	/*
-2
drivers/gpu/drm/nouveau/nouveau_display.c
···
 
 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
 	.fb_create = nouveau_user_framebuffer_create,
-	.atomic_commit = drm_atomic_helper_commit,
-	.atomic_check = drm_atomic_helper_check,
 };
 
+1 -1
drivers/gpu/drm/nouveau/nouveau_drm.c
···
 	}
 
 	NV_DEBUG(drm, "suspending object tree...\n");
-	ret = nvif_client_suspend(&drm->_client);
+	ret = nvif_client_suspend(&drm->_client, runtime);
 	if (ret)
 		goto fail_client;
 
+8 -2
drivers/gpu/drm/nouveau/nouveau_nvif.c
···
 }
 
 static int
-nvkm_client_suspend(void *priv)
+nvkm_client_suspend(void *priv, bool runtime)
 {
 	struct nvkm_client *client = priv;
-	return nvkm_object_fini(&client->object, true);
+	enum nvkm_suspend_state state;
+
+	if (runtime)
+		state = NVKM_RUNTIME_SUSPEND;
+	else
+		state = NVKM_SUSPEND;
+	return nvkm_object_fini(&client->object, state);
 }
 
 static int
+2 -2
drivers/gpu/drm/nouveau/nvif/client.c
···
 #include <nvif/if0000.h>
 
 int
-nvif_client_suspend(struct nvif_client *client)
+nvif_client_suspend(struct nvif_client *client, bool runtime)
 {
-	return client->driver->suspend(client->object.priv);
+	return client->driver->suspend(client->object.priv, runtime);
 }
 
 int
+2 -2
drivers/gpu/drm/nouveau/nvkm/core/engine.c
···
 	if (engine->func->reset)
 		return engine->func->reset(engine);
 
-	nvkm_subdev_fini(&engine->subdev, false);
+	nvkm_subdev_fini(&engine->subdev, NVKM_POWEROFF);
 	return nvkm_subdev_init(&engine->subdev);
 }
···
 }
 
 static int
-nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_engine_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_engine *engine = nvkm_engine(subdev);
 	if (engine->func->fini)
+2 -2
drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
···
 			}
 			ret = -EEXIST;
 		}
-		nvkm_object_fini(object, false);
+		nvkm_object_fini(object, NVKM_POWEROFF);
 	}
 
 	nvkm_object_del(&object);
···
 	nvif_ioctl(object, "delete size %d\n", size);
 	if (!(ret = nvif_unvers(ret, &data, &size, args->none))) {
 		nvif_ioctl(object, "delete\n");
-		nvkm_object_fini(object, false);
+		nvkm_object_fini(object, NVKM_POWEROFF);
 		nvkm_object_del(&object);
 	}
 
+16 -4
drivers/gpu/drm/nouveau/nvkm/core/object.c
···
 }
 
 int
-nvkm_object_fini(struct nvkm_object *object, bool suspend)
+nvkm_object_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
-	const char *action = suspend ? "suspend" : "fini";
+	const char *action;
 	struct nvkm_object *child;
 	s64 time;
 	int ret;
 
+	switch (suspend) {
+	case NVKM_POWEROFF:
+	default:
+		action = "fini";
+		break;
+	case NVKM_SUSPEND:
+		action = "suspend";
+		break;
+	case NVKM_RUNTIME_SUSPEND:
+		action = "runtime";
+		break;
+	}
 	nvif_debug(object, "%s children...\n", action);
 	time = ktime_to_us(ktime_get());
 	list_for_each_entry_reverse(child, &object->tree, head) {
···
 
 fail_child:
 	list_for_each_entry_continue_reverse(child, &object->tree, head)
-		nvkm_object_fini(child, false);
+		nvkm_object_fini(child, NVKM_POWEROFF);
 fail:
 	nvif_error(object, "init failed with %d\n", ret);
 	if (object->func->fini)
-		object->func->fini(object, false);
+		object->func->fini(object, NVKM_POWEROFF);
 	return ret;
 }
 
+1 -1
drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
···
 }
 
 static int
-nvkm_oproxy_fini(struct nvkm_object *object, bool suspend)
+nvkm_oproxy_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_oproxy *oproxy = nvkm_oproxy(object);
 	int ret;
+15 -3
drivers/gpu/drm/nouveau/nvkm/core/subdev.c
···
 }
 
 int
-nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_subdev_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_device *device = subdev->device;
-	const char *action = suspend ? "suspend" : subdev->use.enabled ? "fini" : "reset";
+	const char *action;
 	s64 time;
 
+	switch (suspend) {
+	case NVKM_POWEROFF:
+	default:
+		action = subdev->use.enabled ? "fini" : "reset";
+		break;
+	case NVKM_SUSPEND:
+		action = "suspend";
+		break;
+	case NVKM_RUNTIME_SUSPEND:
+		action = "runtime";
+		break;
+	}
 	nvkm_trace(subdev, "%s running...\n", action);
 	time = ktime_to_us(ktime_get());
 
···
 nvkm_subdev_unref(struct nvkm_subdev *subdev)
 {
 	if (refcount_dec_and_mutex_lock(&subdev->use.refcount, &subdev->use.mutex)) {
-		nvkm_subdev_fini(subdev, false);
+		nvkm_subdev_fini(subdev, NVKM_POWEROFF);
 		mutex_unlock(&subdev->use.mutex);
 	}
 }
+1 -1
drivers/gpu/drm/nouveau/nvkm/core/uevent.c
···
 }
 
 static int
-nvkm_uevent_fini(struct nvkm_object *object, bool suspend)
+nvkm_uevent_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_uevent *uevent = nvkm_uevent(object);
 
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
···
 }
 
 int
-ga100_ce_fini(struct nvkm_engine *engine, bool suspend)
+ga100_ce_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend)
 {
 	nvkm_inth_block(&engine->subdev.inth);
 	return 0;
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
···
 
 int ga100_ce_oneinit(struct nvkm_engine *);
 int ga100_ce_init(struct nvkm_engine *);
-int ga100_ce_fini(struct nvkm_engine *, bool);
+int ga100_ce_fini(struct nvkm_engine *, enum nvkm_suspend_state);
 int ga100_ce_nonstall(struct nvkm_engine *);
 
 u32 gb202_ce_grce_mask(struct nvkm_device *);
+17 -5
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
···
 }
 
 int
-nvkm_device_fini(struct nvkm_device *device, bool suspend)
+nvkm_device_fini(struct nvkm_device *device, enum nvkm_suspend_state suspend)
 {
-	const char *action = suspend ? "suspend" : "fini";
+	const char *action;
 	struct nvkm_subdev *subdev;
 	int ret;
 	s64 time;
 
+	switch (suspend) {
+	case NVKM_POWEROFF:
+	default:
+		action = "fini";
+		break;
+	case NVKM_SUSPEND:
+		action = "suspend";
+		break;
+	case NVKM_RUNTIME_SUSPEND:
+		action = "runtime";
+		break;
+	}
 	nvdev_trace(device, "%s running...\n", action);
 	time = ktime_to_us(ktime_get());
 
···
 	if (ret)
 		return ret;
 
-	nvkm_device_fini(device, false);
+	nvkm_device_fini(device, NVKM_POWEROFF);
 
 	nvdev_trace(device, "init running...\n");
 	time = ktime_to_us(ktime_get());
···
 
 fail_subdev:
 	list_for_each_entry_from(subdev, &device->subdev, head)
-		nvkm_subdev_fini(subdev, false);
+		nvkm_subdev_fini(subdev, NVKM_POWEROFF);
 fail:
-	nvkm_device_fini(device, false);
+	nvkm_device_fini(device, NVKM_POWEROFF);
 
 	nvdev_error(device, "init failed with %d\n", ret);
 	return ret;
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
···
 }
 
 static void
-nvkm_device_pci_fini(struct nvkm_device *device, bool suspend)
+nvkm_device_pci_fini(struct nvkm_device *device, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
-	if (suspend) {
+	if (suspend != NVKM_POWEROFF) {
 		pci_disable_device(pdev->pdev);
 		pdev->suspend = true;
 	}
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
···
 		    const char *name, const char *cfg, const char *dbg,
 		    struct nvkm_device *);
 int nvkm_device_init(struct nvkm_device *);
-int nvkm_device_fini(struct nvkm_device *, bool suspend);
+int nvkm_device_fini(struct nvkm_device *, enum nvkm_suspend_state suspend);
 #endif
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
···
 }
 
 static int
-nvkm_udevice_fini(struct nvkm_object *object, bool suspend)
+nvkm_udevice_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_udevice *udev = nvkm_udevice(object);
 	struct nvkm_device *device = udev->device;
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
···
 }
 
 static int
-nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
+nvkm_disp_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_disp *disp = nvkm_disp(engine);
 	struct nvkm_outp *outp;
 
 	if (disp->func->fini)
-		disp->func->fini(disp, suspend);
+		disp->func->fini(disp, suspend != NVKM_POWEROFF);
 
 	list_for_each_entry(outp, &disp->outps, head) {
 		if (outp->func->fini)
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
···
 }
 
 static int
-nvkm_disp_chan_fini(struct nvkm_object *object, bool suspend)
+nvkm_disp_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
 
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
···
 }
 
 static int
-nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
+nvkm_falcon_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_falcon *falcon = nvkm_falcon(engine);
 	struct nvkm_device *device = falcon->engine.subdev.device;
 	const u32 base = falcon->addr;
 
-	if (!suspend) {
+	if (suspend == NVKM_POWEROFF) {
 		nvkm_memory_unref(&falcon->core);
 		if (falcon->external) {
 			vfree(falcon->data.data);
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
···
 }
 
 static int
-nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
+nvkm_fifo_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_fifo *fifo = nvkm_fifo(engine);
 	struct nvkm_runl *runl;
+3 -3
drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
···
 };
 
 static int
-nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, bool suspend)
+nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy);
 	struct nvkm_chan *chan = uobj->chan;
···
 		nvkm_chan_cctx_bind(chan, ectx->engn, NULL);
 
 	if (refcount_dec_and_test(&ectx->uses))
-		nvkm_object_fini(ectx->object, false);
+		nvkm_object_fini(ectx->object, NVKM_POWEROFF);
 	mutex_unlock(&chan->cgrp->mutex);
 }
···
 }
 
 static int
-nvkm_uchan_fini(struct nvkm_object *object, bool suspend)
+nvkm_uchan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_chan *chan = nvkm_uchan(object)->chan;
 
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
···
 }
 
 static int
-nvkm_gr_fini(struct nvkm_engine *engine, bool suspend)
+nvkm_gr_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_gr *gr = nvkm_gr(engine);
 	if (gr->func->fini)
-		return gr->func->fini(gr, suspend);
+		return gr->func->fini(gr, suspend != NVKM_POWEROFF);
 	return 0;
 }
 
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
···
 
 	WARN_ON(gf100_gr_fecs_halt_pipeline(gr));
 
-	subdev->func->fini(subdev, false);
+	subdev->func->fini(subdev, NVKM_POWEROFF);
 	nvkm_mc_disable(device, subdev->type, subdev->inst);
 	if (gr->func->gpccs.reset)
 		gr->func->gpccs.reset(gr);
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
···
 }
 
 static int
-nv04_gr_chan_fini(struct nvkm_object *object, bool suspend)
+nv04_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
 	struct nv04_gr_chan *chan = nv04_gr_chan(object);
 	struct nv04_gr *gr = chan->gr;
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
···
 }
 
 static int
-nv10_gr_chan_fini(struct nvkm_object *object, bool suspend)
+nv10_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
 	struct nv10_gr_chan *chan = nv10_gr_chan(object);
 	struct nv10_gr *gr = chan->gr;
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
···
 }
 
 int
-nv20_gr_chan_fini(struct nvkm_object *object, bool suspend)
+nv20_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
 	struct nv20_gr_chan *chan = nv20_gr_chan(object);
 	struct nv20_gr *gr = chan->gr;
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
···
 
 void *nv20_gr_chan_dtor(struct nvkm_object *);
 int nv20_gr_chan_init(struct nvkm_object *);
-int nv20_gr_chan_fini(struct nvkm_object *, bool);
+int nv20_gr_chan_fini(struct nvkm_object *, enum nvkm_suspend_state);
 #endif
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
···
 }
 
 static int
-nv40_gr_chan_fini(struct nvkm_object *object, bool suspend)
+nv40_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
 	struct nv40_gr_chan *chan = nv40_gr_chan(object);
 	struct nv40_gr *gr = chan->gr;
···
 	nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
 
 	if (nvkm_rd32(device, 0x40032c) == inst) {
-		if (suspend) {
+		if (suspend != NVKM_POWEROFF) {
 			nvkm_wr32(device, 0x400720, 0x00000000);
 			nvkm_wr32(device, 0x400784, inst);
 			nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
···
 }
 
 static int
-nv44_mpeg_chan_fini(struct nvkm_object *object, bool suspend)
+nv44_mpeg_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
 
 	struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
···
 }
 
 static int
-nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
+nvkm_sec2_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
 	struct nvkm_subdev *subdev = &sec2->engine.subdev;
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
···
 }
 
 static int
-nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend)
+nvkm_xtensa_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_xtensa *xtensa = nvkm_xtensa(engine);
 	struct nvkm_device *device = xtensa->engine.subdev.device;
···
 	nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */
 	nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */
 
-	if (!suspend)
+	if (suspend == NVKM_POWEROFF)
 		nvkm_memory_unref(&xtensa->gpu_fw);
 	return 0;
 }
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
···
 }
 
 static int
-nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_acr_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	if (!subdev->use.enabled)
 		return 0;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
···
 }
 
 static int
-nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_bar_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_bar *bar = nvkm_bar(subdev);
 
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
···
 }
 
 static int
-nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_clk_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_clk *clk = nvkm_clk(subdev);
 	flush_work(&clk->work);
+2 -2
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
···
 }
 
 static int
-nvkm_devinit_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_devinit_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_devinit *init = nvkm_devinit(subdev);
 	/* force full reinit on resume */
-	if (suspend)
+	if (suspend != NVKM_POWEROFF)
 		init->post = true;
 	return 0;
 }
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
···
 }
 
 static int
-nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_fault_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_fault *fault = nvkm_fault(subdev);
 	if (fault->func->fini)
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
···
 }
 
 static int
-nvkm_ufault_fini(struct nvkm_object *object, bool suspend)
+nvkm_ufault_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
 	buffer->fault->func->buffer.fini(buffer);
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
···
 }
 
 static int
-nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_gpio_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_gpio *gpio = nvkm_gpio(subdev);
 	u32 mask = (1ULL << gpio->func->lines) - 1;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
···
 }
 
 static int
-nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_gsp_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_gsp *gsp = nvkm_gsp(subdev);
 
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
···
 #include <nvhw/ref/gh100/dev_riscv_pri.h>
 
 int
-gh100_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+gh100_gsp_fini(struct nvkm_gsp *gsp, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_falcon *falcon = &gsp->falcon;
 	int ret, time = 4000;
+4 -4
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
···
 	void (*dtor)(struct nvkm_gsp *);
 	int (*oneinit)(struct nvkm_gsp *);
 	int (*init)(struct nvkm_gsp *);
-	int (*fini)(struct nvkm_gsp *, bool suspend);
+	int (*fini)(struct nvkm_gsp *, enum nvkm_suspend_state suspend);
 	int (*reset)(struct nvkm_gsp *);
 
 	struct {
···
 void tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
 int tu102_gsp_oneinit(struct nvkm_gsp *);
 int tu102_gsp_init(struct nvkm_gsp *);
-int tu102_gsp_fini(struct nvkm_gsp *, bool suspend);
+int tu102_gsp_fini(struct nvkm_gsp *, enum nvkm_suspend_state suspend);
 int tu102_gsp_reset(struct nvkm_gsp *);
 u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *);
 
···
 
 int gh100_gsp_oneinit(struct nvkm_gsp *);
 int gh100_gsp_init(struct nvkm_gsp *);
-int gh100_gsp_fini(struct nvkm_gsp *, bool suspend);
+int gh100_gsp_fini(struct nvkm_gsp *, enum nvkm_suspend_state suspend);
 
 void r535_gsp_dtor(struct nvkm_gsp *);
 int r535_gsp_oneinit(struct nvkm_gsp *);
 int r535_gsp_init(struct nvkm_gsp *);
-int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
+int r535_gsp_fini(struct nvkm_gsp *, enum nvkm_suspend_state suspend);
 
 int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
 		  struct nvkm_gsp **);
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
···
 }
 
 static int
-r535_fbsr_suspend(struct nvkm_gsp *gsp)
+r535_fbsr_suspend(struct nvkm_gsp *gsp, bool runtime)
 {
 	struct nvkm_subdev *subdev = &gsp->subdev;
 	struct nvkm_device *device = subdev->device;
+4 -4
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
···
 
 	build_registry(gsp, rpc);
 
-	return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT);
+	return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOSEQ);
 
 fail:
 	clean_registry(gsp);
···
 	info->pciConfigMirrorSize = device->pci->func->cfg.size;
 	r535_gsp_acpi_info(gsp, &info->acpiMethodData);
 
-	return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
+	return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOSEQ);
 }
 
 static int
···
 }
 
 int
-r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+r535_gsp_fini(struct nvkm_gsp *gsp, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_rm *rm = gsp->rm;
 	int ret;
···
 	sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
 	sr->sizeOfSuspendResumeData = len;
 
-	ret = rm->api->fbsr->suspend(gsp);
+	ret = rm->api->fbsr->suspend(gsp, suspend == NVKM_RUNTIME_SUSPEND);
 	if (ret) {
 		nvkm_gsp_mem_dtor(&gsp->sr.meta);
 		nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
+6
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
···
 
 	switch (policy) {
 	case NVKM_GSP_RPC_REPLY_NOWAIT:
+	case NVKM_GSP_RPC_REPLY_NOSEQ:
 		break;
 	case NVKM_GSP_RPC_REPLY_RECV:
 		reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len);
···
 		print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
 			       rpc->data, rpc->length - sizeof(*rpc), true);
 	}
+
+	if (policy == NVKM_GSP_RPC_REPLY_NOSEQ)
+		rpc->sequence = 0;
+	else
+		rpc->sequence = gsp->rpc_seq++;
 
 	ret = r535_gsp_cmdq_push(gsp, rpc);
 	if (ret)
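The sequence-number fix gives every queued RPC a monotonically increasing sequence, except the handful that must go out before the firmware's sequencing starts (the new NOSEQ policy, used for the registry and system-info RPCs above). A toy command-queue sketch of that policy split (the struct fields and names are illustrative, not the real r535 layout):

  #include <stdio.h>

  enum reply_policy { NOWAIT, NOSEQ, RECV, POLL };

  /* Illustrative RPC header; the real r535 layout differs. */
  struct rpc { unsigned function; unsigned sequence; };

  static unsigned rpc_seq;   /* per-GSP counter, as in gsp->rpc_seq */

  /* NOSEQ messages (early-boot RPCs) always carry sequence 0; everything
   * else consumes the next number so the firmware can order replies. */
  static void push(struct rpc *rpc, enum reply_policy policy)
  {
      if (policy == NOSEQ)
          rpc->sequence = 0;
      else
          rpc->sequence = rpc_seq++;
      printf("fn=%u seq=%u\n", rpc->function, rpc->sequence);
  }

  int main(void)
  {
      struct rpc set_registry = { 1, 0 };
      struct rpc set_system_info = { 2, 0 };
      struct rpc normal_a = { 3, 0 }, normal_b = { 4, 0 };

      push(&set_registry, NOSEQ);     /* pre-init: unsequenced */
      push(&set_system_info, NOSEQ);
      push(&normal_a, NOWAIT);        /* seq 0 */
      push(&normal_b, RECV);          /* seq 1 */
      return 0;
  }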
+4 -4
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
···
 }
 
 static int
-r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size)
+r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, bool runtime)
 {
 	NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
 	struct nvkm_gsp_object memlist;
···
 	ctrl->hClient = gsp->internal.client.object.handle;
 	ctrl->hSysMem = memlist.handle;
 	ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr;
-	ctrl->bEnteringGcoffState = 1;
+	ctrl->bEnteringGcoffState = runtime ? 1 : 0;
 
 	ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
 	if (ret)
···
 }
 
 static int
-r570_fbsr_suspend(struct nvkm_gsp *gsp)
+r570_fbsr_suspend(struct nvkm_gsp *gsp, bool runtime)
 {
 	struct nvkm_subdev *subdev = &gsp->subdev;
 	struct nvkm_device *device = subdev->device;
···
 		return ret;
 
 	/* Initialise FBSR on RM. */
-	ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size);
+	ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size, runtime);
 	if (ret) {
 		nvkm_gsp_sg_free(device, &gsp->sr.fbsr);
 		return ret;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
···
 	info->bIsPrimary = video_is_primary_device(device->dev);
 	info->bPreserveVideoMemoryAllocations = false;
 
-	return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT);
+	return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOSEQ);
 }
 
 static void
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
···
 	} *device;
 
 	const struct nvkm_rm_api_fbsr {
-		int (*suspend)(struct nvkm_gsp *);
+		int (*suspend)(struct nvkm_gsp *, bool runtime);
 		void (*resume)(struct nvkm_gsp *);
 	} *fbsr;
 
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
···
 }
 
 int
-tu102_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+tu102_gsp_fini(struct nvkm_gsp *gsp, enum nvkm_suspend_state suspend)
 {
 	u32 mbox0 = 0xff, mbox1 = 0xff;
 	int ret;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
···
 }
 
 static int
-nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_i2c_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_i2c *i2c = nvkm_i2c(subdev);
 	struct nvkm_i2c_pad *pad;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
···
 }
 
 static int
-nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_instmem_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_instmem *imem = nvkm_instmem(subdev);
 	int ret;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
···
 }
 
 static int
-nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_pci_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_pci *pci = nvkm_pci(subdev);
 
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
···
 }
 
 static int
-nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_pmu_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
 
+3 -3
drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
···
 }
 
 static int
-nvkm_therm_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_therm_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_therm *therm = nvkm_therm(subdev);
 
 	if (therm->func->fini)
 		therm->func->fini(therm);
 
-	nvkm_therm_fan_fini(therm, suspend);
-	nvkm_therm_sensor_fini(therm, suspend);
+	nvkm_therm_fan_fini(therm, suspend != NVKM_POWEROFF);
+	nvkm_therm_sensor_fini(therm, suspend != NVKM_POWEROFF);
 
 	if (suspend) {
 		therm->suspend = therm->mode;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
···
 }
 
 static int
-nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend)
+nvkm_timer_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend)
 {
 	struct nvkm_timer *tmr = nvkm_timer(subdev);
 	tmr->func->alarm_fini(tmr);
+4 -2
drivers/gpu/drm/xe/xe_guc.c
···
 	return xe_guc_submit_start(guc);
 }
 
-void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
+int xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
 {
 	struct xe_gt *gt = guc_to_gt(guc);
 	unsigned int fw_ref;
···
 	if (!IS_SRIOV_VF(gt_to_xe(gt))) {
 		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
 		if (!fw_ref)
-			return;
+			return -EIO;
 
 		status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
 
···
 
 	drm_puts(p, "\n");
 	xe_guc_submit_print(guc, p);
+
+	return 0;
 }
 
 /**
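The CFI angle here is about function-pointer signatures: kernels built with Control Flow Integrity check at every indirect call that the target's prototype matches the pointer's type, so a void function registered where an int-returning callback is expected traps at the call site. Returning int both satisfies the expected debugfs show signature and lets forcewake failure be reported. A plain-C illustration of the mismatch (in userspace a cast like this is merely undefined behaviour; under kernel CFI it is a trap):

  #include <stdio.h>

  /* The table expects "show" callbacks that return a status code. */
  typedef int (*show_fn)(const char *name);

  /* Correctly typed callback: reports failure instead of returning void. */
  static int print_info(const char *name)
  {
      if (!name)
          return -5;   /* e.g. -EIO when forcewake can't be taken */
      printf("info for %s\n", name);
      return 0;
  }

  /* A void-returning callback would have to be cast to show_fn to land
   * in the table; calling through that mismatched pointer is exactly
   * what CFI rejects. */

  struct debugfs_entry {
      const char *name;
      show_fn show;
  };

  int main(void)
  {
      struct debugfs_entry entry = { "guc_info", print_info };
      int ret = entry.show(entry.name);   /* prototype matches: OK */

      printf("show returned %d\n", ret);
      return 0;
  }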
+1 -1
drivers/gpu/drm/xe/xe_guc.h
···
 int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val);
 void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir);
 void xe_guc_sanitize(struct xe_guc *guc);
-void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p);
+int xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p);
 int xe_guc_reset_prepare(struct xe_guc *guc);
 void xe_guc_reset_wait(struct xe_guc *guc);
 void xe_guc_stop_prepare(struct xe_guc *guc);
+1 -1
drivers/gpu/drm/xe/xe_migrate.c
···
 }
 
 /**
- * xe_get_migrate_exec_queue() - Get the execution queue from migrate context.
+ * xe_migrate_exec_queue() - Get the execution queue from migrate context.
  * @migrate: Migrate context.
  *
  * Return: Pointer to execution queue on success, error on failure
+10 -3
drivers/gpu/drm/xe/xe_pm.c
···
 #include <linux/fault-inject.h>
 #include <linux/pm_runtime.h>
 #include <linux/suspend.h>
+#include <linux/dmi.h>
 
 #include <drm/drm_managed.h>
 #include <drm/ttm/ttm_placement.h>
···
 
 static u32 vram_threshold_value(struct xe_device *xe)
 {
-	/* FIXME: D3Cold temporarily disabled by default on BMG */
-	if (xe->info.platform == XE_BATTLEMAGE)
-		return 0;
+	if (xe->info.platform == XE_BATTLEMAGE) {
+		const char *product_name;
+
+		product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+		if (product_name && strstr(product_name, "NUC13RNG")) {
+			drm_warn(&xe->drm, "BMG + D3Cold not supported on this platform\n");
+			return 0;
+		}
+	}
 
 	return DEFAULT_VRAM_THRESHOLD;
 }
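Instead of disabling D3Cold on every Battlemage, the fix consults the DMI product name and only zeroes the VRAM threshold on the one system known to misbehave. A standalone sketch of that quirk-match shape (dmi_get_system_info() is the real kernel API; the lookup below is a stub, and the threshold value is illustrative):

  #include <stdio.h>
  #include <string.h>

  /* Stub for dmi_get_system_info(DMI_PRODUCT_NAME); real code reads DMI. */
  static const char *product_name(void)
  {
      return "NUC13RNG-Something";
  }

  /* Return 0 (disable the feature) only on the quirked platform;
   * everyone else keeps the default threshold. */
  static unsigned vram_threshold(int is_battlemage, unsigned def)
  {
      if (is_battlemage) {
          const char *name = product_name();

          if (name && strstr(name, "NUC13RNG")) {
              fprintf(stderr, "D3Cold not supported here\n");
              return 0;
          }
      }
      return def;
  }

  int main(void)
  {
      printf("threshold on quirked BMG system: %u\n", vram_threshold(1, 300));
      printf("threshold elsewhere: %u\n", vram_threshold(0, 300));
      return 0;
  }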
+1 -1
drivers/gpu/drm/xe/xe_query.c
···
 
 	if (copy_to_user(*ptr, topo, sizeof(*topo)))
 		return -EFAULT;
-	*ptr += sizeof(topo);
+	*ptr += sizeof(*topo);
 
 	if (copy_to_user(*ptr, mask, mask_size))
 		return -EFAULT;
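The one-character bug: sizeof(topo) is the size of the pointer (8 bytes on 64-bit), while sizeof(*topo) is the size of the struct just copied, so the output pointer advanced by the wrong stride and the following mask landed inside the previous record. A two-line demonstration (the struct layout is hypothetical):

  #include <stdio.h>

  struct topology { unsigned gt_id; unsigned type; unsigned size; };

  int main(void)
  {
      struct topology t = { 0 }, *topo = &t;

      /* sizeof(topo) is the pointer's size; sizeof(*topo) is the
       * struct's size -- only the latter is a valid record stride. */
      printf("sizeof(topo)  = %zu (pointer)\n", sizeof(topo));
      printf("sizeof(*topo) = %zu (struct)\n", sizeof(*topo));
      return 0;
  }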
+1 -1
drivers/gpu/drm/xe/xe_tlb_inval.c
···
 }
 
 /**
- * xe_gt_tlb_inval_init - Initialize TLB invalidation state
+ * xe_gt_tlb_inval_init_early() - Initialize TLB invalidation state
  * @gt: GT structure
  *
  * Initialize TLB invalidation state, purely software initialization, should
+1 -1
drivers/gpu/drm/xe/xe_tlb_inval_job.c
···
 }
 
 /**
- * xe_tlb_inval_alloc_dep() - TLB invalidation job alloc dependency
+ * xe_tlb_inval_job_alloc_dep() - TLB invalidation job alloc dependency
  * @job: TLB invalidation job to alloc dependency for
  *
  * Allocate storage for a dependency in the TLB invalidation fence. This