Merge tag 'drm-fixes-2026-02-06' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"The usual xe/amdgpu selection, and a couple of misc changes for
gma500, mgag200 and bridge. There is a nouveau revert, and also a set
of changes that fix a regression introduced when we moved to the 570
firmware: suspend/resume was broken on a bunch of GPUs. The fix looks
big, but it's mostly just refactoring to pass an extra bit down the
nouveau abstractions to the firmware command.

amdgpu:
- MES 11 old firmware compatibility fix
- ASPM fix
- DC LUT fixes

amdkfd:
- Fix possible double deletion of validate list

xe:
- Fix topology query pointer advance
- A couple of kerneldoc fixes
- Disable D3Cold for BMG only on specific platforms
- Fix CFI violation in debugfs access

nouveau:
- Revert adding atomic commit functions as it regresses pre-nv50 hardware
- Fix suspend/resume bugs exposed by enabling 570 firmware

gma500:
- Revert a regression caused by vblank changes

mgag200:
- Replace a busy loop with a polling loop, fixing a stall that blocked
one CPU for ~300 ms roughly every 20 minutes

bridge:
- imx8mp-hdmi-pai: Use runtime PM to fix a bug in channel ordering"

* tag 'drm-fixes-2026-02-06' of https://gitlab.freedesktop.org/drm/kernel:
drm/xe/guc: Fix CFI violation in debugfs access.
drm/bridge: imx8mp-hdmi-pai: enable PM runtime
drm/xe/pm: Disable D3Cold for BMG only on specific platforms
drm/xe: Fix kerneldoc for xe_tlb_inval_job_alloc_dep
drm/xe: Fix kerneldoc for xe_gt_tlb_inval_init_early
drm/xe: Fix kerneldoc for xe_migrate_exec_queue
drm/xe/query: Fix topology query pointer advance
drm/mgag200: fix mgag200_bmc_stop_scanout()
nouveau/gsp: fix suspend/resume regression on r570 firmware
nouveau: add a third state to the fini handler.
nouveau/gsp: use rpc sequence numbers properly.
drm/amdgpu: Fix double deletion of validate_list
drm/amd/display: remove assert around dpp_base replacement
drm/amd/display: extend delta clamping logic to CM3 LUT helper
drm/amd/display: fix wrong color value mapping on MCM shaper LUT
Revert "drm/amd: Check if ASPM is enabled from PCIe subsystem"
drm/amd: Set minimum version for set_hw_resource_1 on gfx11 to 0x52
Revert "drm/gma500: use drm_crtc_vblank_crtc()"
Revert "drm/nouveau/disp: Set drm_mode_config_funcs.atomic_(check|commit)"

+290 -189
+9 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 1920 1921 /* Make sure restore workers don't access the BO any more */ 1922 mutex_lock(&process_info->lock); 1923 - list_del(&mem->validate_list); 1924 mutex_unlock(&process_info->lock); 1925 - 1926 - /* Cleanup user pages and MMU notifiers */ 1927 - if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { 1928 - amdgpu_hmm_unregister(mem->bo); 1929 - mutex_lock(&process_info->notifier_lock); 1930 - amdgpu_hmm_range_free(mem->range); 1931 - mutex_unlock(&process_info->notifier_lock); 1932 - } 1933 1934 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); 1935 if (unlikely(ret)) 1936 return ret; 1937 1938 amdgpu_amdkfd_remove_eviction_fence(mem->bo, 1939 process_info->eviction_fence);
··· 1920 1921 /* Make sure restore workers don't access the BO any more */ 1922 mutex_lock(&process_info->lock); 1923 + if (!list_empty(&mem->validate_list)) 1924 + list_del_init(&mem->validate_list); 1925 mutex_unlock(&process_info->lock); 1926 1927 ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); 1928 if (unlikely(ret)) 1929 return ret; 1930 + 1931 + /* Cleanup user pages and MMU notifiers */ 1932 + if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { 1933 + amdgpu_hmm_unregister(mem->bo); 1934 + amdgpu_hmm_range_free(mem->range); 1935 + mem->range = NULL; 1936 + } 1937 1938 amdgpu_amdkfd_remove_eviction_fence(mem->bo, 1939 process_info->eviction_fence);
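Why the list tweak above helps, in the idiom the fix adopts: list_del()
poisons the entry's link pointers, so a second removal crashes, while
list_del_init() re-initialises them, making a later list_empty() check
on the entry true and removal idempotent. A minimal sketch of that
idiom (detach_once() is a hypothetical name, not from the patch):

  #include <linux/list.h>

  /* Safe to call any number of times: after list_del_init(), */
  /* list_empty(entry) is true and the body is skipped.       */
  static void detach_once(struct list_head *entry)
  {
          if (!list_empty(entry))
                  list_del_init(entry);
  }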
-3
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2405 return -ENODEV; 2406 } 2407 2408 - if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev)) 2409 - amdgpu_aspm = 0; 2410 - 2411 if (amdgpu_virtual_display || 2412 amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK)) 2413 supports_atomic = true;
··· 2405 return -ENODEV; 2406 } 2407 2408 if (amdgpu_virtual_display || 2409 amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK)) 2410 supports_atomic = true;
+1 -1
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
··· 1671 if (r) 1672 goto failure; 1673 1674 - if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x50) { 1675 r = mes_v11_0_set_hw_resources_1(&adev->mes); 1676 if (r) { 1677 DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
··· 1671 if (r) 1672 goto failure; 1673 1674 + if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x52) { 1675 r = mes_v11_0_set_hw_resources_1(&adev->mes); 1676 if (r) { 1677 DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r);
+29 -8
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
··· 105 #define NUMBER_REGIONS 32 106 #define NUMBER_SW_SEGMENTS 16 107 108 - bool cm3_helper_translate_curve_to_hw_format( 109 - const struct dc_transfer_func *output_tf, 110 - struct pwl_params *lut_params, bool fixpoint) 111 { 112 struct curve_points3 *corner_points; 113 struct pwl_result_data *rgb_resulted; ··· 165 if (seg_distr[k] != -1) 166 hw_points += (1 << seg_distr[k]); 167 } 168 169 j = 0; 170 for (k = 0; k < (region_end - region_start); k++) { ··· 231 corner_points[1].green.slope = dc_fixpt_zero; 232 corner_points[1].blue.slope = dc_fixpt_zero; 233 234 - // DCN3+ have 257 pts in lieu of no separate slope registers 235 - // Prior HW had 256 base+slope pairs 236 lut_params->hw_points_num = hw_points + 1; 237 238 k = 0; ··· 254 if (fixpoint == true) { 255 i = 1; 256 while (i != hw_points + 2) { 257 if (i >= hw_points) { 258 if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) 259 rgb_plus_1->red = dc_fixpt_add(rgb->red, ··· 270 rgb_minus_1->delta_blue); 271 } 272 273 - rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red); 274 - rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green); 275 - rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue); 276 rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red); 277 rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green); 278 rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
··· 105 #define NUMBER_REGIONS 32 106 #define NUMBER_SW_SEGMENTS 16 107 108 + #define DC_LOGGER \ 109 + ctx->logger 110 + 111 + bool cm3_helper_translate_curve_to_hw_format(struct dc_context *ctx, 112 + const struct dc_transfer_func *output_tf, 113 + struct pwl_params *lut_params, bool fixpoint) 114 { 115 struct curve_points3 *corner_points; 116 struct pwl_result_data *rgb_resulted; ··· 162 if (seg_distr[k] != -1) 163 hw_points += (1 << seg_distr[k]); 164 } 165 + 166 + // DCN3+ have 257 pts in lieu of no separate slope registers 167 + // Prior HW had 256 base+slope pairs 168 + // Shaper LUT (i.e. fixpoint == true) is still 256 bases and 256 deltas 169 + hw_points = fixpoint ? (hw_points - 1) : hw_points; 170 171 j = 0; 172 for (k = 0; k < (region_end - region_start); k++) { ··· 223 corner_points[1].green.slope = dc_fixpt_zero; 224 corner_points[1].blue.slope = dc_fixpt_zero; 225 226 lut_params->hw_points_num = hw_points + 1; 227 228 k = 0; ··· 248 if (fixpoint == true) { 249 i = 1; 250 while (i != hw_points + 2) { 251 + uint32_t red_clamp; 252 + uint32_t green_clamp; 253 + uint32_t blue_clamp; 254 + 255 if (i >= hw_points) { 256 if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) 257 rgb_plus_1->red = dc_fixpt_add(rgb->red, ··· 260 rgb_minus_1->delta_blue); 261 } 262 263 + rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); 264 + rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); 265 + rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); 266 + 267 + red_clamp = dc_fixpt_clamp_u0d14(rgb->delta_red); 268 + green_clamp = dc_fixpt_clamp_u0d14(rgb->delta_green); 269 + blue_clamp = dc_fixpt_clamp_u0d14(rgb->delta_blue); 270 + 271 + if (red_clamp >> 10 || green_clamp >> 10 || blue_clamp >> 10) 272 + DC_LOG_ERROR("Losing delta precision while programming shaper LUT."); 273 + 274 + rgb->delta_red_reg = red_clamp & 0x3ff; 275 + rgb->delta_green_reg = green_clamp & 0x3ff; 276 + rgb->delta_blue_reg = blue_clamp & 0x3ff; 277 rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red); 278 rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green); 279 rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue);
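The shaper-LUT delta registers are only 10 bits wide, while
dc_fixpt_clamp_u0d14() returns a 14-bit value, so the new code warns
whenever the discarded high bits were non-zero. A worked example with
an invented value:

  u32 clamped = 0x2400;          /* example U0.14 clamp result    */
  bool lossy = clamped >> 10;    /* 0x9 != 0, so the error logs   */
  u32 reg = clamped & 0x3ff;     /* 0x000 is what gets programmed */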
+1 -1
drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h
··· 59 const struct pwl_params *params, 60 const struct dcn3_xfer_func_reg *reg); 61 62 - bool cm3_helper_translate_curve_to_hw_format( 63 const struct dc_transfer_func *output_tf, 64 struct pwl_params *lut_params, bool fixpoint); 65
··· 59 const struct pwl_params *params, 60 const struct dcn3_xfer_func_reg *reg); 61 62 + bool cm3_helper_translate_curve_to_hw_format(struct dc_context *ctx, 63 const struct dc_transfer_func *output_tf, 64 struct pwl_params *lut_params, bool fixpoint); 65
+5 -4
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
··· 239 if (plane_state->blend_tf.type == TF_TYPE_HWPWL) 240 blend_lut = &plane_state->blend_tf.pwl; 241 else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { 242 - result = cm3_helper_translate_curve_to_hw_format( 243 &plane_state->blend_tf, &dpp_base->regamma_params, false); 244 if (!result) 245 return result; ··· 334 if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL) 335 params = &plane_state->in_transfer_func.pwl; 336 else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && 337 - cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func, 338 - &dpp_base->degamma_params, false)) 339 params = &dpp_base->degamma_params; 340 341 result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); ··· 407 params = &stream->out_transfer_func.pwl; 408 else if (pipe_ctx->stream->out_transfer_func.type == 409 TF_TYPE_DISTRIBUTED_POINTS && 410 - cm3_helper_translate_curve_to_hw_format( 411 &stream->out_transfer_func, 412 &mpc->blender_params, false)) 413 params = &mpc->blender_params;
··· 239 if (plane_state->blend_tf.type == TF_TYPE_HWPWL) 240 blend_lut = &plane_state->blend_tf.pwl; 241 else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { 242 + result = cm3_helper_translate_curve_to_hw_format(plane_state->ctx, 243 &plane_state->blend_tf, &dpp_base->regamma_params, false); 244 if (!result) 245 return result; ··· 334 if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL) 335 params = &plane_state->in_transfer_func.pwl; 336 else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && 337 + cm3_helper_translate_curve_to_hw_format(plane_state->ctx, 338 + &plane_state->in_transfer_func, 339 + &dpp_base->degamma_params, false)) 340 params = &dpp_base->degamma_params; 341 342 result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); ··· 406 params = &stream->out_transfer_func.pwl; 407 else if (pipe_ctx->stream->out_transfer_func.type == 408 TF_TYPE_DISTRIBUTED_POINTS && 409 + cm3_helper_translate_curve_to_hw_format(stream->ctx, 410 &stream->out_transfer_func, 411 &mpc->blender_params, false)) 412 params = &mpc->blender_params;
+10 -8
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
··· 486 if (plane_state->blend_tf.type == TF_TYPE_HWPWL) 487 lut_params = &plane_state->blend_tf.pwl; 488 else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { 489 - result = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf, 490 - &dpp_base->regamma_params, false); 491 if (!result) 492 return result; 493 ··· 502 lut_params = &plane_state->in_shaper_func.pwl; 503 else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { 504 // TODO: dpp_base replace 505 - ASSERT(false); 506 - cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func, 507 - &dpp_base->shaper_params, true); 508 lut_params = &dpp_base->shaper_params; 509 } 510 ··· 544 if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL) 545 params = &plane_state->in_transfer_func.pwl; 546 else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && 547 - cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func, 548 - &dpp_base->degamma_params, false)) 549 params = &dpp_base->degamma_params; 550 551 dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); ··· 577 params = &stream->out_transfer_func.pwl; 578 else if (pipe_ctx->stream->out_transfer_func.type == 579 TF_TYPE_DISTRIBUTED_POINTS && 580 - cm3_helper_translate_curve_to_hw_format( 581 &stream->out_transfer_func, 582 &mpc->blender_params, false)) 583 params = &mpc->blender_params;
··· 486 if (plane_state->blend_tf.type == TF_TYPE_HWPWL) 487 lut_params = &plane_state->blend_tf.pwl; 488 else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { 489 + result = cm3_helper_translate_curve_to_hw_format(plane_state->ctx, 490 + &plane_state->blend_tf, 491 + &dpp_base->regamma_params, false); 492 if (!result) 493 return result; 494 ··· 501 lut_params = &plane_state->in_shaper_func.pwl; 502 else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { 503 // TODO: dpp_base replace 504 + cm3_helper_translate_curve_to_hw_format(plane_state->ctx, 505 + &plane_state->in_shaper_func, 506 + &dpp_base->shaper_params, true); 507 lut_params = &dpp_base->shaper_params; 508 } 509 ··· 543 if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL) 544 params = &plane_state->in_transfer_func.pwl; 545 else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && 546 + cm3_helper_translate_curve_to_hw_format(plane_state->ctx, 547 + &plane_state->in_transfer_func, 548 + &dpp_base->degamma_params, false)) 549 params = &dpp_base->degamma_params; 550 551 dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); ··· 575 params = &stream->out_transfer_func.pwl; 576 else if (pipe_ctx->stream->out_transfer_func.type == 577 TF_TYPE_DISTRIBUTED_POINTS && 578 + cm3_helper_translate_curve_to_hw_format(stream->ctx, 579 &stream->out_transfer_func, 580 &mpc->blender_params, false)) 581 params = &mpc->blender_params;
+9 -7
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
··· 430 if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL) 431 m_lut_params.pwl = &mcm_luts.lut1d_func->pwl; 432 else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) { 433 - rval = cm3_helper_translate_curve_to_hw_format( 434 mcm_luts.lut1d_func, 435 &dpp_base->regamma_params, false); 436 m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; ··· 450 m_lut_params.pwl = &mcm_luts.shaper->pwl; 451 else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { 452 ASSERT(false); 453 - rval = cm3_helper_translate_curve_to_hw_format( 454 mcm_luts.shaper, 455 &dpp_base->regamma_params, true); 456 m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; ··· 627 if (plane_state->blend_tf.type == TF_TYPE_HWPWL) 628 lut_params = &plane_state->blend_tf.pwl; 629 else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { 630 - rval = cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf, 631 - &dpp_base->regamma_params, false); 632 lut_params = rval ? &dpp_base->regamma_params : NULL; 633 } 634 result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); ··· 640 lut_params = &plane_state->in_shaper_func.pwl; 641 else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { 642 // TODO: dpp_base replace 643 - rval = cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func, 644 - &dpp_base->shaper_params, true); 645 lut_params = rval ? &dpp_base->shaper_params : NULL; 646 } 647 result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); ··· 676 params = &stream->out_transfer_func.pwl; 677 else if (pipe_ctx->stream->out_transfer_func.type == 678 TF_TYPE_DISTRIBUTED_POINTS && 679 - cm3_helper_translate_curve_to_hw_format( 680 &stream->out_transfer_func, 681 &mpc->blender_params, false)) 682 params = &mpc->blender_params;
··· 430 if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL) 431 m_lut_params.pwl = &mcm_luts.lut1d_func->pwl; 432 else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) { 433 + rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx, 434 mcm_luts.lut1d_func, 435 &dpp_base->regamma_params, false); 436 m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; ··· 450 m_lut_params.pwl = &mcm_luts.shaper->pwl; 451 else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { 452 ASSERT(false); 453 + rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx, 454 mcm_luts.shaper, 455 &dpp_base->regamma_params, true); 456 m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL; ··· 627 if (plane_state->blend_tf.type == TF_TYPE_HWPWL) 628 lut_params = &plane_state->blend_tf.pwl; 629 else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { 630 + rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx, 631 + &plane_state->blend_tf, 632 + &dpp_base->regamma_params, false); 633 lut_params = rval ? &dpp_base->regamma_params : NULL; 634 } 635 result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); ··· 639 lut_params = &plane_state->in_shaper_func.pwl; 640 else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { 641 // TODO: dpp_base replace 642 + rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx, 643 + &plane_state->in_shaper_func, 644 + &dpp_base->shaper_params, true); 645 lut_params = rval ? &dpp_base->shaper_params : NULL; 646 } 647 result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); ··· 674 params = &stream->out_transfer_func.pwl; 675 else if (pipe_ctx->stream->out_transfer_func.type == 676 TF_TYPE_DISTRIBUTED_POINTS && 677 + cm3_helper_translate_curve_to_hw_format(stream->ctx, 678 &stream->out_transfer_func, 679 &mpc->blender_params, false)) 680 params = &mpc->blender_params;
+15
drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c
··· 8 #include <linux/module.h> 9 #include <linux/of_platform.h> 10 #include <linux/platform_device.h> 11 #include <linux/regmap.h> 12 #include <drm/bridge/dw_hdmi.h> 13 #include <sound/asoundef.h> ··· 34 35 struct imx8mp_hdmi_pai { 36 struct regmap *regmap; 37 }; 38 39 static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel, ··· 44 const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi); 45 struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio; 46 int val; 47 48 /* PAI set control extended */ 49 val = WTMK_HIGH(3) | WTMK_LOW(3); ··· 90 91 /* Stop PAI */ 92 regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0); 93 } 94 95 static const struct regmap_config imx8mp_hdmi_pai_regmap_config = { ··· 108 struct imx8mp_hdmi_pai *hdmi_pai; 109 struct resource *res; 110 void __iomem *base; 111 112 hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL); 113 if (!hdmi_pai) ··· 128 plat_data->enable_audio = imx8mp_hdmi_pai_enable; 129 plat_data->disable_audio = imx8mp_hdmi_pai_disable; 130 plat_data->priv_audio = hdmi_pai; 131 132 return 0; 133 }
··· 8 #include <linux/module.h> 9 #include <linux/of_platform.h> 10 #include <linux/platform_device.h> 11 + #include <linux/pm_runtime.h> 12 #include <linux/regmap.h> 13 #include <drm/bridge/dw_hdmi.h> 14 #include <sound/asoundef.h> ··· 33 34 struct imx8mp_hdmi_pai { 35 struct regmap *regmap; 36 + struct device *dev; 37 }; 38 39 static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel, ··· 42 const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi); 43 struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio; 44 int val; 45 + 46 + if (pm_runtime_resume_and_get(hdmi_pai->dev) < 0) 47 + return; 48 49 /* PAI set control extended */ 50 val = WTMK_HIGH(3) | WTMK_LOW(3); ··· 85 86 /* Stop PAI */ 87 regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0); 88 + 89 + pm_runtime_put_sync(hdmi_pai->dev); 90 } 91 92 static const struct regmap_config imx8mp_hdmi_pai_regmap_config = { ··· 101 struct imx8mp_hdmi_pai *hdmi_pai; 102 struct resource *res; 103 void __iomem *base; 104 + int ret; 105 106 hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL); 107 if (!hdmi_pai) ··· 120 plat_data->enable_audio = imx8mp_hdmi_pai_enable; 121 plat_data->disable_audio = imx8mp_hdmi_pai_disable; 122 plat_data->priv_audio = hdmi_pai; 123 + 124 + hdmi_pai->dev = dev; 125 + ret = devm_pm_runtime_enable(dev); 126 + if (ret < 0) { 127 + dev_err(dev, "failed to enable PM runtime: %d\n", ret); 128 + return ret; 129 + } 130 131 return 0; 132 }
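The fix is the usual runtime-PM bracket around register access: take a
reference (resuming the power domain) before touching the block, drop
it when done; devm_pm_runtime_enable() in probe arms runtime PM and
automatically disables it again on unbind. A minimal sketch under
those assumptions (names illustrative, error handling trimmed):

  #include <linux/pm_runtime.h>

  static void pai_start(struct imx8mp_hdmi_pai *pai)
  {
          /* Power up first; unpowered registers are inaccessible. */
          if (pm_runtime_resume_and_get(pai->dev) < 0)
                  return;
          /* ... program the HTX_PAI_* registers ... */
  }

  static void pai_stop(struct imx8mp_hdmi_pai *pai)
  {
          /* ... stop the block ... */
          pm_runtime_put_sync(pai->dev); /* balances the get above */
  }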
+13 -23
drivers/gpu/drm/gma500/psb_irq.c
··· 250 void gma_irq_preinstall(struct drm_device *dev) 251 { 252 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 253 - struct drm_crtc *crtc; 254 unsigned long irqflags; 255 256 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); ··· 260 PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE); 261 PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); 262 263 - drm_for_each_crtc(crtc, dev) { 264 - struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); 265 - 266 - if (vblank->enabled) { 267 - u32 mask = drm_crtc_index(crtc) ? _PSB_VSYNC_PIPEB_FLAG : 268 - _PSB_VSYNC_PIPEA_FLAG; 269 - dev_priv->vdc_irq_mask |= mask; 270 - } 271 - } 272 273 /* Revisit this area - want per device masks ? */ 274 if (dev_priv->ops->hotplug) ··· 278 void gma_irq_postinstall(struct drm_device *dev) 279 { 280 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 281 - struct drm_crtc *crtc; 282 unsigned long irqflags; 283 284 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 285 ··· 292 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); 293 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 294 295 - drm_for_each_crtc(crtc, dev) { 296 - struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); 297 - 298 - if (vblank->enabled) 299 - gma_enable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE); 300 else 301 - gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE); 302 } 303 304 if (dev_priv->ops->hotplug_enable) ··· 337 { 338 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 339 struct pci_dev *pdev = to_pci_dev(dev->dev); 340 - struct drm_crtc *crtc; 341 unsigned long irqflags; 342 343 if (!dev_priv->irq_enabled) 344 return; ··· 350 351 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 352 353 - drm_for_each_crtc(crtc, dev) { 354 - struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); 355 - 356 - if (vblank->enabled) 357 - gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE); 358 } 359 360 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
··· 250 void gma_irq_preinstall(struct drm_device *dev) 251 { 252 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 253 unsigned long irqflags; 254 255 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); ··· 261 PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE); 262 PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); 263 264 + if (dev->vblank[0].enabled) 265 + dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG; 266 + if (dev->vblank[1].enabled) 267 + dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG; 268 269 /* Revisit this area - want per device masks ? */ 270 if (dev_priv->ops->hotplug) ··· 284 void gma_irq_postinstall(struct drm_device *dev) 285 { 286 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 287 unsigned long irqflags; 288 + unsigned int i; 289 290 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); 291 ··· 298 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); 299 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 300 301 + for (i = 0; i < dev->num_crtcs; ++i) { 302 + if (dev->vblank[i].enabled) 303 + gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); 304 else 305 + gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); 306 } 307 308 if (dev_priv->ops->hotplug_enable) ··· 345 { 346 struct drm_psb_private *dev_priv = to_drm_psb_private(dev); 347 struct pci_dev *pdev = to_pci_dev(dev->dev); 348 unsigned long irqflags; 349 + unsigned int i; 350 351 if (!dev_priv->irq_enabled) 352 return; ··· 358 359 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); 360 361 + for (i = 0; i < dev->num_crtcs; ++i) { 362 + if (dev->vblank[i].enabled) 363 + gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); 364 } 365 366 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+12 -19
drivers/gpu/drm/mgag200/mgag200_bmc.c
··· 1 // SPDX-License-Identifier: GPL-2.0-only 2 3 #include <linux/delay.h> 4 5 #include <drm/drm_atomic_helper.h> 6 #include <drm/drm_edid.h> ··· 13 void mgag200_bmc_stop_scanout(struct mga_device *mdev) 14 { 15 u8 tmp; 16 - int iter_max; 17 18 /* 19 * 1 - The first step is to inform the BMC of an upcoming mode ··· 43 44 /* 45 * 3a- The third step is to verify if there is an active scan. 46 - * We are waiting for a 0 on remhsyncsts <XSPAREREG<0>). 47 */ 48 - iter_max = 300; 49 - while (!(tmp & 0x1) && iter_max) { 50 - WREG8(DAC_INDEX, MGA1064_SPAREREG); 51 - tmp = RREG8(DAC_DATA); 52 - udelay(1000); 53 - iter_max--; 54 - } 55 56 /* 57 - * 3b- This step occurs only if the remove is actually 58 * scanning. We are waiting for the end of the frame which is 59 * a 1 on remvsyncsts (XSPAREREG<1>) 60 */ 61 - if (iter_max) { 62 - iter_max = 300; 63 - while ((tmp & 0x2) && iter_max) { 64 - WREG8(DAC_INDEX, MGA1064_SPAREREG); 65 - tmp = RREG8(DAC_DATA); 66 - udelay(1000); 67 - iter_max--; 68 - } 69 - } 70 } 71 72 void mgag200_bmc_start_scanout(struct mga_device *mdev)
··· 1 // SPDX-License-Identifier: GPL-2.0-only 2 3 #include <linux/delay.h> 4 + #include <linux/iopoll.h> 5 6 #include <drm/drm_atomic_helper.h> 7 #include <drm/drm_edid.h> ··· 12 void mgag200_bmc_stop_scanout(struct mga_device *mdev) 13 { 14 u8 tmp; 15 + int ret; 16 17 /* 18 * 1 - The first step is to inform the BMC of an upcoming mode ··· 42 43 /* 44 * 3a- The third step is to verify if there is an active scan. 45 + * We are waiting for a 0 on remhsyncsts (XSPAREREG<0>). 46 */ 47 + ret = read_poll_timeout(RREG_DAC, tmp, !(tmp & 0x1), 48 + 1000, 300000, false, 49 + MGA1064_SPAREREG); 50 + if (ret == -ETIMEDOUT) 51 + return; 52 53 /* 54 + * 3b- This step occurs only if the remote BMC is actually 55 * scanning. We are waiting for the end of the frame which is 56 * a 1 on remvsyncsts (XSPAREREG<1>) 57 */ 58 + (void)read_poll_timeout(RREG_DAC, tmp, (tmp & 0x2), 59 + 1000, 300000, false, 60 + MGA1064_SPAREREG); 61 } 62 63 void mgag200_bmc_start_scanout(struct mga_device *mdev)
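read_poll_timeout(op, val, cond, sleep_us, timeout_us,
sleep_before_read, args...) from <linux/iopoll.h> repeatedly does
val = op(args) until cond holds or timeout_us microseconds elapse,
sleeping between reads instead of spinning, and returns 0 on success
or -ETIMEDOUT. Roughly, the first call above behaves like this
hand-rolled loop (a simplified sketch, not the macro's exact
expansion):

  ktime_t deadline = ktime_add_us(ktime_get(), 300000);

  for (;;) {
          tmp = RREG_DAC(MGA1064_SPAREREG);
          if (!(tmp & 0x1))
                  break;                   /* condition met, ret = 0 */
          if (ktime_after(ktime_get(), deadline)) {
                  ret = -ETIMEDOUT;        /* gave up after ~300 ms  */
                  break;
          }
          usleep_range(500, 1000);         /* sleep, don't busy-wait */
  }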
+6
drivers/gpu/drm/mgag200/mgag200_drv.h
··· 111 #define DAC_INDEX 0x3c00 112 #define DAC_DATA 0x3c0a 113 114 #define WREG_DAC(reg, v) \ 115 do { \ 116 WREG8(DAC_INDEX, reg); \
··· 111 #define DAC_INDEX 0x3c00 112 #define DAC_DATA 0x3c0a 113 114 + #define RREG_DAC(reg) \ 115 + ({ \ 116 + WREG8(DAC_INDEX, reg); \ 117 + RREG8(DAC_DATA); \ 118 + }) \ 119 + 120 #define WREG_DAC(reg, v) \ 121 do { \ 122 WREG8(DAC_INDEX, reg); \
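RREG_DAC relies on a GNU statement expression, ({ ... }), which
evaluates to the value of its last statement; that is what lets this
two-step index/data access be handed to read_poll_timeout() as a
value-returning "op". Illustrative equivalence:

  u8 v = RREG_DAC(MGA1064_SPAREREG);
  /* expands to roughly: */
  u8 w = ({ WREG8(DAC_INDEX, MGA1064_SPAREREG); RREG8(DAC_DATA); });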
+1 -1
drivers/gpu/drm/nouveau/include/nvif/client.h
··· 11 12 int nvif_client_ctor(struct nvif_client *parent, const char *name, struct nvif_client *); 13 void nvif_client_dtor(struct nvif_client *); 14 - int nvif_client_suspend(struct nvif_client *); 15 int nvif_client_resume(struct nvif_client *); 16 17 /*XXX*/
··· 11 12 int nvif_client_ctor(struct nvif_client *parent, const char *name, struct nvif_client *); 13 void nvif_client_dtor(struct nvif_client *); 14 + int nvif_client_suspend(struct nvif_client *, bool); 15 int nvif_client_resume(struct nvif_client *); 16 17 /*XXX*/
+1 -1
drivers/gpu/drm/nouveau/include/nvif/driver.h
··· 8 const char *name; 9 int (*init)(const char *name, u64 device, const char *cfg, 10 const char *dbg, void **priv); 11 - int (*suspend)(void *priv); 12 int (*resume)(void *priv); 13 int (*ioctl)(void *priv, void *data, u32 size, void **hack); 14 void __iomem *(*map)(void *priv, u64 handle, u32 size);
··· 8 const char *name; 9 int (*init)(const char *name, u64 device, const char *cfg, 10 const char *dbg, void **priv); 11 + int (*suspend)(void *priv, bool runtime); 12 int (*resume)(void *priv); 13 int (*ioctl)(void *priv, void *data, u32 size, void **hack); 14 void __iomem *(*map)(void *priv, u64 handle, u32 size);
+2 -1
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
··· 2 #ifndef __NVKM_DEVICE_H__ 3 #define __NVKM_DEVICE_H__ 4 #include <core/oclass.h> 5 #include <core/intr.h> 6 enum nvkm_subdev_type; 7 ··· 94 void *(*dtor)(struct nvkm_device *); 95 int (*preinit)(struct nvkm_device *); 96 int (*init)(struct nvkm_device *); 97 - void (*fini)(struct nvkm_device *, bool suspend); 98 int (*irq)(struct nvkm_device *); 99 resource_size_t (*resource_addr)(struct nvkm_device *, enum nvkm_bar_id); 100 resource_size_t (*resource_size)(struct nvkm_device *, enum nvkm_bar_id);
··· 2 #ifndef __NVKM_DEVICE_H__ 3 #define __NVKM_DEVICE_H__ 4 #include <core/oclass.h> 5 + #include <core/suspend_state.h> 6 #include <core/intr.h> 7 enum nvkm_subdev_type; 8 ··· 93 void *(*dtor)(struct nvkm_device *); 94 int (*preinit)(struct nvkm_device *); 95 int (*init)(struct nvkm_device *); 96 + void (*fini)(struct nvkm_device *, enum nvkm_suspend_state suspend); 97 int (*irq)(struct nvkm_device *); 98 resource_size_t (*resource_addr)(struct nvkm_device *, enum nvkm_bar_id); 99 resource_size_t (*resource_size)(struct nvkm_device *, enum nvkm_bar_id);
+1 -1
drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
··· 20 int (*oneinit)(struct nvkm_engine *); 21 int (*info)(struct nvkm_engine *, u64 mthd, u64 *data); 22 int (*init)(struct nvkm_engine *); 23 - int (*fini)(struct nvkm_engine *, bool suspend); 24 int (*reset)(struct nvkm_engine *); 25 int (*nonstall)(struct nvkm_engine *); 26 void (*intr)(struct nvkm_engine *);
··· 20 int (*oneinit)(struct nvkm_engine *); 21 int (*info)(struct nvkm_engine *, u64 mthd, u64 *data); 22 int (*init)(struct nvkm_engine *); 23 + int (*fini)(struct nvkm_engine *, enum nvkm_suspend_state suspend); 24 int (*reset)(struct nvkm_engine *); 25 int (*nonstall)(struct nvkm_engine *); 26 void (*intr)(struct nvkm_engine *);
+3 -2
drivers/gpu/drm/nouveau/include/nvkm/core/object.h
··· 2 #ifndef __NVKM_OBJECT_H__ 3 #define __NVKM_OBJECT_H__ 4 #include <core/oclass.h> 5 struct nvkm_event; 6 struct nvkm_gpuobj; 7 struct nvkm_uevent; ··· 28 struct nvkm_object_func { 29 void *(*dtor)(struct nvkm_object *); 30 int (*init)(struct nvkm_object *); 31 - int (*fini)(struct nvkm_object *, bool suspend); 32 int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size); 33 int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **); 34 int (*map)(struct nvkm_object *, void *argv, u32 argc, ··· 50 void nvkm_object_del(struct nvkm_object **); 51 void *nvkm_object_dtor(struct nvkm_object *); 52 int nvkm_object_init(struct nvkm_object *); 53 - int nvkm_object_fini(struct nvkm_object *, bool suspend); 54 int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size); 55 int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **); 56 int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc,
··· 2 #ifndef __NVKM_OBJECT_H__ 3 #define __NVKM_OBJECT_H__ 4 #include <core/oclass.h> 5 + #include <core/suspend_state.h> 6 struct nvkm_event; 7 struct nvkm_gpuobj; 8 struct nvkm_uevent; ··· 27 struct nvkm_object_func { 28 void *(*dtor)(struct nvkm_object *); 29 int (*init)(struct nvkm_object *); 30 + int (*fini)(struct nvkm_object *, enum nvkm_suspend_state suspend); 31 int (*mthd)(struct nvkm_object *, u32 mthd, void *data, u32 size); 32 int (*ntfy)(struct nvkm_object *, u32 mthd, struct nvkm_event **); 33 int (*map)(struct nvkm_object *, void *argv, u32 argc, ··· 49 void nvkm_object_del(struct nvkm_object **); 50 void *nvkm_object_dtor(struct nvkm_object *); 51 int nvkm_object_init(struct nvkm_object *); 52 + int nvkm_object_fini(struct nvkm_object *, enum nvkm_suspend_state); 53 int nvkm_object_mthd(struct nvkm_object *, u32 mthd, void *data, u32 size); 54 int nvkm_object_ntfy(struct nvkm_object *, u32 mthd, struct nvkm_event **); 55 int nvkm_object_map(struct nvkm_object *, void *argv, u32 argc,
+1 -1
drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
··· 13 struct nvkm_oproxy_func { 14 void (*dtor[2])(struct nvkm_oproxy *); 15 int (*init[2])(struct nvkm_oproxy *); 16 - int (*fini[2])(struct nvkm_oproxy *, bool suspend); 17 }; 18 19 void nvkm_oproxy_ctor(const struct nvkm_oproxy_func *,
··· 13 struct nvkm_oproxy_func { 14 void (*dtor[2])(struct nvkm_oproxy *); 15 int (*init[2])(struct nvkm_oproxy *); 16 + int (*fini[2])(struct nvkm_oproxy *, enum nvkm_suspend_state suspend); 17 }; 18 19 void nvkm_oproxy_ctor(const struct nvkm_oproxy_func *,
+2 -2
drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
··· 40 int (*oneinit)(struct nvkm_subdev *); 41 int (*info)(struct nvkm_subdev *, u64 mthd, u64 *data); 42 int (*init)(struct nvkm_subdev *); 43 - int (*fini)(struct nvkm_subdev *, bool suspend); 44 void (*intr)(struct nvkm_subdev *); 45 }; 46 ··· 65 int nvkm_subdev_preinit(struct nvkm_subdev *); 66 int nvkm_subdev_oneinit(struct nvkm_subdev *); 67 int nvkm_subdev_init(struct nvkm_subdev *); 68 - int nvkm_subdev_fini(struct nvkm_subdev *, bool suspend); 69 int nvkm_subdev_info(struct nvkm_subdev *, u64, u64 *); 70 void nvkm_subdev_intr(struct nvkm_subdev *); 71
··· 40 int (*oneinit)(struct nvkm_subdev *); 41 int (*info)(struct nvkm_subdev *, u64 mthd, u64 *data); 42 int (*init)(struct nvkm_subdev *); 43 + int (*fini)(struct nvkm_subdev *, enum nvkm_suspend_state suspend); 44 void (*intr)(struct nvkm_subdev *); 45 }; 46 ··· 65 int nvkm_subdev_preinit(struct nvkm_subdev *); 66 int nvkm_subdev_oneinit(struct nvkm_subdev *); 67 int nvkm_subdev_init(struct nvkm_subdev *); 68 + int nvkm_subdev_fini(struct nvkm_subdev *, enum nvkm_suspend_state suspend); 69 int nvkm_subdev_info(struct nvkm_subdev *, u64, u64 *); 70 void nvkm_subdev_intr(struct nvkm_subdev *); 71
+11
drivers/gpu/drm/nouveau/include/nvkm/core/suspend_state.h
···
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + #ifndef __NVKM_SUSPEND_STATE_H__ 3 + #define __NVKM_SUSPEND_STATE_H__ 4 + 5 + enum nvkm_suspend_state { 6 + NVKM_POWEROFF, 7 + NVKM_SUSPEND, 8 + NVKM_RUNTIME_SUSPEND, 9 + }; 10 + 11 + #endif
+6
drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
··· 44 * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the 45 * caller after the GSP RPC command is issued. 46 * 47 * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP 48 * RPC message after the GSP RPC command is issued. 49 * ··· 56 */ 57 enum nvkm_gsp_rpc_reply_policy { 58 NVKM_GSP_RPC_REPLY_NOWAIT = 0, 59 NVKM_GSP_RPC_REPLY_RECV, 60 NVKM_GSP_RPC_REPLY_POLL, 61 }; ··· 245 246 /* The size of the registry RPC */ 247 size_t registry_rpc_size; 248 249 #ifdef CONFIG_DEBUG_FS 250 /*
··· 44 * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the 45 * caller after the GSP RPC command is issued. 46 * 47 + * NVKM_GSP_RPC_REPLY_NOSEQ - If specified, exactly like NOWAIT 48 + * but don't emit RPC sequence number. 49 + * 50 * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP 51 * RPC message after the GSP RPC command is issued. 52 * ··· 53 */ 54 enum nvkm_gsp_rpc_reply_policy { 55 NVKM_GSP_RPC_REPLY_NOWAIT = 0, 56 + NVKM_GSP_RPC_REPLY_NOSEQ, 57 NVKM_GSP_RPC_REPLY_RECV, 58 NVKM_GSP_RPC_REPLY_POLL, 59 }; ··· 241 242 /* The size of the registry RPC */ 243 size_t registry_rpc_size; 244 + 245 + u32 rpc_seq; 246 247 #ifdef CONFIG_DEBUG_FS 248 /*
-2
drivers/gpu/drm/nouveau/nouveau_display.c
··· 352 353 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { 354 .fb_create = nouveau_user_framebuffer_create, 355 - .atomic_commit = drm_atomic_helper_commit, 356 - .atomic_check = drm_atomic_helper_check, 357 }; 358 359
··· 352 353 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { 354 .fb_create = nouveau_user_framebuffer_create, 355 }; 356 357
+1 -1
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 983 } 984 985 NV_DEBUG(drm, "suspending object tree...\n"); 986 - ret = nvif_client_suspend(&drm->_client); 987 if (ret) 988 goto fail_client; 989
··· 983 } 984 985 NV_DEBUG(drm, "suspending object tree...\n"); 986 + ret = nvif_client_suspend(&drm->_client, runtime); 987 if (ret) 988 goto fail_client; 989
+8 -2
drivers/gpu/drm/nouveau/nouveau_nvif.c
··· 62 } 63 64 static int 65 - nvkm_client_suspend(void *priv) 66 { 67 struct nvkm_client *client = priv; 68 - return nvkm_object_fini(&client->object, true); 69 } 70 71 static int
··· 62 } 63 64 static int 65 + nvkm_client_suspend(void *priv, bool runtime) 66 { 67 struct nvkm_client *client = priv; 68 + enum nvkm_suspend_state state; 69 + 70 + if (runtime) 71 + state = NVKM_RUNTIME_SUSPEND; 72 + else 73 + state = NVKM_SUSPEND; 74 + return nvkm_object_fini(&client->object, state); 75 } 76 77 static int
+2 -2
drivers/gpu/drm/nouveau/nvif/client.c
··· 30 #include <nvif/if0000.h> 31 32 int 33 - nvif_client_suspend(struct nvif_client *client) 34 { 35 - return client->driver->suspend(client->object.priv); 36 } 37 38 int
··· 30 #include <nvif/if0000.h> 31 32 int 33 + nvif_client_suspend(struct nvif_client *client, bool runtime) 34 { 35 + return client->driver->suspend(client->object.priv, runtime); 36 } 37 38 int
+2 -2
drivers/gpu/drm/nouveau/nvkm/core/engine.c
··· 41 if (engine->func->reset) 42 return engine->func->reset(engine); 43 44 - nvkm_subdev_fini(&engine->subdev, false); 45 return nvkm_subdev_init(&engine->subdev); 46 } 47 ··· 98 } 99 100 static int 101 - nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend) 102 { 103 struct nvkm_engine *engine = nvkm_engine(subdev); 104 if (engine->func->fini)
··· 41 if (engine->func->reset) 42 return engine->func->reset(engine); 43 44 + nvkm_subdev_fini(&engine->subdev, NVKM_POWEROFF); 45 return nvkm_subdev_init(&engine->subdev); 46 } 47 ··· 98 } 99 100 static int 101 + nvkm_engine_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 102 { 103 struct nvkm_engine *engine = nvkm_engine(subdev); 104 if (engine->func->fini)
+2 -2
drivers/gpu/drm/nouveau/nvkm/core/ioctl.c
··· 141 } 142 ret = -EEXIST; 143 } 144 - nvkm_object_fini(object, false); 145 } 146 147 nvkm_object_del(&object); ··· 160 nvif_ioctl(object, "delete size %d\n", size); 161 if (!(ret = nvif_unvers(ret, &data, &size, args->none))) { 162 nvif_ioctl(object, "delete\n"); 163 - nvkm_object_fini(object, false); 164 nvkm_object_del(&object); 165 } 166
··· 141 } 142 ret = -EEXIST; 143 } 144 + nvkm_object_fini(object, NVKM_POWEROFF); 145 } 146 147 nvkm_object_del(&object); ··· 160 nvif_ioctl(object, "delete size %d\n", size); 161 if (!(ret = nvif_unvers(ret, &data, &size, args->none))) { 162 nvif_ioctl(object, "delete\n"); 163 + nvkm_object_fini(object, NVKM_POWEROFF); 164 nvkm_object_del(&object); 165 } 166
+16 -4
drivers/gpu/drm/nouveau/nvkm/core/object.c
··· 142 } 143 144 int 145 - nvkm_object_fini(struct nvkm_object *object, bool suspend) 146 { 147 - const char *action = suspend ? "suspend" : "fini"; 148 struct nvkm_object *child; 149 s64 time; 150 int ret; 151 152 nvif_debug(object, "%s children...\n", action); 153 time = ktime_to_us(ktime_get()); 154 list_for_each_entry_reverse(child, &object->tree, head) { ··· 224 225 fail_child: 226 list_for_each_entry_continue_reverse(child, &object->tree, head) 227 - nvkm_object_fini(child, false); 228 fail: 229 nvif_error(object, "init failed with %d\n", ret); 230 if (object->func->fini) 231 - object->func->fini(object, false); 232 return ret; 233 } 234
··· 142 } 143 144 int 145 + nvkm_object_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 146 { 147 + const char *action; 148 struct nvkm_object *child; 149 s64 time; 150 int ret; 151 152 + switch (suspend) { 153 + case NVKM_POWEROFF: 154 + default: 155 + action = "fini"; 156 + break; 157 + case NVKM_SUSPEND: 158 + action = "suspend"; 159 + break; 160 + case NVKM_RUNTIME_SUSPEND: 161 + action = "runtime"; 162 + break; 163 + } 164 nvif_debug(object, "%s children...\n", action); 165 time = ktime_to_us(ktime_get()); 166 list_for_each_entry_reverse(child, &object->tree, head) { ··· 212 213 fail_child: 214 list_for_each_entry_continue_reverse(child, &object->tree, head) 215 + nvkm_object_fini(child, NVKM_POWEROFF); 216 fail: 217 nvif_error(object, "init failed with %d\n", ret); 218 if (object->func->fini) 219 + object->func->fini(object, NVKM_POWEROFF); 220 return ret; 221 } 222
+1 -1
drivers/gpu/drm/nouveau/nvkm/core/oproxy.c
··· 87 } 88 89 static int 90 - nvkm_oproxy_fini(struct nvkm_object *object, bool suspend) 91 { 92 struct nvkm_oproxy *oproxy = nvkm_oproxy(object); 93 int ret;
··· 87 } 88 89 static int 90 + nvkm_oproxy_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 91 { 92 struct nvkm_oproxy *oproxy = nvkm_oproxy(object); 93 int ret;
+15 -3
drivers/gpu/drm/nouveau/nvkm/core/subdev.c
··· 51 } 52 53 int 54 - nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) 55 { 56 struct nvkm_device *device = subdev->device; 57 - const char *action = suspend ? "suspend" : subdev->use.enabled ? "fini" : "reset"; 58 s64 time; 59 60 nvkm_trace(subdev, "%s running...\n", action); 61 time = ktime_to_us(ktime_get()); 62 ··· 198 nvkm_subdev_unref(struct nvkm_subdev *subdev) 199 { 200 if (refcount_dec_and_mutex_lock(&subdev->use.refcount, &subdev->use.mutex)) { 201 - nvkm_subdev_fini(subdev, false); 202 mutex_unlock(&subdev->use.mutex); 203 } 204 }
··· 51 } 52 53 int 54 + nvkm_subdev_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 55 { 56 struct nvkm_device *device = subdev->device; 57 + const char *action; 58 s64 time; 59 60 + switch (suspend) { 61 + case NVKM_POWEROFF: 62 + default: 63 + action = subdev->use.enabled ? "fini" : "reset"; 64 + break; 65 + case NVKM_SUSPEND: 66 + action = "suspend"; 67 + break; 68 + case NVKM_RUNTIME_SUSPEND: 69 + action = "runtime"; 70 + break; 71 + } 72 nvkm_trace(subdev, "%s running...\n", action); 73 time = ktime_to_us(ktime_get()); 74 ··· 186 nvkm_subdev_unref(struct nvkm_subdev *subdev) 187 { 188 if (refcount_dec_and_mutex_lock(&subdev->use.refcount, &subdev->use.mutex)) { 189 + nvkm_subdev_fini(subdev, NVKM_POWEROFF); 190 mutex_unlock(&subdev->use.mutex); 191 } 192 }
+1 -1
drivers/gpu/drm/nouveau/nvkm/core/uevent.c
··· 73 } 74 75 static int 76 - nvkm_uevent_fini(struct nvkm_object *object, bool suspend) 77 { 78 struct nvkm_uevent *uevent = nvkm_uevent(object); 79
··· 73 } 74 75 static int 76 + nvkm_uevent_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 77 { 78 struct nvkm_uevent *uevent = nvkm_uevent(object); 79
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
··· 46 } 47 48 int 49 - ga100_ce_fini(struct nvkm_engine *engine, bool suspend) 50 { 51 nvkm_inth_block(&engine->subdev.inth); 52 return 0;
··· 46 } 47 48 int 49 + ga100_ce_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) 50 { 51 nvkm_inth_block(&engine->subdev.inth); 52 return 0;
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
··· 14 15 int ga100_ce_oneinit(struct nvkm_engine *); 16 int ga100_ce_init(struct nvkm_engine *); 17 - int ga100_ce_fini(struct nvkm_engine *, bool); 18 int ga100_ce_nonstall(struct nvkm_engine *); 19 20 u32 gb202_ce_grce_mask(struct nvkm_device *);
··· 14 15 int ga100_ce_oneinit(struct nvkm_engine *); 16 int ga100_ce_init(struct nvkm_engine *); 17 + int ga100_ce_fini(struct nvkm_engine *, enum nvkm_suspend_state); 18 int ga100_ce_nonstall(struct nvkm_engine *); 19 20 u32 gb202_ce_grce_mask(struct nvkm_device *);
+17 -5
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
··· 2936 } 2937 2938 int 2939 - nvkm_device_fini(struct nvkm_device *device, bool suspend) 2940 { 2941 - const char *action = suspend ? "suspend" : "fini"; 2942 struct nvkm_subdev *subdev; 2943 int ret; 2944 s64 time; 2945 2946 nvdev_trace(device, "%s running...\n", action); 2947 time = ktime_to_us(ktime_get()); 2948 ··· 3044 if (ret) 3045 return ret; 3046 3047 - nvkm_device_fini(device, false); 3048 3049 nvdev_trace(device, "init running...\n"); 3050 time = ktime_to_us(ktime_get()); ··· 3072 3073 fail_subdev: 3074 list_for_each_entry_from(subdev, &device->subdev, head) 3075 - nvkm_subdev_fini(subdev, false); 3076 fail: 3077 - nvkm_device_fini(device, false); 3078 3079 nvdev_error(device, "init failed with %d\n", ret); 3080 return ret;
··· 2936 } 2937 2938 int 2939 + nvkm_device_fini(struct nvkm_device *device, enum nvkm_suspend_state suspend) 2940 { 2941 + const char *action; 2942 struct nvkm_subdev *subdev; 2943 int ret; 2944 s64 time; 2945 2946 + switch (suspend) { 2947 + case NVKM_POWEROFF: 2948 + default: 2949 + action = "fini"; 2950 + break; 2951 + case NVKM_SUSPEND: 2952 + action = "suspend"; 2953 + break; 2954 + case NVKM_RUNTIME_SUSPEND: 2955 + action = "runtime"; 2956 + break; 2957 + } 2958 nvdev_trace(device, "%s running...\n", action); 2959 time = ktime_to_us(ktime_get()); 2960 ··· 3032 if (ret) 3033 return ret; 3034 3035 + nvkm_device_fini(device, NVKM_POWEROFF); 3036 3037 nvdev_trace(device, "init running...\n"); 3038 time = ktime_to_us(ktime_get()); ··· 3060 3061 fail_subdev: 3062 list_for_each_entry_from(subdev, &device->subdev, head) 3063 + nvkm_subdev_fini(subdev, NVKM_POWEROFF); 3064 fail: 3065 + nvkm_device_fini(device, NVKM_POWEROFF); 3066 3067 nvdev_error(device, "init failed with %d\n", ret); 3068 return ret;
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
··· 1605 } 1606 1607 static void 1608 - nvkm_device_pci_fini(struct nvkm_device *device, bool suspend) 1609 { 1610 struct nvkm_device_pci *pdev = nvkm_device_pci(device); 1611 - if (suspend) { 1612 pci_disable_device(pdev->pdev); 1613 pdev->suspend = true; 1614 }
··· 1605 } 1606 1607 static void 1608 + nvkm_device_pci_fini(struct nvkm_device *device, enum nvkm_suspend_state suspend) 1609 { 1610 struct nvkm_device_pci *pdev = nvkm_device_pci(device); 1611 + if (suspend != NVKM_POWEROFF) { 1612 pci_disable_device(pdev->pdev); 1613 pdev->suspend = true; 1614 }
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
··· 56 const char *name, const char *cfg, const char *dbg, 57 struct nvkm_device *); 58 int nvkm_device_init(struct nvkm_device *); 59 - int nvkm_device_fini(struct nvkm_device *, bool suspend); 60 #endif
··· 56 const char *name, const char *cfg, const char *dbg, 57 struct nvkm_device *); 58 int nvkm_device_init(struct nvkm_device *); 59 + int nvkm_device_fini(struct nvkm_device *, enum nvkm_suspend_state suspend); 60 #endif
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
··· 218 } 219 220 static int 221 - nvkm_udevice_fini(struct nvkm_object *object, bool suspend) 222 { 223 struct nvkm_udevice *udev = nvkm_udevice(object); 224 struct nvkm_device *device = udev->device;
··· 218 } 219 220 static int 221 + nvkm_udevice_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 222 { 223 struct nvkm_udevice *udev = nvkm_udevice(object); 224 struct nvkm_device *device = udev->device;
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
··· 99 } 100 101 static int 102 - nvkm_disp_fini(struct nvkm_engine *engine, bool suspend) 103 { 104 struct nvkm_disp *disp = nvkm_disp(engine); 105 struct nvkm_outp *outp; 106 107 if (disp->func->fini) 108 - disp->func->fini(disp, suspend); 109 110 list_for_each_entry(outp, &disp->outps, head) { 111 if (outp->func->fini)
··· 99 } 100 101 static int 102 + nvkm_disp_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) 103 { 104 struct nvkm_disp *disp = nvkm_disp(engine); 105 struct nvkm_outp *outp; 106 107 if (disp->func->fini) 108 + disp->func->fini(disp, suspend != NVKM_POWEROFF); 109 110 list_for_each_entry(outp, &disp->outps, head) { 111 if (outp->func->fini)
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.c
··· 128 } 129 130 static int 131 - nvkm_disp_chan_fini(struct nvkm_object *object, bool suspend) 132 { 133 struct nvkm_disp_chan *chan = nvkm_disp_chan(object); 134
··· 128 } 129 130 static int 131 + nvkm_disp_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 132 { 133 struct nvkm_disp_chan *chan = nvkm_disp_chan(object); 134
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
··· 93 } 94 95 static int 96 - nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend) 97 { 98 struct nvkm_falcon *falcon = nvkm_falcon(engine); 99 struct nvkm_device *device = falcon->engine.subdev.device; 100 const u32 base = falcon->addr; 101 102 - if (!suspend) { 103 nvkm_memory_unref(&falcon->core); 104 if (falcon->external) { 105 vfree(falcon->data.data);
··· 93 } 94 95 static int 96 + nvkm_falcon_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) 97 { 98 struct nvkm_falcon *falcon = nvkm_falcon(engine); 99 struct nvkm_device *device = falcon->engine.subdev.device; 100 const u32 base = falcon->addr; 101 102 + if (suspend == NVKM_POWEROFF) { 103 nvkm_memory_unref(&falcon->core); 104 if (falcon->external) { 105 vfree(falcon->data.data);
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
··· 122 } 123 124 static int 125 - nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend) 126 { 127 struct nvkm_fifo *fifo = nvkm_fifo(engine); 128 struct nvkm_runl *runl;
··· 122 } 123 124 static int 125 + nvkm_fifo_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) 126 { 127 struct nvkm_fifo *fifo = nvkm_fifo(engine); 128 struct nvkm_runl *runl;
+3 -3
drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
··· 72 }; 73 74 static int 75 - nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, bool suspend) 76 { 77 struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy); 78 struct nvkm_chan *chan = uobj->chan; ··· 87 nvkm_chan_cctx_bind(chan, ectx->engn, NULL); 88 89 if (refcount_dec_and_test(&ectx->uses)) 90 - nvkm_object_fini(ectx->object, false); 91 mutex_unlock(&chan->cgrp->mutex); 92 } 93 ··· 269 } 270 271 static int 272 - nvkm_uchan_fini(struct nvkm_object *object, bool suspend) 273 { 274 struct nvkm_chan *chan = nvkm_uchan(object)->chan; 275
··· 72 }; 73 74 static int 75 + nvkm_uchan_object_fini_1(struct nvkm_oproxy *oproxy, enum nvkm_suspend_state suspend) 76 { 77 struct nvkm_uobj *uobj = container_of(oproxy, typeof(*uobj), oproxy); 78 struct nvkm_chan *chan = uobj->chan; ··· 87 nvkm_chan_cctx_bind(chan, ectx->engn, NULL); 88 89 if (refcount_dec_and_test(&ectx->uses)) 90 + nvkm_object_fini(ectx->object, NVKM_POWEROFF); 91 mutex_unlock(&chan->cgrp->mutex); 92 } 93 ··· 269 } 270 271 static int 272 + nvkm_uchan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 273 { 274 struct nvkm_chan *chan = nvkm_uchan(object)->chan; 275
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
··· 168 } 169 170 static int 171 - nvkm_gr_fini(struct nvkm_engine *engine, bool suspend) 172 { 173 struct nvkm_gr *gr = nvkm_gr(engine); 174 if (gr->func->fini) 175 - return gr->func->fini(gr, suspend); 176 return 0; 177 } 178
··· 168 } 169 170 static int 171 + nvkm_gr_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) 172 { 173 struct nvkm_gr *gr = nvkm_gr(engine); 174 if (gr->func->fini) 175 + return gr->func->fini(gr, suspend != NVKM_POWEROFF); 176 return 0; 177 } 178
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
··· 2330 2331 WARN_ON(gf100_gr_fecs_halt_pipeline(gr)); 2332 2333 - subdev->func->fini(subdev, false); 2334 nvkm_mc_disable(device, subdev->type, subdev->inst); 2335 if (gr->func->gpccs.reset) 2336 gr->func->gpccs.reset(gr);
··· 2330 2331 WARN_ON(gf100_gr_fecs_halt_pipeline(gr)); 2332 2333 + subdev->func->fini(subdev, NVKM_POWEROFF); 2334 nvkm_mc_disable(device, subdev->type, subdev->inst); 2335 if (gr->func->gpccs.reset) 2336 gr->func->gpccs.reset(gr);
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
··· 1158 } 1159 1160 static int 1161 - nv04_gr_chan_fini(struct nvkm_object *object, bool suspend) 1162 { 1163 struct nv04_gr_chan *chan = nv04_gr_chan(object); 1164 struct nv04_gr *gr = chan->gr;
··· 1158 } 1159 1160 static int 1161 + nv04_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 1162 { 1163 struct nv04_gr_chan *chan = nv04_gr_chan(object); 1164 struct nv04_gr *gr = chan->gr;
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
··· 951 } 952 953 static int 954 - nv10_gr_chan_fini(struct nvkm_object *object, bool suspend) 955 { 956 struct nv10_gr_chan *chan = nv10_gr_chan(object); 957 struct nv10_gr *gr = chan->gr;
··· 951 } 952 953 static int 954 + nv10_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 955 { 956 struct nv10_gr_chan *chan = nv10_gr_chan(object); 957 struct nv10_gr *gr = chan->gr;
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
··· 27 } 28 29 int 30 - nv20_gr_chan_fini(struct nvkm_object *object, bool suspend) 31 { 32 struct nv20_gr_chan *chan = nv20_gr_chan(object); 33 struct nv20_gr *gr = chan->gr;
··· 27 } 28 29 int 30 + nv20_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 31 { 32 struct nv20_gr_chan *chan = nv20_gr_chan(object); 33 struct nv20_gr *gr = chan->gr;
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
··· 31 32 void *nv20_gr_chan_dtor(struct nvkm_object *); 33 int nv20_gr_chan_init(struct nvkm_object *); 34 - int nv20_gr_chan_fini(struct nvkm_object *, bool); 35 #endif
··· 31 32 void *nv20_gr_chan_dtor(struct nvkm_object *); 33 int nv20_gr_chan_init(struct nvkm_object *); 34 + int nv20_gr_chan_fini(struct nvkm_object *, enum nvkm_suspend_state); 35 #endif
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
··· 89 } 90 91 static int 92 - nv40_gr_chan_fini(struct nvkm_object *object, bool suspend) 93 { 94 struct nv40_gr_chan *chan = nv40_gr_chan(object); 95 struct nv40_gr *gr = chan->gr; ··· 101 nvkm_mask(device, 0x400720, 0x00000001, 0x00000000); 102 103 if (nvkm_rd32(device, 0x40032c) == inst) { 104 - if (suspend) { 105 nvkm_wr32(device, 0x400720, 0x00000000); 106 nvkm_wr32(device, 0x400784, inst); 107 nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
··· 89 } 90 91 static int 92 + nv40_gr_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 93 { 94 struct nv40_gr_chan *chan = nv40_gr_chan(object); 95 struct nv40_gr *gr = chan->gr; ··· 101 nvkm_mask(device, 0x400720, 0x00000001, 0x00000000); 102 103 if (nvkm_rd32(device, 0x40032c) == inst) { 104 + if (suspend != NVKM_POWEROFF) { 105 nvkm_wr32(device, 0x400720, 0x00000000); 106 nvkm_wr32(device, 0x400784, inst); 107 nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
··· 65 } 66 67 static int 68 - nv44_mpeg_chan_fini(struct nvkm_object *object, bool suspend) 69 { 70 71 struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
··· 65 } 66 67 static int 68 + nv44_mpeg_chan_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 69 { 70 71 struct nv44_mpeg_chan *chan = nv44_mpeg_chan(object);
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
··· 37 } 38 39 static int 40 - nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend) 41 { 42 struct nvkm_sec2 *sec2 = nvkm_sec2(engine); 43 struct nvkm_subdev *subdev = &sec2->engine.subdev;
··· 37 } 38 39 static int 40 + nvkm_sec2_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) 41 { 42 struct nvkm_sec2 *sec2 = nvkm_sec2(engine); 43 struct nvkm_subdev *subdev = &sec2->engine.subdev;
+2 -2
drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
··· 76 } 77 78 static int 79 - nvkm_xtensa_fini(struct nvkm_engine *engine, bool suspend) 80 { 81 struct nvkm_xtensa *xtensa = nvkm_xtensa(engine); 82 struct nvkm_device *device = xtensa->engine.subdev.device; ··· 85 nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */ 86 nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */ 87 88 - if (!suspend) 89 nvkm_memory_unref(&xtensa->gpu_fw); 90 return 0; 91 }
··· 76 } 77 78 static int 79 + nvkm_xtensa_fini(struct nvkm_engine *engine, enum nvkm_suspend_state suspend) 80 { 81 struct nvkm_xtensa *xtensa = nvkm_xtensa(engine); 82 struct nvkm_device *device = xtensa->engine.subdev.device; ··· 85 nvkm_wr32(device, base + 0xd84, 0); /* INTR_EN */ 86 nvkm_wr32(device, base + 0xd94, 0); /* FIFO_CTRL */ 87 88 + if (suspend == NVKM_POWEROFF) 89 nvkm_memory_unref(&xtensa->gpu_fw); 90 return 0; 91 }
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
··· 182 } 183 184 static int 185 - nvkm_acr_fini(struct nvkm_subdev *subdev, bool suspend) 186 { 187 if (!subdev->use.enabled) 188 return 0;
··· 182 } 183 184 static int 185 + nvkm_acr_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 186 { 187 if (!subdev->use.enabled) 188 return 0;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
··· 90 } 91 92 static int 93 - nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend) 94 { 95 struct nvkm_bar *bar = nvkm_bar(subdev); 96
··· 90 } 91 92 static int 93 + nvkm_bar_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 94 { 95 struct nvkm_bar *bar = nvkm_bar(subdev); 96
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
··· 577 } 578 579 static int 580 - nvkm_clk_fini(struct nvkm_subdev *subdev, bool suspend) 581 { 582 struct nvkm_clk *clk = nvkm_clk(subdev); 583 flush_work(&clk->work);
··· 577 } 578 579 static int 580 + nvkm_clk_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 581 { 582 struct nvkm_clk *clk = nvkm_clk(subdev); 583 flush_work(&clk->work);
+2 -2
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
··· 67 } 68 69 static int 70 - nvkm_devinit_fini(struct nvkm_subdev *subdev, bool suspend) 71 { 72 struct nvkm_devinit *init = nvkm_devinit(subdev); 73 /* force full reinit on resume */ 74 - if (suspend) 75 init->post = true; 76 return 0; 77 }
··· 67 } 68 69 static int 70 + nvkm_devinit_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 71 { 72 struct nvkm_devinit *init = nvkm_devinit(subdev); 73 /* force full reinit on resume */ 74 + if (suspend != NVKM_POWEROFF) 75 init->post = true; 76 return 0; 77 }
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
··· 51 } 52 53 static int 54 - nvkm_fault_fini(struct nvkm_subdev *subdev, bool suspend) 55 { 56 struct nvkm_fault *fault = nvkm_fault(subdev); 57 if (fault->func->fini)
··· 51 } 52 53 static int 54 + nvkm_fault_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 55 { 56 struct nvkm_fault *fault = nvkm_fault(subdev); 57 if (fault->func->fini)
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c
··· 56 } 57 58 static int 59 - nvkm_ufault_fini(struct nvkm_object *object, bool suspend) 60 { 61 struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object); 62 buffer->fault->func->buffer.fini(buffer);
··· 56 } 57 58 static int 59 + nvkm_ufault_fini(struct nvkm_object *object, enum nvkm_suspend_state suspend) 60 { 61 struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object); 62 buffer->fault->func->buffer.fini(buffer);
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
··· 144 } 145 146 static int 147 - nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend) 148 { 149 struct nvkm_gpio *gpio = nvkm_gpio(subdev); 150 u32 mask = (1ULL << gpio->func->lines) - 1;
··· 144 } 145 146 static int 147 + nvkm_gpio_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 148 { 149 struct nvkm_gpio *gpio = nvkm_gpio(subdev); 150 u32 mask = (1ULL << gpio->func->lines) - 1;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
··· 48 } 49 50 static int 51 - nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend) 52 { 53 struct nvkm_gsp *gsp = nvkm_gsp(subdev); 54
··· 48 } 49 50 static int 51 + nvkm_gsp_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 52 { 53 struct nvkm_gsp *gsp = nvkm_gsp(subdev); 54
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
··· 17 #include <nvhw/ref/gh100/dev_riscv_pri.h> 18 19 int 20 - gh100_gsp_fini(struct nvkm_gsp *gsp, bool suspend) 21 { 22 struct nvkm_falcon *falcon = &gsp->falcon; 23 int ret, time = 4000;
··· 17 #include <nvhw/ref/gh100/dev_riscv_pri.h> 18 19 int 20 + gh100_gsp_fini(struct nvkm_gsp *gsp, enum nvkm_suspend_state suspend) 21 { 22 struct nvkm_falcon *falcon = &gsp->falcon; 23 int ret, time = 4000;
+4 -4
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
··· 59 void (*dtor)(struct nvkm_gsp *); 60 int (*oneinit)(struct nvkm_gsp *); 61 int (*init)(struct nvkm_gsp *); 62 - int (*fini)(struct nvkm_gsp *, bool suspend); 63 int (*reset)(struct nvkm_gsp *); 64 65 struct { ··· 75 void tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *); 76 int tu102_gsp_oneinit(struct nvkm_gsp *); 77 int tu102_gsp_init(struct nvkm_gsp *); 78 - int tu102_gsp_fini(struct nvkm_gsp *, bool suspend); 79 int tu102_gsp_reset(struct nvkm_gsp *); 80 u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *); 81 ··· 87 88 int gh100_gsp_oneinit(struct nvkm_gsp *); 89 int gh100_gsp_init(struct nvkm_gsp *); 90 - int gh100_gsp_fini(struct nvkm_gsp *, bool suspend); 91 92 void r535_gsp_dtor(struct nvkm_gsp *); 93 int r535_gsp_oneinit(struct nvkm_gsp *); 94 int r535_gsp_init(struct nvkm_gsp *); 95 - int r535_gsp_fini(struct nvkm_gsp *, bool suspend); 96 97 int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, 98 struct nvkm_gsp **);
··· 59 void (*dtor)(struct nvkm_gsp *); 60 int (*oneinit)(struct nvkm_gsp *); 61 int (*init)(struct nvkm_gsp *); 62 + int (*fini)(struct nvkm_gsp *, enum nvkm_suspend_state suspend); 63 int (*reset)(struct nvkm_gsp *); 64 65 struct { ··· 75 void tu102_gsp_fwsec_sb_dtor(struct nvkm_gsp *); 76 int tu102_gsp_oneinit(struct nvkm_gsp *); 77 int tu102_gsp_init(struct nvkm_gsp *); 78 + int tu102_gsp_fini(struct nvkm_gsp *, enum nvkm_suspend_state suspend); 79 int tu102_gsp_reset(struct nvkm_gsp *); 80 u64 tu102_gsp_wpr_heap_size(struct nvkm_gsp *); 81 ··· 87 88 int gh100_gsp_oneinit(struct nvkm_gsp *); 89 int gh100_gsp_init(struct nvkm_gsp *); 90 + int gh100_gsp_fini(struct nvkm_gsp *, enum nvkm_suspend_state suspend); 91 92 void r535_gsp_dtor(struct nvkm_gsp *); 93 int r535_gsp_oneinit(struct nvkm_gsp *); 94 int r535_gsp_init(struct nvkm_gsp *); 95 + int r535_gsp_fini(struct nvkm_gsp *, enum nvkm_suspend_state suspend); 96 97 int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int, 98 struct nvkm_gsp **);
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/fbsr.c
··· 208 } 209 210 static int 211 - r535_fbsr_suspend(struct nvkm_gsp *gsp) 212 { 213 struct nvkm_subdev *subdev = &gsp->subdev; 214 struct nvkm_device *device = subdev->device;
··· 208 } 209 210 static int 211 + r535_fbsr_suspend(struct nvkm_gsp *gsp, bool runtime) 212 { 213 struct nvkm_subdev *subdev = &gsp->subdev; 214 struct nvkm_device *device = subdev->device;
+4 -4
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
··· 704 705 build_registry(gsp, rpc); 706 707 - return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT); 708 709 fail: 710 clean_registry(gsp); ··· 921 info->pciConfigMirrorSize = device->pci->func->cfg.size; 922 r535_gsp_acpi_info(gsp, &info->acpiMethodData); 923 924 - return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); 925 } 926 927 static int ··· 1721 } 1722 1723 int 1724 - r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) 1725 { 1726 struct nvkm_rm *rm = gsp->rm; 1727 int ret; ··· 1748 sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr; 1749 sr->sizeOfSuspendResumeData = len; 1750 1751 - ret = rm->api->fbsr->suspend(gsp); 1752 if (ret) { 1753 nvkm_gsp_mem_dtor(&gsp->sr.meta); 1754 nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
··· 704 705 build_registry(gsp, rpc); 706 707 + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOSEQ); 708 709 fail: 710 clean_registry(gsp); ··· 921 info->pciConfigMirrorSize = device->pci->func->cfg.size; 922 r535_gsp_acpi_info(gsp, &info->acpiMethodData); 923 924 + return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOSEQ); 925 } 926 927 static int ··· 1721 } 1722 1723 int 1724 + r535_gsp_fini(struct nvkm_gsp *gsp, enum nvkm_suspend_state suspend) 1725 { 1726 struct nvkm_rm *rm = gsp->rm; 1727 int ret; ··· 1748 sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr; 1749 sr->sizeOfSuspendResumeData = len; 1750 1751 + ret = rm->api->fbsr->suspend(gsp, suspend == NVKM_RUNTIME_SUSPEND); 1752 if (ret) { 1753 nvkm_gsp_mem_dtor(&gsp->sr.meta); 1754 nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
+6
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c
··· 557 558 switch (policy) { 559 case NVKM_GSP_RPC_REPLY_NOWAIT: 560 break; 561 case NVKM_GSP_RPC_REPLY_RECV: 562 reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len); ··· 588 print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1, 589 rpc->data, rpc->length - sizeof(*rpc), true); 590 } 591 592 ret = r535_gsp_cmdq_push(gsp, rpc); 593 if (ret)
··· 557 558 switch (policy) { 559 case NVKM_GSP_RPC_REPLY_NOWAIT: 560 + case NVKM_GSP_RPC_REPLY_NOSEQ: 561 break; 562 case NVKM_GSP_RPC_REPLY_RECV: 563 reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len); ··· 587 print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1, 588 rpc->data, rpc->length - sizeof(*rpc), true); 589 } 590 + 591 + if (policy == NVKM_GSP_RPC_REPLY_NOSEQ) 592 + rpc->sequence = 0; 593 + else 594 + rpc->sequence = gsp->rpc_seq++; 595 596 ret = r535_gsp_cmdq_push(gsp, rpc); 597 if (ret)
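The intent behind the new NVKM_GSP_RPC_REPLY_NOSEQ policy appears to be that r570 firmware checks RPC sequence numbers, while a few RPCs (set-registry and set-system-info in the gsp.c hunk above, plus the r570 system-info RPC below) have to go out with sequence 0. The reply-policy enum is not shown in the diff; this is a sketch assembled from the three members the switch handles -- the ordering and values are assumptions.

    /* Inferred from the switch statement above; only the member names
     * appear in this series, order and values are guesses.
     */
    enum nvkm_gsp_rpc_reply_policy {
            NVKM_GSP_RPC_REPLY_NOWAIT,  /* post and return, sequenced */
            NVKM_GSP_RPC_REPLY_RECV,    /* post and wait for the reply */
            NVKM_GSP_RPC_REPLY_NOSEQ,   /* post with sequence forced to 0 */
    };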
+4 -4
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/fbsr.c
··· 62 } 63 64 static int 65 - r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size) 66 { 67 NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl; 68 struct nvkm_gsp_object memlist; ··· 81 ctrl->hClient = gsp->internal.client.object.handle; 82 ctrl->hSysMem = memlist.handle; 83 ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr; 84 - ctrl->bEnteringGcoffState = 1; 85 86 ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); 87 if (ret) ··· 92 } 93 94 static int 95 - r570_fbsr_suspend(struct nvkm_gsp *gsp) 96 { 97 struct nvkm_subdev *subdev = &gsp->subdev; 98 struct nvkm_device *device = subdev->device; ··· 133 return ret; 134 135 /* Initialise FBSR on RM. */ 136 - ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size); 137 if (ret) { 138 nvkm_gsp_sg_free(device, &gsp->sr.fbsr); 139 return ret;
··· 62 } 63 64 static int 65 + r570_fbsr_init(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, bool runtime) 66 { 67 NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl; 68 struct nvkm_gsp_object memlist; ··· 81 ctrl->hClient = gsp->internal.client.object.handle; 82 ctrl->hSysMem = memlist.handle; 83 ctrl->sysmemAddrOfSuspendResumeData = gsp->sr.meta.addr; 84 + ctrl->bEnteringGcoffState = runtime ? 1 : 0; 85 86 ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl); 87 if (ret) ··· 92 } 93 94 static int 95 + r570_fbsr_suspend(struct nvkm_gsp *gsp, bool runtime) 96 { 97 struct nvkm_subdev *subdev = &gsp->subdev; 98 struct nvkm_device *device = subdev->device; ··· 133 return ret; 134 135 /* Initialise FBSR on RM. */ 136 + ret = r570_fbsr_init(gsp, &gsp->sr.fbsr, size, runtime); 137 if (ret) { 138 nvkm_gsp_sg_free(device, &gsp->sr.fbsr); 139 return ret;
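Taken together with the r535_gsp_fini() hunk above, this extra parameter is the "extra bit passed down the nouveau abstractions" from the merge summary: only a runtime suspend now reports bEnteringGcoffState=1 to RM, where the old code claimed GC-OFF entry unconditionally and broke full suspend/resume on r570 firmware. The plumbing, as a simplified summary of the hunks rather than new code:

    /* Call chain as modified by this series (simplified):
     *
     *   r535_gsp_fini(gsp, suspend)
     *     rm->api->fbsr->suspend(gsp, suspend == NVKM_RUNTIME_SUSPEND)
     *       r570_fbsr_suspend(gsp, runtime)
     *         r570_fbsr_init(gsp, &gsp->sr.fbsr, size, runtime)
     *           ctrl->bEnteringGcoffState = runtime ? 1 : 0;
     */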
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r570/gsp.c
··· 176 info->bIsPrimary = video_is_primary_device(device->dev); 177 info->bPreserveVideoMemoryAllocations = false; 178 179 - return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); 180 } 181 182 static void
··· 176 info->bIsPrimary = video_is_primary_device(device->dev); 177 info->bPreserveVideoMemoryAllocations = false; 178 179 + return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOSEQ); 180 } 181 182 static void
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/rm.h
··· 78 } *device; 79 80 const struct nvkm_rm_api_fbsr { 81 - int (*suspend)(struct nvkm_gsp *); 82 void (*resume)(struct nvkm_gsp *); 83 } *fbsr; 84
··· 78 } *device; 79 80 const struct nvkm_rm_api_fbsr { 81 + int (*suspend)(struct nvkm_gsp *, bool runtime); 82 void (*resume)(struct nvkm_gsp *); 83 } *fbsr; 84
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
··· 161 } 162 163 int 164 - tu102_gsp_fini(struct nvkm_gsp *gsp, bool suspend) 165 { 166 u32 mbox0 = 0xff, mbox1 = 0xff; 167 int ret;
··· 161 } 162 163 int 164 + tu102_gsp_fini(struct nvkm_gsp *gsp, enum nvkm_suspend_state suspend) 165 { 166 u32 mbox0 = 0xff, mbox1 = 0xff; 167 int ret;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
··· 135 } 136 137 static int 138 - nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend) 139 { 140 struct nvkm_i2c *i2c = nvkm_i2c(subdev); 141 struct nvkm_i2c_pad *pad;
··· 135 } 136 137 static int 138 + nvkm_i2c_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 139 { 140 struct nvkm_i2c *i2c = nvkm_i2c(subdev); 141 struct nvkm_i2c_pad *pad;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
··· 176 } 177 178 static int 179 - nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend) 180 { 181 struct nvkm_instmem *imem = nvkm_instmem(subdev); 182 int ret;
··· 176 } 177 178 static int 179 + nvkm_instmem_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 180 { 181 struct nvkm_instmem *imem = nvkm_instmem(subdev); 182 int ret;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
··· 74 } 75 76 static int 77 - nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend) 78 { 79 struct nvkm_pci *pci = nvkm_pci(subdev); 80
··· 74 } 75 76 static int 77 + nvkm_pci_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 78 { 79 struct nvkm_pci *pci = nvkm_pci(subdev); 80
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
··· 77 } 78 79 static int 80 - nvkm_pmu_fini(struct nvkm_subdev *subdev, bool suspend) 81 { 82 struct nvkm_pmu *pmu = nvkm_pmu(subdev); 83
··· 77 } 78 79 static int 80 + nvkm_pmu_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 81 { 82 struct nvkm_pmu *pmu = nvkm_pmu(subdev); 83
+3 -3
drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
··· 341 } 342 343 static int 344 - nvkm_therm_fini(struct nvkm_subdev *subdev, bool suspend) 345 { 346 struct nvkm_therm *therm = nvkm_therm(subdev); 347 348 if (therm->func->fini) 349 therm->func->fini(therm); 350 351 - nvkm_therm_fan_fini(therm, suspend); 352 - nvkm_therm_sensor_fini(therm, suspend); 353 354 if (suspend) { 355 therm->suspend = therm->mode;
··· 341 } 342 343 static int 344 + nvkm_therm_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 345 { 346 struct nvkm_therm *therm = nvkm_therm(subdev); 347 348 if (therm->func->fini) 349 therm->func->fini(therm); 350 351 + nvkm_therm_fan_fini(therm, suspend != NVKM_POWEROFF); 352 + nvkm_therm_sensor_fini(therm, suspend != NVKM_POWEROFF); 353 354 if (suspend) { 355 therm->suspend = therm->mode;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
··· 149 } 150 151 static int 152 - nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend) 153 { 154 struct nvkm_timer *tmr = nvkm_timer(subdev); 155 tmr->func->alarm_fini(tmr);
··· 149 } 150 151 static int 152 + nvkm_timer_fini(struct nvkm_subdev *subdev, enum nvkm_suspend_state suspend) 153 { 154 struct nvkm_timer *tmr = nvkm_timer(subdev); 155 tmr->func->alarm_fini(tmr);
+4 -2
drivers/gpu/drm/xe/xe_guc.c
··· 1618 return xe_guc_submit_start(guc); 1619 } 1620 1621 - void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) 1622 { 1623 struct xe_gt *gt = guc_to_gt(guc); 1624 unsigned int fw_ref; ··· 1630 if (!IS_SRIOV_VF(gt_to_xe(gt))) { 1631 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); 1632 if (!fw_ref) 1633 - return; 1634 1635 status = xe_mmio_read32(&gt->mmio, GUC_STATUS); 1636 ··· 1658 1659 drm_puts(p, "\n"); 1660 xe_guc_submit_print(guc, p); 1661 } 1662 1663 /**
··· 1618 return xe_guc_submit_start(guc); 1619 } 1620 1621 + int xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) 1622 { 1623 struct xe_gt *gt = guc_to_gt(guc); 1624 unsigned int fw_ref; ··· 1630 if (!IS_SRIOV_VF(gt_to_xe(gt))) { 1631 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); 1632 if (!fw_ref) 1633 + return -EIO; 1634 1635 status = xe_mmio_read32(&gt->mmio, GUC_STATUS); 1636 ··· 1658 1659 drm_puts(p, "\n"); 1660 xe_guc_submit_print(guc, p); 1661 + 1662 + return 0; 1663 } 1664 1665 /**
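The CFI angle: the debugfs plumbing invokes these print routines through a function pointer whose type returns int, and with kernel CFI (kCFI) an indirect call through a pointer whose prototype disagrees with the callee's traps at runtime. Changing the callee to actually return int makes the types agree. A minimal standalone illustration of the mismatch follows -- the names here are made up, not the xe debugfs code.

    struct xe_guc;
    struct drm_printer;

    /* The pointer type the debugfs layer calls through. */
    typedef int (*guc_print_fn)(struct xe_guc *guc, struct drm_printer *p);

    static int show(struct xe_guc *guc, struct drm_printer *p,
                    guc_print_fn print)
    {
            /* kCFI verifies at this indirect call that 'print' really
             * points at an int (*)(struct xe_guc *, struct drm_printer *);
             * a void-returning callee has a different type hash and
             * would trap here.
             */
            return print(guc, p);
    }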
+1 -1
drivers/gpu/drm/xe/xe_guc.h
··· 45 int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val); 46 void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir); 47 void xe_guc_sanitize(struct xe_guc *guc); 48 - void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p); 49 int xe_guc_reset_prepare(struct xe_guc *guc); 50 void xe_guc_reset_wait(struct xe_guc *guc); 51 void xe_guc_stop_prepare(struct xe_guc *guc);
··· 45 int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val); 46 void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir); 47 void xe_guc_sanitize(struct xe_guc *guc); 48 + int xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p); 49 int xe_guc_reset_prepare(struct xe_guc *guc); 50 void xe_guc_reset_wait(struct xe_guc *guc); 51 void xe_guc_stop_prepare(struct xe_guc *guc);
+1 -1
drivers/gpu/drm/xe/xe_migrate.c
··· 1201 } 1202 1203 /** 1204 - * xe_get_migrate_exec_queue() - Get the execution queue from migrate context. 1205 * @migrate: Migrate context. 1206 * 1207 * Return: Pointer to execution queue on success, error on failure
··· 1201 } 1202 1203 /** 1204 + * xe_migrate_exec_queue() - Get the execution queue from migrate context. 1205 * @migrate: Migrate context. 1206 * 1207 * Return: Pointer to execution queue on success, error on failure
+10 -3
drivers/gpu/drm/xe/xe_pm.c
··· 8 #include <linux/fault-inject.h> 9 #include <linux/pm_runtime.h> 10 #include <linux/suspend.h> 11 12 #include <drm/drm_managed.h> 13 #include <drm/ttm/ttm_placement.h> ··· 358 359 static u32 vram_threshold_value(struct xe_device *xe) 360 { 361 - /* FIXME: D3Cold temporarily disabled by default on BMG */ 362 - if (xe->info.platform == XE_BATTLEMAGE) 363 - return 0; 364 365 return DEFAULT_VRAM_THRESHOLD; 366 }
··· 8 #include <linux/fault-inject.h> 9 #include <linux/pm_runtime.h> 10 #include <linux/suspend.h> 11 + #include <linux/dmi.h> 12 13 #include <drm/drm_managed.h> 14 #include <drm/ttm/ttm_placement.h> ··· 357 358 static u32 vram_threshold_value(struct xe_device *xe) 359 { 360 + if (xe->info.platform == XE_BATTLEMAGE) { 361 + const char *product_name; 362 + 363 + product_name = dmi_get_system_info(DMI_PRODUCT_NAME); 364 + if (product_name && strstr(product_name, "NUC13RNG")) { 365 + drm_warn(&xe->drm, "BMG + D3Cold not supported on this platform\n"); 366 + return 0; 367 + } 368 + } 369 370 return DEFAULT_VRAM_THRESHOLD; 371 }
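The quirk keys off the DMI product name via dmi_get_system_info() plus strstr(). If the denylist ever grows past one machine, the stock kernel idiom is a dmi_system_id table checked once with dmi_check_system(); a sketch of the equivalent is below. This is an alternative, not what the patch does -- note DMI_MATCH() already performs substring matching, so it covers the same strings as the strstr() above.

    #include <linux/dmi.h>

    static const struct dmi_system_id bmg_d3cold_denylist[] = {
            {
                    .matches = {
                            DMI_MATCH(DMI_PRODUCT_NAME, "NUC13RNG"),
                    },
            },
            { } /* terminator */
    };

    /* ... then in vram_threshold_value():
     * if (xe->info.platform == XE_BATTLEMAGE &&
     *     dmi_check_system(bmg_d3cold_denylist))
     *         return 0;
     */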
+1 -1
drivers/gpu/drm/xe/xe_query.c
··· 491 492 if (copy_to_user(*ptr, topo, sizeof(*topo))) 493 return -EFAULT; 494 - *ptr += sizeof(topo); 495 496 if (copy_to_user(*ptr, mask, mask_size)) 497 return -EFAULT;
··· 491 492 if (copy_to_user(*ptr, topo, sizeof(*topo))) 493 return -EFAULT; 494 + *ptr += sizeof(*topo); 495 496 if (copy_to_user(*ptr, mask, mask_size)) 497 return -EFAULT;
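The one-character topology fix is the classic sizeof-pointer slip: topo is a pointer, so sizeof(topo) advances the user pointer by the pointer width (8 bytes on 64-bit kernels) instead of by the size of the struct just copied, leaving the mask that follows at the wrong offset in the user buffer. A self-contained illustration with a hypothetical struct (the real one is drm_xe_query_topology_mask in the xe uAPI):

    #include <stdio.h>

    /* Stand-in for the real uAPI struct. */
    struct topo_hdr {
            unsigned int gt_id;
            unsigned int type;
            unsigned int num_bytes;
    };

    int main(void)
    {
            struct topo_hdr hdr = {0};
            struct topo_hdr *topo = &hdr;

            /* sizeof(topo) is the pointer width (8 on 64-bit);
             * sizeof(*topo) is the struct size (12 here). Advancing
             * by the former misplaces everything copied afterwards.
             */
            printf("ptr=%zu struct=%zu\n", sizeof(topo), sizeof(*topo));
            return 0;
    }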
+1 -1
drivers/gpu/drm/xe/xe_tlb_inval.c
··· 115 } 116 117 /** 118 - * xe_gt_tlb_inval_init - Initialize TLB invalidation state 119 * @gt: GT structure 120 * 121 * Initialize TLB invalidation state, purely software initialization, should
··· 115 } 116 117 /** 118 + * xe_gt_tlb_inval_init_early() - Initialize TLB invalidation state 119 * @gt: GT structure 120 * 121 * Initialize TLB invalidation state, purely software initialization, should
+1 -1
drivers/gpu/drm/xe/xe_tlb_inval_job.c
··· 164 } 165 166 /** 167 - * xe_tlb_inval_alloc_dep() - TLB invalidation job alloc dependency 168 * @job: TLB invalidation job to alloc dependency for 169 * 170 * Allocate storage for a dependency in the TLB invalidation fence. This
··· 164 } 165 166 /** 167 + * xe_tlb_inval_job_alloc_dep() - TLB invalidation job alloc dependency 168 * @job: TLB invalidation job to alloc dependency for 169 * 170 * Allocate storage for a dependency in the TLB invalidation fence. This