Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'drm-intel-fixes-2024-03-28' of https://anongit.freedesktop.org/git/drm/drm-intel into drm-fixes

Core/GT Fixes:
- Fix for BUG_ON/BUILD_BUG_ON in i915_memcpy.c (Joonas)
- Update a MTL workaround (Tejas)
- Fix locking inversion in hwmon's sysfs (Janusz)
- Remove a bogus error message around PXP (Jose)
- Fix UAF on VMA (Janusz)
- Reset queue_priority_hint on parking (Chris)

Display Fixes:
- Remove duplicated audio enable/disable on SDVO and DP (Ville)
- Disable AuxCCS for Xe driver (Juha-Pekka)
- Revert init order of MIPI DSI (Ville)
- DRRS debugfs fix with an extra refactor patch (Bhanuprakash)
- VRR related fixes (Ville)
- Fix a JSL eDP corruption (Jonathon)
- Fix the cursor physical dma address (Ville)
- BIOS VBT related fix (Ville)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZgYaIVgjIs30mIvS@intel.com

+161 -63
-2
drivers/gpu/drm/i915/display/g4x_dp.c
··· 717 717 { 718 718 intel_enable_dp(state, encoder, pipe_config, conn_state); 719 719 intel_edp_backlight_on(pipe_config, conn_state); 720 - encoder->audio_enable(encoder, pipe_config, conn_state); 721 720 } 722 721 723 722 static void vlv_enable_dp(struct intel_atomic_state *state, ··· 725 726 const struct drm_connector_state *conn_state) 726 727 { 727 728 intel_edp_backlight_on(pipe_config, conn_state); 728 - encoder->audio_enable(encoder, pipe_config, conn_state); 729 729 } 730 730 731 731 static void g4x_pre_enable_dp(struct intel_atomic_state *state,
+2 -1
drivers/gpu/drm/i915/display/icl_dsi.c
··· 1155 1155 } 1156 1156 1157 1157 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP); 1158 - intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); 1159 1158 1160 1159 /* ensure all panel commands dispatched before enabling transcoder */ 1161 1160 wait_for_cmds_dispatched_to_panel(encoder); ··· 1254 1255 1255 1256 /* step6d: enable dsi transcoder */ 1256 1257 gen11_dsi_enable_transcoder(encoder); 1258 + 1259 + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); 1257 1260 1258 1261 /* step7: enable backlight */ 1259 1262 intel_backlight_enable(crtc_state, conn_state);
+40 -6
drivers/gpu/drm/i915/display/intel_bios.c
··· 1955 1955 * these devices we split the init OTP sequence into a deassert sequence and 1956 1956 * the actual init OTP part. 1957 1957 */ 1958 - static void fixup_mipi_sequences(struct drm_i915_private *i915, 1959 - struct intel_panel *panel) 1958 + static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915, 1959 + struct intel_panel *panel) 1960 1960 { 1961 1961 u8 *init_otp; 1962 1962 int len; 1963 - 1964 - /* Limit this to VLV for now. */ 1965 - if (!IS_VALLEYVIEW(i915)) 1966 - return; 1967 1963 1968 1964 /* Limit this to v1 vid-mode sequences */ 1969 1965 if (panel->vbt.dsi.config->is_cmd_mode || ··· 1994 1998 init_otp[len - 1] = MIPI_SEQ_INIT_OTP; 1995 1999 /* And make MIPI_MIPI_SEQ_INIT_OTP point to it */ 1996 2000 panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1; 2001 + } 2002 + 2003 + /* 2004 + * Some machines (eg. Lenovo 82TQ) appear to have broken 2005 + * VBT sequences: 2006 + * - INIT_OTP is not present at all 2007 + * - what should be in INIT_OTP is in DISPLAY_ON 2008 + * - what should be in DISPLAY_ON is in BACKLIGHT_ON 2009 + * (along with the actual backlight stuff) 2010 + * 2011 + * To make those work we simply swap DISPLAY_ON and INIT_OTP. 2012 + * 2013 + * TODO: Do we need to limit this to specific machines, 2014 + * or examine the contents of the sequences to 2015 + * avoid false positives? 
2016 + */ 2017 + static void icl_fixup_mipi_sequences(struct drm_i915_private *i915, 2018 + struct intel_panel *panel) 2019 + { 2020 + if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] && 2021 + panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) { 2022 + drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n"); 2023 + 2024 + swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP], 2025 + panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]); 2026 + } 2027 + } 2028 + 2029 + static void fixup_mipi_sequences(struct drm_i915_private *i915, 2030 + struct intel_panel *panel) 2031 + { 2032 + if (DISPLAY_VER(i915) >= 11) 2033 + icl_fixup_mipi_sequences(i915, panel); 2034 + else if (IS_VALLEYVIEW(i915)) 2035 + vlv_fixup_mipi_sequences(i915, panel); 1997 2036 } 1998 2037 1999 2038 static void ··· 3381 3350 bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata) 3382 3351 { 3383 3352 const struct child_device_config *child = &devdata->child; 3353 + 3354 + if (!devdata) 3355 + return false; 3384 3356 3385 3357 if (!intel_bios_encoder_supports_dp(devdata) || 3386 3358 !intel_bios_encoder_supports_hdmi(devdata))
+1 -3
drivers/gpu/drm/i915/display/intel_cursor.c
··· 36 36 { 37 37 struct drm_i915_private *dev_priv = 38 38 to_i915(plane_state->uapi.plane->dev); 39 - const struct drm_framebuffer *fb = plane_state->hw.fb; 40 - struct drm_i915_gem_object *obj = intel_fb_obj(fb); 41 39 u32 base; 42 40 43 41 if (DISPLAY_INFO(dev_priv)->cursor_needs_physical) 44 - base = i915_gem_object_get_dma_address(obj, 0); 42 + base = plane_state->phys_dma_addr; 45 43 else 46 44 base = intel_plane_ggtt_offset(plane_state); 47 45
+1
drivers/gpu/drm/i915/display/intel_display_types.h
··· 727 727 #define PLANE_HAS_FENCE BIT(0) 728 728 729 729 struct intel_fb_view view; 730 + u32 phys_dma_addr; /* for cursor_needs_physical */ 730 731 731 732 /* Plane pxp decryption state */ 732 733 bool decrypt;
+2 -10
drivers/gpu/drm/i915/display/intel_dp.c
··· 67 67 #include "intel_dp_tunnel.h" 68 68 #include "intel_dpio_phy.h" 69 69 #include "intel_dpll.h" 70 + #include "intel_drrs.h" 70 71 #include "intel_fifo_underrun.h" 71 72 #include "intel_hdcp.h" 72 73 #include "intel_hdmi.h" ··· 2684 2683 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 2685 2684 } 2686 2685 2687 - static bool cpu_transcoder_has_drrs(struct drm_i915_private *i915, 2688 - enum transcoder cpu_transcoder) 2689 - { 2690 - if (HAS_DOUBLE_BUFFERED_M_N(i915)) 2691 - return true; 2692 - 2693 - return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder); 2694 - } 2695 - 2696 2686 static bool can_enable_drrs(struct intel_connector *connector, 2697 2687 const struct intel_crtc_state *pipe_config, 2698 2688 const struct drm_display_mode *downclock_mode) ··· 2706 2714 if (pipe_config->has_pch_encoder) 2707 2715 return false; 2708 2716 2709 - if (!cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder)) 2717 + if (!intel_cpu_transcoder_has_drrs(i915, pipe_config->cpu_transcoder)) 2710 2718 return false; 2711 2719 2712 2720 return downclock_mode &&
+1 -1
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
··· 2554 2554 static bool 2555 2555 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915) 2556 2556 { 2557 - return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) && 2557 + return ((IS_ELKHARTLAKE(i915) && 2558 2558 IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) || 2559 2559 IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) && 2560 2560 i915->display.dpll.ref_clks.nssc == 38400;
+11 -3
drivers/gpu/drm/i915/display/intel_drrs.c
··· 63 63 return str[drrs_type]; 64 64 } 65 65 66 + bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915, 67 + enum transcoder cpu_transcoder) 68 + { 69 + if (HAS_DOUBLE_BUFFERED_M_N(i915)) 70 + return true; 71 + 72 + return intel_cpu_transcoder_has_m2_n2(i915, cpu_transcoder); 73 + } 74 + 66 75 static void 67 76 intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc, 68 77 enum drrs_refresh_rate refresh_rate) ··· 321 312 mutex_lock(&crtc->drrs.mutex); 322 313 323 314 seq_printf(m, "DRRS capable: %s\n", 324 - str_yes_no(crtc_state->has_drrs || 325 - HAS_DOUBLE_BUFFERED_M_N(i915) || 326 - intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder))); 315 + str_yes_no(intel_cpu_transcoder_has_drrs(i915, 316 + crtc_state->cpu_transcoder))); 327 317 328 318 seq_printf(m, "DRRS enabled: %s\n", 329 319 str_yes_no(crtc_state->has_drrs));
+3
drivers/gpu/drm/i915/display/intel_drrs.h
··· 9 9 #include <linux/types.h> 10 10 11 11 enum drrs_type; 12 + enum transcoder; 12 13 struct drm_i915_private; 13 14 struct intel_atomic_state; 14 15 struct intel_crtc; 15 16 struct intel_crtc_state; 16 17 struct intel_connector; 17 18 19 + bool intel_cpu_transcoder_has_drrs(struct drm_i915_private *i915, 20 + enum transcoder cpu_transcoder); 18 21 const char *intel_drrs_type_str(enum drrs_type drrs_type); 19 22 bool intel_drrs_is_active(struct intel_crtc *crtc); 20 23 void intel_drrs_activate(const struct intel_crtc_state *crtc_state);
+14
drivers/gpu/drm/i915/display/intel_dsb.c
··· 340 340 return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency)); 341 341 } 342 342 343 + static u32 dsb_chicken(struct intel_crtc *crtc) 344 + { 345 + if (crtc->mode_flags & I915_MODE_FLAG_VRR) 346 + return DSB_CTRL_WAIT_SAFE_WINDOW | 347 + DSB_CTRL_NO_WAIT_VBLANK | 348 + DSB_INST_WAIT_SAFE_WINDOW | 349 + DSB_INST_NO_WAIT_VBLANK; 350 + else 351 + return 0; 352 + } 353 + 343 354 static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl, 344 355 int dewake_scanline) 345 356 { ··· 371 360 372 361 intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id), 373 362 ctrl | DSB_ENABLE); 363 + 364 + intel_de_write_fw(dev_priv, DSB_CHICKEN(pipe, dsb->id), 365 + dsb_chicken(crtc)); 374 366 375 367 intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id), 376 368 intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
+10
drivers/gpu/drm/i915/display/intel_fb_pin.c
··· 255 255 return PTR_ERR(vma); 256 256 257 257 plane_state->ggtt_vma = vma; 258 + 259 + /* 260 + * Pre-populate the dma address before we enter the vblank 261 + * evade critical section as i915_gem_object_get_dma_address() 262 + * will trigger might_sleep() even if it won't actually sleep, 263 + * which is the case when the fb has already been pinned. 264 + */ 265 + if (phys_cursor) 266 + plane_state->phys_dma_addr = 267 + i915_gem_object_get_dma_address(intel_fb_obj(fb), 0); 258 268 } else { 259 269 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 260 270
-4
drivers/gpu/drm/i915/display/intel_sdvo.c
··· 1842 1842 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 1843 1843 u32 temp; 1844 1844 1845 - encoder->audio_disable(encoder, old_crtc_state, conn_state); 1846 - 1847 1845 intel_sdvo_set_active_outputs(intel_sdvo, 0); 1848 1846 if (0) 1849 1847 intel_sdvo_set_encoder_power_state(intel_sdvo, ··· 1933 1935 intel_sdvo_set_encoder_power_state(intel_sdvo, 1934 1936 DRM_MODE_DPMS_ON); 1935 1937 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag); 1936 - 1937 - encoder->audio_enable(encoder, pipe_config, conn_state); 1938 1938 } 1939 1939 1940 1940 static enum drm_mode_status
+4 -3
drivers/gpu/drm/i915/display/intel_vrr.c
··· 187 187 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 188 188 189 189 /* 190 - * TRANS_SET_CONTEXT_LATENCY with VRR enabled 191 - * requires this chicken bit on ADL/DG2. 190 + * This bit seems to have two meanings depending on the platform: 191 + * TGL: generate VRR "safe window" for DSB vblank waits 192 + * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR 192 193 */ 193 - if (DISPLAY_VER(dev_priv) == 13) 194 + if (IS_DISPLAY_VER(dev_priv, 12, 13)) 194 195 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 195 196 0, PIPE_VBLANK_WITH_DELAY); 196 197
+3
drivers/gpu/drm/i915/display/skl_universal_plane.c
··· 2295 2295 if (HAS_4TILE(i915)) 2296 2296 caps |= INTEL_PLANE_CAP_TILING_4; 2297 2297 2298 + if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915)) 2299 + return caps; 2300 + 2298 2301 if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) { 2299 2302 caps |= INTEL_PLANE_CAP_CCS_RC; 2300 2303 if (DISPLAY_VER(i915) >= 12)
-3
drivers/gpu/drm/i915/gt/intel_engine_pm.c
··· 279 279 intel_engine_park_heartbeat(engine); 280 280 intel_breadcrumbs_park(engine->breadcrumbs); 281 281 282 - /* Must be reset upon idling, or we may miss the busy wakeup. */ 283 - GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN); 284 - 285 282 if (engine->park) 286 283 engine->park(engine); 287 284
+3
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
··· 3272 3272 { 3273 3273 cancel_timer(&engine->execlists.timer); 3274 3274 cancel_timer(&engine->execlists.preempt); 3275 + 3276 + /* Reset upon idling, or we may delay the busy wakeup. */ 3277 + WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN); 3275 3278 } 3276 3279 3277 3280 static void add_to_engine(struct i915_request *rq)
+1
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 1653 1653 xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1654 1654 { 1655 1655 /* Wa_14018575942 / Wa_18018781329 */ 1656 + wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB); 1656 1657 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB); 1657 1658 1658 1659 /* Wa_22016670082 */
+1 -1
drivers/gpu/drm/i915/i915_driver.c
··· 800 800 goto out_cleanup_modeset2; 801 801 802 802 ret = intel_pxp_init(i915); 803 - if (ret != -ENODEV) 803 + if (ret && ret != -ENODEV) 804 804 drm_dbg(&i915->drm, "pxp init failed with %d\n", ret); 805 805 806 806 ret = intel_display_driver_probe(i915);
+19 -18
drivers/gpu/drm/i915/i915_hwmon.c
··· 72 72 struct intel_uncore *uncore = ddat->uncore; 73 73 intel_wakeref_t wakeref; 74 74 75 - mutex_lock(&hwmon->hwmon_lock); 75 + with_intel_runtime_pm(uncore->rpm, wakeref) { 76 + mutex_lock(&hwmon->hwmon_lock); 76 77 77 - with_intel_runtime_pm(uncore->rpm, wakeref) 78 78 intel_uncore_rmw(uncore, reg, clear, set); 79 79 80 - mutex_unlock(&hwmon->hwmon_lock); 80 + mutex_unlock(&hwmon->hwmon_lock); 81 + } 81 82 } 82 83 83 84 /* ··· 137 136 else 138 137 rgaddr = hwmon->rg.energy_status_all; 139 138 140 - mutex_lock(&hwmon->hwmon_lock); 139 + with_intel_runtime_pm(uncore->rpm, wakeref) { 140 + mutex_lock(&hwmon->hwmon_lock); 141 141 142 - with_intel_runtime_pm(uncore->rpm, wakeref) 143 142 reg_val = intel_uncore_read(uncore, rgaddr); 144 143 145 - if (reg_val >= ei->reg_val_prev) 146 - ei->accum_energy += reg_val - ei->reg_val_prev; 147 - else 148 - ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val; 149 - ei->reg_val_prev = reg_val; 144 + if (reg_val >= ei->reg_val_prev) 145 + ei->accum_energy += reg_val - ei->reg_val_prev; 146 + else 147 + ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val; 148 + ei->reg_val_prev = reg_val; 150 149 151 - *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY, 152 - hwmon->scl_shift_energy); 153 - mutex_unlock(&hwmon->hwmon_lock); 150 + *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY, 151 + hwmon->scl_shift_energy); 152 + mutex_unlock(&hwmon->hwmon_lock); 153 + } 154 154 } 155 155 156 156 static ssize_t ··· 406 404 407 405 /* Block waiting for GuC reset to complete when needed */ 408 406 for (;;) { 407 + wakeref = intel_runtime_pm_get(ddat->uncore->rpm); 409 408 mutex_lock(&hwmon->hwmon_lock); 410 409 411 410 prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE); ··· 420 417 } 421 418 422 419 mutex_unlock(&hwmon->hwmon_lock); 420 + intel_runtime_pm_put(ddat->uncore->rpm, wakeref); 423 421 424 422 schedule(); 425 423 } 426 424 finish_wait(&ddat->waitq, &wait); 427 425 if (ret) 428 - goto unlock; 429 - 430 - 
wakeref = intel_runtime_pm_get(ddat->uncore->rpm); 426 + goto exit; 431 427 432 428 /* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */ 433 429 if (val == PL1_DISABLE) { ··· 446 444 intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit, 447 445 PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval); 448 446 exit: 449 - intel_runtime_pm_put(ddat->uncore->rpm, wakeref); 450 - unlock: 451 447 mutex_unlock(&hwmon->hwmon_lock); 448 + intel_runtime_pm_put(ddat->uncore->rpm, wakeref); 452 449 return ret; 453 450 } 454 451
+1
drivers/gpu/drm/i915/i915_memcpy.c
··· 26 26 #include <linux/string.h> 27 27 #include <linux/cpufeature.h> 28 28 #include <linux/bug.h> 29 + #include <linux/build_bug.h> 29 30 #include <asm/fpu/api.h> 30 31 31 32 #include "i915_memcpy.h"
+1 -1
drivers/gpu/drm/i915/i915_reg.h
··· 4599 4599 #define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \ 4600 4600 _MTL_CHICKEN_TRANS_A, \ 4601 4601 _MTL_CHICKEN_TRANS_B) 4602 - #define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* ADL/DG2 */ 4602 + #define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */ 4603 4603 #define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */ 4604 4604 #define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) 4605 4605 #define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
+43 -7
drivers/gpu/drm/i915/i915_vma.c
··· 34 34 #include "gt/intel_engine.h" 35 35 #include "gt/intel_engine_heartbeat.h" 36 36 #include "gt/intel_gt.h" 37 + #include "gt/intel_gt_pm.h" 37 38 #include "gt/intel_gt_requests.h" 38 39 #include "gt/intel_tlb.h" 39 40 ··· 104 103 105 104 static int __i915_vma_active(struct i915_active *ref) 106 105 { 107 - return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT; 106 + struct i915_vma *vma = active_to_vma(ref); 107 + 108 + if (!i915_vma_tryget(vma)) 109 + return -ENOENT; 110 + 111 + /* 112 + * Exclude global GTT VMA from holding a GT wakeref 113 + * while active, otherwise GPU never goes idle. 114 + */ 115 + if (!i915_vma_is_ggtt(vma)) { 116 + /* 117 + * Since we and our _retire() counterpart can be 118 + * called asynchronously, storing a wakeref tracking 119 + * handle inside struct i915_vma is not safe, and 120 + * there is no other good place for that. Hence, 121 + * use untracked variants of intel_gt_pm_get/put(). 122 + */ 123 + intel_gt_pm_get_untracked(vma->vm->gt); 124 + } 125 + 126 + return 0; 108 127 } 109 128 110 129 static void __i915_vma_retire(struct i915_active *ref) 111 130 { 112 - i915_vma_put(active_to_vma(ref)); 131 + struct i915_vma *vma = active_to_vma(ref); 132 + 133 + if (!i915_vma_is_ggtt(vma)) { 134 + /* 135 + * Since we can be called from atomic contexts, 136 + * use an async variant of intel_gt_pm_put(). 
137 + */ 138 + intel_gt_pm_put_async_untracked(vma->vm->gt); 139 + } 140 + 141 + i915_vma_put(vma); 113 142 } 114 143 115 144 static struct i915_vma * ··· 1435 1404 struct i915_vma_work *work = NULL; 1436 1405 struct dma_fence *moving = NULL; 1437 1406 struct i915_vma_resource *vma_res = NULL; 1438 - intel_wakeref_t wakeref = 0; 1407 + intel_wakeref_t wakeref; 1439 1408 unsigned int bound; 1440 1409 int err; 1441 1410 ··· 1455 1424 if (err) 1456 1425 return err; 1457 1426 1458 - if (flags & PIN_GLOBAL) 1459 - wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); 1427 + /* 1428 + * In case of a global GTT, we must hold a runtime-pm wakeref 1429 + * while global PTEs are updated. In other cases, we hold 1430 + * the rpm reference while the VMA is active. Since runtime 1431 + * resume may require allocations, which are forbidden inside 1432 + * vm->mutex, get the first rpm wakeref outside of the mutex. 1433 + */ 1434 + wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); 1460 1435 1461 1436 if (flags & vma->vm->bind_async_flags) { 1462 1437 /* lock VM */ ··· 1598 1561 if (work) 1599 1562 dma_fence_work_commit_imm(&work->base); 1600 1563 err_rpm: 1601 - if (wakeref) 1602 - intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); 1564 + intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); 1603 1565 1604 1566 if (moving) 1605 1567 dma_fence_put(moving);