Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/i915: Move rps.hw_lock to dev_priv and s/hw_lock/pcu_lock

In order to separate GT PM related functionality into a new structure,
we are updating the rps structure. The hw_lock in it is also used for
display-related PCU communication, hence move it to dev_priv as pcu_lock.
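
The i915_drv.h hunk below is the heart of the change; reduced to a sketch
(unrelated fields elided, the lock comment abbreviated), the move is:

	struct intel_gen6_power_mgmt {
		/* ... RPS/RC6 state ... */
		struct mutex hw_lock;	/* removed: also guarded display PCU traffic */
	};

becomes

	struct drm_i915_private {
		/* ... */
		/* Protects RPS/RC6 register access and PCU communication. */
		struct mutex pcu_lock;

		/* gen6+ rps state */
		struct intel_gen6_power_mgmt rps;
		/* ... */
	};

Every other hunk is the mechanical rename of the lock site.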

Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1507360055-19948-8-git-send-email-sagar.a.kamble@intel.com
Acked-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171010213010.7415-7-chris@chris-wilson.co.uk

Authored by Sagar Arun Kamble, committed by Chris Wilson
9f817501 ad1443f0

+105 -105
+12 -12
drivers/gpu/drm/i915/i915_debugfs.c
@@ -1097,7 +1097,7 @@
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		u32 rpmodectl, freq_sts;
 
-		mutex_lock(&dev_priv->rps.hw_lock);
+		mutex_lock(&dev_priv->pcu_lock);
 
 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
 		seq_printf(m, "Video Turbo Mode: %s\n",
@@ -1130,7 +1130,7 @@
 		seq_printf(m,
 			   "efficient (RPe) frequency: %d MHz\n",
 			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		u32 rp_state_limits;
 		u32 gt_perf_status;
@@ -1565,9 +1565,9 @@
 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
 	}
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	seq_printf(m, "RC1e Enabled: %s\n",
 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
@@ -1842,7 +1842,7 @@
 
 	intel_runtime_pm_get(dev_priv);
 
-	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
 	if (ret)
 		goto out;
 
@@ -1873,7 +1873,7 @@
 			   ((ia_freq >> 8) & 0xff) * 100);
 	}
 
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 out:
 	intel_runtime_pm_put(dev_priv);
@@ -4320,7 +4320,7 @@
 
 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
-	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
 	if (ret)
 		return ret;
 
@@ -4333,7 +4333,7 @@
 	hw_min = dev_priv->rps.min_freq;
 
 	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
 		return -EINVAL;
 	}
 
@@ -4342,7 +4342,7 @@
 	if (intel_set_rps(dev_priv, val))
 		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
 
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	return 0;
 }
@@ -4375,7 +4375,7 @@
 
 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
-	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
+	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
 	if (ret)
 		return ret;
 
@@ -4389,7 +4389,7 @@
 
 	if (val < hw_min ||
 	    val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
 		return -EINVAL;
 	}
 
@@ -4398,7 +4398,7 @@
 	if (intel_set_rps(dev_priv, val))
 		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
 
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	return 0;
 }
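
The debugfs entry points above take the lock interruptibly, so a signal can
abort a userspace-triggered wait instead of blocking in the kernel. Condensed
into a sketch (hypothetical wrapper name, surrounding code elided):

	static int example_user_query(struct drm_i915_private *dev_priv)
	{
		int ret;

		/* Interruptible: a pending signal aborts the wait. */
		ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
		if (ret)
			return ret;

		/* ... read or program frequencies while holding the lock ... */

		mutex_unlock(&dev_priv->pcu_lock);
		return 0;
	}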
+8 -8
drivers/gpu/drm/i915/i915_drv.h
@@ -1364,14 +1364,6 @@
 
 	/* manual wa residency calculations */
 	struct intel_rps_ei ei;
-
-	/*
-	 * Protects RPS/RC6 register access and PCU communication.
-	 * Must be taken after struct_mutex if nested. Note that
-	 * this lock may be held for long periods of time when
-	 * talking to hw - so only take it when talking to hw!
-	 */
-	struct mutex hw_lock;
 };
 
 /* defined intel_pm.c */
@@ -2412,6 +2420,14 @@
 
 	/* Cannot be determined by PCIID. You must always read a register. */
 	u32 edram_cap;
+
+	/*
+	 * Protects RPS/RC6 register access and PCU communication.
+	 * Must be taken after struct_mutex if nested. Note that
+	 * this lock may be held for long periods of time when
+	 * talking to hw - so only take it when talking to hw!
+	 */
+	struct mutex pcu_lock;
 
 	/* gen6+ rps state */
 	struct intel_gen6_power_mgmt rps;
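
Per the comment carried over above, pcu_lock nests inside struct_mutex and
should only be held while actually talking to hardware. An illustrative
caller, with a hypothetical function name (the pcode call itself appears in
the debugfs hunks):

	static int example_read_rc6vids(struct drm_i915_private *dev_priv,
					u32 *vids)
	{
		int ret;

		/* Take the lock only around the PCU mailbox transaction. */
		mutex_lock(&dev_priv->pcu_lock);
		ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, vids);
		mutex_unlock(&dev_priv->pcu_lock);

		return ret;
	}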
+2 -2
drivers/gpu/drm/i915/i915_irq.c
@@ -1181,7 +1181,7 @@
 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
 		goto out;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
 
@@ -1235,7 +1235,7 @@
 		dev_priv->rps.last_adj = 0;
 	}
 
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 out:
 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
+10 -10
drivers/gpu/drm/i915/i915_sysfs.c
@@ -246,7 +246,7 @@
 
 	intel_runtime_pm_get(dev_priv);
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		u32 freq;
 		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -261,7 +261,7 @@
 		ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
 		ret = intel_gpu_freq(dev_priv, ret);
 	}
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -304,9 +304,9 @@
 	if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
 		return -EINVAL;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	dev_priv->rps.boost_freq = val;
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	return count;
 }
@@ -344,14 +344,14 @@
 
 	intel_runtime_pm_get(dev_priv);
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	val = intel_freq_opcode(dev_priv, val);
 
 	if (val < dev_priv->rps.min_freq ||
 	    val > dev_priv->rps.max_freq ||
 	    val < dev_priv->rps.min_freq_softlimit) {
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
 		intel_runtime_pm_put(dev_priv);
 		return -EINVAL;
 	}
@@ -371,7 +371,7 @@
 	 * frequency request may be unchanged. */
 	ret = intel_set_rps(dev_priv, val);
 
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -401,14 +401,14 @@
 
 	intel_runtime_pm_get(dev_priv);
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	val = intel_freq_opcode(dev_priv, val);
 
 	if (val < dev_priv->rps.min_freq ||
 	    val > dev_priv->rps.max_freq ||
 	    val > dev_priv->rps.max_freq_softlimit) {
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
 		intel_runtime_pm_put(dev_priv);
 		return -EINVAL;
 	}
@@ -424,7 +424,7 @@
 	 * frequency request may be unchanged. */
 	ret = intel_set_rps(dev_priv, val);
 
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	intel_runtime_pm_put(dev_priv);
 
+20 -20
drivers/gpu/drm/i915/intel_cdclk.c
@@ -503,7 +503,7 @@
 	else
 		cmd = 0;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
 	val &= ~DSPFREQGUAR_MASK;
 	val |= (cmd << DSPFREQGUAR_SHIFT);
@@ -513,7 +513,7 @@
 		     50)) {
 		DRM_ERROR("timed out waiting for CDclk change\n");
 	}
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	mutex_lock(&dev_priv->sb_lock);
 
@@ -590,7 +590,7 @@
 	 */
 	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
 	val &= ~DSPFREQGUAR_MASK_CHV;
 	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
@@ -600,7 +600,7 @@
 		     50)) {
 		DRM_ERROR("timed out waiting for CDclk change\n");
 	}
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	intel_update_cdclk(dev_priv);
 
@@ -656,10 +656,10 @@
 		 "trying to change cdclk frequency with cdclk not enabled\n"))
 		return;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	ret = sandybridge_pcode_write(dev_priv,
 				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 	if (ret) {
 		DRM_ERROR("failed to inform pcode about cdclk change\n");
 		return;
@@ -712,9 +712,9 @@
 			    LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
 		DRM_ERROR("Switching back to LCPLL failed\n");
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
 
@@ -928,12 +928,12 @@
 
 	WARN_ON((cdclk == 24000) != (vco == 0));
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 				SKL_CDCLK_PREPARE_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE, 3);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 	if (ret) {
 		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
 			  ret);
@@ -975,9 +975,9 @@
 	POSTING_READ(CDCLK_CTL);
 
 	/* inform PCU of the change */
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	intel_update_cdclk(dev_priv);
 }
@@ -1268,10 +1268,10 @@
 	}
 
 	/* Inform power controller of upcoming frequency change */
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
 				      0x80000000);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	if (ret) {
 		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
@@ -1300,10 +1300,10 @@
 		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
 	I915_WRITE(CDCLK_CTL, val);
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
 				      DIV_ROUND_UP(cdclk, 25000));
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	if (ret) {
 		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
@@ -1518,12 +1518,12 @@
 	u32 val, divider, pcu_ack;
 	int ret;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
 				SKL_CDCLK_PREPARE_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE,
 				SKL_CDCLK_READY_FOR_CHANGE, 3);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 	if (ret) {
 		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
 			  ret);
@@ -1575,9 +1575,9 @@
 	I915_WRITE(CDCLK_CTL, val);
 
 	/* inform PCU of the change */
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	intel_update_cdclk(dev_priv);
 }
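
The cdclk paths above all follow the same prepare/reprogram/acknowledge
handshake with the PCU, now serialized by pcu_lock. Condensed into a sketch
(hypothetical wrapper name; control flow simplified and error handling
elided, where the real code bails out on failure):

	static void example_skl_cdclk_handshake(struct drm_i915_private *dev_priv,
						u32 pcu_ack)
	{
		/* Ask the PCU to prepare for the change and poll until ready. */
		mutex_lock(&dev_priv->pcu_lock);
		skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				  SKL_CDCLK_PREPARE_FOR_CHANGE,
				  SKL_CDCLK_READY_FOR_CHANGE,
				  SKL_CDCLK_READY_FOR_CHANGE, 3);
		mutex_unlock(&dev_priv->pcu_lock);

		/* ... reprogram CDCLK_CTL and PLLs outside the lock ... */

		/* Acknowledge the new frequency to the PCU. */
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
		mutex_unlock(&dev_priv->pcu_lock);
	}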
+6 -6
drivers/gpu/drm/i915/intel_display.c
@@ -4946,10 +4946,10 @@
 
 	assert_plane_enabled(dev_priv, crtc->plane);
 	if (IS_BROADWELL(dev_priv)) {
-		mutex_lock(&dev_priv->rps.hw_lock);
+		mutex_lock(&dev_priv->pcu_lock);
 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
 						IPS_ENABLE | IPS_PCODE_CONTROL));
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
 		/* Quoting Art Runyan: "its not safe to expect any particular
 		 * value in IPS_CTL bit 31 after enabling IPS through the
 		 * mailbox." Moreover, the mailbox may return a bogus state,
@@ -4979,9 +4979,9 @@
 
 	assert_plane_enabled(dev_priv, crtc->plane);
 	if (IS_BROADWELL(dev_priv)) {
-		mutex_lock(&dev_priv->rps.hw_lock);
+		mutex_lock(&dev_priv->pcu_lock);
 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
 		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
 		if (intel_wait_for_register(dev_priv,
 					    IPS_CTL, IPS_ENABLE, 0,
@@ -8839,11 +8839,11 @@
 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
 {
 	if (IS_HASWELL(dev_priv)) {
-		mutex_lock(&dev_priv->rps.hw_lock);
+		mutex_lock(&dev_priv->pcu_lock);
 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
 					    val))
 			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
 	} else {
 		I915_WRITE(D_COMP_BDW, val);
 		POSTING_READ(D_COMP_BDW);
+36 -36
drivers/gpu/drm/i915/intel_pm.c
@@ -322,7 +322,7 @@
 {
 	u32 val;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
 	if (enable)
@@ -337,14 +337,14 @@
 			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
 		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
 
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 }
 
 static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
 {
 	u32 val;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
 	if (enable)
@@ -353,7 +353,7 @@
 		val &= ~DSP_MAXFIFO_PM5_ENABLE;
 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
 
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 }
 
 #define FW_WM(value, plane) \
@@ -2790,11 +2790,11 @@
 
 	/* read the first set of memory latencies[0:3] */
 	val = 0; /* data0 to be programmed to 0 for first set */
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	ret = sandybridge_pcode_read(dev_priv,
 				     GEN9_PCODE_READ_MEM_LATENCY,
 				     &val);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	if (ret) {
 		DRM_ERROR("SKL Mailbox read error = %d\n", ret);
@@ -2811,11 +2811,11 @@
 
 	/* read the second set of memory latencies[4:7] */
 	val = 1; /* data0 to be programmed to 1 for second set */
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	ret = sandybridge_pcode_read(dev_priv,
 				     GEN9_PCODE_READ_MEM_LATENCY,
 				     &val);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 	if (ret) {
 		DRM_ERROR("SKL Mailbox read error = %d\n", ret);
 		return;
@@ -3608,13 +3608,13 @@
 		return 0;
 
 	DRM_DEBUG_KMS("Enabling the SAGV\n");
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
 				      GEN9_SAGV_ENABLE);
 
 	/* We don't need to wait for the SAGV when enabling */
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	/*
 	 * Some skl systems, pre-release machines in particular,
@@ -3645,14 +3645,14 @@
 		return 0;
 
 	DRM_DEBUG_KMS("Disabling the SAGV\n");
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	/* bspec says to keep retrying for at least 1 ms */
 	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
 				GEN9_SAGV_DISABLE,
 				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
 				1);
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	/*
 	 * Some skl systems, pre-release machines in particular,
@@ -5621,7 +5621,7 @@
 		wm->level = VLV_WM_LEVEL_PM2;
 
 	if (IS_CHERRYVIEW(dev_priv)) {
-		mutex_lock(&dev_priv->rps.hw_lock);
+		mutex_lock(&dev_priv->pcu_lock);
 
 		val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
 		if (val & DSP_MAXFIFO_PM5_ENABLE)
@@ -5651,7 +5651,7 @@
 				wm->level = VLV_WM_LEVEL_DDR_DVFS;
 		}
 
-		mutex_unlock(&dev_priv->rps.hw_lock);
+		mutex_unlock(&dev_priv->pcu_lock);
 	}
 
 	for_each_intel_crtc(dev, crtc) {
@@ -6224,7 +6224,7 @@
 
 void gen6_rps_busy(struct drm_i915_private *dev_priv)
 {
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	if (dev_priv->rps.enabled) {
 		u8 freq;
 
@@ -6247,7 +6247,7 @@
 					 dev_priv->rps.max_freq_softlimit)))
 			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
 	}
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -6259,7 +6259,7 @@
 	 */
 	gen6_disable_rps_interrupts(dev_priv);
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 			vlv_set_rps_idle(dev_priv);
@@ -6269,7 +6269,7 @@
 		I915_WRITE(GEN6_PMINTRMSK,
 			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
 	}
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 }
 
 void gen6_rps_boost(struct drm_i915_gem_request *rq,
@@ -6306,7 +6306,7 @@
 {
 	int err;
 
-	lockdep_assert_held(&dev_priv->rps.hw_lock);
+	lockdep_assert_held(&dev_priv->pcu_lock);
 	GEM_BUG_ON(val > dev_priv->rps.max_freq);
 	GEM_BUG_ON(val < dev_priv->rps.min_freq);
 
@@ -6715,7 +6715,7 @@
 	int rc6_mode;
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	I915_WRITE(GEN6_RC_STATE, 0);
 
@@ -6789,7 +6789,7 @@
 
 static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	/* Here begins a magic sequence of register writes to enable
 	 * auto-downclocking.
@@ -6817,7 +6817,7 @@
 	int scaling_factor = 180;
 	struct cpufreq_policy *policy;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	policy = cpufreq_cpu_get(0);
 	if (policy) {
@@ -7210,7 +7210,7 @@
 	enum intel_engine_id id;
 	u32 gtfifodbg, rc6_mode = 0, pcbr;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
 					     GT_FIFO_FREE_ENTRIES_CHV);
@@ -7264,7 +7264,7 @@
 {
 	u32 val;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -7310,7 +7310,7 @@
 	enum intel_engine_id id;
 	u32 gtfifodbg, rc6_mode = 0;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	valleyview_check_pctx(dev_priv);
 
@@ -7357,7 +7357,7 @@
 {
 	u32 val;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -7881,7 +7881,7 @@
 	}
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	/* Initialize RPS limits (for userspace) */
 	if (IS_CHERRYVIEW(dev_priv))
@@ -7921,7 +7921,7 @@
 	/* Finally allow us to boost to max by default */
 	dev_priv->rps.boost_freq = dev_priv->rps.max_freq;
 
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	intel_autoenable_gt_powersave(dev_priv);
@@ -7968,7 +7968,7 @@
 	if (!READ_ONCE(dev_priv->rps.enabled))
 		return;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	if (INTEL_GEN(dev_priv) >= 9) {
 		gen9_disable_rc6(dev_priv);
@@ -7987,7 +7987,7 @@
 	}
 
 	dev_priv->rps.enabled = false;
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 }
 
 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
@@ -8002,7 +8002,7 @@
 	if (intel_vgpu_active(dev_priv))
 		return;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	if (IS_CHERRYVIEW(dev_priv)) {
 		cherryview_enable_rc6(dev_priv);
@@ -8035,7 +8035,7 @@
 	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
 
 	dev_priv->rps.enabled = true;
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 }
 
 static void __intel_autoenable_gt_powersave(struct work_struct *work)
@@ -9123,7 +9123,7 @@
 {
 	int status;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	/* GEN6_PCODE_* are outside of the forcewake domain, we can
 	 * use te fw I915_READ variants to reduce the amount of work
@@ -9170,7 +9170,7 @@
 {
 	int status;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	/* GEN6_PCODE_* are outside of the forcewake domain, we can
 	 * use te fw I915_READ variants to reduce the amount of work
@@ -9247,7 +9247,7 @@
 	u32 status;
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 #define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
 				   &status)
@@ -9344,7 +9344,7 @@
 
 void intel_pm_setup(struct drm_i915_private *dev_priv)
 {
-	mutex_init(&dev_priv->rps.hw_lock);
+	mutex_init(&dev_priv->pcu_lock);
 
 	INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
 			  __intel_autoenable_gt_powersave);
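
One ordering detail worth calling out: the intel_init_gt_powersave() hunk
above takes struct_mutex first and pcu_lock second, which is exactly the
"must be taken after struct_mutex if nested" rule from the i915_drv.h
comment. Schematically:

	/* Correct nesting (mirrors intel_init_gt_powersave() above): */
	mutex_lock(&dev_priv->drm.struct_mutex);
	mutex_lock(&dev_priv->pcu_lock);
	/* ... initialize RPS limits, talk to the PCU ... */
	mutex_unlock(&dev_priv->pcu_lock);
	mutex_unlock(&dev_priv->drm.struct_mutex);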
+8 -8
drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -785,7 +785,7 @@
 	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
 			 PUNIT_PWRGT_PWR_GATE(power_well_id);
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 #define COND \
 	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
@@ -806,7 +806,7 @@
 #undef COND
 
 out:
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 }
 
 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
@@ -833,7 +833,7 @@
 	mask = PUNIT_PWRGT_MASK(power_well_id);
 	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
 	/*
@@ -852,7 +852,7 @@
 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
 	WARN_ON(ctrl != state);
 
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	return enabled;
 }
@@ -1364,7 +1364,7 @@
 	bool enabled;
 	u32 state, ctrl;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
 	/*
@@ -1381,7 +1381,7 @@
 	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
 	WARN_ON(ctrl << 16 != state);
 
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 
 	return enabled;
 }
@@ -1396,7 +1396,7 @@
 
 	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
 
-	mutex_lock(&dev_priv->rps.hw_lock);
+	mutex_lock(&dev_priv->pcu_lock);
 
 #define COND \
 	((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
@@ -1417,7 +1417,7 @@
 #undef COND
 
 out:
-	mutex_unlock(&dev_priv->rps.hw_lock);
+	mutex_unlock(&dev_priv->pcu_lock);
 }
 
 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
+3 -3
drivers/gpu/drm/i915/intel_sideband.c
@@ -81,7 +81,7 @@
 {
 	u32 val = 0;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	mutex_lock(&dev_priv->sb_lock);
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
@@ -95,7 +95,7 @@
 {
 	int err;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	mutex_lock(&dev_priv->sb_lock);
 	err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
@@ -125,7 +125,7 @@
 {
 	u32 val = 0;
 
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock));
 
 	mutex_lock(&dev_priv->sb_lock);
 	vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
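
The sideband helpers above only assert that pcu_lock is held rather than
take it themselves, so a caller can keep a whole Punit read-modify-write
atomic, as chv_set_memory_pm5() in the intel_pm.c hunks does. Schematically
(hypothetical helper name):

	static void example_punit_rmw(struct drm_i915_private *dev_priv,
				      u32 addr, u32 clear, u32 set)
	{
		u32 val;

		/* Hold pcu_lock across the read-modify-write so no other
		 * PCU/Punit transaction can interleave. */
		mutex_lock(&dev_priv->pcu_lock);
		val = vlv_punit_read(dev_priv, addr);
		val &= ~clear;
		val |= set;
		vlv_punit_write(dev_priv, addr, val);
		mutex_unlock(&dev_priv->pcu_lock);
	}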