/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <drm/drm_atomic_helper.h>

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6 and in the
 * voltage consumed by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)

static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */
	/* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}

static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
	gen9_init_clock_gating(dev_priv);

	/*
	 * WaDisablePWMClockGating:glk
	 * Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
		   PWM1_GATING_DIS | PWM2_GATING_DIS);

	/* WaDDIIOTimeout:glk */
	if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
		u32 val = I915_READ(CHICKEN_MISC_2);
		val &= ~(GLK_CL0_PWR_DOWN |
			 GLK_CL1_PWR_DOWN |
			 GLK_CL2_PWR_DOWN);
		I915_WRITE(CHICKEN_MISC_2, val);
	}

}

static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ?
		1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv)
{
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency
*intel_get_cxsr_latency(bool is_desktop,
			bool is_ddr3,
			int fsb,
			int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool was_enabled;
	u32 val;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
	} else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev_priv)) {
		val = I915_READ(DSPFW3);
		was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
		if (enable)
			val |= PINEVIEW_SELF_REFRESH_EN;
		else
			val &= ~PINEVIEW_SELF_REFRESH_EN;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
		was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev_priv)) {
		/*
		 * FIXME can't find a bit like this for 915G, and
		 * yet it does have the related watermark in
		 * FW_BLC_SELF. What's going on?
		 */
		was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
		val = enable ?
			_MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return false;
	}

	trace_intel_memory_cxsr(dev_priv, was_enabled, enable);

	DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
		      enableddisabled(enable),
		      enableddisabled(was_enabled));

	return was_enabled;
}

/**
 * intel_set_memory_cxsr - Configure CxSR state
 * @dev_priv: i915 device
 * @enable: Allow vs. disallow CxSR
 *
 * Allow or disallow the system to enter a special CxSR
 * (C-state self refresh) state. What typically happens in CxSR mode
 * is that several display FIFOs may get combined into a single larger
 * FIFO for a particular plane (so called max FIFO mode) to allow the
 * system to defer memory fetches longer, and the memory will enter
 * self refresh.
 *
 * Note that enabling CxSR does not guarantee that the system enters
 * this special mode, nor does it guarantee that the system stays
 * in that mode once entered. So this just allows/disallows the system
 * to autonomously utilize the CxSR mode. Other factors such as core
 * C-states will affect when/if the system actually enters/exits the
 * CxSR mode.
 *
 * Note that on VLV/CHV this actually only controls the max FIFO mode,
 * and the system is free to enter/exit memory self refresh at any time
 * even when the use of CxSR has been disallowed.
 *
 * While the system is actually in the CxSR/max FIFO mode, some plane
 * control registers will not get latched on vblank. Thus in order to
 * guarantee the system will respond to changes in the plane registers
 * we must always disallow CxSR prior to making changes to those registers.
 * Unfortunately the system will re-evaluate the CxSR conditions at
 * frame start which happens after vblank start (which is when the plane
 * registers would get latched), so we can't proceed with the plane update
 * during the same frame where we disallowed CxSR.
 *
 * Certain platforms also have a deeper HPLL SR mode. Fortunately the
 * HPLL SR mode depends on CxSR itself, so we don't have to hand hold
 * the hardware w.r.t. HPLL SR when writing to plane registers.
 * Disallowing just CxSR is sufficient.
 */
bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	bool ret;

	mutex_lock(&dev_priv->wm.wm_mutex);
	ret = _intel_set_memory_cxsr(dev_priv, enable);
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->wm.vlv.cxsr = enable;
	else if (IS_G4X(dev_priv))
		dev_priv->wm.g4x.cxsr = enable;
	mutex_unlock(&dev_priv->wm.wm_mutex);

	return ret;
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
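 *
 * As a rough, illustrative sanity check (made-up numbers, not taken from
 * any spec): with this 5 usec figure, a 148500 kHz pixel clock at 4 bytes
 * per pixel has to ride out roughly 148500 * 4 * 50 / 10000 = 2970 bytes
 * (about 47 cachelines of 64 bytes) of FIFO drain while memory wakes up,
 * which is the same arithmetic intel_wm_method1() below performs.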
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))

static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	enum pipe pipe = crtc->pipe;
	int sprite0_start, sprite1_start;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		MISSING_CASE(pipe);
		return;
	}

	fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
	fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
	fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
	fifo_state->plane[PLANE_CURSOR] = 63;
}

static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
{
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ?
"B" : "A", 540 size); 541 542 return size; 543} 544 545/* Pineview has different values for various configs */ 546static const struct intel_watermark_params pineview_display_wm = { 547 .fifo_size = PINEVIEW_DISPLAY_FIFO, 548 .max_wm = PINEVIEW_MAX_WM, 549 .default_wm = PINEVIEW_DFT_WM, 550 .guard_size = PINEVIEW_GUARD_WM, 551 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 552}; 553static const struct intel_watermark_params pineview_display_hplloff_wm = { 554 .fifo_size = PINEVIEW_DISPLAY_FIFO, 555 .max_wm = PINEVIEW_MAX_WM, 556 .default_wm = PINEVIEW_DFT_HPLLOFF_WM, 557 .guard_size = PINEVIEW_GUARD_WM, 558 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 559}; 560static const struct intel_watermark_params pineview_cursor_wm = { 561 .fifo_size = PINEVIEW_CURSOR_FIFO, 562 .max_wm = PINEVIEW_CURSOR_MAX_WM, 563 .default_wm = PINEVIEW_CURSOR_DFT_WM, 564 .guard_size = PINEVIEW_CURSOR_GUARD_WM, 565 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 566}; 567static const struct intel_watermark_params pineview_cursor_hplloff_wm = { 568 .fifo_size = PINEVIEW_CURSOR_FIFO, 569 .max_wm = PINEVIEW_CURSOR_MAX_WM, 570 .default_wm = PINEVIEW_CURSOR_DFT_WM, 571 .guard_size = PINEVIEW_CURSOR_GUARD_WM, 572 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 573}; 574static const struct intel_watermark_params i965_cursor_wm_info = { 575 .fifo_size = I965_CURSOR_FIFO, 576 .max_wm = I965_CURSOR_MAX_WM, 577 .default_wm = I965_CURSOR_DFT_WM, 578 .guard_size = 2, 579 .cacheline_size = I915_FIFO_LINE_SIZE, 580}; 581static const struct intel_watermark_params i945_wm_info = { 582 .fifo_size = I945_FIFO_SIZE, 583 .max_wm = I915_MAX_WM, 584 .default_wm = 1, 585 .guard_size = 2, 586 .cacheline_size = I915_FIFO_LINE_SIZE, 587}; 588static const struct intel_watermark_params i915_wm_info = { 589 .fifo_size = I915_FIFO_SIZE, 590 .max_wm = I915_MAX_WM, 591 .default_wm = 1, 592 .guard_size = 2, 593 .cacheline_size = I915_FIFO_LINE_SIZE, 594}; 595static const struct intel_watermark_params i830_a_wm_info = { 596 .fifo_size = I855GM_FIFO_SIZE, 597 .max_wm = I915_MAX_WM, 598 .default_wm = 1, 599 .guard_size = 2, 600 .cacheline_size = I830_FIFO_LINE_SIZE, 601}; 602static const struct intel_watermark_params i830_bc_wm_info = { 603 .fifo_size = I855GM_FIFO_SIZE, 604 .max_wm = I915_MAX_WM/2, 605 .default_wm = 1, 606 .guard_size = 2, 607 .cacheline_size = I830_FIFO_LINE_SIZE, 608}; 609static const struct intel_watermark_params i845_wm_info = { 610 .fifo_size = I830_FIFO_SIZE, 611 .max_wm = I915_MAX_WM, 612 .default_wm = 1, 613 .guard_size = 2, 614 .cacheline_size = I830_FIFO_LINE_SIZE, 615}; 616 617/** 618 * intel_wm_method1 - Method 1 / "small buffer" watermark formula 619 * @pixel_rate: Pipe pixel rate in kHz 620 * @cpp: Plane bytes per pixel 621 * @latency: Memory wakeup latency in 0.1us units 622 * 623 * Compute the watermark using the method 1 or "small buffer" 624 * formula. The caller may additonally add extra cachelines 625 * to account for TLB misses and clock crossings. 626 * 627 * This method is concerned with the short term drain rate 628 * of the FIFO, ie. it does not account for blanking periods 629 * which would effectively reduce the average drain rate across 630 * a longer period. The name "small" refers to the fact the 631 * FIFO is relatively small compared to the amount of data 632 * fetched. 633 * 634 * The FIFO level vs. 
 * time graph might look something like:
 *
 *   |\   |\
 *   | \  | \
 * __---__---__ (- plane active, _ blanking)
 * -> time
 *
 * or perhaps like this:
 *
 *   |\|\  |\|\
 * __----__----__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method1(unsigned int pixel_rate,
				     unsigned int cpp,
				     unsigned int latency)
{
	uint64_t ret;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 10000);

	return ret;
}

/**
 * intel_wm_method2 - Method 2 / "large buffer" watermark formula
 * @pixel_rate: Pipe pixel rate in kHz
 * @htotal: Pipe horizontal total
 * @width: Plane width in pixels
 * @cpp: Plane bytes per pixel
 * @latency: Memory wakeup latency in 0.1us units
 *
 * Compute the watermark using the method 2 or "large buffer"
 * formula. The caller may additionally add extra cachelines
 * to account for TLB misses and clock crossings.
 *
 * This method is concerned with the long term drain rate
 * of the FIFO, ie. it does account for blanking periods
 * which effectively reduce the average drain rate across
 * a longer period. The name "large" refers to the fact the
 * FIFO is relatively large compared to the amount of data
 * fetched.
 *
 * The FIFO level vs. time graph might look something like:
 *
 *    |\___       |\___
 *    |    \___   |    \___
 *    |        \  |        \
 * __ --__--__--__--__--__--__ (- plane active, _ blanking)
 * -> time
 *
 * Returns:
 * The watermark in bytes
 */
static unsigned int intel_wm_method2(unsigned int pixel_rate,
				     unsigned int htotal,
				     unsigned int width,
				     unsigned int cpp,
				     unsigned int latency)
{
	unsigned int ret;

	/*
	 * FIXME remove once all users are computing
	 * watermarks in the correct place.
	 */
	if (WARN_ON_ONCE(htotal == 0))
		htotal = 1;

	ret = (latency * pixel_rate) / (htotal * 10000);
	ret = (ret + 1) * width * cpp;

	return ret;
}

/**
 * intel_calculate_wm - calculate watermark level
 * @pixel_rate: pixel clock
 * @wm: chip FIFO params
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned int intel_calculate_wm(int pixel_rate,
				       const struct intel_watermark_params *wm,
				       int fifo_size, int cpp,
				       unsigned int latency_ns)
{
	int entries, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries = intel_wm_method1(pixel_rate, cpp,
				   latency_ns / 100);
	entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
		wm->guard_size;
	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);

	wm_size = fifo_size - entries;
	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Lets go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}

static bool is_disabling(int old, int new, int threshold)
{
	return old >= threshold && new < threshold;
}

static bool is_enabling(int old, int new, int threshold)
{
	return old < threshold && new >= threshold;
}

static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
{
	return dev_priv->wm.max_level + 1;
}

static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
				   const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);

	/* FIXME check the 'enable' instead */
	if (!crtc_state->base.active)
		return false;

	/*
	 * Treat cursor with fb as always visible since cursor updates
	 * can happen faster than the vrefresh rate, and the current
	 * watermark code doesn't handle that correctly. Cursor updates
	 * which set/clear the fb or change the cursor size are going
	 * to get throttled by intel_legacy_cursor_update() to work
	 * around this problem with the watermark code.
	 */
	if (plane->id == PLANE_CURSOR)
		return plane_state->base.fb != NULL;
	else
		return plane_state->base.visible;
}

static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct intel_crtc *unused_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode =
			&crtc->config->base.adjusted_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp = fb->format->cpp[0];
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					4, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
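 *
 * As an illustrative example with made-up numbers: for a 511 cacheline
 * FIFO (511 * 64 = 32704 bytes) and a 640 pixel wide plane at 2 bytes per
 * pixel, eight whole lines occupy 640 * 2 * 8 = 10240 bytes, so the
 * watermark below gets bumped by 32704 - 10240 = 22464 bytes; a 1920 pixel
 * wide plane at 4 bytes per pixel cannot fit eight lines and gets no
 * adjustment.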
 */
static int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}

static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
		   FW_WM(wm->sr.fbc, FBC_SR) |
		   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
		   FW_WM(wm->sr.cursor, CURSOR_SR) |
		   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
		   FW_WM(wm->hpll.plane, HPLL_SR));

	POSTING_READ(DSPFW1);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);

		I915_WRITE(VLV_DDL(pipe),
			   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
			   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
			   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	I915_WRITE(DSPHOWM, 0);
	I915_WRITE(DSPHOWM1, 0);
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
}

static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPCNTR[13] supposedly controls whether the
	 * primary plane can use the FIFO space otherwise
	 * reserved for the sprite plane. It's not 100% clear
	 * what the actual FIFO size is, but it looks like we
	 * can happily set both primary and sprite watermarks
	 * up to 127 cachelines. So that would seem to mean
	 * that either DSPCNTR[13] doesn't do anything, or that
	 * the total FIFO is >= 256 cachelines in size.
	 * Either way, we don't seem to have to worry about this
	 * repartitioning as the maximum watermark value the
	 * register can hold for each plane is lower than the
	 * minimum FIFO size.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}

static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}

static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state,
			       int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int clock, htotal, cpp, width, wm;
	int latency = dev_priv->wm.pri_latency[level] * 10;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	/*
	 * Not 100% sure which way ELK should go here as the
	 * spec only says CL/CTG should assume 32bpp and BW
	 * doesn't need to. But as these things followed the
	 * mobile vs. desktop lines on gen3 as well, let's
	 * assume ELK doesn't need this.
	 *
	 * The spec also fails to list such a restriction for
	 * the HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = 4;
	else
		cpp = plane_state->base.fb->format->cpp[0];

	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;

	if (plane->id == PLANE_CURSOR)
		width = plane_state->base.crtc_w;
	else
		width = drm_rect_width(&plane_state->base.dst);

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(clock, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(clock, cpp, latency);
	} else {
		int small, large;

		small = intel_wm_method1(clock, cpp, latency);
		large = intel_wm_method2(clock, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(int, wm, USHRT_MAX);
}

static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	bool dirty = false;

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	bool dirty = false;

	/* NORMAL level doesn't have an FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}

static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val);

static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * FBC wm is not mandatory as we
		 * can always just disable its use.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* mark watermarks as invalid */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

 out:
	if (dirty) {
		DRM_DEBUG_KMS("%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			      plane->base.name,
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			DRM_DEBUG_KMS("FBC watermarks: SR=%d, HPLL=%d\n",
				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				      crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}

static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}

static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	if (level > dev_priv->wm.max_level)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}

static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	const struct g4x_pipe_wm *raw;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int i, level;
	unsigned int dirty = 0;

	for_each_intel_plane_in_state(state, plane, plane_state, i) {
		const struct intel_plane_state *old_plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.crtc != &crtc->base &&
		    old_plane_state->base.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, plane_state))
			dirty |= BIT(plane->id);
	}

	if (!dirty)
		return 0;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw =
		&crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;

	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	wm_state->hpll_en = wm_state->cxsr;

	level++;

 out:
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* invalidate the higher levels */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * Determine if the FBC watermark(s) can be used. IF
	 * this isn't the case we prefer to disable the FBC
	 * watermark(s) rather than disable the SR/HPLL
	 * level(s) entirely.
	 */
	wm_state->fbc_en = level > G4X_WM_LEVEL_NORMAL;

	if (level >= G4X_WM_LEVEL_SR &&
	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
		wm_state->fbc_en = false;
	else if (level >= G4X_WM_LEVEL_HPLL &&
		 wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
		wm_state->fbc_en = false;

	return 0;
}

static int g4x_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct g4x_wm_state *intermediate = &crtc_state->wm.g4x.intermediate;
	const struct g4x_wm_state *optimal = &crtc_state->wm.g4x.optimal;
	const struct g4x_wm_state *active = &crtc->wm.active.g4x;
	enum plane_id plane_id;

	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!crtc_state->disable_cxsr;
	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
		!crtc_state->disable_cxsr;
	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm.plane[plane_id] =
			max(optimal->wm.plane[plane_id],
			    active->wm.plane[plane_id]);

		WARN_ON(intermediate->wm.plane[plane_id] >
			g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
	}

	intermediate->sr.plane = max(optimal->sr.plane,
				     active->sr.plane);
	intermediate->sr.cursor = max(optimal->sr.cursor,
				      active->sr.cursor);
	intermediate->sr.fbc = max(optimal->sr.fbc,
				   active->sr.fbc);

	intermediate->hpll.plane = max(optimal->hpll.plane,
				       active->hpll.plane);
	intermediate->hpll.cursor = max(optimal->hpll.cursor,
					active->hpll.cursor);
	intermediate->hpll.fbc = max(optimal->hpll.fbc,
				     active->hpll.fbc);

	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
		intermediate->cxsr);
	WARN_ON((intermediate->sr.plane >
		 g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
		 intermediate->sr.cursor >
		 g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
		intermediate->hpll_en);

	WARN_ON(intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
		intermediate->fbc_en
		&& intermediate->cxsr);
	WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
		intermediate->fbc_en && intermediate->hpll_en);

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		crtc_state->wm.need_postvbl_update = true;

	return 0;
}

static void g4x_merge_wm(struct drm_i915_private *dev_priv,
			 struct g4x_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->cxsr = true;
	wm->hpll_en = true;
	wm->fbc_en = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;
		if (!wm_state->hpll_en)
			wm->hpll_en = false;
		if (!wm_state->fbc_en)
			wm->fbc_en = false;

		num_active_crtcs++;
	}

	if (num_active_crtcs != 1) {
		wm->cxsr = false;
		wm->hpll_en = false;
		wm->fbc_en = false;
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm;
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr;
		if (crtc->active && wm->hpll_en)
			wm->hpll = wm_state->hpll;
	}
}

static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *old_wm = &dev_priv->wm.g4x;
	struct g4x_wm_values new_wm = {};

	g4x_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	g4x_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	*old_wm = new_wm;
}

static void g4x_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	mutex_lock(&dev_priv->wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

static void g4x_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->wm.wm_mutex);
	intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->wm.wm_mutex);
}

/* latency must be in 0.1us units.
 */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}

static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
				     int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->base.adjusted_mode;
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->base.fb->format->cpp[0];
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	width = crtc_state->pipe_src_w;

	if (plane->id == PLANE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
	return (active_planes & (BIT(PLANE_SPRITE0) |
				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}

static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight32(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When enabling sprite0 after sprite1 has already been enabled
	 * we tend to get an underrun unless sprite0 already has some
	 * FIFO space allocated. Hence we always allocate at least one
	 * cacheline for sprite0 whenever sprite1 is enabled.
	 *
	 * All other plane enable sequences appear immune to this problem.
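	 *
	 * To illustrate the proportional split done below (made-up rates,
	 * not measured ones): with primary = 200, sprite0 = 100, sprite1
	 * inactive and no sprite0 extra, total_rate = 300 and the 511
	 * entry FIFO splits into 511 * 200 / 300 = 340 entries for the
	 * primary and 511 * 100 / 300 = 170 for sprite0; the single
	 * leftover entry is then handed out by the "spread the remainder"
	 * loop further down.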
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	if (total_rate == 0)
		total_rate = 1;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		WARN_ON(fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}

/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
			       struct vlv_wm_state *wm_state, int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	for (; level < intel_wm_num_levels(dev_priv); level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm[level].plane[plane_id] = USHRT_MAX;

		wm_state->sr[level].cursor = USHRT_MAX;
		wm_state->sr[level].plane = USHRT_MAX;
	}
}

static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}

/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
	int num_levels = intel_wm_num_levels(dev_priv);
	bool dirty = false;

	for (; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
	enum plane_id plane_id = plane->id;
	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
	int level;
	bool dirty = false;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

out:
	if (dirty)
		DRM_DEBUG_KMS("%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
			      plane->base.name,
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			      crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}

static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[level];
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;

	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}

static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_atomic_state *state =
		to_intel_atomic_state(crtc_state->base.state);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int num_active_planes = hweight32(crtc_state->active_planes &
					  ~BIT(PLANE_CURSOR));
	bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	enum plane_id plane_id;
	int level, ret, i;
	unsigned int dirty = 0;

	for_each_intel_plane_in_state(state, plane, plane_state, i) {
		const struct intel_plane_state *old_plane_state =
1822 to_intel_plane_state(plane->base.state); 1823 1824 if (plane_state->base.crtc != &crtc->base && 1825 old_plane_state->base.crtc != &crtc->base) 1826 continue; 1827 1828 if (vlv_raw_plane_wm_compute(crtc_state, plane_state)) 1829 dirty |= BIT(plane->id); 1830 } 1831 1832 /* 1833 * DSPARB registers may have been reset due to the 1834 * power well being turned off. Make sure we restore 1835 * them to a consistent state even if no primary/sprite 1836 * planes are initially active. 1837 */ 1838 if (needs_modeset) 1839 crtc_state->fifo_changed = true; 1840 1841 if (!dirty) 1842 return 0; 1843 1844 /* cursor changes don't warrant a FIFO recompute */ 1845 if (dirty & ~BIT(PLANE_CURSOR)) { 1846 const struct intel_crtc_state *old_crtc_state = 1847 to_intel_crtc_state(crtc->base.state); 1848 const struct vlv_fifo_state *old_fifo_state = 1849 &old_crtc_state->wm.vlv.fifo_state; 1850 1851 ret = vlv_compute_fifo(crtc_state); 1852 if (ret) 1853 return ret; 1854 1855 if (needs_modeset || 1856 memcmp(old_fifo_state, fifo_state, 1857 sizeof(*fifo_state)) != 0) 1858 crtc_state->fifo_changed = true; 1859 } 1860 1861 /* initially allow all levels */ 1862 wm_state->num_levels = intel_wm_num_levels(dev_priv); 1863 /* 1864 * Note that enabling cxsr with no primary/sprite planes 1865 * enabled can wedge the pipe. Hence we only allow cxsr 1866 * with exactly one enabled primary/sprite plane. 1867 */ 1868 wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1; 1869 1870 for (level = 0; level < wm_state->num_levels; level++) { 1871 const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; 1872 const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1; 1873 1874 if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) 1875 break; 1876 1877 for_each_plane_id_on_crtc(crtc, plane_id) { 1878 wm_state->wm[level].plane[plane_id] = 1879 vlv_invert_wm_value(raw->plane[plane_id], 1880 fifo_state->plane[plane_id]); 1881 } 1882 1883 wm_state->sr[level].plane = 1884 vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY], 1885 raw->plane[PLANE_SPRITE0], 1886 raw->plane[PLANE_SPRITE1]), 1887 sr_fifo_size); 1888 1889 wm_state->sr[level].cursor = 1890 vlv_invert_wm_value(raw->plane[PLANE_CURSOR], 1891 63); 1892 } 1893 1894 if (level == 0) 1895 return -EINVAL; 1896 1897 /* limit to only levels we can actually handle */ 1898 wm_state->num_levels = level; 1899 1900 /* invalidate the higher levels */ 1901 vlv_invalidate_wms(crtc, wm_state, level); 1902 1903 return 0; 1904} 1905 1906#define VLV_FIFO(plane, value) \ 1907 (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) 1908 1909static void vlv_atomic_update_fifo(struct intel_atomic_state *state, 1910 struct intel_crtc_state *crtc_state) 1911{ 1912 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 1913 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1914 const struct vlv_fifo_state *fifo_state = 1915 &crtc_state->wm.vlv.fifo_state; 1916 int sprite0_start, sprite1_start, fifo_size; 1917 1918 if (!crtc_state->fifo_changed) 1919 return; 1920 1921 sprite0_start = fifo_state->plane[PLANE_PRIMARY]; 1922 sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start; 1923 fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start; 1924 1925 WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63); 1926 WARN_ON(fifo_size != 511); 1927 1928 trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size); 1929 1930 /* 1931 * uncore.lock serves a double purpose here. 
It allows us to 1932 * use the less expensive I915_{READ,WRITE}_FW() functions, and 1933 * it protects the DSPARB registers from getting clobbered by 1934 * parallel updates from multiple pipes. 1935 * 1936 * intel_pipe_update_start() has already disabled interrupts 1937 * for us, so a plain spin_lock() is sufficient here. 1938 */ 1939 spin_lock(&dev_priv->uncore.lock); 1940 1941 switch (crtc->pipe) { 1942 uint32_t dsparb, dsparb2, dsparb3; 1943 case PIPE_A: 1944 dsparb = I915_READ_FW(DSPARB); 1945 dsparb2 = I915_READ_FW(DSPARB2); 1946 1947 dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | 1948 VLV_FIFO(SPRITEB, 0xff)); 1949 dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | 1950 VLV_FIFO(SPRITEB, sprite1_start)); 1951 1952 dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | 1953 VLV_FIFO(SPRITEB_HI, 0x1)); 1954 dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | 1955 VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); 1956 1957 I915_WRITE_FW(DSPARB, dsparb); 1958 I915_WRITE_FW(DSPARB2, dsparb2); 1959 break; 1960 case PIPE_B: 1961 dsparb = I915_READ_FW(DSPARB); 1962 dsparb2 = I915_READ_FW(DSPARB2); 1963 1964 dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | 1965 VLV_FIFO(SPRITED, 0xff)); 1966 dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | 1967 VLV_FIFO(SPRITED, sprite1_start)); 1968 1969 dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | 1970 VLV_FIFO(SPRITED_HI, 0xff)); 1971 dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | 1972 VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); 1973 1974 I915_WRITE_FW(DSPARB, dsparb); 1975 I915_WRITE_FW(DSPARB2, dsparb2); 1976 break; 1977 case PIPE_C: 1978 dsparb3 = I915_READ_FW(DSPARB3); 1979 dsparb2 = I915_READ_FW(DSPARB2); 1980 1981 dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | 1982 VLV_FIFO(SPRITEF, 0xff)); 1983 dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | 1984 VLV_FIFO(SPRITEF, sprite1_start)); 1985 1986 dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | 1987 VLV_FIFO(SPRITEF_HI, 0xff)); 1988 dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | 1989 VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); 1990 1991 I915_WRITE_FW(DSPARB3, dsparb3); 1992 I915_WRITE_FW(DSPARB2, dsparb2); 1993 break; 1994 default: 1995 break; 1996 } 1997 1998 POSTING_READ_FW(DSPARB); 1999 2000 spin_unlock(&dev_priv->uncore.lock); 2001} 2002 2003#undef VLV_FIFO 2004 2005static int vlv_compute_intermediate_wm(struct drm_device *dev, 2006 struct intel_crtc *crtc, 2007 struct intel_crtc_state *crtc_state) 2008{ 2009 struct vlv_wm_state *intermediate = &crtc_state->wm.vlv.intermediate; 2010 const struct vlv_wm_state *optimal = &crtc_state->wm.vlv.optimal; 2011 const struct vlv_wm_state *active = &crtc->wm.active.vlv; 2012 int level; 2013 2014 intermediate->num_levels = min(optimal->num_levels, active->num_levels); 2015 intermediate->cxsr = optimal->cxsr && active->cxsr && 2016 !crtc_state->disable_cxsr; 2017 2018 for (level = 0; level < intermediate->num_levels; level++) { 2019 enum plane_id plane_id; 2020 2021 for_each_plane_id_on_crtc(crtc, plane_id) { 2022 intermediate->wm[level].plane[plane_id] = 2023 min(optimal->wm[level].plane[plane_id], 2024 active->wm[level].plane[plane_id]); 2025 } 2026 2027 intermediate->sr[level].plane = min(optimal->sr[level].plane, 2028 active->sr[level].plane); 2029 intermediate->sr[level].cursor = min(optimal->sr[level].cursor, 2030 active->sr[level].cursor); 2031 } 2032 2033 vlv_invalidate_wms(crtc, intermediate, level); 2034 2035 /* 2036 * If our intermediate WM are identical to the final WM, then we can 2037 * omit the post-vblank programming; only update if it's different. 
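 * The intermediate values computed above are the per-level minimum of
 * the currently active and the new optimal watermarks;
 * vlv_optimize_watermarks() then programs the optimal values after the
 * vblank only when this flag is set.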
2038 */ 2039 if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) 2040 crtc_state->wm.need_postvbl_update = true; 2041 2042 return 0; 2043} 2044 2045static void vlv_merge_wm(struct drm_i915_private *dev_priv, 2046 struct vlv_wm_values *wm) 2047{ 2048 struct intel_crtc *crtc; 2049 int num_active_crtcs = 0; 2050 2051 wm->level = dev_priv->wm.max_level; 2052 wm->cxsr = true; 2053 2054 for_each_intel_crtc(&dev_priv->drm, crtc) { 2055 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; 2056 2057 if (!crtc->active) 2058 continue; 2059 2060 if (!wm_state->cxsr) 2061 wm->cxsr = false; 2062 2063 num_active_crtcs++; 2064 wm->level = min_t(int, wm->level, wm_state->num_levels - 1); 2065 } 2066 2067 if (num_active_crtcs != 1) 2068 wm->cxsr = false; 2069 2070 if (num_active_crtcs > 1) 2071 wm->level = VLV_WM_LEVEL_PM2; 2072 2073 for_each_intel_crtc(&dev_priv->drm, crtc) { 2074 const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; 2075 enum pipe pipe = crtc->pipe; 2076 2077 wm->pipe[pipe] = wm_state->wm[wm->level]; 2078 if (crtc->active && wm->cxsr) 2079 wm->sr = wm_state->sr[wm->level]; 2080 2081 wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2; 2082 wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2; 2083 wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2; 2084 wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2; 2085 } 2086} 2087 2088static void vlv_program_watermarks(struct drm_i915_private *dev_priv) 2089{ 2090 struct vlv_wm_values *old_wm = &dev_priv->wm.vlv; 2091 struct vlv_wm_values new_wm = {}; 2092 2093 vlv_merge_wm(dev_priv, &new_wm); 2094 2095 if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) 2096 return; 2097 2098 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) 2099 chv_set_memory_dvfs(dev_priv, false); 2100 2101 if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) 2102 chv_set_memory_pm5(dev_priv, false); 2103 2104 if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) 2105 _intel_set_memory_cxsr(dev_priv, false); 2106 2107 vlv_write_wm_values(dev_priv, &new_wm); 2108 2109 if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) 2110 _intel_set_memory_cxsr(dev_priv, true); 2111 2112 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) 2113 chv_set_memory_pm5(dev_priv, true); 2114 2115 if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) 2116 chv_set_memory_dvfs(dev_priv, true); 2117 2118 *old_wm = new_wm; 2119} 2120 2121static void vlv_initial_watermarks(struct intel_atomic_state *state, 2122 struct intel_crtc_state *crtc_state) 2123{ 2124 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 2125 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 2126 2127 mutex_lock(&dev_priv->wm.wm_mutex); 2128 crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; 2129 vlv_program_watermarks(dev_priv); 2130 mutex_unlock(&dev_priv->wm.wm_mutex); 2131} 2132 2133static void vlv_optimize_watermarks(struct intel_atomic_state *state, 2134 struct intel_crtc_state *crtc_state) 2135{ 2136 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 2137 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 2138 2139 if (!crtc_state->wm.need_postvbl_update) 2140 return; 2141 2142 mutex_lock(&dev_priv->wm.wm_mutex); 2143 intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; 2144 vlv_program_watermarks(dev_priv); 2145 mutex_unlock(&dev_priv->wm.wm_mutex); 2146} 2147 2148static void i965_update_wm(struct intel_crtc *unused_crtc) 2149{ 2150 struct 
drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); 2151 struct intel_crtc *crtc; 2152 int srwm = 1; 2153 int cursor_sr = 16; 2154 bool cxsr_enabled; 2155 2156 /* Calc sr entries for one plane configs */ 2157 crtc = single_enabled_crtc(dev_priv); 2158 if (crtc) { 2159 /* self-refresh has much higher latency */ 2160 static const int sr_latency_ns = 12000; 2161 const struct drm_display_mode *adjusted_mode = 2162 &crtc->config->base.adjusted_mode; 2163 const struct drm_framebuffer *fb = 2164 crtc->base.primary->state->fb; 2165 int clock = adjusted_mode->crtc_clock; 2166 int htotal = adjusted_mode->crtc_htotal; 2167 int hdisplay = crtc->config->pipe_src_w; 2168 int cpp = fb->format->cpp[0]; 2169 int entries; 2170 2171 entries = intel_wm_method2(clock, htotal, 2172 hdisplay, cpp, sr_latency_ns / 100); 2173 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); 2174 srwm = I965_FIFO_SIZE - entries; 2175 if (srwm < 0) 2176 srwm = 1; 2177 srwm &= 0x1ff; 2178 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", 2179 entries, srwm); 2180 2181 entries = intel_wm_method2(clock, htotal, 2182 crtc->base.cursor->state->crtc_w, 4, 2183 sr_latency_ns / 100); 2184 entries = DIV_ROUND_UP(entries, 2185 i965_cursor_wm_info.cacheline_size) + 2186 i965_cursor_wm_info.guard_size; 2187 2188 cursor_sr = i965_cursor_wm_info.fifo_size - entries; 2189 if (cursor_sr > i965_cursor_wm_info.max_wm) 2190 cursor_sr = i965_cursor_wm_info.max_wm; 2191 2192 DRM_DEBUG_KMS("self-refresh watermark: display plane %d " 2193 "cursor %d\n", srwm, cursor_sr); 2194 2195 cxsr_enabled = true; 2196 } else { 2197 cxsr_enabled = false; 2198 /* Turn off self refresh if both pipes are enabled */ 2199 intel_set_memory_cxsr(dev_priv, false); 2200 } 2201 2202 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 2203 srwm); 2204 2205 /* 965 has limitations... 
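 * only the self-refresh and cursor self-refresh watermarks are computed
 * dynamically here; the plane and cursor FIFO watermarks below are
 * simply programmed to the fixed value 8, matching the debug message
 * above.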
*/ 2206 I915_WRITE(DSPFW1, FW_WM(srwm, SR) | 2207 FW_WM(8, CURSORB) | 2208 FW_WM(8, PLANEB) | 2209 FW_WM(8, PLANEA)); 2210 I915_WRITE(DSPFW2, FW_WM(8, CURSORA) | 2211 FW_WM(8, PLANEC_OLD)); 2212 /* update cursor SR watermark */ 2213 I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); 2214 2215 if (cxsr_enabled) 2216 intel_set_memory_cxsr(dev_priv, true); 2217} 2218 2219#undef FW_WM 2220 2221static void i9xx_update_wm(struct intel_crtc *unused_crtc) 2222{ 2223 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); 2224 const struct intel_watermark_params *wm_info; 2225 uint32_t fwater_lo; 2226 uint32_t fwater_hi; 2227 int cwm, srwm = 1; 2228 int fifo_size; 2229 int planea_wm, planeb_wm; 2230 struct intel_crtc *crtc, *enabled = NULL; 2231 2232 if (IS_I945GM(dev_priv)) 2233 wm_info = &i945_wm_info; 2234 else if (!IS_GEN2(dev_priv)) 2235 wm_info = &i915_wm_info; 2236 else 2237 wm_info = &i830_a_wm_info; 2238 2239 fifo_size = dev_priv->display.get_fifo_size(dev_priv, 0); 2240 crtc = intel_get_crtc_for_plane(dev_priv, 0); 2241 if (intel_crtc_active(crtc)) { 2242 const struct drm_display_mode *adjusted_mode = 2243 &crtc->config->base.adjusted_mode; 2244 const struct drm_framebuffer *fb = 2245 crtc->base.primary->state->fb; 2246 int cpp; 2247 2248 if (IS_GEN2(dev_priv)) 2249 cpp = 4; 2250 else 2251 cpp = fb->format->cpp[0]; 2252 2253 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 2254 wm_info, fifo_size, cpp, 2255 pessimal_latency_ns); 2256 enabled = crtc; 2257 } else { 2258 planea_wm = fifo_size - wm_info->guard_size; 2259 if (planea_wm > (long)wm_info->max_wm) 2260 planea_wm = wm_info->max_wm; 2261 } 2262 2263 if (IS_GEN2(dev_priv)) 2264 wm_info = &i830_bc_wm_info; 2265 2266 fifo_size = dev_priv->display.get_fifo_size(dev_priv, 1); 2267 crtc = intel_get_crtc_for_plane(dev_priv, 1); 2268 if (intel_crtc_active(crtc)) { 2269 const struct drm_display_mode *adjusted_mode = 2270 &crtc->config->base.adjusted_mode; 2271 const struct drm_framebuffer *fb = 2272 crtc->base.primary->state->fb; 2273 int cpp; 2274 2275 if (IS_GEN2(dev_priv)) 2276 cpp = 4; 2277 else 2278 cpp = fb->format->cpp[0]; 2279 2280 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 2281 wm_info, fifo_size, cpp, 2282 pessimal_latency_ns); 2283 if (enabled == NULL) 2284 enabled = crtc; 2285 else 2286 enabled = NULL; 2287 } else { 2288 planeb_wm = fifo_size - wm_info->guard_size; 2289 if (planeb_wm > (long)wm_info->max_wm) 2290 planeb_wm = wm_info->max_wm; 2291 } 2292 2293 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 2294 2295 if (IS_I915GM(dev_priv) && enabled) { 2296 struct drm_i915_gem_object *obj; 2297 2298 obj = intel_fb_obj(enabled->base.primary->state->fb); 2299 2300 /* self-refresh seems busted with untiled */ 2301 if (!i915_gem_object_is_tiled(obj)) 2302 enabled = NULL; 2303 } 2304 2305 /* 2306 * Overlay gets an aggressive default since video jitter is bad. 2307 */ 2308 cwm = 2; 2309 2310 /* Play safe and disable self-refresh before adjusting watermarks. 
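 * It is re-enabled at the end of this function, but only if we still
 * have a single enabled CRTC suitable for self-refresh.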
*/ 2311 intel_set_memory_cxsr(dev_priv, false); 2312 2313 /* Calc sr entries for one plane configs */ 2314 if (HAS_FW_BLC(dev_priv) && enabled) { 2315 /* self-refresh has much higher latency */ 2316 static const int sr_latency_ns = 6000; 2317 const struct drm_display_mode *adjusted_mode = 2318 &enabled->config->base.adjusted_mode; 2319 const struct drm_framebuffer *fb = 2320 enabled->base.primary->state->fb; 2321 int clock = adjusted_mode->crtc_clock; 2322 int htotal = adjusted_mode->crtc_htotal; 2323 int hdisplay = enabled->config->pipe_src_w; 2324 int cpp; 2325 int entries; 2326 2327 if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) 2328 cpp = 4; 2329 else 2330 cpp = fb->format->cpp[0]; 2331 2332 entries = intel_wm_method2(clock, htotal, hdisplay, cpp, 2333 sr_latency_ns / 100); 2334 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); 2335 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); 2336 srwm = wm_info->fifo_size - entries; 2337 if (srwm < 0) 2338 srwm = 1; 2339 2340 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) 2341 I915_WRITE(FW_BLC_SELF, 2342 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); 2343 else 2344 I915_WRITE(FW_BLC_SELF, srwm & 0x3f); 2345 } 2346 2347 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2348 planea_wm, planeb_wm, cwm, srwm); 2349 2350 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); 2351 fwater_hi = (cwm & 0x1f); 2352 2353 /* Set request length to 8 cachelines per fetch */ 2354 fwater_lo = fwater_lo | (1 << 24) | (1 << 8); 2355 fwater_hi = fwater_hi | (1 << 8); 2356 2357 I915_WRITE(FW_BLC, fwater_lo); 2358 I915_WRITE(FW_BLC2, fwater_hi); 2359 2360 if (enabled) 2361 intel_set_memory_cxsr(dev_priv, true); 2362} 2363 2364static void i845_update_wm(struct intel_crtc *unused_crtc) 2365{ 2366 struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev); 2367 struct intel_crtc *crtc; 2368 const struct drm_display_mode *adjusted_mode; 2369 uint32_t fwater_lo; 2370 int planea_wm; 2371 2372 crtc = single_enabled_crtc(dev_priv); 2373 if (crtc == NULL) 2374 return; 2375 2376 adjusted_mode = &crtc->config->base.adjusted_mode; 2377 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 2378 &i845_wm_info, 2379 dev_priv->display.get_fifo_size(dev_priv, 0), 2380 4, pessimal_latency_ns); 2381 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 2382 fwater_lo |= (3<<8) | planea_wm; 2383 2384 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); 2385 2386 I915_WRITE(FW_BLC, fwater_lo); 2387} 2388 2389/* latency must be in 0.1us units. */ 2390static unsigned int ilk_wm_method1(unsigned int pixel_rate, 2391 unsigned int cpp, 2392 unsigned int latency) 2393{ 2394 unsigned int ret; 2395 2396 ret = intel_wm_method1(pixel_rate, cpp, latency); 2397 ret = DIV_ROUND_UP(ret, 64) + 2; 2398 2399 return ret; 2400} 2401 2402/* latency must be in 0.1us units. */ 2403static unsigned int ilk_wm_method2(unsigned int pixel_rate, 2404 unsigned int htotal, 2405 unsigned int width, 2406 unsigned int cpp, 2407 unsigned int latency) 2408{ 2409 unsigned int ret; 2410 2411 ret = intel_wm_method2(pixel_rate, htotal, 2412 width, cpp, latency); 2413 ret = DIV_ROUND_UP(ret, 64) + 2; 2414 2415 return ret; 2416} 2417 2418static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, 2419 uint8_t cpp) 2420{ 2421 /* 2422 * Neither of these should be possible since this function shouldn't be 2423 * called if the CRTC is off or the plane is invisible. But let's be 2424 * extra paranoid to avoid a potential divide-by-zero if we screw up 2425 * elsewhere in the driver. 
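 *
 * Purely illustrative example of the formula below: pri_val = 64,
 * horiz_pixels = 1920 and cpp = 4 give
 * DIV_ROUND_UP(64 * 64, 1920 * 4) + 2 = 1 + 2 = 3 FBC watermark lines.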
2426 */ 2427 if (WARN_ON(!cpp)) 2428 return 0; 2429 if (WARN_ON(!horiz_pixels)) 2430 return 0; 2431 2432 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; 2433} 2434 2435struct ilk_wm_maximums { 2436 uint16_t pri; 2437 uint16_t spr; 2438 uint16_t cur; 2439 uint16_t fbc; 2440}; 2441 2442/* 2443 * For both WM_PIPE and WM_LP. 2444 * mem_value must be in 0.1us units. 2445 */ 2446static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, 2447 const struct intel_plane_state *pstate, 2448 uint32_t mem_value, 2449 bool is_lp) 2450{ 2451 uint32_t method1, method2; 2452 int cpp; 2453 2454 if (!intel_wm_plane_visible(cstate, pstate)) 2455 return 0; 2456 2457 cpp = pstate->base.fb->format->cpp[0]; 2458 2459 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value); 2460 2461 if (!is_lp) 2462 return method1; 2463 2464 method2 = ilk_wm_method2(cstate->pixel_rate, 2465 cstate->base.adjusted_mode.crtc_htotal, 2466 drm_rect_width(&pstate->base.dst), 2467 cpp, mem_value); 2468 2469 return min(method1, method2); 2470} 2471 2472/* 2473 * For both WM_PIPE and WM_LP. 2474 * mem_value must be in 0.1us units. 2475 */ 2476static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, 2477 const struct intel_plane_state *pstate, 2478 uint32_t mem_value) 2479{ 2480 uint32_t method1, method2; 2481 int cpp; 2482 2483 if (!intel_wm_plane_visible(cstate, pstate)) 2484 return 0; 2485 2486 cpp = pstate->base.fb->format->cpp[0]; 2487 2488 method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value); 2489 method2 = ilk_wm_method2(cstate->pixel_rate, 2490 cstate->base.adjusted_mode.crtc_htotal, 2491 drm_rect_width(&pstate->base.dst), 2492 cpp, mem_value); 2493 return min(method1, method2); 2494} 2495 2496/* 2497 * For both WM_PIPE and WM_LP. 2498 * mem_value must be in 0.1us units. 2499 */ 2500static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, 2501 const struct intel_plane_state *pstate, 2502 uint32_t mem_value) 2503{ 2504 int cpp; 2505 2506 if (!intel_wm_plane_visible(cstate, pstate)) 2507 return 0; 2508 2509 cpp = pstate->base.fb->format->cpp[0]; 2510 2511 return ilk_wm_method2(cstate->pixel_rate, 2512 cstate->base.adjusted_mode.crtc_htotal, 2513 pstate->base.crtc_w, cpp, mem_value); 2514} 2515 2516/* Only for WM_LP. */ 2517static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, 2518 const struct intel_plane_state *pstate, 2519 uint32_t pri_val) 2520{ 2521 int cpp; 2522 2523 if (!intel_wm_plane_visible(cstate, pstate)) 2524 return 0; 2525 2526 cpp = pstate->base.fb->format->cpp[0]; 2527 2528 return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp); 2529} 2530 2531static unsigned int 2532ilk_display_fifo_size(const struct drm_i915_private *dev_priv) 2533{ 2534 if (INTEL_GEN(dev_priv) >= 8) 2535 return 3072; 2536 else if (INTEL_GEN(dev_priv) >= 7) 2537 return 768; 2538 else 2539 return 512; 2540} 2541 2542static unsigned int 2543ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, 2544 int level, bool is_sprite) 2545{ 2546 if (INTEL_GEN(dev_priv) >= 8) 2547 /* BDW primary/sprite plane watermarks */ 2548 return level == 0 ? 255 : 2047; 2549 else if (INTEL_GEN(dev_priv) >= 7) 2550 /* IVB/HSW primary/sprite plane watermarks */ 2551 return level == 0 ? 127 : 1023; 2552 else if (!is_sprite) 2553 /* ILK/SNB primary plane watermarks */ 2554 return level == 0 ? 127 : 511; 2555 else 2556 /* ILK/SNB sprite plane watermarks */ 2557 return level == 0 ? 
63 : 255; 2558} 2559 2560static unsigned int 2561ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) 2562{ 2563 if (INTEL_GEN(dev_priv) >= 7) 2564 return level == 0 ? 63 : 255; 2565 else 2566 return level == 0 ? 31 : 63; 2567} 2568 2569static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) 2570{ 2571 if (INTEL_GEN(dev_priv) >= 8) 2572 return 31; 2573 else 2574 return 15; 2575} 2576 2577/* Calculate the maximum primary/sprite plane watermark */ 2578static unsigned int ilk_plane_wm_max(const struct drm_device *dev, 2579 int level, 2580 const struct intel_wm_config *config, 2581 enum intel_ddb_partitioning ddb_partitioning, 2582 bool is_sprite) 2583{ 2584 struct drm_i915_private *dev_priv = to_i915(dev); 2585 unsigned int fifo_size = ilk_display_fifo_size(dev_priv); 2586 2587 /* if sprites aren't enabled, sprites get nothing */ 2588 if (is_sprite && !config->sprites_enabled) 2589 return 0; 2590 2591 /* HSW allows LP1+ watermarks even with multiple pipes */ 2592 if (level == 0 || config->num_pipes_active > 1) { 2593 fifo_size /= INTEL_INFO(dev_priv)->num_pipes; 2594 2595 /* 2596 * For some reason the non self refresh 2597 * FIFO size is only half of the self 2598 * refresh FIFO size on ILK/SNB. 2599 */ 2600 if (INTEL_GEN(dev_priv) <= 6) 2601 fifo_size /= 2; 2602 } 2603 2604 if (config->sprites_enabled) { 2605 /* level 0 is always calculated with 1:1 split */ 2606 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { 2607 if (is_sprite) 2608 fifo_size *= 5; 2609 fifo_size /= 6; 2610 } else { 2611 fifo_size /= 2; 2612 } 2613 } 2614 2615 /* clamp to max that the registers can hold */ 2616 return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); 2617} 2618 2619/* Calculate the maximum cursor plane watermark */ 2620static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, 2621 int level, 2622 const struct intel_wm_config *config) 2623{ 2624 /* HSW LP1+ watermarks w/ multiple pipes */ 2625 if (level > 0 && config->num_pipes_active > 1) 2626 return 64; 2627 2628 /* otherwise just report max that registers can hold */ 2629 return ilk_cursor_wm_reg_max(to_i915(dev), level); 2630} 2631 2632static void ilk_compute_wm_maximums(const struct drm_device *dev, 2633 int level, 2634 const struct intel_wm_config *config, 2635 enum intel_ddb_partitioning ddb_partitioning, 2636 struct ilk_wm_maximums *max) 2637{ 2638 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 2639 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 2640 max->cur = ilk_cursor_wm_max(dev, level, config); 2641 max->fbc = ilk_fbc_wm_reg_max(to_i915(dev)); 2642} 2643 2644static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, 2645 int level, 2646 struct ilk_wm_maximums *max) 2647{ 2648 max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); 2649 max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); 2650 max->cur = ilk_cursor_wm_reg_max(dev_priv, level); 2651 max->fbc = ilk_fbc_wm_reg_max(dev_priv); 2652} 2653 2654static bool ilk_validate_wm_level(int level, 2655 const struct ilk_wm_maximums *max, 2656 struct intel_wm_level *result) 2657{ 2658 bool ret; 2659 2660 /* already determined to be invalid? 
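 * (result->enable can only be false on entry if an earlier step, e.g.
 * the multi-pipe merge in ilk_merge_wm_level(), already rejected this
 * level.)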
*/ 2661 if (!result->enable) 2662 return false; 2663 2664 result->enable = result->pri_val <= max->pri && 2665 result->spr_val <= max->spr && 2666 result->cur_val <= max->cur; 2667 2668 ret = result->enable; 2669 2670 /* 2671 * HACK until we can pre-compute everything, 2672 * and thus fail gracefully if LP0 watermarks 2673 * are exceeded... 2674 */ 2675 if (level == 0 && !result->enable) { 2676 if (result->pri_val > max->pri) 2677 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", 2678 level, result->pri_val, max->pri); 2679 if (result->spr_val > max->spr) 2680 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", 2681 level, result->spr_val, max->spr); 2682 if (result->cur_val > max->cur) 2683 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", 2684 level, result->cur_val, max->cur); 2685 2686 result->pri_val = min_t(uint32_t, result->pri_val, max->pri); 2687 result->spr_val = min_t(uint32_t, result->spr_val, max->spr); 2688 result->cur_val = min_t(uint32_t, result->cur_val, max->cur); 2689 result->enable = true; 2690 } 2691 2692 return ret; 2693} 2694 2695static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, 2696 const struct intel_crtc *intel_crtc, 2697 int level, 2698 struct intel_crtc_state *cstate, 2699 struct intel_plane_state *pristate, 2700 struct intel_plane_state *sprstate, 2701 struct intel_plane_state *curstate, 2702 struct intel_wm_level *result) 2703{ 2704 uint16_t pri_latency = dev_priv->wm.pri_latency[level]; 2705 uint16_t spr_latency = dev_priv->wm.spr_latency[level]; 2706 uint16_t cur_latency = dev_priv->wm.cur_latency[level]; 2707 2708 /* WM1+ latency values stored in 0.5us units */ 2709 if (level > 0) { 2710 pri_latency *= 5; 2711 spr_latency *= 5; 2712 cur_latency *= 5; 2713 } 2714 2715 if (pristate) { 2716 result->pri_val = ilk_compute_pri_wm(cstate, pristate, 2717 pri_latency, level); 2718 result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val); 2719 } 2720 2721 if (sprstate) 2722 result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency); 2723 2724 if (curstate) 2725 result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency); 2726 2727 result->enable = true; 2728} 2729 2730static uint32_t 2731hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) 2732{ 2733 const struct intel_atomic_state *intel_state = 2734 to_intel_atomic_state(cstate->base.state); 2735 const struct drm_display_mode *adjusted_mode = 2736 &cstate->base.adjusted_mode; 2737 u32 linetime, ips_linetime; 2738 2739 if (!cstate->base.active) 2740 return 0; 2741 if (WARN_ON(adjusted_mode->crtc_clock == 0)) 2742 return 0; 2743 if (WARN_ON(intel_state->cdclk.logical.cdclk == 0)) 2744 return 0; 2745 2746 /* The WM are computed with base on how long it takes to fill a single 2747 * row at the given clock rate, multiplied by 8. 
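 *
 * With purely illustrative numbers: crtc_htotal = 2200 and
 * crtc_clock = 148500 (kHz) give
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, i.e. roughly
 * 14.9 us expressed in 1/8 us units.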
2748 * */ 2749 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2750 adjusted_mode->crtc_clock); 2751 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2752 intel_state->cdclk.logical.cdclk); 2753 2754 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2755 PIPE_WM_LINETIME_TIME(linetime); 2756} 2757 2758static void intel_read_wm_latency(struct drm_i915_private *dev_priv, 2759 uint16_t wm[8]) 2760{ 2761 if (IS_GEN9(dev_priv)) { 2762 uint32_t val; 2763 int ret, i; 2764 int level, max_level = ilk_wm_max_level(dev_priv); 2765 2766 /* read the first set of memory latencies[0:3] */ 2767 val = 0; /* data0 to be programmed to 0 for first set */ 2768 mutex_lock(&dev_priv->rps.hw_lock); 2769 ret = sandybridge_pcode_read(dev_priv, 2770 GEN9_PCODE_READ_MEM_LATENCY, 2771 &val); 2772 mutex_unlock(&dev_priv->rps.hw_lock); 2773 2774 if (ret) { 2775 DRM_ERROR("SKL Mailbox read error = %d\n", ret); 2776 return; 2777 } 2778 2779 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK; 2780 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & 2781 GEN9_MEM_LATENCY_LEVEL_MASK; 2782 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & 2783 GEN9_MEM_LATENCY_LEVEL_MASK; 2784 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & 2785 GEN9_MEM_LATENCY_LEVEL_MASK; 2786 2787 /* read the second set of memory latencies[4:7] */ 2788 val = 1; /* data0 to be programmed to 1 for second set */ 2789 mutex_lock(&dev_priv->rps.hw_lock); 2790 ret = sandybridge_pcode_read(dev_priv, 2791 GEN9_PCODE_READ_MEM_LATENCY, 2792 &val); 2793 mutex_unlock(&dev_priv->rps.hw_lock); 2794 if (ret) { 2795 DRM_ERROR("SKL Mailbox read error = %d\n", ret); 2796 return; 2797 } 2798 2799 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK; 2800 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & 2801 GEN9_MEM_LATENCY_LEVEL_MASK; 2802 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & 2803 GEN9_MEM_LATENCY_LEVEL_MASK; 2804 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & 2805 GEN9_MEM_LATENCY_LEVEL_MASK; 2806 2807 /* 2808 * If a level n (n > 1) has a 0us latency, all levels m (m >= n) 2809 * need to be disabled. We make sure to sanitize the values out 2810 * of the punit to satisfy this requirement. 2811 */ 2812 for (level = 1; level <= max_level; level++) { 2813 if (wm[level] == 0) { 2814 for (i = level + 1; i <= max_level; i++) 2815 wm[i] = 0; 2816 break; 2817 } 2818 } 2819 2820 /* 2821 * WaWmMemoryReadLatency:skl,glk 2822 * 2823 * punit doesn't take into account the read latency so we need 2824 * to add 2us to the various latency levels we retrieve from the 2825 * punit when level 0 response data us 0us. 
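 * (I.e. if the punit reports 0 us for level 0, the code below adds 2 us
 * to level 0 and to every consecutive level that reported a non-zero
 * latency; levels already sanitized to 0 above are left untouched.)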
2826 */ 2827 if (wm[0] == 0) { 2828 wm[0] += 2; 2829 for (level = 1; level <= max_level; level++) { 2830 if (wm[level] == 0) 2831 break; 2832 wm[level] += 2; 2833 } 2834 } 2835 2836 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2837 uint64_t sskpd = I915_READ64(MCH_SSKPD); 2838 2839 wm[0] = (sskpd >> 56) & 0xFF; 2840 if (wm[0] == 0) 2841 wm[0] = sskpd & 0xF; 2842 wm[1] = (sskpd >> 4) & 0xFF; 2843 wm[2] = (sskpd >> 12) & 0xFF; 2844 wm[3] = (sskpd >> 20) & 0x1FF; 2845 wm[4] = (sskpd >> 32) & 0x1FF; 2846 } else if (INTEL_GEN(dev_priv) >= 6) { 2847 uint32_t sskpd = I915_READ(MCH_SSKPD); 2848 2849 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; 2850 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; 2851 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; 2852 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; 2853 } else if (INTEL_GEN(dev_priv) >= 5) { 2854 uint32_t mltr = I915_READ(MLTR_ILK); 2855 2856 /* ILK primary LP0 latency is 700 ns */ 2857 wm[0] = 7; 2858 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK; 2859 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK; 2860 } 2861} 2862 2863static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, 2864 uint16_t wm[5]) 2865{ 2866 /* ILK sprite LP0 latency is 1300 ns */ 2867 if (IS_GEN5(dev_priv)) 2868 wm[0] = 13; 2869} 2870 2871static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, 2872 uint16_t wm[5]) 2873{ 2874 /* ILK cursor LP0 latency is 1300 ns */ 2875 if (IS_GEN5(dev_priv)) 2876 wm[0] = 13; 2877 2878 /* WaDoubleCursorLP3Latency:ivb */ 2879 if (IS_IVYBRIDGE(dev_priv)) 2880 wm[3] *= 2; 2881} 2882 2883int ilk_wm_max_level(const struct drm_i915_private *dev_priv) 2884{ 2885 /* how many WM levels are we expecting */ 2886 if (INTEL_GEN(dev_priv) >= 9) 2887 return 7; 2888 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 2889 return 4; 2890 else if (INTEL_GEN(dev_priv) >= 6) 2891 return 3; 2892 else 2893 return 2; 2894} 2895 2896static void intel_print_wm_latency(struct drm_i915_private *dev_priv, 2897 const char *name, 2898 const uint16_t wm[8]) 2899{ 2900 int level, max_level = ilk_wm_max_level(dev_priv); 2901 2902 for (level = 0; level <= max_level; level++) { 2903 unsigned int latency = wm[level]; 2904 2905 if (latency == 0) { 2906 DRM_ERROR("%s WM%d latency not provided\n", 2907 name, level); 2908 continue; 2909 } 2910 2911 /* 2912 * - latencies are in us on gen9. 2913 * - before then, WM1+ latency values are in 0.5us units 2914 */ 2915 if (IS_GEN9(dev_priv)) 2916 latency *= 10; 2917 else if (level > 0) 2918 latency *= 5; 2919 2920 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n", 2921 name, level, wm[level], 2922 latency / 10, latency % 10); 2923 } 2924} 2925 2926static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, 2927 uint16_t wm[5], uint16_t min) 2928{ 2929 int level, max_level = ilk_wm_max_level(dev_priv); 2930 2931 if (wm[0] >= min) 2932 return false; 2933 2934 wm[0] = max(wm[0], min); 2935 for (level = 1; level <= max_level; level++) 2936 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); 2937 2938 return true; 2939} 2940 2941static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) 2942{ 2943 bool changed; 2944 2945 /* 2946 * The BIOS provided WM memory latency values are often 2947 * inadequate for high resolution displays. Adjust them. 
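 * The bump below raises the WM0 latencies to at least 1.2 us (12 in
 * 0.1 us units) and the WM1+ latencies to at least
 * DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us in their 0.5 us units.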
2948 */ 2949 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | 2950 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | 2951 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); 2952 2953 if (!changed) 2954 return; 2955 2956 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); 2957 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); 2958 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); 2959 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); 2960} 2961 2962static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) 2963{ 2964 intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); 2965 2966 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, 2967 sizeof(dev_priv->wm.pri_latency)); 2968 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency, 2969 sizeof(dev_priv->wm.pri_latency)); 2970 2971 intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency); 2972 intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency); 2973 2974 intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); 2975 intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); 2976 intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); 2977 2978 if (IS_GEN6(dev_priv)) 2979 snb_wm_latency_quirk(dev_priv); 2980} 2981 2982static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) 2983{ 2984 intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency); 2985 intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency); 2986} 2987 2988static bool ilk_validate_pipe_wm(struct drm_device *dev, 2989 struct intel_pipe_wm *pipe_wm) 2990{ 2991 /* LP0 watermark maximums depend on this pipe alone */ 2992 const struct intel_wm_config config = { 2993 .num_pipes_active = 1, 2994 .sprites_enabled = pipe_wm->sprites_enabled, 2995 .sprites_scaled = pipe_wm->sprites_scaled, 2996 }; 2997 struct ilk_wm_maximums max; 2998 2999 /* LP0 watermarks always use 1/2 DDB partitioning */ 3000 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); 3001 3002 /* At least LP0 must be valid */ 3003 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { 3004 DRM_DEBUG_KMS("LP0 watermark invalid\n"); 3005 return false; 3006 } 3007 3008 return true; 3009} 3010 3011/* Compute new watermarks for the pipe */ 3012static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) 3013{ 3014 struct drm_atomic_state *state = cstate->base.state; 3015 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3016 struct intel_pipe_wm *pipe_wm; 3017 struct drm_device *dev = state->dev; 3018 const struct drm_i915_private *dev_priv = to_i915(dev); 3019 struct intel_plane *intel_plane; 3020 struct intel_plane_state *pristate = NULL; 3021 struct intel_plane_state *sprstate = NULL; 3022 struct intel_plane_state *curstate = NULL; 3023 int level, max_level = ilk_wm_max_level(dev_priv), usable_level; 3024 struct ilk_wm_maximums max; 3025 3026 pipe_wm = &cstate->wm.ilk.optimal; 3027 3028 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3029 struct intel_plane_state *ps; 3030 3031 ps = intel_atomic_get_existing_plane_state(state, 3032 intel_plane); 3033 if (!ps) 3034 continue; 3035 3036 if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY) 3037 pristate = ps; 3038 else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) 3039 sprstate = ps; 3040 else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) 3041 curstate = ps; 3042 } 3043 3044 
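	/*
	 * The sprite state recorded below limits how many LP watermark
	 * levels are usable further down: none with sprite scaling, and
	 * only LP1 with sprites enabled on ILK/SNB.
	 */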
pipe_wm->pipe_enabled = cstate->base.active; 3045 if (sprstate) { 3046 pipe_wm->sprites_enabled = sprstate->base.visible; 3047 pipe_wm->sprites_scaled = sprstate->base.visible && 3048 (drm_rect_width(&sprstate->base.dst) != drm_rect_width(&sprstate->base.src) >> 16 || 3049 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16); 3050 } 3051 3052 usable_level = max_level; 3053 3054 /* ILK/SNB: LP2+ watermarks only w/o sprites */ 3055 if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled) 3056 usable_level = 1; 3057 3058 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ 3059 if (pipe_wm->sprites_scaled) 3060 usable_level = 0; 3061 3062 ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, 3063 pristate, sprstate, curstate, &pipe_wm->raw_wm[0]); 3064 3065 memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); 3066 pipe_wm->wm[0] = pipe_wm->raw_wm[0]; 3067 3068 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3069 pipe_wm->linetime = hsw_compute_linetime_wm(cstate); 3070 3071 if (!ilk_validate_pipe_wm(dev, pipe_wm)) 3072 return -EINVAL; 3073 3074 ilk_compute_wm_reg_maximums(dev_priv, 1, &max); 3075 3076 for (level = 1; level <= max_level; level++) { 3077 struct intel_wm_level *wm = &pipe_wm->raw_wm[level]; 3078 3079 ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, 3080 pristate, sprstate, curstate, wm); 3081 3082 /* 3083 * Disable any watermark level that exceeds the 3084 * register maximums since such watermarks are 3085 * always invalid. 3086 */ 3087 if (level > usable_level) 3088 continue; 3089 3090 if (ilk_validate_wm_level(level, &max, wm)) 3091 pipe_wm->wm[level] = *wm; 3092 else 3093 usable_level = level; 3094 } 3095 3096 return 0; 3097} 3098 3099/* 3100 * Build a set of 'intermediate' watermark values that satisfy both the old 3101 * state and the new state. These can be programmed to the hardware 3102 * immediately. 3103 */ 3104static int ilk_compute_intermediate_wm(struct drm_device *dev, 3105 struct intel_crtc *intel_crtc, 3106 struct intel_crtc_state *newstate) 3107{ 3108 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; 3109 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk; 3110 int level, max_level = ilk_wm_max_level(to_i915(dev)); 3111 3112 /* 3113 * Start with the final, target watermarks, then combine with the 3114 * currently active watermarks to get values that are safe both before 3115 * and after the vblank. 3116 */ 3117 *a = newstate->wm.ilk.optimal; 3118 a->pipe_enabled |= b->pipe_enabled; 3119 a->sprites_enabled |= b->sprites_enabled; 3120 a->sprites_scaled |= b->sprites_scaled; 3121 3122 for (level = 0; level <= max_level; level++) { 3123 struct intel_wm_level *a_wm = &a->wm[level]; 3124 const struct intel_wm_level *b_wm = &b->wm[level]; 3125 3126 a_wm->enable &= b_wm->enable; 3127 a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); 3128 a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); 3129 a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); 3130 a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); 3131 } 3132 3133 /* 3134 * We need to make sure that these merged watermark values are 3135 * actually a valid configuration themselves. If they're not, 3136 * there's no safe way to transition from the old state to 3137 * the new state, so we need to fail the atomic transaction. 3138 */ 3139 if (!ilk_validate_pipe_wm(dev, a)) 3140 return -EINVAL; 3141 3142 /* 3143 * If our intermediate WM are identical to the final WM, then we can 3144 * omit the post-vblank programming; only update if it's different. 
3145 */ 3146 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0) 3147 newstate->wm.need_postvbl_update = true; 3148 3149 return 0; 3150} 3151 3152/* 3153 * Merge the watermarks from all active pipes for a specific level. 3154 */ 3155static void ilk_merge_wm_level(struct drm_device *dev, 3156 int level, 3157 struct intel_wm_level *ret_wm) 3158{ 3159 const struct intel_crtc *intel_crtc; 3160 3161 ret_wm->enable = true; 3162 3163 for_each_intel_crtc(dev, intel_crtc) { 3164 const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk; 3165 const struct intel_wm_level *wm = &active->wm[level]; 3166 3167 if (!active->pipe_enabled) 3168 continue; 3169 3170 /* 3171 * The watermark values may have been used in the past, 3172 * so we must maintain them in the registers for some 3173 * time even if the level is now disabled. 3174 */ 3175 if (!wm->enable) 3176 ret_wm->enable = false; 3177 3178 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); 3179 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); 3180 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); 3181 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); 3182 } 3183} 3184 3185/* 3186 * Merge all low power watermarks for all active pipes. 3187 */ 3188static void ilk_wm_merge(struct drm_device *dev, 3189 const struct intel_wm_config *config, 3190 const struct ilk_wm_maximums *max, 3191 struct intel_pipe_wm *merged) 3192{ 3193 struct drm_i915_private *dev_priv = to_i915(dev); 3194 int level, max_level = ilk_wm_max_level(dev_priv); 3195 int last_enabled_level = max_level; 3196 3197 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ 3198 if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && 3199 config->num_pipes_active > 1) 3200 last_enabled_level = 0; 3201 3202 /* ILK: FBC WM must be disabled always */ 3203 merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6; 3204 3205 /* merge each WM1+ level */ 3206 for (level = 1; level <= max_level; level++) { 3207 struct intel_wm_level *wm = &merged->wm[level]; 3208 3209 ilk_merge_wm_level(dev, level, wm); 3210 3211 if (level > last_enabled_level) 3212 wm->enable = false; 3213 else if (!ilk_validate_wm_level(level, max, wm)) 3214 /* make sure all following levels get disabled */ 3215 last_enabled_level = level - 1; 3216 3217 /* 3218 * The spec says it is preferred to disable 3219 * FBC WMs instead of disabling a WM level. 3220 */ 3221 if (wm->fbc_val > max->fbc) { 3222 if (wm->enable) 3223 merged->fbc_wm_enabled = false; 3224 wm->fbc_val = 0; 3225 } 3226 } 3227 3228 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ 3229 /* 3230 * FIXME this is racy. FBC might get enabled later. 3231 * What we should check here is whether FBC can be 3232 * enabled sometime later. 
3233 */ 3234 if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled && 3235 intel_fbc_is_active(dev_priv)) { 3236 for (level = 2; level <= max_level; level++) { 3237 struct intel_wm_level *wm = &merged->wm[level]; 3238 3239 wm->enable = false; 3240 } 3241 } 3242} 3243 3244static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) 3245{ 3246 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ 3247 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); 3248} 3249 3250/* The value we need to program into the WM_LPx latency field */ 3251static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) 3252{ 3253 struct drm_i915_private *dev_priv = to_i915(dev); 3254 3255 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3256 return 2 * level; 3257 else 3258 return dev_priv->wm.pri_latency[level]; 3259} 3260 3261static void ilk_compute_wm_results(struct drm_device *dev, 3262 const struct intel_pipe_wm *merged, 3263 enum intel_ddb_partitioning partitioning, 3264 struct ilk_wm_values *results) 3265{ 3266 struct drm_i915_private *dev_priv = to_i915(dev); 3267 struct intel_crtc *intel_crtc; 3268 int level, wm_lp; 3269 3270 results->enable_fbc_wm = merged->fbc_wm_enabled; 3271 results->partitioning = partitioning; 3272 3273 /* LP1+ register values */ 3274 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 3275 const struct intel_wm_level *r; 3276 3277 level = ilk_wm_lp_to_level(wm_lp, merged); 3278 3279 r = &merged->wm[level]; 3280 3281 /* 3282 * Maintain the watermark values even if the level is 3283 * disabled. Doing otherwise could cause underruns. 3284 */ 3285 results->wm_lp[wm_lp - 1] = 3286 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) | 3287 (r->pri_val << WM1_LP_SR_SHIFT) | 3288 r->cur_val; 3289 3290 if (r->enable) 3291 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN; 3292 3293 if (INTEL_GEN(dev_priv) >= 8) 3294 results->wm_lp[wm_lp - 1] |= 3295 r->fbc_val << WM1_LP_FBC_SHIFT_BDW; 3296 else 3297 results->wm_lp[wm_lp - 1] |= 3298 r->fbc_val << WM1_LP_FBC_SHIFT; 3299 3300 /* 3301 * Always set WM1S_LP_EN when spr_val != 0, even if the 3302 * level is disabled. Doing otherwise could cause underruns. 3303 */ 3304 if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) { 3305 WARN_ON(wm_lp != 1); 3306 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val; 3307 } else 3308 results->wm_lp_spr[wm_lp - 1] = r->spr_val; 3309 } 3310 3311 /* LP0 register values */ 3312 for_each_intel_crtc(dev, intel_crtc) { 3313 enum pipe pipe = intel_crtc->pipe; 3314 const struct intel_wm_level *r = 3315 &intel_crtc->wm.active.ilk.wm[0]; 3316 3317 if (WARN_ON(!r->enable)) 3318 continue; 3319 3320 results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime; 3321 3322 results->wm_pipe[pipe] = 3323 (r->pri_val << WM0_PIPE_PLANE_SHIFT) | 3324 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) | 3325 r->cur_val; 3326 } 3327} 3328 3329/* Find the result with the highest level enabled. Check for enable_fbc_wm in 3330 * case both are at the same level. Prefer r1 in case they're the same. 
*/ 3331static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev, 3332 struct intel_pipe_wm *r1, 3333 struct intel_pipe_wm *r2) 3334{ 3335 int level, max_level = ilk_wm_max_level(to_i915(dev)); 3336 int level1 = 0, level2 = 0; 3337 3338 for (level = 1; level <= max_level; level++) { 3339 if (r1->wm[level].enable) 3340 level1 = level; 3341 if (r2->wm[level].enable) 3342 level2 = level; 3343 } 3344 3345 if (level1 == level2) { 3346 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) 3347 return r2; 3348 else 3349 return r1; 3350 } else if (level1 > level2) { 3351 return r1; 3352 } else { 3353 return r2; 3354 } 3355} 3356 3357/* dirty bits used to track which watermarks need changes */ 3358#define WM_DIRTY_PIPE(pipe) (1 << (pipe)) 3359#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe))) 3360#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) 3361#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) 3362#define WM_DIRTY_FBC (1 << 24) 3363#define WM_DIRTY_DDB (1 << 25) 3364 3365static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, 3366 const struct ilk_wm_values *old, 3367 const struct ilk_wm_values *new) 3368{ 3369 unsigned int dirty = 0; 3370 enum pipe pipe; 3371 int wm_lp; 3372 3373 for_each_pipe(dev_priv, pipe) { 3374 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) { 3375 dirty |= WM_DIRTY_LINETIME(pipe); 3376 /* Must disable LP1+ watermarks too */ 3377 dirty |= WM_DIRTY_LP_ALL; 3378 } 3379 3380 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { 3381 dirty |= WM_DIRTY_PIPE(pipe); 3382 /* Must disable LP1+ watermarks too */ 3383 dirty |= WM_DIRTY_LP_ALL; 3384 } 3385 } 3386 3387 if (old->enable_fbc_wm != new->enable_fbc_wm) { 3388 dirty |= WM_DIRTY_FBC; 3389 /* Must disable LP1+ watermarks too */ 3390 dirty |= WM_DIRTY_LP_ALL; 3391 } 3392 3393 if (old->partitioning != new->partitioning) { 3394 dirty |= WM_DIRTY_DDB; 3395 /* Must disable LP1+ watermarks too */ 3396 dirty |= WM_DIRTY_LP_ALL; 3397 } 3398 3399 /* LP1+ watermarks already deemed dirty, no need to continue */ 3400 if (dirty & WM_DIRTY_LP_ALL) 3401 return dirty; 3402 3403 /* Find the lowest numbered LP1+ watermark in need of an update... */ 3404 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 3405 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || 3406 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) 3407 break; 3408 } 3409 3410 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ 3411 for (; wm_lp <= 3; wm_lp++) 3412 dirty |= WM_DIRTY_LP(wm_lp); 3413 3414 return dirty; 3415} 3416 3417static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, 3418 unsigned int dirty) 3419{ 3420 struct ilk_wm_values *previous = &dev_priv->wm.hw; 3421 bool changed = false; 3422 3423 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) { 3424 previous->wm_lp[2] &= ~WM1_LP_SR_EN; 3425 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]); 3426 changed = true; 3427 } 3428 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) { 3429 previous->wm_lp[1] &= ~WM1_LP_SR_EN; 3430 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]); 3431 changed = true; 3432 } 3433 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) { 3434 previous->wm_lp[0] &= ~WM1_LP_SR_EN; 3435 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]); 3436 changed = true; 3437 } 3438 3439 /* 3440 * Don't touch WM1S_LP_EN here. 3441 * Doing so could cause underruns. 
3442 */ 3443 3444 return changed; 3445} 3446 3447/* 3448 * The spec says we shouldn't write when we don't need, because every write 3449 * causes WMs to be re-evaluated, expending some power. 3450 */ 3451static void ilk_write_wm_values(struct drm_i915_private *dev_priv, 3452 struct ilk_wm_values *results) 3453{ 3454 struct ilk_wm_values *previous = &dev_priv->wm.hw; 3455 unsigned int dirty; 3456 uint32_t val; 3457 3458 dirty = ilk_compute_wm_dirty(dev_priv, previous, results); 3459 if (!dirty) 3460 return; 3461 3462 _ilk_disable_lp_wm(dev_priv, dirty); 3463 3464 if (dirty & WM_DIRTY_PIPE(PIPE_A)) 3465 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); 3466 if (dirty & WM_DIRTY_PIPE(PIPE_B)) 3467 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); 3468 if (dirty & WM_DIRTY_PIPE(PIPE_C)) 3469 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); 3470 3471 if (dirty & WM_DIRTY_LINETIME(PIPE_A)) 3472 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); 3473 if (dirty & WM_DIRTY_LINETIME(PIPE_B)) 3474 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); 3475 if (dirty & WM_DIRTY_LINETIME(PIPE_C)) 3476 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); 3477 3478 if (dirty & WM_DIRTY_DDB) { 3479 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3480 val = I915_READ(WM_MISC); 3481 if (results->partitioning == INTEL_DDB_PART_1_2) 3482 val &= ~WM_MISC_DATA_PARTITION_5_6; 3483 else 3484 val |= WM_MISC_DATA_PARTITION_5_6; 3485 I915_WRITE(WM_MISC, val); 3486 } else { 3487 val = I915_READ(DISP_ARB_CTL2); 3488 if (results->partitioning == INTEL_DDB_PART_1_2) 3489 val &= ~DISP_DATA_PARTITION_5_6; 3490 else 3491 val |= DISP_DATA_PARTITION_5_6; 3492 I915_WRITE(DISP_ARB_CTL2, val); 3493 } 3494 } 3495 3496 if (dirty & WM_DIRTY_FBC) { 3497 val = I915_READ(DISP_ARB_CTL); 3498 if (results->enable_fbc_wm) 3499 val &= ~DISP_FBC_WM_DIS; 3500 else 3501 val |= DISP_FBC_WM_DIS; 3502 I915_WRITE(DISP_ARB_CTL, val); 3503 } 3504 3505 if (dirty & WM_DIRTY_LP(1) && 3506 previous->wm_lp_spr[0] != results->wm_lp_spr[0]) 3507 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); 3508 3509 if (INTEL_GEN(dev_priv) >= 7) { 3510 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) 3511 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); 3512 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) 3513 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); 3514 } 3515 3516 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) 3517 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); 3518 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) 3519 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); 3520 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) 3521 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); 3522 3523 dev_priv->wm.hw = *results; 3524} 3525 3526bool ilk_disable_lp_wm(struct drm_device *dev) 3527{ 3528 struct drm_i915_private *dev_priv = to_i915(dev); 3529 3530 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); 3531} 3532 3533#define SKL_SAGV_BLOCK_TIME 30 /* µs */ 3534 3535/* 3536 * FIXME: We still don't have the proper code detect if we need to apply the WA, 3537 * so assume we'll always need it in order to avoid underruns. 
3538 */ 3539static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state) 3540{ 3541 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 3542 3543 if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) 3544 return true; 3545 3546 return false; 3547} 3548 3549static bool 3550intel_has_sagv(struct drm_i915_private *dev_priv) 3551{ 3552 if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) 3553 return true; 3554 3555 if (IS_SKYLAKE(dev_priv) && 3556 dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED) 3557 return true; 3558 3559 return false; 3560} 3561 3562/* 3563 * SAGV dynamically adjusts the system agent voltage and clock frequencies 3564 * depending on power and performance requirements. The display engine access 3565 * to system memory is blocked during the adjustment time. Because of the 3566 * blocking time, having this enabled can cause full system hangs and/or pipe 3567 * underruns if we don't meet all of the following requirements: 3568 * 3569 * - <= 1 pipe enabled 3570 * - All planes can enable watermarks for latencies >= SAGV engine block time 3571 * - We're not using an interlaced display configuration 3572 */ 3573int 3574intel_enable_sagv(struct drm_i915_private *dev_priv) 3575{ 3576 int ret; 3577 3578 if (!intel_has_sagv(dev_priv)) 3579 return 0; 3580 3581 if (dev_priv->sagv_status == I915_SAGV_ENABLED) 3582 return 0; 3583 3584 DRM_DEBUG_KMS("Enabling the SAGV\n"); 3585 mutex_lock(&dev_priv->rps.hw_lock); 3586 3587 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, 3588 GEN9_SAGV_ENABLE); 3589 3590 /* We don't need to wait for the SAGV when enabling */ 3591 mutex_unlock(&dev_priv->rps.hw_lock); 3592 3593 /* 3594 * Some skl systems, pre-release machines in particular, 3595 * don't actually have an SAGV. 3596 */ 3597 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { 3598 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); 3599 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; 3600 return 0; 3601 } else if (ret < 0) { 3602 DRM_ERROR("Failed to enable the SAGV\n"); 3603 return ret; 3604 } 3605 3606 dev_priv->sagv_status = I915_SAGV_ENABLED; 3607 return 0; 3608} 3609 3610int 3611intel_disable_sagv(struct drm_i915_private *dev_priv) 3612{ 3613 int ret; 3614 3615 if (!intel_has_sagv(dev_priv)) 3616 return 0; 3617 3618 if (dev_priv->sagv_status == I915_SAGV_DISABLED) 3619 return 0; 3620 3621 DRM_DEBUG_KMS("Disabling the SAGV\n"); 3622 mutex_lock(&dev_priv->rps.hw_lock); 3623 3624 /* bspec says to keep retrying for at least 1 ms */ 3625 ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL, 3626 GEN9_SAGV_DISABLE, 3627 GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 3628 1); 3629 mutex_unlock(&dev_priv->rps.hw_lock); 3630 3631 /* 3632 * Some skl systems, pre-release machines in particular, 3633 * don't actually have an SAGV. 
3634 */ 3635 if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) { 3636 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n"); 3637 dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; 3638 return 0; 3639 } else if (ret < 0) { 3640 DRM_ERROR("Failed to disable the SAGV (%d)\n", ret); 3641 return ret; 3642 } 3643 3644 dev_priv->sagv_status = I915_SAGV_DISABLED; 3645 return 0; 3646} 3647 3648bool intel_can_enable_sagv(struct drm_atomic_state *state) 3649{ 3650 struct drm_device *dev = state->dev; 3651 struct drm_i915_private *dev_priv = to_i915(dev); 3652 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 3653 struct intel_crtc *crtc; 3654 struct intel_plane *plane; 3655 struct intel_crtc_state *cstate; 3656 enum pipe pipe; 3657 int level, latency; 3658 3659 if (!intel_has_sagv(dev_priv)) 3660 return false; 3661 3662 /* 3663 * SKL workaround: bspec recommends we disable the SAGV when we have 3664 * more then one pipe enabled 3665 * 3666 * If there are no active CRTCs, no additional checks need be performed 3667 */ 3668 if (hweight32(intel_state->active_crtcs) == 0) 3669 return true; 3670 else if (hweight32(intel_state->active_crtcs) > 1) 3671 return false; 3672 3673 /* Since we're now guaranteed to only have one active CRTC... */ 3674 pipe = ffs(intel_state->active_crtcs) - 1; 3675 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 3676 cstate = to_intel_crtc_state(crtc->base.state); 3677 3678 if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3679 return false; 3680 3681 for_each_intel_plane_on_crtc(dev, crtc, plane) { 3682 struct skl_plane_wm *wm = 3683 &cstate->wm.skl.optimal.planes[plane->id]; 3684 3685 /* Skip this plane if it's not enabled */ 3686 if (!wm->wm[0].plane_en) 3687 continue; 3688 3689 /* Find the highest enabled wm level for this plane */ 3690 for (level = ilk_wm_max_level(dev_priv); 3691 !wm->wm[level].plane_en; --level) 3692 { } 3693 3694 latency = dev_priv->wm.skl_latency[level]; 3695 3696 if (skl_needs_memory_bw_wa(intel_state) && 3697 plane->base.state->fb->modifier == 3698 I915_FORMAT_MOD_X_TILED) 3699 latency += 15; 3700 3701 /* 3702 * If any of the planes on this pipe don't enable wm levels 3703 * that incur memory latencies higher then 30µs we can't enable 3704 * the SAGV 3705 */ 3706 if (latency < SKL_SAGV_BLOCK_TIME) 3707 return false; 3708 } 3709 3710 return true; 3711} 3712 3713static void 3714skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 3715 const struct intel_crtc_state *cstate, 3716 struct skl_ddb_entry *alloc, /* out */ 3717 int *num_active /* out */) 3718{ 3719 struct drm_atomic_state *state = cstate->base.state; 3720 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 3721 struct drm_i915_private *dev_priv = to_i915(dev); 3722 struct drm_crtc *for_crtc = cstate->base.crtc; 3723 unsigned int pipe_size, ddb_size; 3724 int nth_active_pipe; 3725 3726 if (WARN_ON(!state) || !cstate->base.active) { 3727 alloc->start = 0; 3728 alloc->end = 0; 3729 *num_active = hweight32(dev_priv->active_crtcs); 3730 return; 3731 } 3732 3733 if (intel_state->active_pipe_changes) 3734 *num_active = hweight32(intel_state->active_crtcs); 3735 else 3736 *num_active = hweight32(dev_priv->active_crtcs); 3737 3738 ddb_size = INTEL_INFO(dev_priv)->ddb_size; 3739 WARN_ON(ddb_size == 0); 3740 3741 ddb_size -= 4; /* 4 blocks for bypass path allocation */ 3742 3743 /* 3744 * If the state doesn't change the active CRTC's, then there's 3745 * no need to recalculate; the existing pipe allocation limits 3746 * should remain unchanged. 
Note that we're safe from racing 3747 * commits since any racing commit that changes the active CRTC 3748 * list would need to grab _all_ crtc locks, including the one 3749 * we currently hold. 3750 */ 3751 if (!intel_state->active_pipe_changes) { 3752 /* 3753 * alloc may be cleared by clear_intel_crtc_state, 3754 * copy from old state to be sure 3755 */ 3756 *alloc = to_intel_crtc_state(for_crtc->state)->wm.skl.ddb; 3757 return; 3758 } 3759 3760 nth_active_pipe = hweight32(intel_state->active_crtcs & 3761 (drm_crtc_mask(for_crtc) - 1)); 3762 pipe_size = ddb_size / hweight32(intel_state->active_crtcs); 3763 alloc->start = nth_active_pipe * ddb_size / *num_active; 3764 alloc->end = alloc->start + pipe_size; 3765} 3766 3767static unsigned int skl_cursor_allocation(int num_active) 3768{ 3769 if (num_active == 1) 3770 return 32; 3771 3772 return 8; 3773} 3774 3775static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) 3776{ 3777 entry->start = reg & 0x3ff; 3778 entry->end = (reg >> 16) & 0x3ff; 3779 if (entry->end) 3780 entry->end += 1; 3781} 3782 3783void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, 3784 struct skl_ddb_allocation *ddb /* out */) 3785{ 3786 struct intel_crtc *crtc; 3787 3788 memset(ddb, 0, sizeof(*ddb)); 3789 3790 for_each_intel_crtc(&dev_priv->drm, crtc) { 3791 enum intel_display_power_domain power_domain; 3792 enum plane_id plane_id; 3793 enum pipe pipe = crtc->pipe; 3794 3795 power_domain = POWER_DOMAIN_PIPE(pipe); 3796 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 3797 continue; 3798 3799 for_each_plane_id_on_crtc(crtc, plane_id) { 3800 u32 val; 3801 3802 if (plane_id != PLANE_CURSOR) 3803 val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); 3804 else 3805 val = I915_READ(CUR_BUF_CFG(pipe)); 3806 3807 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val); 3808 } 3809 3810 intel_display_power_put(dev_priv, power_domain); 3811 } 3812} 3813 3814/* 3815 * Determines the downscale amount of a plane for the purposes of watermark calculations. 3816 * The bspec defines downscale amount as: 3817 * 3818 * """ 3819 * Horizontal down scale amount = maximum[1, Horizontal source size / 3820 * Horizontal destination size] 3821 * Vertical down scale amount = maximum[1, Vertical source size / 3822 * Vertical destination size] 3823 * Total down scale amount = Horizontal down scale amount * 3824 * Vertical down scale amount 3825 * """ 3826 * 3827 * Return value is provided in 16.16 fixed point form to retain fractional part. 3828 * Caller should take care of dividing & rounding off the value. 3829 */ 3830static uint_fixed_16_16_t 3831skl_plane_downscale_amount(const struct intel_crtc_state *cstate, 3832 const struct intel_plane_state *pstate) 3833{ 3834 struct intel_plane *plane = to_intel_plane(pstate->base.plane); 3835 uint32_t src_w, src_h, dst_w, dst_h; 3836 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; 3837 uint_fixed_16_16_t downscale_h, downscale_w; 3838 3839 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) 3840 return u32_to_fixed_16_16(0); 3841 3842 /* n.b., src is 16.16 fixed point, dst is whole integer */ 3843 if (plane->id == PLANE_CURSOR) { 3844 /* 3845 * Cursors only support 0/180 degree rotation, 3846 * hence no need to account for rotation here. 
3847 */ 3848 src_w = pstate->base.src_w >> 16; 3849 src_h = pstate->base.src_h >> 16; 3850 dst_w = pstate->base.crtc_w; 3851 dst_h = pstate->base.crtc_h; 3852 } else { 3853 /* 3854 * Src coordinates are already rotated by 270 degrees for 3855 * the 90/270 degree plane rotation cases (to match the 3856 * GTT mapping), hence no need to account for rotation here. 3857 */ 3858 src_w = drm_rect_width(&pstate->base.src) >> 16; 3859 src_h = drm_rect_height(&pstate->base.src) >> 16; 3860 dst_w = drm_rect_width(&pstate->base.dst); 3861 dst_h = drm_rect_height(&pstate->base.dst); 3862 } 3863 3864 fp_w_ratio = fixed_16_16_div(src_w, dst_w); 3865 fp_h_ratio = fixed_16_16_div(src_h, dst_h); 3866 downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1)); 3867 downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1)); 3868 3869 return mul_fixed16(downscale_w, downscale_h); 3870} 3871 3872static uint_fixed_16_16_t 3873skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state) 3874{ 3875 uint_fixed_16_16_t pipe_downscale = u32_to_fixed_16_16(1); 3876 3877 if (!crtc_state->base.enable) 3878 return pipe_downscale; 3879 3880 if (crtc_state->pch_pfit.enabled) { 3881 uint32_t src_w, src_h, dst_w, dst_h; 3882 uint32_t pfit_size = crtc_state->pch_pfit.size; 3883 uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; 3884 uint_fixed_16_16_t downscale_h, downscale_w; 3885 3886 src_w = crtc_state->pipe_src_w; 3887 src_h = crtc_state->pipe_src_h; 3888 dst_w = pfit_size >> 16; 3889 dst_h = pfit_size & 0xffff; 3890 3891 if (!dst_w || !dst_h) 3892 return pipe_downscale; 3893 3894 fp_w_ratio = fixed_16_16_div(src_w, dst_w); 3895 fp_h_ratio = fixed_16_16_div(src_h, dst_h); 3896 downscale_w = max_fixed_16_16(fp_w_ratio, u32_to_fixed_16_16(1)); 3897 downscale_h = max_fixed_16_16(fp_h_ratio, u32_to_fixed_16_16(1)); 3898 3899 pipe_downscale = mul_fixed16(downscale_w, downscale_h); 3900 } 3901 3902 return pipe_downscale; 3903} 3904 3905int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, 3906 struct intel_crtc_state *cstate) 3907{ 3908 struct drm_crtc_state *crtc_state = &cstate->base; 3909 struct drm_atomic_state *state = crtc_state->state; 3910 struct drm_plane *plane; 3911 const struct drm_plane_state *pstate; 3912 struct intel_plane_state *intel_pstate; 3913 int crtc_clock, dotclk; 3914 uint32_t pipe_max_pixel_rate; 3915 uint_fixed_16_16_t pipe_downscale; 3916 uint_fixed_16_16_t max_downscale = u32_to_fixed_16_16(1); 3917 3918 if (!cstate->base.enable) 3919 return 0; 3920 3921 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { 3922 uint_fixed_16_16_t plane_downscale; 3923 uint_fixed_16_16_t fp_9_div_8 = fixed_16_16_div(9, 8); 3924 int bpp; 3925 3926 if (!intel_wm_plane_visible(cstate, 3927 to_intel_plane_state(pstate))) 3928 continue; 3929 3930 if (WARN_ON(!pstate->fb)) 3931 return -EINVAL; 3932 3933 intel_pstate = to_intel_plane_state(pstate); 3934 plane_downscale = skl_plane_downscale_amount(cstate, 3935 intel_pstate); 3936 bpp = pstate->fb->format->cpp[0] * 8; 3937 if (bpp == 64) 3938 plane_downscale = mul_fixed16(plane_downscale, 3939 fp_9_div_8); 3940 3941 max_downscale = max_fixed_16_16(plane_downscale, max_downscale); 3942 } 3943 pipe_downscale = skl_pipe_downscale_amount(cstate); 3944 3945 pipe_downscale = mul_fixed16(pipe_downscale, max_downscale); 3946 3947 crtc_clock = crtc_state->adjusted_mode.crtc_clock; 3948 dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk; 3949 3950 if (IS_GEMINILAKE(to_i915(intel_crtc->base.dev))) 3951 dotclk *= 2; 3952 3953 
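	/*
	 * The check below compares the mode's pixel clock against the cdclk
	 * budget (doubled on GLK above) once that budget has been scaled down
	 * by the combined pipe and worst-case plane downscale factor.
	 */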
pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale); 3954 3955 if (pipe_max_pixel_rate < crtc_clock) { 3956 DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n"); 3957 return -EINVAL; 3958 } 3959 3960 return 0; 3961} 3962 3963static unsigned int 3964skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, 3965 const struct drm_plane_state *pstate, 3966 int y) 3967{ 3968 struct intel_plane *plane = to_intel_plane(pstate->plane); 3969 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 3970 uint32_t data_rate; 3971 uint32_t width = 0, height = 0; 3972 struct drm_framebuffer *fb; 3973 u32 format; 3974 uint_fixed_16_16_t down_scale_amount; 3975 3976 if (!intel_pstate->base.visible) 3977 return 0; 3978 3979 fb = pstate->fb; 3980 format = fb->format->format; 3981 3982 if (plane->id == PLANE_CURSOR) 3983 return 0; 3984 if (y && format != DRM_FORMAT_NV12) 3985 return 0; 3986 3987 /* 3988 * Src coordinates are already rotated by 270 degrees for 3989 * the 90/270 degree plane rotation cases (to match the 3990 * GTT mapping), hence no need to account for rotation here. 3991 */ 3992 width = drm_rect_width(&intel_pstate->base.src) >> 16; 3993 height = drm_rect_height(&intel_pstate->base.src) >> 16; 3994 3995 /* for planar format */ 3996 if (format == DRM_FORMAT_NV12) { 3997 if (y) /* y-plane data rate */ 3998 data_rate = width * height * 3999 fb->format->cpp[0]; 4000 else /* uv-plane data rate */ 4001 data_rate = (width / 2) * (height / 2) * 4002 fb->format->cpp[1]; 4003 } else { 4004 /* for packed formats */ 4005 data_rate = width * height * fb->format->cpp[0]; 4006 } 4007 4008 down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate); 4009 4010 return mul_round_up_u32_fixed16(data_rate, down_scale_amount); 4011} 4012 4013/* 4014 * We don't overflow 32 bits. 
Worst case is 3 planes enabled, each fetching 4015 * a 8192x4096@32bpp framebuffer: 4016 * 3 * 4096 * 8192 * 4 < 2^32 4017 */ 4018static unsigned int 4019skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, 4020 unsigned *plane_data_rate, 4021 unsigned *plane_y_data_rate) 4022{ 4023 struct drm_crtc_state *cstate = &intel_cstate->base; 4024 struct drm_atomic_state *state = cstate->state; 4025 struct drm_plane *plane; 4026 const struct drm_plane_state *pstate; 4027 unsigned int total_data_rate = 0; 4028 4029 if (WARN_ON(!state)) 4030 return 0; 4031 4032 /* Calculate and cache data rate for each plane */ 4033 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { 4034 enum plane_id plane_id = to_intel_plane(plane)->id; 4035 unsigned int rate; 4036 4037 /* packed/uv */ 4038 rate = skl_plane_relative_data_rate(intel_cstate, 4039 pstate, 0); 4040 plane_data_rate[plane_id] = rate; 4041 4042 total_data_rate += rate; 4043 4044 /* y-plane */ 4045 rate = skl_plane_relative_data_rate(intel_cstate, 4046 pstate, 1); 4047 plane_y_data_rate[plane_id] = rate; 4048 4049 total_data_rate += rate; 4050 } 4051 4052 return total_data_rate; 4053} 4054 4055static uint16_t 4056skl_ddb_min_alloc(const struct drm_plane_state *pstate, 4057 const int y) 4058{ 4059 struct drm_framebuffer *fb = pstate->fb; 4060 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 4061 uint32_t src_w, src_h; 4062 uint32_t min_scanlines = 8; 4063 uint8_t plane_bpp; 4064 4065 if (WARN_ON(!fb)) 4066 return 0; 4067 4068 /* For packed formats, no y-plane, return 0 */ 4069 if (y && fb->format->format != DRM_FORMAT_NV12) 4070 return 0; 4071 4072 /* For Non Y-tile return 8-blocks */ 4073 if (fb->modifier != I915_FORMAT_MOD_Y_TILED && 4074 fb->modifier != I915_FORMAT_MOD_Yf_TILED) 4075 return 8; 4076 4077 /* 4078 * Src coordinates are already rotated by 270 degrees for 4079 * the 90/270 degree plane rotation cases (to match the 4080 * GTT mapping), hence no need to account for rotation here. 
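 * For 90/270 rotation on Y/Yf tiling the minimum is based on a number of scanlines that depends on the pixel size, see the switch on plane_bpp below.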
4081 */ 4082 src_w = drm_rect_width(&intel_pstate->base.src) >> 16; 4083 src_h = drm_rect_height(&intel_pstate->base.src) >> 16; 4084 4085 /* Halve UV plane width and height for NV12 */ 4086 if (fb->format->format == DRM_FORMAT_NV12 && !y) { 4087 src_w /= 2; 4088 src_h /= 2; 4089 } 4090 4091 if (fb->format->format == DRM_FORMAT_NV12 && !y) 4092 plane_bpp = fb->format->cpp[1]; 4093 else 4094 plane_bpp = fb->format->cpp[0]; 4095 4096 if (drm_rotation_90_or_270(pstate->rotation)) { 4097 switch (plane_bpp) { 4098 case 1: 4099 min_scanlines = 32; 4100 break; 4101 case 2: 4102 min_scanlines = 16; 4103 break; 4104 case 4: 4105 min_scanlines = 8; 4106 break; 4107 case 8: 4108 min_scanlines = 4; 4109 break; 4110 default: 4111 WARN(1, "Unsupported pixel depth %u for rotation", 4112 plane_bpp); 4113 min_scanlines = 32; 4114 } 4115 } 4116 4117 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3; 4118} 4119 4120static void 4121skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active, 4122 uint16_t *minimum, uint16_t *y_minimum) 4123{ 4124 const struct drm_plane_state *pstate; 4125 struct drm_plane *plane; 4126 4127 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) { 4128 enum plane_id plane_id = to_intel_plane(plane)->id; 4129 4130 if (plane_id == PLANE_CURSOR) 4131 continue; 4132 4133 if (!pstate->visible) 4134 continue; 4135 4136 minimum[plane_id] = skl_ddb_min_alloc(pstate, 0); 4137 y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); 4138 } 4139 4140 minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active); 4141} 4142 4143static int 4144skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, 4145 struct skl_ddb_allocation *ddb /* out */) 4146{ 4147 struct drm_atomic_state *state = cstate->base.state; 4148 struct drm_crtc *crtc = cstate->base.crtc; 4149 struct drm_device *dev = crtc->dev; 4150 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4151 enum pipe pipe = intel_crtc->pipe; 4152 struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; 4153 uint16_t alloc_size, start; 4154 uint16_t minimum[I915_MAX_PLANES] = {}; 4155 uint16_t y_minimum[I915_MAX_PLANES] = {}; 4156 unsigned int total_data_rate; 4157 enum plane_id plane_id; 4158 int num_active; 4159 unsigned plane_data_rate[I915_MAX_PLANES] = {}; 4160 unsigned plane_y_data_rate[I915_MAX_PLANES] = {}; 4161 uint16_t total_min_blocks = 0; 4162 4163 /* Clear the partitioning for disabled planes. */ 4164 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 4165 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe])); 4166 4167 if (WARN_ON(!state)) 4168 return 0; 4169 4170 if (!cstate->base.active) { 4171 alloc->start = alloc->end = 0; 4172 return 0; 4173 } 4174 4175 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active); 4176 alloc_size = skl_ddb_entry_size(alloc); 4177 if (alloc_size == 0) 4178 return 0; 4179 4180 skl_ddb_calc_min(cstate, num_active, minimum, y_minimum); 4181 4182 /* 4183 * 1. Allocate the minimum required blocks for each active plane 4184 * and allocate the cursor; it doesn't require extra allocation 4185 * proportional to the data rate.
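 * The cursor's blocks are carved out of the top of the pipe's allocation just below.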
4186 */ 4187 4188 for_each_plane_id_on_crtc(intel_crtc, plane_id) { 4189 total_min_blocks += minimum[plane_id]; 4190 total_min_blocks += y_minimum[plane_id]; 4191 } 4192 4193 if (total_min_blocks > alloc_size) { 4194 DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations"); 4195 DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks, 4196 alloc_size); 4197 return -EINVAL; 4198 } 4199 4200 alloc_size -= total_min_blocks; 4201 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR]; 4202 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; 4203 4204 /* 4205 * 2. Distribute the remaining space in proportion to the amount of 4206 * data each plane needs to fetch from memory. 4207 * 4208 * FIXME: we may not allocate every single block here. 4209 */ 4210 total_data_rate = skl_get_total_relative_data_rate(cstate, 4211 plane_data_rate, 4212 plane_y_data_rate); 4213 if (total_data_rate == 0) 4214 return 0; 4215 4216 start = alloc->start; 4217 for_each_plane_id_on_crtc(intel_crtc, plane_id) { 4218 unsigned int data_rate, y_data_rate; 4219 uint16_t plane_blocks, y_plane_blocks = 0; 4220 4221 if (plane_id == PLANE_CURSOR) 4222 continue; 4223 4224 data_rate = plane_data_rate[plane_id]; 4225 4226 /* 4227 * allocation for (packed formats) or (uv-plane part of planar format): 4228 * promote the expression to 64 bits to avoid overflowing, the 4229 * result is < available as data_rate / total_data_rate < 1 4230 */ 4231 plane_blocks = minimum[plane_id]; 4232 plane_blocks += div_u64((uint64_t)alloc_size * data_rate, 4233 total_data_rate); 4234 4235 /* Leave disabled planes at (0,0) */ 4236 if (data_rate) { 4237 ddb->plane[pipe][plane_id].start = start; 4238 ddb->plane[pipe][plane_id].end = start + plane_blocks; 4239 } 4240 4241 start += plane_blocks; 4242 4243 /* 4244 * allocation for y_plane part of planar format: 4245 */ 4246 y_data_rate = plane_y_data_rate[plane_id]; 4247 4248 y_plane_blocks = y_minimum[plane_id]; 4249 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, 4250 total_data_rate); 4251 4252 if (y_data_rate) { 4253 ddb->y_plane[pipe][plane_id].start = start; 4254 ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks; 4255 } 4256 4257 start += y_plane_blocks; 4258 } 4259 4260 return 0; 4261} 4262 4263/* 4264 * The max latency should be 257 (max the punit can code is 255 and we add 2us 4265 * for the read latency) and cpp should always be <= 8, so that 4266 * should allow pixel_rate up to ~2 GHz which seems sufficient since max 4267 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. 
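 * (e.g. 2^32 / (257 * 8) ~= 2,089,000, i.e. roughly 2.09 GHz assuming pixel_rate is expressed in kHz here.)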
4268*/ 4269static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, 4270 uint32_t latency) 4271{ 4272 uint32_t wm_intermediate_val; 4273 uint_fixed_16_16_t ret; 4274 4275 if (latency == 0) 4276 return FP_16_16_MAX; 4277 4278 wm_intermediate_val = latency * pixel_rate * cpp; 4279 ret = fixed_16_16_div_u64(wm_intermediate_val, 1000 * 512); 4280 return ret; 4281} 4282 4283static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate, 4284 uint32_t pipe_htotal, 4285 uint32_t latency, 4286 uint_fixed_16_16_t plane_blocks_per_line) 4287{ 4288 uint32_t wm_intermediate_val; 4289 uint_fixed_16_16_t ret; 4290 4291 if (latency == 0) 4292 return FP_16_16_MAX; 4293 4294 wm_intermediate_val = latency * pixel_rate; 4295 wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val, 4296 pipe_htotal * 1000); 4297 ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line); 4298 return ret; 4299} 4300 4301static uint_fixed_16_16_t 4302intel_get_linetime_us(struct intel_crtc_state *cstate) 4303{ 4304 uint32_t pixel_rate; 4305 uint32_t crtc_htotal; 4306 uint_fixed_16_16_t linetime_us; 4307 4308 if (!cstate->base.active) 4309 return u32_to_fixed_16_16(0); 4310 4311 pixel_rate = cstate->pixel_rate; 4312 4313 if (WARN_ON(pixel_rate == 0)) 4314 return u32_to_fixed_16_16(0); 4315 4316 crtc_htotal = cstate->base.adjusted_mode.crtc_htotal; 4317 linetime_us = fixed_16_16_div_u64(crtc_htotal * 1000, pixel_rate); 4318 4319 return linetime_us; 4320} 4321 4322static uint32_t 4323skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, 4324 const struct intel_plane_state *pstate) 4325{ 4326 uint64_t adjusted_pixel_rate; 4327 uint_fixed_16_16_t downscale_amount; 4328 4329 /* Shouldn't reach here on disabled planes... */ 4330 if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) 4331 return 0; 4332 4333 /* 4334 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate 4335 * with additional adjustments for plane-specific scaling. 
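 * i.e. the pipe's pixel rate multiplied by the plane's downscale amount, rounded up from 16.16 fixed point.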
4336 */ 4337 adjusted_pixel_rate = cstate->pixel_rate; 4338 downscale_amount = skl_plane_downscale_amount(cstate, pstate); 4339 4340 return mul_round_up_u32_fixed16(adjusted_pixel_rate, 4341 downscale_amount); 4342} 4343 4344static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, 4345 struct intel_crtc_state *cstate, 4346 const struct intel_plane_state *intel_pstate, 4347 uint16_t ddb_allocation, 4348 int level, 4349 uint16_t *out_blocks, /* out */ 4350 uint8_t *out_lines, /* out */ 4351 bool *enabled /* out */) 4352{ 4353 struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); 4354 const struct drm_plane_state *pstate = &intel_pstate->base; 4355 const struct drm_framebuffer *fb = pstate->fb; 4356 uint32_t latency = dev_priv->wm.skl_latency[level]; 4357 uint_fixed_16_16_t method1, method2; 4358 uint_fixed_16_16_t plane_blocks_per_line; 4359 uint_fixed_16_16_t selected_result; 4360 uint32_t interm_pbpl; 4361 uint32_t plane_bytes_per_line; 4362 uint32_t res_blocks, res_lines; 4363 uint8_t cpp; 4364 uint32_t width = 0, height = 0; 4365 uint32_t plane_pixel_rate; 4366 uint_fixed_16_16_t y_tile_minimum; 4367 uint32_t y_min_scanlines; 4368 struct intel_atomic_state *state = 4369 to_intel_atomic_state(cstate->base.state); 4370 bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); 4371 bool y_tiled, x_tiled; 4372 4373 if (latency == 0 || 4374 !intel_wm_plane_visible(cstate, intel_pstate)) { 4375 *enabled = false; 4376 return 0; 4377 } 4378 4379 y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED || 4380 fb->modifier == I915_FORMAT_MOD_Yf_TILED; 4381 x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; 4382 4383 /* Display WA #1141: kbl,cfl */ 4384 if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) && 4385 dev_priv->ipc_enabled) 4386 latency += 4; 4387 4388 if (apply_memory_bw_wa && x_tiled) 4389 latency += 15; 4390 4391 if (plane->id == PLANE_CURSOR) { 4392 width = intel_pstate->base.crtc_w; 4393 height = intel_pstate->base.crtc_h; 4394 } else { 4395 /* 4396 * Src coordinates are already rotated by 270 degrees for 4397 * the 90/270 degree plane rotation cases (to match the 4398 * GTT mapping), hence no need to account for rotation here. 4399 */ 4400 width = drm_rect_width(&intel_pstate->base.src) >> 16; 4401 height = drm_rect_height(&intel_pstate->base.src) >> 16; 4402 } 4403 4404 cpp = fb->format->cpp[0]; 4405 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); 4406 4407 if (drm_rotation_90_or_270(pstate->rotation)) { 4408 int cpp = (fb->format->format == DRM_FORMAT_NV12) ? 
4409 fb->format->cpp[1] : 4410 fb->format->cpp[0]; 4411 4412 switch (cpp) { 4413 case 1: 4414 y_min_scanlines = 16; 4415 break; 4416 case 2: 4417 y_min_scanlines = 8; 4418 break; 4419 case 4: 4420 y_min_scanlines = 4; 4421 break; 4422 default: 4423 MISSING_CASE(cpp); 4424 return -EINVAL; 4425 } 4426 } else { 4427 y_min_scanlines = 4; 4428 } 4429 4430 if (apply_memory_bw_wa) 4431 y_min_scanlines *= 2; 4432 4433 plane_bytes_per_line = width * cpp; 4434 if (y_tiled) { 4435 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line * 4436 y_min_scanlines, 512); 4437 plane_blocks_per_line = fixed_16_16_div(interm_pbpl, 4438 y_min_scanlines); 4439 } else if (x_tiled) { 4440 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512); 4441 plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl); 4442 } else { 4443 interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; 4444 plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl); 4445 } 4446 4447 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); 4448 method2 = skl_wm_method2(plane_pixel_rate, 4449 cstate->base.adjusted_mode.crtc_htotal, 4450 latency, 4451 plane_blocks_per_line); 4452 4453 y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines, 4454 plane_blocks_per_line); 4455 4456 if (y_tiled) { 4457 selected_result = max_fixed_16_16(method2, y_tile_minimum); 4458 } else { 4459 uint32_t linetime_us; 4460 4461 linetime_us = fixed_16_16_to_u32_round_up( 4462 intel_get_linetime_us(cstate)); 4463 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && 4464 (plane_bytes_per_line / 512 < 1)) 4465 selected_result = method2; 4466 else if (ddb_allocation >= 4467 fixed_16_16_to_u32_round_up(plane_blocks_per_line)) 4468 selected_result = min_fixed_16_16(method1, method2); 4469 else if (latency >= linetime_us) 4470 selected_result = min_fixed_16_16(method1, method2); 4471 else 4472 selected_result = method1; 4473 } 4474 4475 res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1; 4476 res_lines = div_round_up_fixed16(selected_result, 4477 plane_blocks_per_line); 4478 4479 if (level >= 1 && level <= 7) { 4480 if (y_tiled) { 4481 res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum); 4482 res_lines += y_min_scanlines; 4483 } else { 4484 res_blocks++; 4485 } 4486 } 4487 4488 if (res_blocks >= ddb_allocation || res_lines > 31) { 4489 *enabled = false; 4490 4491 /* 4492 * If there are no valid level 0 watermarks, then we can't 4493 * support this display configuration. 
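 * A failure at level 1 or above simply leaves that level disabled.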
4494 */ 4495 if (level) { 4496 return 0; 4497 } else { 4498 struct drm_plane *plane = pstate->plane; 4499 4500 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); 4501 DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n", 4502 plane->base.id, plane->name, 4503 res_blocks, ddb_allocation, res_lines); 4504 return -EINVAL; 4505 } 4506 } 4507 4508 *out_blocks = res_blocks; 4509 *out_lines = res_lines; 4510 *enabled = true; 4511 4512 return 0; 4513} 4514 4515static int 4516skl_compute_wm_levels(const struct drm_i915_private *dev_priv, 4517 struct skl_ddb_allocation *ddb, 4518 struct intel_crtc_state *cstate, 4519 const struct intel_plane_state *intel_pstate, 4520 struct skl_plane_wm *wm) 4521{ 4522 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 4523 struct drm_plane *plane = intel_pstate->base.plane; 4524 struct intel_plane *intel_plane = to_intel_plane(plane); 4525 uint16_t ddb_blocks; 4526 enum pipe pipe = intel_crtc->pipe; 4527 int level, max_level = ilk_wm_max_level(dev_priv); 4528 int ret; 4529 4530 if (WARN_ON(!intel_pstate->base.fb)) 4531 return -EINVAL; 4532 4533 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]); 4534 4535 for (level = 0; level <= max_level; level++) { 4536 struct skl_wm_level *result = &wm->wm[level]; 4537 4538 ret = skl_compute_plane_wm(dev_priv, 4539 cstate, 4540 intel_pstate, 4541 ddb_blocks, 4542 level, 4543 &result->plane_res_b, 4544 &result->plane_res_l, 4545 &result->plane_en); 4546 if (ret) 4547 return ret; 4548 } 4549 4550 return 0; 4551} 4552 4553static uint32_t 4554skl_compute_linetime_wm(struct intel_crtc_state *cstate) 4555{ 4556 struct drm_atomic_state *state = cstate->base.state; 4557 struct drm_i915_private *dev_priv = to_i915(state->dev); 4558 uint_fixed_16_16_t linetime_us; 4559 uint32_t linetime_wm; 4560 4561 linetime_us = intel_get_linetime_us(cstate); 4562 4563 if (is_fixed16_zero(linetime_us)) 4564 return 0; 4565 4566 linetime_wm = fixed_16_16_to_u32_round_up(mul_u32_fixed_16_16(8, 4567 linetime_us)); 4568 4569 /* Display WA #1135: bxt. */ 4570 if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled) 4571 linetime_wm = DIV_ROUND_UP(linetime_wm, 2); 4572 4573 return linetime_wm; 4574} 4575 4576static void skl_compute_transition_wm(struct intel_crtc_state *cstate, 4577 struct skl_wm_level *trans_wm /* out */) 4578{ 4579 if (!cstate->base.active) 4580 return; 4581 4582 /* Until we know more, just disable transition WMs */ 4583 trans_wm->plane_en = false; 4584} 4585 4586static int skl_build_pipe_wm(struct intel_crtc_state *cstate, 4587 struct skl_ddb_allocation *ddb, 4588 struct skl_pipe_wm *pipe_wm) 4589{ 4590 struct drm_device *dev = cstate->base.crtc->dev; 4591 struct drm_crtc_state *crtc_state = &cstate->base; 4592 const struct drm_i915_private *dev_priv = to_i915(dev); 4593 struct drm_plane *plane; 4594 const struct drm_plane_state *pstate; 4595 struct skl_plane_wm *wm; 4596 int ret; 4597 4598 /* 4599 * We'll only calculate watermarks for planes that are actually 4600 * enabled, so make sure all other planes are set as disabled. 
4601 */ 4602 memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); 4603 4604 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { 4605 const struct intel_plane_state *intel_pstate = 4606 to_intel_plane_state(pstate); 4607 enum plane_id plane_id = to_intel_plane(plane)->id; 4608 4609 wm = &pipe_wm->planes[plane_id]; 4610 4611 ret = skl_compute_wm_levels(dev_priv, ddb, cstate, 4612 intel_pstate, wm); 4613 if (ret) 4614 return ret; 4615 skl_compute_transition_wm(cstate, &wm->trans_wm); 4616 } 4617 pipe_wm->linetime = skl_compute_linetime_wm(cstate); 4618 4619 return 0; 4620} 4621 4622static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, 4623 i915_reg_t reg, 4624 const struct skl_ddb_entry *entry) 4625{ 4626 if (entry->end) 4627 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start); 4628 else 4629 I915_WRITE(reg, 0); 4630} 4631 4632static void skl_write_wm_level(struct drm_i915_private *dev_priv, 4633 i915_reg_t reg, 4634 const struct skl_wm_level *level) 4635{ 4636 uint32_t val = 0; 4637 4638 if (level->plane_en) { 4639 val |= PLANE_WM_EN; 4640 val |= level->plane_res_b; 4641 val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; 4642 } 4643 4644 I915_WRITE(reg, val); 4645} 4646 4647static void skl_write_plane_wm(struct intel_crtc *intel_crtc, 4648 const struct skl_plane_wm *wm, 4649 const struct skl_ddb_allocation *ddb, 4650 enum plane_id plane_id) 4651{ 4652 struct drm_crtc *crtc = &intel_crtc->base; 4653 struct drm_device *dev = crtc->dev; 4654 struct drm_i915_private *dev_priv = to_i915(dev); 4655 int level, max_level = ilk_wm_max_level(dev_priv); 4656 enum pipe pipe = intel_crtc->pipe; 4657 4658 for (level = 0; level <= max_level; level++) { 4659 skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), 4660 &wm->wm[level]); 4661 } 4662 skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), 4663 &wm->trans_wm); 4664 4665 skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), 4666 &ddb->plane[pipe][plane_id]); 4667 skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id), 4668 &ddb->y_plane[pipe][plane_id]); 4669} 4670 4671static void skl_write_cursor_wm(struct intel_crtc *intel_crtc, 4672 const struct skl_plane_wm *wm, 4673 const struct skl_ddb_allocation *ddb) 4674{ 4675 struct drm_crtc *crtc = &intel_crtc->base; 4676 struct drm_device *dev = crtc->dev; 4677 struct drm_i915_private *dev_priv = to_i915(dev); 4678 int level, max_level = ilk_wm_max_level(dev_priv); 4679 enum pipe pipe = intel_crtc->pipe; 4680 4681 for (level = 0; level <= max_level; level++) { 4682 skl_write_wm_level(dev_priv, CUR_WM(pipe, level), 4683 &wm->wm[level]); 4684 } 4685 skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm); 4686 4687 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), 4688 &ddb->plane[pipe][PLANE_CURSOR]); 4689} 4690 4691bool skl_wm_level_equals(const struct skl_wm_level *l1, 4692 const struct skl_wm_level *l2) 4693{ 4694 if (l1->plane_en != l2->plane_en) 4695 return false; 4696 4697 /* If both planes aren't enabled, the rest shouldn't matter */ 4698 if (!l1->plane_en) 4699 return true; 4700 4701 return (l1->plane_res_l == l2->plane_res_l && 4702 l1->plane_res_b == l2->plane_res_b); 4703} 4704 4705static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, 4706 const struct skl_ddb_entry *b) 4707{ 4708 return a->start < b->end && b->start < a->end; 4709} 4710 4711bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries, 4712 const struct skl_ddb_entry *ddb, 4713 int ignore) 4714{ 4715 int i; 4716 4717 for (i = 
0; i < I915_MAX_PIPES; i++) 4718 if (i != ignore && entries[i] && 4719 skl_ddb_entries_overlap(ddb, entries[i])) 4720 return true; 4721 4722 return false; 4723} 4724 4725static int skl_update_pipe_wm(struct drm_crtc_state *cstate, 4726 const struct skl_pipe_wm *old_pipe_wm, 4727 struct skl_pipe_wm *pipe_wm, /* out */ 4728 struct skl_ddb_allocation *ddb, /* out */ 4729 bool *changed /* out */) 4730{ 4731 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); 4732 int ret; 4733 4734 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); 4735 if (ret) 4736 return ret; 4737 4738 if (!memcmp(old_pipe_wm, pipe_wm, sizeof(*pipe_wm))) 4739 *changed = false; 4740 else 4741 *changed = true; 4742 4743 return 0; 4744} 4745 4746static uint32_t 4747pipes_modified(struct drm_atomic_state *state) 4748{ 4749 struct drm_crtc *crtc; 4750 struct drm_crtc_state *cstate; 4751 uint32_t i, ret = 0; 4752 4753 for_each_new_crtc_in_state(state, crtc, cstate, i) 4754 ret |= drm_crtc_mask(crtc); 4755 4756 return ret; 4757} 4758 4759static int 4760skl_ddb_add_affected_planes(struct intel_crtc_state *cstate) 4761{ 4762 struct drm_atomic_state *state = cstate->base.state; 4763 struct drm_device *dev = state->dev; 4764 struct drm_crtc *crtc = cstate->base.crtc; 4765 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4766 struct drm_i915_private *dev_priv = to_i915(dev); 4767 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 4768 struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; 4769 struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; 4770 struct drm_plane_state *plane_state; 4771 struct drm_plane *plane; 4772 enum pipe pipe = intel_crtc->pipe; 4773 4774 WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc)); 4775 4776 drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) { 4777 enum plane_id plane_id = to_intel_plane(plane)->id; 4778 4779 if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id], 4780 &new_ddb->plane[pipe][plane_id]) && 4781 skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id], 4782 &new_ddb->y_plane[pipe][plane_id])) 4783 continue; 4784 4785 plane_state = drm_atomic_get_plane_state(state, plane); 4786 if (IS_ERR(plane_state)) 4787 return PTR_ERR(plane_state); 4788 } 4789 4790 return 0; 4791} 4792 4793static int 4794skl_compute_ddb(struct drm_atomic_state *state) 4795{ 4796 struct drm_device *dev = state->dev; 4797 struct drm_i915_private *dev_priv = to_i915(dev); 4798 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 4799 struct intel_crtc *intel_crtc; 4800 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb; 4801 uint32_t realloc_pipes = pipes_modified(state); 4802 int ret; 4803 4804 /* 4805 * If this is our first atomic update following hardware readout, 4806 * we can't trust the DDB that the BIOS programmed for us. Let's 4807 * pretend that all pipes switched active status so that we'll 4808 * ensure a full DDB recompute. 4809 */ 4810 if (dev_priv->wm.distrust_bios_wm) { 4811 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, 4812 state->acquire_ctx); 4813 if (ret) 4814 return ret; 4815 4816 intel_state->active_pipe_changes = ~0; 4817 4818 /* 4819 * We usually only initialize intel_state->active_crtcs if we 4820 * are doing a modeset; make sure this field is always 4821 * initialized during the sanitization process that happens 4822 * on the first commit too.
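 * (Without this, non-modeset commits would see active_crtcs == 0 here.)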
4823 */ 4824 if (!intel_state->modeset) 4825 intel_state->active_crtcs = dev_priv->active_crtcs; 4826 } 4827 4828 /* 4829 * If the modeset changes which CRTC's are active, we need to 4830 * recompute the DDB allocation for *all* active pipes, even 4831 * those that weren't otherwise being modified in any way by this 4832 * atomic commit. Due to the shrinking of the per-pipe allocations 4833 * when new active CRTC's are added, it's possible for a pipe that 4834 * we were already using and aren't changing at all here to suddenly 4835 * become invalid if its DDB needs exceeds its new allocation. 4836 * 4837 * Note that if we wind up doing a full DDB recompute, we can't let 4838 * any other display updates race with this transaction, so we need 4839 * to grab the lock on *all* CRTC's. 4840 */ 4841 if (intel_state->active_pipe_changes) { 4842 realloc_pipes = ~0; 4843 intel_state->wm_results.dirty_pipes = ~0; 4844 } 4845 4846 /* 4847 * We're not recomputing for the pipes not included in the commit, so 4848 * make sure we start with the current state. 4849 */ 4850 memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); 4851 4852 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { 4853 struct intel_crtc_state *cstate; 4854 4855 cstate = intel_atomic_get_crtc_state(state, intel_crtc); 4856 if (IS_ERR(cstate)) 4857 return PTR_ERR(cstate); 4858 4859 ret = skl_allocate_pipe_ddb(cstate, ddb); 4860 if (ret) 4861 return ret; 4862 4863 ret = skl_ddb_add_affected_planes(cstate); 4864 if (ret) 4865 return ret; 4866 } 4867 4868 return 0; 4869} 4870 4871static void 4872skl_copy_wm_for_pipe(struct skl_wm_values *dst, 4873 struct skl_wm_values *src, 4874 enum pipe pipe) 4875{ 4876 memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe], 4877 sizeof(dst->ddb.y_plane[pipe])); 4878 memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe], 4879 sizeof(dst->ddb.plane[pipe])); 4880} 4881 4882static void 4883skl_print_wm_changes(const struct drm_atomic_state *state) 4884{ 4885 const struct drm_device *dev = state->dev; 4886 const struct drm_i915_private *dev_priv = to_i915(dev); 4887 const struct intel_atomic_state *intel_state = 4888 to_intel_atomic_state(state); 4889 const struct drm_crtc *crtc; 4890 const struct drm_crtc_state *cstate; 4891 const struct intel_plane *intel_plane; 4892 const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb; 4893 const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; 4894 int i; 4895 4896 for_each_new_crtc_in_state(state, crtc, cstate, i) { 4897 const struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4898 enum pipe pipe = intel_crtc->pipe; 4899 4900 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 4901 enum plane_id plane_id = intel_plane->id; 4902 const struct skl_ddb_entry *old, *new; 4903 4904 old = &old_ddb->plane[pipe][plane_id]; 4905 new = &new_ddb->plane[pipe][plane_id]; 4906 4907 if (skl_ddb_entry_equal(old, new)) 4908 continue; 4909 4910 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", 4911 intel_plane->base.base.id, 4912 intel_plane->base.name, 4913 old->start, old->end, 4914 new->start, new->end); 4915 } 4916 } 4917} 4918 4919static int 4920skl_compute_wm(struct drm_atomic_state *state) 4921{ 4922 struct drm_crtc *crtc; 4923 struct drm_crtc_state *cstate; 4924 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 4925 struct skl_wm_values *results = &intel_state->wm_results; 4926 struct drm_device *dev = state->dev; 4927 struct skl_pipe_wm *pipe_wm; 4928 bool changed = false; 4929 int ret, i; 4930 4931 
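	/*
	 * Note that 'changed' doubles as the global "anything to do at all?"
	 * flag here and as the per-pipe result flag in the loop further below.
	 */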
/* 4932 * When we distrust bios wm we always need to recompute to set the 4933 * expected DDB allocations for each CRTC. 4934 */ 4935 if (to_i915(dev)->wm.distrust_bios_wm) 4936 changed = true; 4937 4938 /* 4939 * If this transaction isn't actually touching any CRTC's, don't 4940 * bother with watermark calculation. Note that if we pass this 4941 * test, we're guaranteed to hold at least one CRTC state mutex, 4942 * which means we can safely use values like dev_priv->active_crtcs 4943 * since any racing commits that want to update them would need to 4944 * hold _all_ CRTC state mutexes. 4945 */ 4946 for_each_new_crtc_in_state(state, crtc, cstate, i) 4947 changed = true; 4948 4949 if (!changed) 4950 return 0; 4951 4952 /* Clear all dirty flags */ 4953 results->dirty_pipes = 0; 4954 4955 ret = skl_compute_ddb(state); 4956 if (ret) 4957 return ret; 4958 4959 /* 4960 * Calculate WM's for all pipes that are part of this transaction. 4961 * Note that the DDB allocation above may have added more CRTC's that 4962 * weren't otherwise being modified (and set bits in dirty_pipes) if 4963 * pipe allocations had to change. 4964 * 4965 * FIXME: Now that we're doing this in the atomic check phase, we 4966 * should allow skl_update_pipe_wm() to return failure in cases where 4967 * no suitable watermark values can be found. 4968 */ 4969 for_each_new_crtc_in_state(state, crtc, cstate, i) { 4970 struct intel_crtc_state *intel_cstate = 4971 to_intel_crtc_state(cstate); 4972 const struct skl_pipe_wm *old_pipe_wm = 4973 &to_intel_crtc_state(crtc->state)->wm.skl.optimal; 4974 4975 pipe_wm = &intel_cstate->wm.skl.optimal; 4976 ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, 4977 &results->ddb, &changed); 4978 if (ret) 4979 return ret; 4980 4981 if (changed) 4982 results->dirty_pipes |= drm_crtc_mask(crtc); 4983 4984 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) 4985 /* This pipe's WM's did not change */ 4986 continue; 4987 4988 intel_cstate->update_wm_pre = true; 4989 } 4990 4991 skl_print_wm_changes(state); 4992 4993 return 0; 4994} 4995 4996static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, 4997 struct intel_crtc_state *cstate) 4998{ 4999 struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); 5000 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 5001 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; 5002 const struct skl_ddb_allocation *ddb = &state->wm_results.ddb; 5003 enum pipe pipe = crtc->pipe; 5004 enum plane_id plane_id; 5005 5006 if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) 5007 return; 5008 5009 I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); 5010 5011 for_each_plane_id_on_crtc(crtc, plane_id) { 5012 if (plane_id != PLANE_CURSOR) 5013 skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id], 5014 ddb, plane_id); 5015 else 5016 skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id], 5017 ddb); 5018 } 5019} 5020 5021static void skl_initial_wm(struct intel_atomic_state *state, 5022 struct intel_crtc_state *cstate) 5023{ 5024 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 5025 struct drm_device *dev = intel_crtc->base.dev; 5026 struct drm_i915_private *dev_priv = to_i915(dev); 5027 struct skl_wm_values *results = &state->wm_results; 5028 struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw; 5029 enum pipe pipe = intel_crtc->pipe; 5030 5031 if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0) 5032 return; 5033 5034 mutex_lock(&dev_priv->wm.wm_mutex); 5035 5036 if (cstate->base.active_changed) 
5037 skl_atomic_update_crtc_wm(state, cstate); 5038 5039 skl_copy_wm_for_pipe(hw_vals, results, pipe); 5040 5041 mutex_unlock(&dev_priv->wm.wm_mutex); 5042} 5043 5044static void ilk_compute_wm_config(struct drm_device *dev, 5045 struct intel_wm_config *config) 5046{ 5047 struct intel_crtc *crtc; 5048 5049 /* Compute the currently _active_ config */ 5050 for_each_intel_crtc(dev, crtc) { 5051 const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; 5052 5053 if (!wm->pipe_enabled) 5054 continue; 5055 5056 config->sprites_enabled |= wm->sprites_enabled; 5057 config->sprites_scaled |= wm->sprites_scaled; 5058 config->num_pipes_active++; 5059 } 5060} 5061 5062static void ilk_program_watermarks(struct drm_i915_private *dev_priv) 5063{ 5064 struct drm_device *dev = &dev_priv->drm; 5065 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 5066 struct ilk_wm_maximums max; 5067 struct intel_wm_config config = {}; 5068 struct ilk_wm_values results = {}; 5069 enum intel_ddb_partitioning partitioning; 5070 5071 ilk_compute_wm_config(dev, &config); 5072 5073 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); 5074 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); 5075 5076 /* 5/6 split only in single pipe config on IVB+ */ 5077 if (INTEL_GEN(dev_priv) >= 7 && 5078 config.num_pipes_active == 1 && config.sprites_enabled) { 5079 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); 5080 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); 5081 5082 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); 5083 } else { 5084 best_lp_wm = &lp_wm_1_2; 5085 } 5086 5087 partitioning = (best_lp_wm == &lp_wm_1_2) ? 5088 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 5089 5090 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results); 5091 5092 ilk_write_wm_values(dev_priv, &results); 5093} 5094 5095static void ilk_initial_watermarks(struct intel_atomic_state *state, 5096 struct intel_crtc_state *cstate) 5097{ 5098 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); 5099 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 5100 5101 mutex_lock(&dev_priv->wm.wm_mutex); 5102 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate; 5103 ilk_program_watermarks(dev_priv); 5104 mutex_unlock(&dev_priv->wm.wm_mutex); 5105} 5106 5107static void ilk_optimize_watermarks(struct intel_atomic_state *state, 5108 struct intel_crtc_state *cstate) 5109{ 5110 struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); 5111 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 5112 5113 mutex_lock(&dev_priv->wm.wm_mutex); 5114 if (cstate->wm.need_postvbl_update) { 5115 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal; 5116 ilk_program_watermarks(dev_priv); 5117 } 5118 mutex_unlock(&dev_priv->wm.wm_mutex); 5119} 5120 5121static inline void skl_wm_level_from_reg_val(uint32_t val, 5122 struct skl_wm_level *level) 5123{ 5124 level->plane_en = val & PLANE_WM_EN; 5125 level->plane_res_b = val & PLANE_WM_BLOCKS_MASK; 5126 level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) & 5127 PLANE_WM_LINES_MASK; 5128} 5129 5130void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc, 5131 struct skl_pipe_wm *out) 5132{ 5133 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5134 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5135 enum pipe pipe = intel_crtc->pipe; 5136 int level, max_level; 5137 enum plane_id plane_id; 5138 uint32_t val; 5139 5140 max_level = ilk_wm_max_level(dev_priv); 5141 5142 for_each_plane_id_on_crtc(intel_crtc, plane_id) { 5143 struct 
skl_plane_wm *wm = &out->planes[plane_id]; 5144 5145 for (level = 0; level <= max_level; level++) { 5146 if (plane_id != PLANE_CURSOR) 5147 val = I915_READ(PLANE_WM(pipe, plane_id, level)); 5148 else 5149 val = I915_READ(CUR_WM(pipe, level)); 5150 5151 skl_wm_level_from_reg_val(val, &wm->wm[level]); 5152 } 5153 5154 if (plane_id != PLANE_CURSOR) 5155 val = I915_READ(PLANE_WM_TRANS(pipe, plane_id)); 5156 else 5157 val = I915_READ(CUR_WM_TRANS(pipe)); 5158 5159 skl_wm_level_from_reg_val(val, &wm->trans_wm); 5160 } 5161 5162 if (!intel_crtc->active) 5163 return; 5164 5165 out->linetime = I915_READ(PIPE_WM_LINETIME(pipe)); 5166} 5167 5168void skl_wm_get_hw_state(struct drm_device *dev) 5169{ 5170 struct drm_i915_private *dev_priv = to_i915(dev); 5171 struct skl_wm_values *hw = &dev_priv->wm.skl_hw; 5172 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; 5173 struct drm_crtc *crtc; 5174 struct intel_crtc *intel_crtc; 5175 struct intel_crtc_state *cstate; 5176 5177 skl_ddb_get_hw_state(dev_priv, ddb); 5178 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 5179 intel_crtc = to_intel_crtc(crtc); 5180 cstate = to_intel_crtc_state(crtc->state); 5181 5182 skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal); 5183 5184 if (intel_crtc->active) 5185 hw->dirty_pipes |= drm_crtc_mask(crtc); 5186 } 5187 5188 if (dev_priv->active_crtcs) { 5189 /* Fully recompute DDB on first atomic commit */ 5190 dev_priv->wm.distrust_bios_wm = true; 5191 } else { 5192 /* Easy/common case; just sanitize DDB now if everything off */ 5193 memset(ddb, 0, sizeof(*ddb)); 5194 } 5195} 5196 5197static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) 5198{ 5199 struct drm_device *dev = crtc->dev; 5200 struct drm_i915_private *dev_priv = to_i915(dev); 5201 struct ilk_wm_values *hw = &dev_priv->wm.hw; 5202 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5203 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 5204 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal; 5205 enum pipe pipe = intel_crtc->pipe; 5206 static const i915_reg_t wm0_pipe_reg[] = { 5207 [PIPE_A] = WM0_PIPEA_ILK, 5208 [PIPE_B] = WM0_PIPEB_ILK, 5209 [PIPE_C] = WM0_PIPEC_IVB, 5210 }; 5211 5212 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); 5213 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 5214 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 5215 5216 memset(active, 0, sizeof(*active)); 5217 5218 active->pipe_enabled = intel_crtc->active; 5219 5220 if (active->pipe_enabled) { 5221 u32 tmp = hw->wm_pipe[pipe]; 5222 5223 /* 5224 * For active pipes LP0 watermark is marked as 5225 * enabled, and LP1+ watermarks as disabled since 5226 * we can't really reverse compute them in case 5227 * multiple pipes are active. 5228 */ 5229 active->wm[0].enable = true; 5230 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT; 5231 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT; 5232 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; 5233 active->linetime = hw->wm_linetime[pipe]; 5234 } else { 5235 int level, max_level = ilk_wm_max_level(dev_priv); 5236 5237 /* 5238 * For inactive pipes, all watermark levels 5239 * should be marked as enabled but zeroed, 5240 * which is what we'd compute them to.
5241 */ 5242 for (level = 0; level <= max_level; level++) 5243 active->wm[level].enable = true; 5244 } 5245 5246 intel_crtc->wm.active.ilk = *active; 5247} 5248 5249#define _FW_WM(value, plane) \ 5250 (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) 5251#define _FW_WM_VLV(value, plane) \ 5252 (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) 5253 5254static void g4x_read_wm_values(struct drm_i915_private *dev_priv, 5255 struct g4x_wm_values *wm) 5256{ 5257 uint32_t tmp; 5258 5259 tmp = I915_READ(DSPFW1); 5260 wm->sr.plane = _FW_WM(tmp, SR); 5261 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); 5262 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB); 5263 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA); 5264 5265 tmp = I915_READ(DSPFW2); 5266 wm->fbc_en = tmp & DSPFW_FBC_SR_EN; 5267 wm->sr.fbc = _FW_WM(tmp, FBC_SR); 5268 wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR); 5269 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB); 5270 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); 5271 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA); 5272 5273 tmp = I915_READ(DSPFW3); 5274 wm->hpll_en = tmp & DSPFW_HPLL_SR_EN; 5275 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); 5276 wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR); 5277 wm->hpll.plane = _FW_WM(tmp, HPLL_SR); 5278} 5279 5280static void vlv_read_wm_values(struct drm_i915_private *dev_priv, 5281 struct vlv_wm_values *wm) 5282{ 5283 enum pipe pipe; 5284 uint32_t tmp; 5285 5286 for_each_pipe(dev_priv, pipe) { 5287 tmp = I915_READ(VLV_DDL(pipe)); 5288 5289 wm->ddl[pipe].plane[PLANE_PRIMARY] = 5290 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 5291 wm->ddl[pipe].plane[PLANE_CURSOR] = 5292 (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 5293 wm->ddl[pipe].plane[PLANE_SPRITE0] = 5294 (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 5295 wm->ddl[pipe].plane[PLANE_SPRITE1] = 5296 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 5297 } 5298 5299 tmp = I915_READ(DSPFW1); 5300 wm->sr.plane = _FW_WM(tmp, SR); 5301 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); 5302 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); 5303 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); 5304 5305 tmp = I915_READ(DSPFW2); 5306 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); 5307 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); 5308 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); 5309 5310 tmp = I915_READ(DSPFW3); 5311 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); 5312 5313 if (IS_CHERRYVIEW(dev_priv)) { 5314 tmp = I915_READ(DSPFW7_CHV); 5315 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); 5316 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); 5317 5318 tmp = I915_READ(DSPFW8_CHV); 5319 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); 5320 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); 5321 5322 tmp = I915_READ(DSPFW9_CHV); 5323 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); 5324 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); 5325 5326 tmp = I915_READ(DSPHOWM); 5327 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 5328 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; 5329 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; 5330 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, 
PLANEC_HI) << 8; 5331 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; 5332 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 5333 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; 5334 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 5335 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 5336 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; 5337 } else { 5338 tmp = I915_READ(DSPFW7); 5339 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); 5340 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); 5341 5342 tmp = I915_READ(DSPHOWM); 5343 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 5344 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; 5345 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 5346 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; 5347 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 5348 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 5349 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; 5350 } 5351} 5352 5353#undef _FW_WM 5354#undef _FW_WM_VLV 5355 5356void g4x_wm_get_hw_state(struct drm_device *dev) 5357{ 5358 struct drm_i915_private *dev_priv = to_i915(dev); 5359 struct g4x_wm_values *wm = &dev_priv->wm.g4x; 5360 struct intel_crtc *crtc; 5361 5362 g4x_read_wm_values(dev_priv, wm); 5363 5364 wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 5365 5366 for_each_intel_crtc(dev, crtc) { 5367 struct intel_crtc_state *crtc_state = 5368 to_intel_crtc_state(crtc->base.state); 5369 struct g4x_wm_state *active = &crtc->wm.active.g4x; 5370 struct g4x_pipe_wm *raw; 5371 enum pipe pipe = crtc->pipe; 5372 enum plane_id plane_id; 5373 int level, max_level; 5374 5375 active->cxsr = wm->cxsr; 5376 active->hpll_en = wm->hpll_en; 5377 active->fbc_en = wm->fbc_en; 5378 5379 active->sr = wm->sr; 5380 active->hpll = wm->hpll; 5381 5382 for_each_plane_id_on_crtc(crtc, plane_id) { 5383 active->wm.plane[plane_id] = 5384 wm->pipe[pipe].plane[plane_id]; 5385 } 5386 5387 if (wm->cxsr && wm->hpll_en) 5388 max_level = G4X_WM_LEVEL_HPLL; 5389 else if (wm->cxsr) 5390 max_level = G4X_WM_LEVEL_SR; 5391 else 5392 max_level = G4X_WM_LEVEL_NORMAL; 5393 5394 level = G4X_WM_LEVEL_NORMAL; 5395 raw = &crtc_state->wm.g4x.raw[level]; 5396 for_each_plane_id_on_crtc(crtc, plane_id) 5397 raw->plane[plane_id] = active->wm.plane[plane_id]; 5398 5399 if (++level > max_level) 5400 goto out; 5401 5402 raw = &crtc_state->wm.g4x.raw[level]; 5403 raw->plane[PLANE_PRIMARY] = active->sr.plane; 5404 raw->plane[PLANE_CURSOR] = active->sr.cursor; 5405 raw->plane[PLANE_SPRITE0] = 0; 5406 raw->fbc = active->sr.fbc; 5407 5408 if (++level > max_level) 5409 goto out; 5410 5411 raw = &crtc_state->wm.g4x.raw[level]; 5412 raw->plane[PLANE_PRIMARY] = active->hpll.plane; 5413 raw->plane[PLANE_CURSOR] = active->hpll.cursor; 5414 raw->plane[PLANE_SPRITE0] = 0; 5415 raw->fbc = active->hpll.fbc; 5416 5417 out: 5418 for_each_plane_id_on_crtc(crtc, plane_id) 5419 g4x_raw_plane_wm_set(crtc_state, level, 5420 plane_id, USHRT_MAX); 5421 g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); 5422 5423 crtc_state->wm.g4x.optimal = *active; 5424 crtc_state->wm.g4x.intermediate = *active; 5425 5426 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", 5427 pipe_name(pipe), 5428 wm->pipe[pipe].plane[PLANE_PRIMARY], 5429 wm->pipe[pipe].plane[PLANE_CURSOR], 5430 
wm->pipe[pipe].plane[PLANE_SPRITE0]); 5431 } 5432 5433 DRM_DEBUG_KMS("Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n", 5434 wm->sr.plane, wm->sr.cursor, wm->sr.fbc); 5435 DRM_DEBUG_KMS("Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", 5436 wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); 5437 DRM_DEBUG_KMS("Initial SR=%s HPLL=%s FBC=%s\n", 5438 yesno(wm->cxsr), yesno(wm->hpll_en), yesno(wm->fbc_en)); 5439} 5440 5441void g4x_wm_sanitize(struct drm_i915_private *dev_priv) 5442{ 5443 struct intel_plane *plane; 5444 struct intel_crtc *crtc; 5445 5446 mutex_lock(&dev_priv->wm.wm_mutex); 5447 5448 for_each_intel_plane(&dev_priv->drm, plane) { 5449 struct intel_crtc *crtc = 5450 intel_get_crtc_for_pipe(dev_priv, plane->pipe); 5451 struct intel_crtc_state *crtc_state = 5452 to_intel_crtc_state(crtc->base.state); 5453 struct intel_plane_state *plane_state = 5454 to_intel_plane_state(plane->base.state); 5455 struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; 5456 enum plane_id plane_id = plane->id; 5457 int level; 5458 5459 if (plane_state->base.visible) 5460 continue; 5461 5462 for (level = 0; level < 3; level++) { 5463 struct g4x_pipe_wm *raw = 5464 &crtc_state->wm.g4x.raw[level]; 5465 5466 raw->plane[plane_id] = 0; 5467 wm_state->wm.plane[plane_id] = 0; 5468 } 5469 5470 if (plane_id == PLANE_PRIMARY) { 5471 for (level = 0; level < 3; level++) { 5472 struct g4x_pipe_wm *raw = 5473 &crtc_state->wm.g4x.raw[level]; 5474 raw->fbc = 0; 5475 } 5476 5477 wm_state->sr.fbc = 0; 5478 wm_state->hpll.fbc = 0; 5479 wm_state->fbc_en = false; 5480 } 5481 } 5482 5483 for_each_intel_crtc(&dev_priv->drm, crtc) { 5484 struct intel_crtc_state *crtc_state = 5485 to_intel_crtc_state(crtc->base.state); 5486 5487 crtc_state->wm.g4x.intermediate = 5488 crtc_state->wm.g4x.optimal; 5489 crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; 5490 } 5491 5492 g4x_program_watermarks(dev_priv); 5493 5494 mutex_unlock(&dev_priv->wm.wm_mutex); 5495} 5496 5497void vlv_wm_get_hw_state(struct drm_device *dev) 5498{ 5499 struct drm_i915_private *dev_priv = to_i915(dev); 5500 struct vlv_wm_values *wm = &dev_priv->wm.vlv; 5501 struct intel_crtc *crtc; 5502 u32 val; 5503 5504 vlv_read_wm_values(dev_priv, wm); 5505 5506 wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 5507 wm->level = VLV_WM_LEVEL_PM2; 5508 5509 if (IS_CHERRYVIEW(dev_priv)) { 5510 mutex_lock(&dev_priv->rps.hw_lock); 5511 5512 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 5513 if (val & DSP_MAXFIFO_PM5_ENABLE) 5514 wm->level = VLV_WM_LEVEL_PM5; 5515 5516 /* 5517 * If DDR DVFS is disabled in the BIOS, Punit 5518 * will never ack the request. So if that happens 5519 * assume we don't have to enable/disable DDR DVFS 5520 * dynamically. To test that just set the REQ_ACK 5521 * bit to poke the Punit, but don't change the 5522 * HIGH/LOW bits so that we don't actually change 5523 * the current state. 
5524 */ 5525 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 5526 val |= FORCE_DDR_FREQ_REQ_ACK; 5527 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); 5528 5529 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & 5530 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { 5531 DRM_DEBUG_KMS("Punit not acking DDR DVFS request, " 5532 "assuming DDR DVFS is disabled\n"); 5533 dev_priv->wm.max_level = VLV_WM_LEVEL_PM5; 5534 } else { 5535 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 5536 if ((val & FORCE_DDR_HIGH_FREQ) == 0) 5537 wm->level = VLV_WM_LEVEL_DDR_DVFS; 5538 } 5539 5540 mutex_unlock(&dev_priv->rps.hw_lock); 5541 } 5542 5543 for_each_intel_crtc(dev, crtc) { 5544 struct intel_crtc_state *crtc_state = 5545 to_intel_crtc_state(crtc->base.state); 5546 struct vlv_wm_state *active = &crtc->wm.active.vlv; 5547 const struct vlv_fifo_state *fifo_state = 5548 &crtc_state->wm.vlv.fifo_state; 5549 enum pipe pipe = crtc->pipe; 5550 enum plane_id plane_id; 5551 int level; 5552 5553 vlv_get_fifo_size(crtc_state); 5554 5555 active->num_levels = wm->level + 1; 5556 active->cxsr = wm->cxsr; 5557 5558 for (level = 0; level < active->num_levels; level++) { 5559 struct g4x_pipe_wm *raw = 5560 &crtc_state->wm.vlv.raw[level]; 5561 5562 active->sr[level].plane = wm->sr.plane; 5563 active->sr[level].cursor = wm->sr.cursor; 5564 5565 for_each_plane_id_on_crtc(crtc, plane_id) { 5566 active->wm[level].plane[plane_id] = 5567 wm->pipe[pipe].plane[plane_id]; 5568 5569 raw->plane[plane_id] = 5570 vlv_invert_wm_value(active->wm[level].plane[plane_id], 5571 fifo_state->plane[plane_id]); 5572 } 5573 } 5574 5575 for_each_plane_id_on_crtc(crtc, plane_id) 5576 vlv_raw_plane_wm_set(crtc_state, level, 5577 plane_id, USHRT_MAX); 5578 vlv_invalidate_wms(crtc, active, level); 5579 5580 crtc_state->wm.vlv.optimal = *active; 5581 crtc_state->wm.vlv.intermediate = *active; 5582 5583 DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", 5584 pipe_name(pipe), 5585 wm->pipe[pipe].plane[PLANE_PRIMARY], 5586 wm->pipe[pipe].plane[PLANE_CURSOR], 5587 wm->pipe[pipe].plane[PLANE_SPRITE0], 5588 wm->pipe[pipe].plane[PLANE_SPRITE1]); 5589 } 5590 5591 DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", 5592 wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); 5593} 5594 5595void vlv_wm_sanitize(struct drm_i915_private *dev_priv) 5596{ 5597 struct intel_plane *plane; 5598 struct intel_crtc *crtc; 5599 5600 mutex_lock(&dev_priv->wm.wm_mutex); 5601 5602 for_each_intel_plane(&dev_priv->drm, plane) { 5603 struct intel_crtc *crtc = 5604 intel_get_crtc_for_pipe(dev_priv, plane->pipe); 5605 struct intel_crtc_state *crtc_state = 5606 to_intel_crtc_state(crtc->base.state); 5607 struct intel_plane_state *plane_state = 5608 to_intel_plane_state(plane->base.state); 5609 struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; 5610 const struct vlv_fifo_state *fifo_state = 5611 &crtc_state->wm.vlv.fifo_state; 5612 enum plane_id plane_id = plane->id; 5613 int level; 5614 5615 if (plane_state->base.visible) 5616 continue; 5617 5618 for (level = 0; level < wm_state->num_levels; level++) { 5619 struct g4x_pipe_wm *raw = 5620 &crtc_state->wm.vlv.raw[level]; 5621 5622 raw->plane[plane_id] = 0; 5623 5624 wm_state->wm[level].plane[plane_id] = 5625 vlv_invert_wm_value(raw->plane[plane_id], 5626 fifo_state->plane[plane_id]); 5627 } 5628 } 5629 5630 for_each_intel_crtc(&dev_priv->drm, crtc) { 5631 struct intel_crtc_state *crtc_state = 5632 to_intel_crtc_state(crtc->base.state); 5633 
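		/*
		 * Re-sync both the intermediate state and the active hardware
		 * state with the sanitized optimal values so the next commit
		 * starts from a consistent baseline.
		 */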
5634 crtc_state->wm.vlv.intermediate = 5635 crtc_state->wm.vlv.optimal; 5636 crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; 5637 } 5638 5639 vlv_program_watermarks(dev_priv); 5640 5641 mutex_unlock(&dev_priv->wm.wm_mutex); 5642} 5643 5644void ilk_wm_get_hw_state(struct drm_device *dev) 5645{ 5646 struct drm_i915_private *dev_priv = to_i915(dev); 5647 struct ilk_wm_values *hw = &dev_priv->wm.hw; 5648 struct drm_crtc *crtc; 5649 5650 for_each_crtc(dev, crtc) 5651 ilk_pipe_wm_get_hw_state(crtc); 5652 5653 hw->wm_lp[0] = I915_READ(WM1_LP_ILK); 5654 hw->wm_lp[1] = I915_READ(WM2_LP_ILK); 5655 hw->wm_lp[2] = I915_READ(WM3_LP_ILK); 5656 5657 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); 5658 if (INTEL_GEN(dev_priv) >= 7) { 5659 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); 5660 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); 5661 } 5662 5663 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 5664 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 5665 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 5666 else if (IS_IVYBRIDGE(dev_priv)) 5667 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? 5668 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 5669 5670 hw->enable_fbc_wm = 5671 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); 5672} 5673 5674/** 5675 * intel_update_watermarks - update FIFO watermark values based on current modes 5676 * 5677 * Calculate watermark values for the various WM regs based on current mode 5678 * and plane configuration. 5679 * 5680 * There are several cases to deal with here: 5681 * - normal (i.e. non-self-refresh) 5682 * - self-refresh (SR) mode 5683 * - lines are large relative to FIFO size (buffer can hold up to 2) 5684 * - lines are small relative to FIFO size (buffer can hold more than 2 5685 * lines), so need to account for TLB latency 5686 * 5687 * The normal calculation is: 5688 * watermark = dotclock * bytes per pixel * latency 5689 * where latency is platform & configuration dependent (we assume pessimal 5690 * values here). 5691 * 5692 * The SR calculation is: 5693 * watermark = (trunc(latency/line time)+1) * surface width * 5694 * bytes per pixel 5695 * where 5696 * line time = htotal / dotclock 5697 * surface width = hdisplay for normal plane and 64 for cursor 5698 * and latency is assumed to be high, as above. 5699 * 5700 * The final value programmed to the register should always be rounded up, 5701 * and include an extra 2 entries to account for clock crossings. 5702 * 5703 * We don't use the sprite, so we can ignore that. And on Crestline we have 5704 * to set the non-SR watermarks to 8. 5705 */ 5706void intel_update_watermarks(struct intel_crtc *crtc) 5707{ 5708 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5709 5710 if (dev_priv->display.update_wm) 5711 dev_priv->display.update_wm(crtc); 5712} 5713 5714/* 5715 * Lock protecting IPS related data structures 5716 */ 5717DEFINE_SPINLOCK(mchdev_lock); 5718 5719/* Global for IPS driver to get at the current i915 device. Protected by 5720 * mchdev_lock. 
*/ 5721static struct drm_i915_private *i915_mch_dev; 5722 5723bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val) 5724{ 5725 u16 rgvswctl; 5726 5727 lockdep_assert_held(&mchdev_lock); 5728 5729 rgvswctl = I915_READ16(MEMSWCTL); 5730 if (rgvswctl & MEMCTL_CMD_STS) { 5731 DRM_DEBUG("gpu busy, RCS change rejected\n"); 5732 return false; /* still busy with another command */ 5733 } 5734 5735 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 5736 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 5737 I915_WRITE16(MEMSWCTL, rgvswctl); 5738 POSTING_READ16(MEMSWCTL); 5739 5740 rgvswctl |= MEMCTL_CMD_STS; 5741 I915_WRITE16(MEMSWCTL, rgvswctl); 5742 5743 return true; 5744} 5745 5746static void ironlake_enable_drps(struct drm_i915_private *dev_priv) 5747{ 5748 u32 rgvmodectl; 5749 u8 fmax, fmin, fstart, vstart; 5750 5751 spin_lock_irq(&mchdev_lock); 5752 5753 rgvmodectl = I915_READ(MEMMODECTL); 5754 5755 /* Enable temp reporting */ 5756 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 5757 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 5758 5759 /* 100ms RC evaluation intervals */ 5760 I915_WRITE(RCUPEI, 100000); 5761 I915_WRITE(RCDNEI, 100000); 5762 5763 /* Set max/min thresholds to 90ms and 80ms respectively */ 5764 I915_WRITE(RCBMAXAVG, 90000); 5765 I915_WRITE(RCBMINAVG, 80000); 5766 5767 I915_WRITE(MEMIHYST, 1); 5768 5769 /* Set up min, max, and cur for interrupt handling */ 5770 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 5771 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 5772 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 5773 MEMMODE_FSTART_SHIFT; 5774 5775 vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >> 5776 PXVFREQ_PX_SHIFT; 5777 5778 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ 5779 dev_priv->ips.fstart = fstart; 5780 5781 dev_priv->ips.max_delay = fstart; 5782 dev_priv->ips.min_delay = fmin; 5783 dev_priv->ips.cur_delay = fstart; 5784 5785 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", 5786 fmax, fmin, fstart); 5787 5788 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 5789 5790 /* 5791 * Interrupts will be enabled in ironlake_irq_postinstall 5792 */ 5793 5794 I915_WRITE(VIDSTART, vstart); 5795 POSTING_READ(VIDSTART); 5796 5797 rgvmodectl |= MEMMODE_SWMODE_EN; 5798 I915_WRITE(MEMMODECTL, rgvmodectl); 5799 5800 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) 5801 DRM_ERROR("stuck trying to change perf mode\n"); 5802 mdelay(1); 5803 5804 ironlake_set_drps(dev_priv, fstart); 5805 5806 dev_priv->ips.last_count1 = I915_READ(DMIEC) + 5807 I915_READ(DDREC) + I915_READ(CSIEC); 5808 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); 5809 dev_priv->ips.last_count2 = I915_READ(GFXEC); 5810 dev_priv->ips.last_time2 = ktime_get_raw_ns(); 5811 5812 spin_unlock_irq(&mchdev_lock); 5813} 5814 5815static void ironlake_disable_drps(struct drm_i915_private *dev_priv) 5816{ 5817 u16 rgvswctl; 5818 5819 spin_lock_irq(&mchdev_lock); 5820 5821 rgvswctl = I915_READ16(MEMSWCTL); 5822 5823 /* Ack interrupts, disable EFC interrupt */ 5824 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 5825 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); 5826 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); 5827 I915_WRITE(DEIIR, DE_PCU_EVENT); 5828 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 5829 5830 /* Go back to the starting frequency */ 5831 ironlake_set_drps(dev_priv, dev_priv->ips.fstart); 5832 mdelay(1); 5833 rgvswctl |= MEMCTL_CMD_STS; 5834 I915_WRITE(MEMSWCTL, rgvswctl); 5835 mdelay(1); 5836 5837 
spin_unlock_irq(&mchdev_lock); 5838} 5839 5840/* There's a funny hw issue where the hw returns all 0 when reading from 5841 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value 5842 * ourselves, instead of doing a rmw cycle (which might result in us clearing 5843 * all limits and the gpu stuck at whatever frequency it is at atm). 5844 */ 5845static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) 5846{ 5847 u32 limits; 5848 5849 /* Only set the down limit when we've reached the lowest level to avoid 5850 * getting more interrupts, otherwise leave this clear. This prevents a 5851 * race in the hw when coming out of rc6: There's a tiny window where 5852 * the hw runs at the minimal clock before selecting the desired 5853 * frequency, if the down threshold expires in that window we will not 5854 * receive a down interrupt. */ 5855 if (IS_GEN9(dev_priv)) { 5856 limits = (dev_priv->rps.max_freq_softlimit) << 23; 5857 if (val <= dev_priv->rps.min_freq_softlimit) 5858 limits |= (dev_priv->rps.min_freq_softlimit) << 14; 5859 } else { 5860 limits = dev_priv->rps.max_freq_softlimit << 24; 5861 if (val <= dev_priv->rps.min_freq_softlimit) 5862 limits |= dev_priv->rps.min_freq_softlimit << 16; 5863 } 5864 5865 return limits; 5866} 5867 5868static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) 5869{ 5870 int new_power; 5871 u32 threshold_up = 0, threshold_down = 0; /* in % */ 5872 u32 ei_up = 0, ei_down = 0; 5873 5874 new_power = dev_priv->rps.power; 5875 switch (dev_priv->rps.power) { 5876 case LOW_POWER: 5877 if (val > dev_priv->rps.efficient_freq + 1 && 5878 val > dev_priv->rps.cur_freq) 5879 new_power = BETWEEN; 5880 break; 5881 5882 case BETWEEN: 5883 if (val <= dev_priv->rps.efficient_freq && 5884 val < dev_priv->rps.cur_freq) 5885 new_power = LOW_POWER; 5886 else if (val >= dev_priv->rps.rp0_freq && 5887 val > dev_priv->rps.cur_freq) 5888 new_power = HIGH_POWER; 5889 break; 5890 5891 case HIGH_POWER: 5892 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && 5893 val < dev_priv->rps.cur_freq) 5894 new_power = BETWEEN; 5895 break; 5896 } 5897 /* Max/min bins are special */ 5898 if (val <= dev_priv->rps.min_freq_softlimit) 5899 new_power = LOW_POWER; 5900 if (val >= dev_priv->rps.max_freq_softlimit) 5901 new_power = HIGH_POWER; 5902 if (new_power == dev_priv->rps.power) 5903 return; 5904 5905 /* Note the units here are not exactly 1us, but 1280ns. */ 5906 switch (new_power) { 5907 case LOW_POWER: 5908 /* Upclock if more than 95% busy over 16ms */ 5909 ei_up = 16000; 5910 threshold_up = 95; 5911 5912 /* Downclock if less than 85% busy over 32ms */ 5913 ei_down = 32000; 5914 threshold_down = 85; 5915 break; 5916 5917 case BETWEEN: 5918 /* Upclock if more than 90% busy over 13ms */ 5919 ei_up = 13000; 5920 threshold_up = 90; 5921 5922 /* Downclock if less than 75% busy over 32ms */ 5923 ei_down = 32000; 5924 threshold_down = 75; 5925 break; 5926 5927 case HIGH_POWER: 5928 /* Upclock if more than 85% busy over 10ms */ 5929 ei_up = 10000; 5930 threshold_up = 85; 5931 5932 /* Downclock if less than 60% busy over 32ms */ 5933 ei_down = 32000; 5934 threshold_down = 60; 5935 break; 5936 } 5937 5938 /* When byt can survive without system hang with dynamic 5939 * sw freq adjustments, this restriction can be lifted. 
5940 */ 5941 if (IS_VALLEYVIEW(dev_priv)) 5942 goto skip_hw_write; 5943 5944 I915_WRITE(GEN6_RP_UP_EI, 5945 GT_INTERVAL_FROM_US(dev_priv, ei_up)); 5946 I915_WRITE(GEN6_RP_UP_THRESHOLD, 5947 GT_INTERVAL_FROM_US(dev_priv, 5948 ei_up * threshold_up / 100)); 5949 5950 I915_WRITE(GEN6_RP_DOWN_EI, 5951 GT_INTERVAL_FROM_US(dev_priv, ei_down)); 5952 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 5953 GT_INTERVAL_FROM_US(dev_priv, 5954 ei_down * threshold_down / 100)); 5955 5956 I915_WRITE(GEN6_RP_CONTROL, 5957 GEN6_RP_MEDIA_TURBO | 5958 GEN6_RP_MEDIA_HW_NORMAL_MODE | 5959 GEN6_RP_MEDIA_IS_GFX | 5960 GEN6_RP_ENABLE | 5961 GEN6_RP_UP_BUSY_AVG | 5962 GEN6_RP_DOWN_IDLE_AVG); 5963 5964skip_hw_write: 5965 dev_priv->rps.power = new_power; 5966 dev_priv->rps.up_threshold = threshold_up; 5967 dev_priv->rps.down_threshold = threshold_down; 5968 dev_priv->rps.last_adj = 0; 5969} 5970 5971static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) 5972{ 5973 u32 mask = 0; 5974 5975 /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */ 5976 if (val > dev_priv->rps.min_freq_softlimit) 5977 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; 5978 if (val < dev_priv->rps.max_freq_softlimit) 5979 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 5980 5981 mask &= dev_priv->pm_rps_events; 5982 5983 return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); 5984} 5985 5986/* gen6_set_rps is called to update the frequency request, but should also be 5987 * called when the range (min_delay and max_delay) is modified so that we can 5988 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 5989static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val) 5990{ 5991 /* min/max delay may still have been modified so be sure to 5992 * write the limits value. 5993 */ 5994 if (val != dev_priv->rps.cur_freq) { 5995 gen6_set_rps_thresholds(dev_priv, val); 5996 5997 if (IS_GEN9(dev_priv)) 5998 I915_WRITE(GEN6_RPNSWREQ, 5999 GEN9_FREQUENCY(val)); 6000 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 6001 I915_WRITE(GEN6_RPNSWREQ, 6002 HSW_FREQUENCY(val)); 6003 else 6004 I915_WRITE(GEN6_RPNSWREQ, 6005 GEN6_FREQUENCY(val) | 6006 GEN6_OFFSET(0) | 6007 GEN6_AGGRESSIVE_TURBO); 6008 } 6009 6010 /* Make sure we continue to get interrupts 6011 * until we hit the minimum or maximum frequencies. 6012 */ 6013 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val)); 6014 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 6015 6016 dev_priv->rps.cur_freq = val; 6017 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 6018 6019 return 0; 6020} 6021 6022static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val) 6023{ 6024 int err; 6025 6026 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1), 6027 "Odd GPU freq value\n")) 6028 val &= ~1; 6029 6030 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 6031 6032 if (val != dev_priv->rps.cur_freq) { 6033 err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 6034 if (err) 6035 return err; 6036 6037 gen6_set_rps_thresholds(dev_priv, val); 6038 } 6039 6040 dev_priv->rps.cur_freq = val; 6041 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 6042 6043 return 0; 6044} 6045 6046/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down 6047 * 6048 * * If Gfx is Idle, then 6049 * 1. Forcewake Media well. 6050 * 2. Request idle freq. 6051 * 3. Release Forcewake of Media well. 
6052*/ 6053static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) 6054{ 6055 u32 val = dev_priv->rps.idle_freq; 6056 int err; 6057 6058 if (dev_priv->rps.cur_freq <= val) 6059 return; 6060 6061 /* The punit delays the write of the frequency and voltage until it 6062 * determines the GPU is awake. During normal usage we don't want to 6063 * waste power changing the frequency if the GPU is sleeping (rc6). 6064 * However, the GPU and driver is now idle and we do not want to delay 6065 * switching to minimum voltage (reducing power whilst idle) as we do 6066 * not expect to be woken in the near future and so must flush the 6067 * change by waking the device. 6068 * 6069 * We choose to take the media powerwell (either would do to trick the 6070 * punit into committing the voltage change) as that takes a lot less 6071 * power than the render powerwell. 6072 */ 6073 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); 6074 err = valleyview_set_rps(dev_priv, val); 6075 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); 6076 6077 if (err) 6078 DRM_ERROR("Failed to set RPS for idle\n"); 6079} 6080 6081void gen6_rps_busy(struct drm_i915_private *dev_priv) 6082{ 6083 mutex_lock(&dev_priv->rps.hw_lock); 6084 if (dev_priv->rps.enabled) { 6085 u8 freq; 6086 6087 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED) 6088 gen6_rps_reset_ei(dev_priv); 6089 I915_WRITE(GEN6_PMINTRMSK, 6090 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 6091 6092 gen6_enable_rps_interrupts(dev_priv); 6093 6094 /* Use the user's desired frequency as a guide, but for better 6095 * performance, jump directly to RPe as our starting frequency. 6096 */ 6097 freq = max(dev_priv->rps.cur_freq, 6098 dev_priv->rps.efficient_freq); 6099 6100 if (intel_set_rps(dev_priv, 6101 clamp(freq, 6102 dev_priv->rps.min_freq_softlimit, 6103 dev_priv->rps.max_freq_softlimit))) 6104 DRM_DEBUG_DRIVER("Failed to set idle frequency\n"); 6105 } 6106 mutex_unlock(&dev_priv->rps.hw_lock); 6107} 6108 6109void gen6_rps_idle(struct drm_i915_private *dev_priv) 6110{ 6111 /* Flush our bottom-half so that it does not race with us 6112 * setting the idle frequency and so that it is bounded by 6113 * our rpm wakeref. And then disable the interrupts to stop any 6114 * futher RPS reclocking whilst we are asleep. 6115 */ 6116 gen6_disable_rps_interrupts(dev_priv); 6117 6118 mutex_lock(&dev_priv->rps.hw_lock); 6119 if (dev_priv->rps.enabled) { 6120 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6121 vlv_set_rps_idle(dev_priv); 6122 else 6123 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); 6124 dev_priv->rps.last_adj = 0; 6125 I915_WRITE(GEN6_PMINTRMSK, 6126 gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 6127 } 6128 mutex_unlock(&dev_priv->rps.hw_lock); 6129 6130 spin_lock(&dev_priv->rps.client_lock); 6131 while (!list_empty(&dev_priv->rps.clients)) 6132 list_del_init(dev_priv->rps.clients.next); 6133 spin_unlock(&dev_priv->rps.client_lock); 6134} 6135 6136void gen6_rps_boost(struct drm_i915_private *dev_priv, 6137 struct intel_rps_client *rps, 6138 unsigned long submitted) 6139{ 6140 /* This is intentionally racy! We peek at the state here, then 6141 * validate inside the RPS worker. 6142 */ 6143 if (!(dev_priv->gt.awake && 6144 dev_priv->rps.enabled && 6145 dev_priv->rps.cur_freq < dev_priv->rps.boost_freq)) 6146 return; 6147 6148 /* Force a RPS boost (and don't count it against the client) if 6149 * the GPU is severely congested. 
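	 * A request still outstanding more than DRM_I915_THROTTLE_JIFFIES
	 * after submission is taken as the sign of congestion here, in which
	 * case the boost is charged to the global rps.boosts counter rather
	 * than to the client.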
6150 */ 6151 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES)) 6152 rps = NULL; 6153 6154 spin_lock(&dev_priv->rps.client_lock); 6155 if (rps == NULL || list_empty(&rps->link)) { 6156 spin_lock_irq(&dev_priv->irq_lock); 6157 if (dev_priv->rps.interrupts_enabled) { 6158 dev_priv->rps.client_boost = true; 6159 schedule_work(&dev_priv->rps.work); 6160 } 6161 spin_unlock_irq(&dev_priv->irq_lock); 6162 6163 if (rps != NULL) { 6164 list_add(&rps->link, &dev_priv->rps.clients); 6165 rps->boosts++; 6166 } else 6167 dev_priv->rps.boosts++; 6168 } 6169 spin_unlock(&dev_priv->rps.client_lock); 6170} 6171 6172int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) 6173{ 6174 int err; 6175 6176 lockdep_assert_held(&dev_priv->rps.hw_lock); 6177 GEM_BUG_ON(val > dev_priv->rps.max_freq); 6178 GEM_BUG_ON(val < dev_priv->rps.min_freq); 6179 6180 if (!dev_priv->rps.enabled) { 6181 dev_priv->rps.cur_freq = val; 6182 return 0; 6183 } 6184 6185 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6186 err = valleyview_set_rps(dev_priv, val); 6187 else 6188 err = gen6_set_rps(dev_priv, val); 6189 6190 return err; 6191} 6192 6193static void gen9_disable_rc6(struct drm_i915_private *dev_priv) 6194{ 6195 I915_WRITE(GEN6_RC_CONTROL, 0); 6196 I915_WRITE(GEN9_PG_ENABLE, 0); 6197} 6198 6199static void gen9_disable_rps(struct drm_i915_private *dev_priv) 6200{ 6201 I915_WRITE(GEN6_RP_CONTROL, 0); 6202} 6203 6204static void gen6_disable_rps(struct drm_i915_private *dev_priv) 6205{ 6206 I915_WRITE(GEN6_RC_CONTROL, 0); 6207 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 6208 I915_WRITE(GEN6_RP_CONTROL, 0); 6209} 6210 6211static void cherryview_disable_rps(struct drm_i915_private *dev_priv) 6212{ 6213 I915_WRITE(GEN6_RC_CONTROL, 0); 6214} 6215 6216static void valleyview_disable_rps(struct drm_i915_private *dev_priv) 6217{ 6218 /* we're doing forcewake before Disabling RC6, 6219 * This what the BIOS expects when going into suspend */ 6220 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 6221 6222 I915_WRITE(GEN6_RC_CONTROL, 0); 6223 6224 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 6225} 6226 6227static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode) 6228{ 6229 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 6230 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) 6231 mode = GEN6_RC_CTL_RC6_ENABLE; 6232 else 6233 mode = 0; 6234 } 6235 if (HAS_RC6p(dev_priv)) 6236 DRM_DEBUG_DRIVER("Enabling RC6 states: " 6237 "RC6 %s RC6p %s RC6pp %s\n", 6238 onoff(mode & GEN6_RC_CTL_RC6_ENABLE), 6239 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), 6240 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE)); 6241 6242 else 6243 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n", 6244 onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); 6245} 6246 6247static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) 6248{ 6249 struct i915_ggtt *ggtt = &dev_priv->ggtt; 6250 bool enable_rc6 = true; 6251 unsigned long rc6_ctx_base; 6252 u32 rc_ctl; 6253 int rc_sw_target; 6254 6255 rc_ctl = I915_READ(GEN6_RC_CONTROL); 6256 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >> 6257 RC_SW_TARGET_STATE_SHIFT; 6258 DRM_DEBUG_DRIVER("BIOS enabled RC states: " 6259 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", 6260 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), 6261 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), 6262 rc_sw_target); 6263 6264 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) { 6265 DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n"); 6266 enable_rc6 = false; 6267 } 6268 6269 /* 6270 * The exact 
context size is not known for BXT, so assume a page size 6271 * for this check. 6272 */ 6273 rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK; 6274 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) && 6275 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base + 6276 ggtt->stolen_reserved_size))) { 6277 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n"); 6278 enable_rc6 = false; 6279 } 6280 6281 if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) && 6282 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) && 6283 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) && 6284 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) { 6285 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n"); 6286 enable_rc6 = false; 6287 } 6288 6289 if (!I915_READ(GEN8_PUSHBUS_CONTROL) || 6290 !I915_READ(GEN8_PUSHBUS_ENABLE) || 6291 !I915_READ(GEN8_PUSHBUS_SHIFT)) { 6292 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n"); 6293 enable_rc6 = false; 6294 } 6295 6296 if (!I915_READ(GEN6_GFXPAUSE)) { 6297 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n"); 6298 enable_rc6 = false; 6299 } 6300 6301 if (!I915_READ(GEN8_MISC_CTRL0)) { 6302 DRM_DEBUG_DRIVER("GPM control not setup properly.\n"); 6303 enable_rc6 = false; 6304 } 6305 6306 return enable_rc6; 6307} 6308 6309int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6) 6310{ 6311 /* No RC6 before Ironlake and code is gone for ilk. */ 6312 if (INTEL_INFO(dev_priv)->gen < 6) 6313 return 0; 6314 6315 if (!enable_rc6) 6316 return 0; 6317 6318 if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) { 6319 DRM_INFO("RC6 disabled by BIOS\n"); 6320 return 0; 6321 } 6322 6323 /* Respect the kernel parameter if it is set */ 6324 if (enable_rc6 >= 0) { 6325 int mask; 6326 6327 if (HAS_RC6p(dev_priv)) 6328 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | 6329 INTEL_RC6pp_ENABLE; 6330 else 6331 mask = INTEL_RC6_ENABLE; 6332 6333 if ((enable_rc6 & mask) != enable_rc6) 6334 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d " 6335 "(requested %d, valid %d)\n", 6336 enable_rc6 & mask, enable_rc6, mask); 6337 6338 return enable_rc6 & mask; 6339 } 6340 6341 if (IS_IVYBRIDGE(dev_priv)) 6342 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 6343 6344 return INTEL_RC6_ENABLE; 6345} 6346 6347static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) 6348{ 6349 /* All of these values are in units of 50MHz */ 6350 6351 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 6352 if (IS_GEN9_LP(dev_priv)) { 6353 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 6354 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; 6355 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 6356 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff; 6357 } else { 6358 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 6359 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; 6360 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 6361 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; 6362 } 6363 /* hw_max = RP0 until we check for overclocking */ 6364 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 6365 6366 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 6367 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || 6368 IS_GEN9_BC(dev_priv)) { 6369 u32 ddcc_status = 0; 6370 6371 if (sandybridge_pcode_read(dev_priv, 6372 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 6373 &ddcc_status) == 0) 6374 dev_priv->rps.efficient_freq = 6375 clamp_t(u8, 6376 ((ddcc_status >> 8) & 0xff), 6377 dev_priv->rps.min_freq, 6378 
dev_priv->rps.max_freq); 6379 } 6380 6381 if (IS_GEN9_BC(dev_priv)) { 6382 /* Store the frequency values in 16.66 MHZ units, which is 6383 * the natural hardware unit for SKL 6384 */ 6385 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; 6386 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; 6387 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; 6388 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER; 6389 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER; 6390 } 6391} 6392 6393static void reset_rps(struct drm_i915_private *dev_priv, 6394 int (*set)(struct drm_i915_private *, u8)) 6395{ 6396 u8 freq = dev_priv->rps.cur_freq; 6397 6398 /* force a reset */ 6399 dev_priv->rps.power = -1; 6400 dev_priv->rps.cur_freq = -1; 6401 6402 if (set(dev_priv, freq)) 6403 DRM_ERROR("Failed to reset RPS to initial values\n"); 6404} 6405 6406/* See the Gen9_GT_PM_Programming_Guide doc for the below */ 6407static void gen9_enable_rps(struct drm_i915_private *dev_priv) 6408{ 6409 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 6410 6411 /* Program defaults and thresholds for RPS*/ 6412 I915_WRITE(GEN6_RC_VIDEO_FREQ, 6413 GEN9_FREQUENCY(dev_priv->rps.rp1_freq)); 6414 6415 /* 1 second timeout*/ 6416 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 6417 GT_INTERVAL_FROM_US(dev_priv, 1000000)); 6418 6419 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa); 6420 6421 /* Leaning on the below call to gen6_set_rps to program/setup the 6422 * Up/Down EI & threshold registers, as well as the RP_CONTROL, 6423 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ 6424 reset_rps(dev_priv, gen6_set_rps); 6425 6426 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 6427} 6428 6429static void gen9_enable_rc6(struct drm_i915_private *dev_priv) 6430{ 6431 struct intel_engine_cs *engine; 6432 enum intel_engine_id id; 6433 uint32_t rc6_mask = 0; 6434 6435 /* 1a: Software RC state - RC0 */ 6436 I915_WRITE(GEN6_RC_STATE, 0); 6437 6438 /* 1b: Get forcewake during program sequence. Although the driver 6439 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 6440 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 6441 6442 /* 2a: Disable RC states. */ 6443 I915_WRITE(GEN6_RC_CONTROL, 0); 6444 6445 /* 2b: Program RC6 thresholds.*/ 6446 6447 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ 6448 if (IS_SKYLAKE(dev_priv)) 6449 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); 6450 else 6451 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); 6452 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 6453 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 6454 for_each_engine(engine, dev_priv, id) 6455 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 6456 6457 if (HAS_GUC(dev_priv)) 6458 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); 6459 6460 I915_WRITE(GEN6_RC_SLEEP, 0); 6461 6462 /* 2c: Program Coarse Power Gating Policies. */ 6463 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25); 6464 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); 6465 6466 /* 3a: Enable RC6 */ 6467 if (intel_enable_rc6() & INTEL_RC6_ENABLE) 6468 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 6469 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); 6470 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ 6471 I915_WRITE(GEN6_RC_CONTROL, 6472 GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask); 6473 6474 /* 6475 * 3b: Enable Coarse Power Gating only when RC6 is enabled. 6476 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. 
6477 */ 6478 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) 6479 I915_WRITE(GEN9_PG_ENABLE, 0); 6480 else 6481 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 6482 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); 6483 6484 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 6485} 6486 6487static void gen8_enable_rps(struct drm_i915_private *dev_priv) 6488{ 6489 struct intel_engine_cs *engine; 6490 enum intel_engine_id id; 6491 uint32_t rc6_mask = 0; 6492 6493 /* 1a: Software RC state - RC0 */ 6494 I915_WRITE(GEN6_RC_STATE, 0); 6495 6496 /* 1c & 1d: Get forcewake during program sequence. Although the driver 6497 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 6498 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 6499 6500 /* 2a: Disable RC states. */ 6501 I915_WRITE(GEN6_RC_CONTROL, 0); 6502 6503 /* 2b: Program RC6 thresholds.*/ 6504 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 6505 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 6506 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 6507 for_each_engine(engine, dev_priv, id) 6508 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 6509 I915_WRITE(GEN6_RC_SLEEP, 0); 6510 if (IS_BROADWELL(dev_priv)) 6511 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ 6512 else 6513 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ 6514 6515 /* 3: Enable RC6 */ 6516 if (intel_enable_rc6() & INTEL_RC6_ENABLE) 6517 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 6518 intel_print_rc6_info(dev_priv, rc6_mask); 6519 if (IS_BROADWELL(dev_priv)) 6520 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 6521 GEN7_RC_CTL_TO_MODE | 6522 rc6_mask); 6523 else 6524 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 6525 GEN6_RC_CTL_EI_MODE(1) | 6526 rc6_mask); 6527 6528 /* 4 Program defaults and thresholds for RPS*/ 6529 I915_WRITE(GEN6_RPNSWREQ, 6530 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 6531 I915_WRITE(GEN6_RC_VIDEO_FREQ, 6532 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 6533 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ 6534 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */ 6535 6536 /* Docs recommend 900MHz, and 300 MHz respectively */ 6537 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 6538 dev_priv->rps.max_freq_softlimit << 24 | 6539 dev_priv->rps.min_freq_softlimit << 16); 6540 6541 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ 6542 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ 6543 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */ 6544 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */ 6545 6546 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 6547 6548 /* 5: Enable RPS */ 6549 I915_WRITE(GEN6_RP_CONTROL, 6550 GEN6_RP_MEDIA_TURBO | 6551 GEN6_RP_MEDIA_HW_NORMAL_MODE | 6552 GEN6_RP_MEDIA_IS_GFX | 6553 GEN6_RP_ENABLE | 6554 GEN6_RP_UP_BUSY_AVG | 6555 GEN6_RP_DOWN_IDLE_AVG); 6556 6557 /* 6: Ring frequency + overclocking (our driver does this later */ 6558 6559 reset_rps(dev_priv, gen6_set_rps); 6560 6561 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 6562} 6563 6564static void gen6_enable_rps(struct drm_i915_private *dev_priv) 6565{ 6566 struct intel_engine_cs *engine; 6567 enum intel_engine_id id; 6568 u32 rc6vids, rc6_mask = 0; 6569 u32 gtfifodbg; 6570 int rc6_mode; 6571 int ret; 6572 6573 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 6574 6575 /* Here begins a magic sequence of register writes to enable 6576 * auto-downclocking. 
6577 * 6578 * Perhaps there might be some value in exposing these to 6579 * userspace... 6580 */ 6581 I915_WRITE(GEN6_RC_STATE, 0); 6582 6583 /* Clear the DBG now so we don't confuse earlier errors */ 6584 gtfifodbg = I915_READ(GTFIFODBG); 6585 if (gtfifodbg) { 6586 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 6587 I915_WRITE(GTFIFODBG, gtfifodbg); 6588 } 6589 6590 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 6591 6592 /* disable the counters and set deterministic thresholds */ 6593 I915_WRITE(GEN6_RC_CONTROL, 0); 6594 6595 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); 6596 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); 6597 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); 6598 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 6599 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 6600 6601 for_each_engine(engine, dev_priv, id) 6602 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 6603 6604 I915_WRITE(GEN6_RC_SLEEP, 0); 6605 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 6606 if (IS_IVYBRIDGE(dev_priv)) 6607 I915_WRITE(GEN6_RC6_THRESHOLD, 125000); 6608 else 6609 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 6610 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); 6611 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 6612 6613 /* Check if we are enabling RC6 */ 6614 rc6_mode = intel_enable_rc6(); 6615 if (rc6_mode & INTEL_RC6_ENABLE) 6616 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; 6617 6618 /* We don't use those on Haswell */ 6619 if (!IS_HASWELL(dev_priv)) { 6620 if (rc6_mode & INTEL_RC6p_ENABLE) 6621 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; 6622 6623 if (rc6_mode & INTEL_RC6pp_ENABLE) 6624 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 6625 } 6626 6627 intel_print_rc6_info(dev_priv, rc6_mask); 6628 6629 I915_WRITE(GEN6_RC_CONTROL, 6630 rc6_mask | 6631 GEN6_RC_CTL_EI_MODE(1) | 6632 GEN6_RC_CTL_HW_ENABLE); 6633 6634 /* Power down if completely idle for over 50ms */ 6635 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000); 6636 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 6637 6638 reset_rps(dev_priv, gen6_set_rps); 6639 6640 rc6vids = 0; 6641 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 6642 if (IS_GEN6(dev_priv) && ret) { 6643 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); 6644 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { 6645 DRM_DEBUG_DRIVER("You should update your BIOS. 
Correcting minimum rc6 voltage (%dmV->%dmV)\n", 6646 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); 6647 rc6vids &= 0xffff00; 6648 rc6vids |= GEN6_ENCODE_RC6_VID(450); 6649 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); 6650 if (ret) 6651 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); 6652 } 6653 6654 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 6655} 6656 6657static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) 6658{ 6659 int min_freq = 15; 6660 unsigned int gpu_freq; 6661 unsigned int max_ia_freq, min_ring_freq; 6662 unsigned int max_gpu_freq, min_gpu_freq; 6663 int scaling_factor = 180; 6664 struct cpufreq_policy *policy; 6665 6666 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 6667 6668 policy = cpufreq_cpu_get(0); 6669 if (policy) { 6670 max_ia_freq = policy->cpuinfo.max_freq; 6671 cpufreq_cpu_put(policy); 6672 } else { 6673 /* 6674 * Default to measured freq if none found, PCU will ensure we 6675 * don't go over 6676 */ 6677 max_ia_freq = tsc_khz; 6678 } 6679 6680 /* Convert from kHz to MHz */ 6681 max_ia_freq /= 1000; 6682 6683 min_ring_freq = I915_READ(DCLK) & 0xf; 6684 /* convert DDR frequency from units of 266.6MHz to bandwidth */ 6685 min_ring_freq = mult_frac(min_ring_freq, 8, 3); 6686 6687 if (IS_GEN9_BC(dev_priv)) { 6688 /* Convert GT frequency to 50 HZ units */ 6689 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; 6690 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; 6691 } else { 6692 min_gpu_freq = dev_priv->rps.min_freq; 6693 max_gpu_freq = dev_priv->rps.max_freq; 6694 } 6695 6696 /* 6697 * For each potential GPU frequency, load a ring frequency we'd like 6698 * to use for memory access. We do this by specifying the IA frequency 6699 * the PCU should use as a reference to determine the ring frequency. 6700 */ 6701 for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) { 6702 int diff = max_gpu_freq - gpu_freq; 6703 unsigned int ia_freq = 0, ring_freq = 0; 6704 6705 if (IS_GEN9_BC(dev_priv)) { 6706 /* 6707 * ring_freq = 2 * GT. ring_freq is in 100MHz units 6708 * No floor required for ring frequency on SKL. 6709 */ 6710 ring_freq = gpu_freq; 6711 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 6712 /* max(2 * GT, DDR). NB: GT is 50MHz units */ 6713 ring_freq = max(min_ring_freq, gpu_freq); 6714 } else if (IS_HASWELL(dev_priv)) { 6715 ring_freq = mult_frac(gpu_freq, 5, 4); 6716 ring_freq = max(min_ring_freq, ring_freq); 6717 /* leave ia_freq as the default, chosen by cpufreq */ 6718 } else { 6719 /* On older processors, there is no separate ring 6720 * clock domain, so in order to boost the bandwidth 6721 * of the ring, we need to upclock the CPU (ia_freq). 6722 * 6723 * For GPU frequencies less than 750MHz, 6724 * just use the lowest ring freq. 
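			 *
			 * As a hypothetical example of the scaling below: with
			 * max_ia_freq = 3000 MHz and a GPU bin 4 steps below
			 * the maximum (diff = 4), the request works out to
			 * 3000 - (4 * 180) / 2 = 2640 MHz, i.e. an IA ratio
			 * of 26 once rounded to the nearest 100 MHz.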
6725 */ 6726 if (gpu_freq < min_freq) 6727 ia_freq = 800; 6728 else 6729 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); 6730 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); 6731 } 6732 6733 sandybridge_pcode_write(dev_priv, 6734 GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 6735 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | 6736 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | 6737 gpu_freq); 6738 } 6739} 6740 6741static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) 6742{ 6743 u32 val, rp0; 6744 6745 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 6746 6747 switch (INTEL_INFO(dev_priv)->sseu.eu_total) { 6748 case 8: 6749 /* (2 * 4) config */ 6750 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); 6751 break; 6752 case 12: 6753 /* (2 * 6) config */ 6754 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT); 6755 break; 6756 case 16: 6757 /* (2 * 8) config */ 6758 default: 6759 /* Setting (2 * 8) Min RP0 for any other combination */ 6760 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT); 6761 break; 6762 } 6763 6764 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK); 6765 6766 return rp0; 6767} 6768 6769static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv) 6770{ 6771 u32 val, rpe; 6772 6773 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG); 6774 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; 6775 6776 return rpe; 6777} 6778 6779static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv) 6780{ 6781 u32 val, rp1; 6782 6783 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 6784 rp1 = (val & FB_GFX_FREQ_FUSE_MASK); 6785 6786 return rp1; 6787} 6788 6789static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv) 6790{ 6791 u32 val, rpn; 6792 6793 val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE); 6794 rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) & 6795 FB_GFX_FREQ_FUSE_MASK); 6796 6797 return rpn; 6798} 6799 6800static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv) 6801{ 6802 u32 val, rp1; 6803 6804 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); 6805 6806 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; 6807 6808 return rp1; 6809} 6810 6811static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) 6812{ 6813 u32 val, rp0; 6814 6815 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); 6816 6817 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; 6818 /* Clamp to max */ 6819 rp0 = min_t(u32, rp0, 0xea); 6820 6821 return rp0; 6822} 6823 6824static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv) 6825{ 6826 u32 val, rpe; 6827 6828 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO); 6829 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; 6830 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI); 6831 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; 6832 6833 return rpe; 6834} 6835 6836static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) 6837{ 6838 u32 val; 6839 6840 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 6841 /* 6842 * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value 6843 * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on 6844 * a BYT-M B0 the above register contains 0xbf. Moreover when setting 6845 * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 6846 * to make sure it matches what Punit accepts. 
6847 */ 6848 return max_t(u32, val, 0xc0); 6849} 6850 6851/* Check that the pctx buffer wasn't move under us. */ 6852static void valleyview_check_pctx(struct drm_i915_private *dev_priv) 6853{ 6854 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; 6855 6856 WARN_ON(pctx_addr != dev_priv->mm.stolen_base + 6857 dev_priv->vlv_pctx->stolen->start); 6858} 6859 6860 6861/* Check that the pcbr address is not empty. */ 6862static void cherryview_check_pctx(struct drm_i915_private *dev_priv) 6863{ 6864 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; 6865 6866 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); 6867} 6868 6869static void cherryview_setup_pctx(struct drm_i915_private *dev_priv) 6870{ 6871 struct i915_ggtt *ggtt = &dev_priv->ggtt; 6872 unsigned long pctx_paddr, paddr; 6873 u32 pcbr; 6874 int pctx_size = 32*1024; 6875 6876 pcbr = I915_READ(VLV_PCBR); 6877 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { 6878 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); 6879 paddr = (dev_priv->mm.stolen_base + 6880 (ggtt->stolen_size - pctx_size)); 6881 6882 pctx_paddr = (paddr & (~4095)); 6883 I915_WRITE(VLV_PCBR, pctx_paddr); 6884 } 6885 6886 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 6887} 6888 6889static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) 6890{ 6891 struct drm_i915_gem_object *pctx; 6892 unsigned long pctx_paddr; 6893 u32 pcbr; 6894 int pctx_size = 24*1024; 6895 6896 pcbr = I915_READ(VLV_PCBR); 6897 if (pcbr) { 6898 /* BIOS set it up already, grab the pre-alloc'd space */ 6899 int pcbr_offset; 6900 6901 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; 6902 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv, 6903 pcbr_offset, 6904 I915_GTT_OFFSET_NONE, 6905 pctx_size); 6906 goto out; 6907 } 6908 6909 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); 6910 6911 /* 6912 * From the Gunit register HAS: 6913 * The Gfx driver is expected to program this register and ensure 6914 * proper allocation within Gfx stolen memory. For example, this 6915 * register should be programmed such than the PCBR range does not 6916 * overlap with other ranges, such as the frame buffer, protected 6917 * memory, or any other relevant ranges. 
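	 *
	 * To that end the driver carves a 24 KiB power context out of stolen
	 * memory below and writes its physical address to PCBR; if the BIOS
	 * already programmed PCBR, the pre-allocated range is reused instead
	 * (see the pcbr path above).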
6918 */ 6919 pctx = i915_gem_object_create_stolen(dev_priv, pctx_size); 6920 if (!pctx) { 6921 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 6922 goto out; 6923 } 6924 6925 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start; 6926 I915_WRITE(VLV_PCBR, pctx_paddr); 6927 6928out: 6929 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 6930 dev_priv->vlv_pctx = pctx; 6931} 6932 6933static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) 6934{ 6935 if (WARN_ON(!dev_priv->vlv_pctx)) 6936 return; 6937 6938 i915_gem_object_put(dev_priv->vlv_pctx); 6939 dev_priv->vlv_pctx = NULL; 6940} 6941 6942static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) 6943{ 6944 dev_priv->rps.gpll_ref_freq = 6945 vlv_get_cck_clock(dev_priv, "GPLL ref", 6946 CCK_GPLL_CLOCK_CONTROL, 6947 dev_priv->czclk_freq); 6948 6949 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", 6950 dev_priv->rps.gpll_ref_freq); 6951} 6952 6953static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) 6954{ 6955 u32 val; 6956 6957 valleyview_setup_pctx(dev_priv); 6958 6959 vlv_init_gpll_ref_freq(dev_priv); 6960 6961 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 6962 switch ((val >> 6) & 3) { 6963 case 0: 6964 case 1: 6965 dev_priv->mem_freq = 800; 6966 break; 6967 case 2: 6968 dev_priv->mem_freq = 1066; 6969 break; 6970 case 3: 6971 dev_priv->mem_freq = 1333; 6972 break; 6973 } 6974 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 6975 6976 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); 6977 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 6978 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 6979 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 6980 dev_priv->rps.max_freq); 6981 6982 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv); 6983 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 6984 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 6985 dev_priv->rps.efficient_freq); 6986 6987 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv); 6988 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", 6989 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 6990 dev_priv->rps.rp1_freq); 6991 6992 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); 6993 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 6994 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 6995 dev_priv->rps.min_freq); 6996} 6997 6998static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) 6999{ 7000 u32 val; 7001 7002 cherryview_setup_pctx(dev_priv); 7003 7004 vlv_init_gpll_ref_freq(dev_priv); 7005 7006 mutex_lock(&dev_priv->sb_lock); 7007 val = vlv_cck_read(dev_priv, CCK_FUSE_REG); 7008 mutex_unlock(&dev_priv->sb_lock); 7009 7010 switch ((val >> 2) & 0x7) { 7011 case 3: 7012 dev_priv->mem_freq = 2000; 7013 break; 7014 default: 7015 dev_priv->mem_freq = 1600; 7016 break; 7017 } 7018 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 7019 7020 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); 7021 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 7022 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 7023 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 7024 dev_priv->rps.max_freq); 7025 7026 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv); 7027 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 7028 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 7029 dev_priv->rps.efficient_freq); 7030 7031 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv); 7032 
DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", 7033 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 7034 dev_priv->rps.rp1_freq); 7035 7036 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv); 7037 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 7038 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 7039 dev_priv->rps.min_freq); 7040 7041 WARN_ONCE((dev_priv->rps.max_freq | 7042 dev_priv->rps.efficient_freq | 7043 dev_priv->rps.rp1_freq | 7044 dev_priv->rps.min_freq) & 1, 7045 "Odd GPU freq values\n"); 7046} 7047 7048static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv) 7049{ 7050 valleyview_cleanup_pctx(dev_priv); 7051} 7052 7053static void cherryview_enable_rps(struct drm_i915_private *dev_priv) 7054{ 7055 struct intel_engine_cs *engine; 7056 enum intel_engine_id id; 7057 u32 gtfifodbg, val, rc6_mode = 0, pcbr; 7058 7059 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7060 7061 gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV | 7062 GT_FIFO_FREE_ENTRIES_CHV); 7063 if (gtfifodbg) { 7064 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 7065 gtfifodbg); 7066 I915_WRITE(GTFIFODBG, gtfifodbg); 7067 } 7068 7069 cherryview_check_pctx(dev_priv); 7070 7071 /* 1a & 1b: Get forcewake during program sequence. Although the driver 7072 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 7073 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 7074 7075 /* Disable RC states. */ 7076 I915_WRITE(GEN6_RC_CONTROL, 0); 7077 7078 /* 2a: Program RC6 thresholds.*/ 7079 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 7080 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 7081 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 7082 7083 for_each_engine(engine, dev_priv, id) 7084 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 7085 I915_WRITE(GEN6_RC_SLEEP, 0); 7086 7087 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */ 7088 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186); 7089 7090 /* allows RC6 residency counter to work */ 7091 I915_WRITE(VLV_COUNTER_CONTROL, 7092 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | 7093 VLV_MEDIA_RC6_COUNT_EN | 7094 VLV_RENDER_RC6_COUNT_EN)); 7095 7096 /* For now we assume BIOS is allocating and populating the PCBR */ 7097 pcbr = I915_READ(VLV_PCBR); 7098 7099 /* 3: Enable RC6 */ 7100 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) && 7101 (pcbr >> VLV_PCBR_ADDR_SHIFT)) 7102 rc6_mode = GEN7_RC_CTL_TO_MODE; 7103 7104 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 7105 7106 /* 4 Program defaults and thresholds for RPS*/ 7107 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 7108 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 7109 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 7110 I915_WRITE(GEN6_RP_UP_EI, 66000); 7111 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 7112 7113 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 7114 7115 /* 5: Enable RPS */ 7116 I915_WRITE(GEN6_RP_CONTROL, 7117 GEN6_RP_MEDIA_HW_NORMAL_MODE | 7118 GEN6_RP_MEDIA_IS_GFX | 7119 GEN6_RP_ENABLE | 7120 GEN6_RP_UP_BUSY_AVG | 7121 GEN6_RP_DOWN_IDLE_AVG); 7122 7123 /* Setting Fixed Bias */ 7124 val = VLV_OVERRIDE_EN | 7125 VLV_SOC_TDP_EN | 7126 CHV_BIAS_CPU_50_SOC_50; 7127 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); 7128 7129 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 7130 7131 /* RPS code assumes GPLL is used */ 7132 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 7133 7134 DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); 7135 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 7136 7137 reset_rps(dev_priv, valleyview_set_rps); 7138 7139 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 7140} 7141 7142static void valleyview_enable_rps(struct drm_i915_private *dev_priv) 7143{ 7144 struct intel_engine_cs *engine; 7145 enum intel_engine_id id; 7146 u32 gtfifodbg, val, rc6_mode = 0; 7147 7148 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7149 7150 valleyview_check_pctx(dev_priv); 7151 7152 gtfifodbg = I915_READ(GTFIFODBG); 7153 if (gtfifodbg) { 7154 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 7155 gtfifodbg); 7156 I915_WRITE(GTFIFODBG, gtfifodbg); 7157 } 7158 7159 /* If VLV, Forcewake all wells, else re-direct to regular path */ 7160 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 7161 7162 /* Disable RC states. */ 7163 I915_WRITE(GEN6_RC_CONTROL, 0); 7164 7165 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 7166 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 7167 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 7168 I915_WRITE(GEN6_RP_UP_EI, 66000); 7169 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 7170 7171 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 7172 7173 I915_WRITE(GEN6_RP_CONTROL, 7174 GEN6_RP_MEDIA_TURBO | 7175 GEN6_RP_MEDIA_HW_NORMAL_MODE | 7176 GEN6_RP_MEDIA_IS_GFX | 7177 GEN6_RP_ENABLE | 7178 GEN6_RP_UP_BUSY_AVG | 7179 GEN6_RP_DOWN_IDLE_CONT); 7180 7181 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); 7182 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 7183 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 7184 7185 for_each_engine(engine, dev_priv, id) 7186 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 7187 7188 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); 7189 7190 /* allows RC6 residency counter to work */ 7191 I915_WRITE(VLV_COUNTER_CONTROL, 7192 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | 7193 VLV_MEDIA_RC0_COUNT_EN | 7194 VLV_RENDER_RC0_COUNT_EN | 7195 VLV_MEDIA_RC6_COUNT_EN | 7196 VLV_RENDER_RC6_COUNT_EN)); 7197 7198 if (intel_enable_rc6() & INTEL_RC6_ENABLE) 7199 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 7200 7201 intel_print_rc6_info(dev_priv, rc6_mode); 7202 7203 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 7204 7205 /* Setting Fixed Bias */ 7206 val = VLV_OVERRIDE_EN | 7207 VLV_SOC_TDP_EN | 7208 VLV_BIAS_CPU_125_SOC_875; 7209 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); 7210 7211 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 7212 7213 /* RPS code assumes GPLL is used */ 7214 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 7215 7216 DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); 7217 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 7218 7219 reset_rps(dev_priv, valleyview_set_rps); 7220 7221 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 7222} 7223 7224static unsigned long intel_pxfreq(u32 vidfreq) 7225{ 7226 unsigned long freq; 7227 int div = (vidfreq & 0x3f0000) >> 16; 7228 int post = (vidfreq & 0x3000) >> 12; 7229 int pre = (vidfreq & 0x7); 7230 7231 if (!pre) 7232 return 0; 7233 7234 freq = ((div * 133333) / ((1<<post) * pre)); 7235 7236 return freq; 7237} 7238 7239static const struct cparams { 7240 u16 i; 7241 u16 t; 7242 u16 m; 7243 u16 c; 7244} cparams[] = { 7245 { 1, 1333, 301, 28664 }, 7246 { 1, 1066, 294, 24460 }, 7247 { 1, 800, 294, 25192 }, 7248 { 0, 1333, 276, 27605 }, 7249 { 0, 1066, 276, 27605 }, 7250 { 0, 800, 231, 23784 }, 7251}; 7252 7253static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) 7254{ 7255 u64 total_count, diff, ret; 7256 u32 count1, count2, count3, m = 0, c = 0; 7257 unsigned long now = jiffies_to_msecs(jiffies), diff1; 7258 int i; 7259 7260 lockdep_assert_held(&mchdev_lock); 7261 7262 diff1 = now - dev_priv->ips.last_time1; 7263 7264 /* Prevent division-by-zero if we are asking too fast. 7265 * Also, we don't get interesting results if we are polling 7266 * faster than once in 10ms, so just return the saved value 7267 * in such cases. 7268 */ 7269 if (diff1 <= 10) 7270 return dev_priv->ips.chipset_power; 7271 7272 count1 = I915_READ(DMIEC); 7273 count2 = I915_READ(DDREC); 7274 count3 = I915_READ(CSIEC); 7275 7276 total_count = count1 + count2 + count3; 7277 7278 /* FIXME: handle per-counter overflow */ 7279 if (total_count < dev_priv->ips.last_count1) { 7280 diff = ~0UL - dev_priv->ips.last_count1; 7281 diff += total_count; 7282 } else { 7283 diff = total_count - dev_priv->ips.last_count1; 7284 } 7285 7286 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 7287 if (cparams[i].i == dev_priv->ips.c_m && 7288 cparams[i].t == dev_priv->ips.r_t) { 7289 m = cparams[i].m; 7290 c = cparams[i].c; 7291 break; 7292 } 7293 } 7294 7295 diff = div_u64(diff, diff1); 7296 ret = ((m * diff) + c); 7297 ret = div_u64(ret, 10); 7298 7299 dev_priv->ips.last_count1 = total_count; 7300 dev_priv->ips.last_time1 = now; 7301 7302 dev_priv->ips.chipset_power = ret; 7303 7304 return ret; 7305} 7306 7307unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 7308{ 7309 unsigned long val; 7310 7311 if (INTEL_INFO(dev_priv)->gen != 5) 7312 return 0; 7313 7314 spin_lock_irq(&mchdev_lock); 7315 7316 val = __i915_chipset_val(dev_priv); 7317 7318 spin_unlock_irq(&mchdev_lock); 7319 7320 return val; 7321} 7322 7323unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 7324{ 7325 unsigned long m, x, b; 7326 u32 tsfs; 7327 7328 tsfs = I915_READ(TSFS); 7329 7330 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 7331 x = I915_READ8(TR1); 7332 7333 b = tsfs & TSFS_INTR_MASK; 7334 7335 return ((m * x) / 127) - b; 7336} 7337 7338static int _pxvid_to_vd(u8 pxvid) 7339{ 7340 if (pxvid == 0) 7341 return 0; 7342 7343 if (pxvid >= 8 && pxvid < 31) 7344 pxvid = 31; 7345 7346 return (pxvid + 2) * 125; 7347} 7348 7349static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 7350{ 7351 const int vd = _pxvid_to_vd(pxvid); 7352 const int vm = vd - 1125; 7353 7354 if (INTEL_INFO(dev_priv)->is_mobile) 7355 return vm > 0 ? 
vm : 0; 7356 7357 return vd; 7358} 7359 7360static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) 7361{ 7362 u64 now, diff, diffms; 7363 u32 count; 7364 7365 lockdep_assert_held(&mchdev_lock); 7366 7367 now = ktime_get_raw_ns(); 7368 diffms = now - dev_priv->ips.last_time2; 7369 do_div(diffms, NSEC_PER_MSEC); 7370 7371 /* Don't divide by 0 */ 7372 if (!diffms) 7373 return; 7374 7375 count = I915_READ(GFXEC); 7376 7377 if (count < dev_priv->ips.last_count2) { 7378 diff = ~0UL - dev_priv->ips.last_count2; 7379 diff += count; 7380 } else { 7381 diff = count - dev_priv->ips.last_count2; 7382 } 7383 7384 dev_priv->ips.last_count2 = count; 7385 dev_priv->ips.last_time2 = now; 7386 7387 /* More magic constants... */ 7388 diff = diff * 1181; 7389 diff = div_u64(diff, diffms * 10); 7390 dev_priv->ips.gfx_power = diff; 7391} 7392 7393void i915_update_gfx_val(struct drm_i915_private *dev_priv) 7394{ 7395 if (INTEL_INFO(dev_priv)->gen != 5) 7396 return; 7397 7398 spin_lock_irq(&mchdev_lock); 7399 7400 __i915_update_gfx_val(dev_priv); 7401 7402 spin_unlock_irq(&mchdev_lock); 7403} 7404 7405static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) 7406{ 7407 unsigned long t, corr, state1, corr2, state2; 7408 u32 pxvid, ext_v; 7409 7410 lockdep_assert_held(&mchdev_lock); 7411 7412 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq)); 7413 pxvid = (pxvid >> 24) & 0x7f; 7414 ext_v = pvid_to_extvid(dev_priv, pxvid); 7415 7416 state1 = ext_v; 7417 7418 t = i915_mch_val(dev_priv); 7419 7420 /* Revel in the empirically derived constants */ 7421 7422 /* Correction factor in 1/100000 units */ 7423 if (t > 80) 7424 corr = ((t * 2349) + 135940); 7425 else if (t >= 50) 7426 corr = ((t * 964) + 29317); 7427 else /* < 50 */ 7428 corr = ((t * 301) + 1004); 7429 7430 corr = corr * ((150142 * state1) / 10000 - 78642); 7431 corr /= 100000; 7432 corr2 = (corr * dev_priv->ips.corr); 7433 7434 state2 = (corr2 * state1) / 10000; 7435 state2 /= 100; /* convert to mW */ 7436 7437 __i915_update_gfx_val(dev_priv); 7438 7439 return dev_priv->ips.gfx_power + state2; 7440} 7441 7442unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 7443{ 7444 unsigned long val; 7445 7446 if (INTEL_INFO(dev_priv)->gen != 5) 7447 return 0; 7448 7449 spin_lock_irq(&mchdev_lock); 7450 7451 val = __i915_gfx_val(dev_priv); 7452 7453 spin_unlock_irq(&mchdev_lock); 7454 7455 return val; 7456} 7457 7458/** 7459 * i915_read_mch_val - return value for IPS use 7460 * 7461 * Calculate and return a value for the IPS driver to use when deciding whether 7462 * we have thermal and power headroom to increase CPU or GPU power budget. 7463 */ 7464unsigned long i915_read_mch_val(void) 7465{ 7466 struct drm_i915_private *dev_priv; 7467 unsigned long chipset_val, graphics_val, ret = 0; 7468 7469 spin_lock_irq(&mchdev_lock); 7470 if (!i915_mch_dev) 7471 goto out_unlock; 7472 dev_priv = i915_mch_dev; 7473 7474 chipset_val = __i915_chipset_val(dev_priv); 7475 graphics_val = __i915_gfx_val(dev_priv); 7476 7477 ret = chipset_val + graphics_val; 7478 7479out_unlock: 7480 spin_unlock_irq(&mchdev_lock); 7481 7482 return ret; 7483} 7484EXPORT_SYMBOL_GPL(i915_read_mch_val); 7485 7486/** 7487 * i915_gpu_raise - raise GPU frequency limit 7488 * 7489 * Raise the limit; IPS indicates we have thermal headroom. 
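 *
 * Note that the Ironlake limit is expressed as a DRPS delay value, where a
 * smaller value means a higher frequency: "raising" the limit therefore
 * decrements ips.max_delay, bounded below by ips.fmax.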
7490 */ 7491bool i915_gpu_raise(void) 7492{ 7493 struct drm_i915_private *dev_priv; 7494 bool ret = true; 7495 7496 spin_lock_irq(&mchdev_lock); 7497 if (!i915_mch_dev) { 7498 ret = false; 7499 goto out_unlock; 7500 } 7501 dev_priv = i915_mch_dev; 7502 7503 if (dev_priv->ips.max_delay > dev_priv->ips.fmax) 7504 dev_priv->ips.max_delay--; 7505 7506out_unlock: 7507 spin_unlock_irq(&mchdev_lock); 7508 7509 return ret; 7510} 7511EXPORT_SYMBOL_GPL(i915_gpu_raise); 7512 7513/** 7514 * i915_gpu_lower - lower GPU frequency limit 7515 * 7516 * IPS indicates we're close to a thermal limit, so throttle back the GPU 7517 * frequency maximum. 7518 */ 7519bool i915_gpu_lower(void) 7520{ 7521 struct drm_i915_private *dev_priv; 7522 bool ret = true; 7523 7524 spin_lock_irq(&mchdev_lock); 7525 if (!i915_mch_dev) { 7526 ret = false; 7527 goto out_unlock; 7528 } 7529 dev_priv = i915_mch_dev; 7530 7531 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) 7532 dev_priv->ips.max_delay++; 7533 7534out_unlock: 7535 spin_unlock_irq(&mchdev_lock); 7536 7537 return ret; 7538} 7539EXPORT_SYMBOL_GPL(i915_gpu_lower); 7540 7541/** 7542 * i915_gpu_busy - indicate GPU business to IPS 7543 * 7544 * Tell the IPS driver whether or not the GPU is busy. 7545 */ 7546bool i915_gpu_busy(void) 7547{ 7548 bool ret = false; 7549 7550 spin_lock_irq(&mchdev_lock); 7551 if (i915_mch_dev) 7552 ret = i915_mch_dev->gt.awake; 7553 spin_unlock_irq(&mchdev_lock); 7554 7555 return ret; 7556} 7557EXPORT_SYMBOL_GPL(i915_gpu_busy); 7558 7559/** 7560 * i915_gpu_turbo_disable - disable graphics turbo 7561 * 7562 * Disable graphics turbo by resetting the max frequency and setting the 7563 * current frequency to the default. 7564 */ 7565bool i915_gpu_turbo_disable(void) 7566{ 7567 struct drm_i915_private *dev_priv; 7568 bool ret = true; 7569 7570 spin_lock_irq(&mchdev_lock); 7571 if (!i915_mch_dev) { 7572 ret = false; 7573 goto out_unlock; 7574 } 7575 dev_priv = i915_mch_dev; 7576 7577 dev_priv->ips.max_delay = dev_priv->ips.fstart; 7578 7579 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart)) 7580 ret = false; 7581 7582out_unlock: 7583 spin_unlock_irq(&mchdev_lock); 7584 7585 return ret; 7586} 7587EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); 7588 7589/** 7590 * Tells the intel_ips driver that the i915 driver is now loaded, if 7591 * IPS got loaded first. 7592 * 7593 * This awkward dance is so that neither module has to depend on the 7594 * other in order for IPS to do the appropriate communication of 7595 * GPU turbo limits to i915. 7596 */ 7597static void 7598ips_ping_for_i915_load(void) 7599{ 7600 void (*link)(void); 7601 7602 link = symbol_get(ips_link_to_i915_driver); 7603 if (link) { 7604 link(); 7605 symbol_put(ips_link_to_i915_driver); 7606 } 7607} 7608 7609void intel_gpu_ips_init(struct drm_i915_private *dev_priv) 7610{ 7611 /* We only register the i915 ips part with intel-ips once everything is 7612 * set up, to avoid intel-ips sneaking in and reading bogus values. 
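 *
 * The pointer is published under mchdev_lock, and every IPS entry point
 * above (i915_read_mch_val(), i915_gpu_raise(), i915_gpu_lower(), ...)
 * re-checks i915_mch_dev under the same lock before dereferencing it.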
*/ 7613 spin_lock_irq(&mchdev_lock); 7614 i915_mch_dev = dev_priv; 7615 spin_unlock_irq(&mchdev_lock); 7616 7617 ips_ping_for_i915_load(); 7618} 7619 7620void intel_gpu_ips_teardown(void) 7621{ 7622 spin_lock_irq(&mchdev_lock); 7623 i915_mch_dev = NULL; 7624 spin_unlock_irq(&mchdev_lock); 7625} 7626 7627static void intel_init_emon(struct drm_i915_private *dev_priv) 7628{ 7629 u32 lcfuse; 7630 u8 pxw[16]; 7631 int i; 7632 7633 /* Disable to program */ 7634 I915_WRITE(ECR, 0); 7635 POSTING_READ(ECR); 7636 7637 /* Program energy weights for various events */ 7638 I915_WRITE(SDEW, 0x15040d00); 7639 I915_WRITE(CSIEW0, 0x007f0000); 7640 I915_WRITE(CSIEW1, 0x1e220004); 7641 I915_WRITE(CSIEW2, 0x04000004); 7642 7643 for (i = 0; i < 5; i++) 7644 I915_WRITE(PEW(i), 0); 7645 for (i = 0; i < 3; i++) 7646 I915_WRITE(DEW(i), 0); 7647 7648 /* Program P-state weights to account for frequency power adjustment */ 7649 for (i = 0; i < 16; i++) { 7650 u32 pxvidfreq = I915_READ(PXVFREQ(i)); 7651 unsigned long freq = intel_pxfreq(pxvidfreq); 7652 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> 7653 PXVFREQ_PX_SHIFT; 7654 unsigned long val; 7655 7656 val = vid * vid; 7657 val *= (freq / 1000); 7658 val *= 255; 7659 val /= (127*127*900); 7660 if (val > 0xff) 7661 DRM_ERROR("bad pxval: %ld\n", val); 7662 pxw[i] = val; 7663 } 7664 /* Render standby states get 0 weight */ 7665 pxw[14] = 0; 7666 pxw[15] = 0; 7667 7668 for (i = 0; i < 4; i++) { 7669 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | 7670 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); 7671 I915_WRITE(PXW(i), val); 7672 } 7673 7674 /* Adjust magic regs to magic values (more experimental results) */ 7675 I915_WRITE(OGW0, 0); 7676 I915_WRITE(OGW1, 0); 7677 I915_WRITE(EG0, 0x00007f00); 7678 I915_WRITE(EG1, 0x0000000e); 7679 I915_WRITE(EG2, 0x000e0000); 7680 I915_WRITE(EG3, 0x68000300); 7681 I915_WRITE(EG4, 0x42000000); 7682 I915_WRITE(EG5, 0x00140031); 7683 I915_WRITE(EG6, 0); 7684 I915_WRITE(EG7, 0); 7685 7686 for (i = 0; i < 8; i++) 7687 I915_WRITE(PXWL(i), 0); 7688 7689 /* Enable PMON + select events */ 7690 I915_WRITE(ECR, 0x80000019); 7691 7692 lcfuse = I915_READ(LCFUSE02); 7693 7694 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); 7695} 7696 7697void intel_init_gt_powersave(struct drm_i915_private *dev_priv) 7698{ 7699 /* 7700 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a 7701 * requirement. 
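 *
 * In practice this means that loading with i915.enable_rc6=0 takes a
 * runtime PM wakeref below which is only released again in
 * intel_cleanup_gt_powersave(), keeping runtime PM effectively disabled
 * for as long as the driver is bound.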
7702 */ 7703 if (!i915.enable_rc6) { 7704 DRM_INFO("RC6 disabled, disabling runtime PM support\n"); 7705 intel_runtime_pm_get(dev_priv); 7706 } 7707 7708 mutex_lock(&dev_priv->drm.struct_mutex); 7709 mutex_lock(&dev_priv->rps.hw_lock); 7710 7711 /* Initialize RPS limits (for userspace) */ 7712 if (IS_CHERRYVIEW(dev_priv)) 7713 cherryview_init_gt_powersave(dev_priv); 7714 else if (IS_VALLEYVIEW(dev_priv)) 7715 valleyview_init_gt_powersave(dev_priv); 7716 else if (INTEL_GEN(dev_priv) >= 6) 7717 gen6_init_rps_frequencies(dev_priv); 7718 7719 /* Derive initial user preferences/limits from the hardware limits */ 7720 dev_priv->rps.idle_freq = dev_priv->rps.min_freq; 7721 dev_priv->rps.cur_freq = dev_priv->rps.idle_freq; 7722 7723 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 7724 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 7725 7726 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 7727 dev_priv->rps.min_freq_softlimit = 7728 max_t(int, 7729 dev_priv->rps.efficient_freq, 7730 intel_freq_opcode(dev_priv, 450)); 7731 7732 /* After setting max-softlimit, find the overclock max freq */ 7733 if (IS_GEN6(dev_priv) || 7734 IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) { 7735 u32 params = 0; 7736 7737 sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params); 7738 if (params & BIT(31)) { /* OC supported */ 7739 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n", 7740 (dev_priv->rps.max_freq & 0xff) * 50, 7741 (params & 0xff) * 50); 7742 dev_priv->rps.max_freq = params & 0xff; 7743 } 7744 } 7745 7746 /* Finally allow us to boost to max by default */ 7747 dev_priv->rps.boost_freq = dev_priv->rps.max_freq; 7748 7749 mutex_unlock(&dev_priv->rps.hw_lock); 7750 mutex_unlock(&dev_priv->drm.struct_mutex); 7751 7752 intel_autoenable_gt_powersave(dev_priv); 7753} 7754 7755void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) 7756{ 7757 if (IS_VALLEYVIEW(dev_priv)) 7758 valleyview_cleanup_gt_powersave(dev_priv); 7759 7760 if (!i915.enable_rc6) 7761 intel_runtime_pm_put(dev_priv); 7762} 7763 7764/** 7765 * intel_suspend_gt_powersave - suspend PM work and helper threads 7766 * @dev_priv: i915 device 7767 * 7768 * We don't want to disable RC6 or other features here, we just want 7769 * to make sure any work we've queued has finished and won't bother 7770 * us while we're suspended. 
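 *
 * Cancelling rps.autoenable_work here also releases the runtime PM
 * reference that intel_autoenable_gt_powersave() took when it queued
 * the work.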
7771 */ 7772void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv) 7773{ 7774 if (INTEL_GEN(dev_priv) < 6) 7775 return; 7776 7777 if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work)) 7778 intel_runtime_pm_put(dev_priv); 7779 7780 /* gen6_rps_idle() will be called later to disable interrupts */ 7781} 7782 7783void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) 7784{ 7785 dev_priv->rps.enabled = true; /* force disabling */ 7786 intel_disable_gt_powersave(dev_priv); 7787 7788 gen6_reset_rps_interrupts(dev_priv); 7789} 7790 7791void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) 7792{ 7793 if (!READ_ONCE(dev_priv->rps.enabled)) 7794 return; 7795 7796 mutex_lock(&dev_priv->rps.hw_lock); 7797 7798 if (INTEL_GEN(dev_priv) >= 9) { 7799 gen9_disable_rc6(dev_priv); 7800 gen9_disable_rps(dev_priv); 7801 } else if (IS_CHERRYVIEW(dev_priv)) { 7802 cherryview_disable_rps(dev_priv); 7803 } else if (IS_VALLEYVIEW(dev_priv)) { 7804 valleyview_disable_rps(dev_priv); 7805 } else if (INTEL_GEN(dev_priv) >= 6) { 7806 gen6_disable_rps(dev_priv); 7807 } else if (IS_IRONLAKE_M(dev_priv)) { 7808 ironlake_disable_drps(dev_priv); 7809 } 7810 7811 dev_priv->rps.enabled = false; 7812 mutex_unlock(&dev_priv->rps.hw_lock); 7813} 7814 7815void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) 7816{ 7817 /* We shouldn't be disabling as we submit, so this should be less 7818 * racy than it appears! 7819 */ 7820 if (READ_ONCE(dev_priv->rps.enabled)) 7821 return; 7822 7823 /* Powersaving is controlled by the host when inside a VM */ 7824 if (intel_vgpu_active(dev_priv)) 7825 return; 7826 7827 mutex_lock(&dev_priv->rps.hw_lock); 7828 7829 if (IS_CHERRYVIEW(dev_priv)) { 7830 cherryview_enable_rps(dev_priv); 7831 } else if (IS_VALLEYVIEW(dev_priv)) { 7832 valleyview_enable_rps(dev_priv); 7833 } else if (INTEL_GEN(dev_priv) >= 9) { 7834 gen9_enable_rc6(dev_priv); 7835 gen9_enable_rps(dev_priv); 7836 if (IS_GEN9_BC(dev_priv)) 7837 gen6_update_ring_freq(dev_priv); 7838 } else if (IS_BROADWELL(dev_priv)) { 7839 gen8_enable_rps(dev_priv); 7840 gen6_update_ring_freq(dev_priv); 7841 } else if (INTEL_GEN(dev_priv) >= 6) { 7842 gen6_enable_rps(dev_priv); 7843 gen6_update_ring_freq(dev_priv); 7844 } else if (IS_IRONLAKE_M(dev_priv)) { 7845 ironlake_enable_drps(dev_priv); 7846 intel_init_emon(dev_priv); 7847 } 7848 7849 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); 7850 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq); 7851 7852 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq); 7853 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); 7854 7855 dev_priv->rps.enabled = true; 7856 mutex_unlock(&dev_priv->rps.hw_lock); 7857} 7858 7859static void __intel_autoenable_gt_powersave(struct work_struct *work) 7860{ 7861 struct drm_i915_private *dev_priv = 7862 container_of(work, typeof(*dev_priv), rps.autoenable_work.work); 7863 struct intel_engine_cs *rcs; 7864 struct drm_i915_gem_request *req; 7865 7866 if (READ_ONCE(dev_priv->rps.enabled)) 7867 goto out; 7868 7869 rcs = dev_priv->engine[RCS]; 7870 if (rcs->last_retired_context) 7871 goto out; 7872 7873 if (!rcs->init_context) 7874 goto out; 7875 7876 mutex_lock(&dev_priv->drm.struct_mutex); 7877 7878 req = i915_gem_request_alloc(rcs, dev_priv->kernel_context); 7879 if (IS_ERR(req)) 7880 goto unlock; 7881 7882 if (!i915.enable_execlists && i915_switch_context(req) == 0) 7883 rcs->init_context(req); 7884 7885 /* Mark the device busy, calling intel_enable_gt_powersave() */ 7886 
i915_add_request(req); 7887 7888unlock: 7889 mutex_unlock(&dev_priv->drm.struct_mutex); 7890out: 7891 intel_runtime_pm_put(dev_priv); 7892} 7893 7894void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv) 7895{ 7896 if (READ_ONCE(dev_priv->rps.enabled)) 7897 return; 7898 7899 if (IS_IRONLAKE_M(dev_priv)) { 7900 ironlake_enable_drps(dev_priv); 7901 intel_init_emon(dev_priv); 7902 } else if (INTEL_INFO(dev_priv)->gen >= 6) { 7903 /* 7904 * PCU communication is slow and this doesn't need to be 7905 * done at any specific time, so do this out of our fast path 7906 * to make resume and init faster. 7907 * 7908 * We depend on the HW RC6 power context save/restore 7909 * mechanism when entering D3 through runtime PM suspend. So 7910 * disable RPM until RPS/RC6 is properly setup. We can only 7911 * get here via the driver load/system resume/runtime resume 7912 * paths, so the _noresume version is enough (and in case of 7913 * runtime resume it's necessary). 7914 */ 7915 if (queue_delayed_work(dev_priv->wq, 7916 &dev_priv->rps.autoenable_work, 7917 round_jiffies_up_relative(HZ))) 7918 intel_runtime_pm_get_noresume(dev_priv); 7919 } 7920} 7921 7922static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) 7923{ 7924 /* 7925 * On Ibex Peak and Cougar Point, we need to disable clock 7926 * gating for the panel power sequencer or it will fail to 7927 * start up when no ports are active. 7928 */ 7929 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 7930} 7931 7932static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) 7933{ 7934 enum pipe pipe; 7935 7936 for_each_pipe(dev_priv, pipe) { 7937 I915_WRITE(DSPCNTR(pipe), 7938 I915_READ(DSPCNTR(pipe)) | 7939 DISPPLANE_TRICKLE_FEED_DISABLE); 7940 7941 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe))); 7942 POSTING_READ(DSPSURF(pipe)); 7943 } 7944} 7945 7946static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) 7947{ 7948 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); 7949 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); 7950 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); 7951 7952 /* 7953 * Don't touch WM1S_LP_EN here. 7954 * Doing so could cause underruns. 7955 */ 7956} 7957 7958static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv) 7959{ 7960 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 7961 7962 /* 7963 * Required for FBC 7964 * WaFbcDisableDpfcClockGating:ilk 7965 */ 7966 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | 7967 ILK_DPFCUNIT_CLOCK_GATE_DISABLE | 7968 ILK_DPFDUNIT_CLOCK_GATE_ENABLE; 7969 7970 I915_WRITE(PCH_3DCGDIS0, 7971 MARIUNIT_CLOCK_GATE_DISABLE | 7972 SVSMUNIT_CLOCK_GATE_DISABLE); 7973 I915_WRITE(PCH_3DCGDIS1, 7974 VFMUNIT_CLOCK_GATE_DISABLE); 7975 7976 /* 7977 * According to the spec the following bits should be set in 7978 * order to enable memory self-refresh 7979 * The bit 22/21 of 0x42004 7980 * The bit 5 of 0x42020 7981 * The bit 15 of 0x45000 7982 */ 7983 I915_WRITE(ILK_DISPLAY_CHICKEN2, 7984 (I915_READ(ILK_DISPLAY_CHICKEN2) | 7985 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 7986 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; 7987 I915_WRITE(DISP_ARB_CTL, 7988 (I915_READ(DISP_ARB_CTL) | 7989 DISP_FBC_WM_DIS)); 7990 7991 ilk_init_lp_watermarks(dev_priv); 7992 7993 /* 7994 * Based on the document from hardware guys the following bits 7995 * should be set unconditionally in order to enable FBC. 7996 * The bit 22 of 0x42000 7997 * The bit 22 of 0x42004 7998 * The bit 7,8,9 of 0x42020. 
7999 */ 8000 if (IS_IRONLAKE_M(dev_priv)) { 8001 /* WaFbcAsynchFlipDisableFbcQueue:ilk */ 8002 I915_WRITE(ILK_DISPLAY_CHICKEN1, 8003 I915_READ(ILK_DISPLAY_CHICKEN1) | 8004 ILK_FBCQ_DIS); 8005 I915_WRITE(ILK_DISPLAY_CHICKEN2, 8006 I915_READ(ILK_DISPLAY_CHICKEN2) | 8007 ILK_DPARB_GATE); 8008 } 8009 8010 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 8011 8012 I915_WRITE(ILK_DISPLAY_CHICKEN2, 8013 I915_READ(ILK_DISPLAY_CHICKEN2) | 8014 ILK_ELPIN_409_SELECT); 8015 I915_WRITE(_3D_CHICKEN2, 8016 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 8017 _3D_CHICKEN2_WM_READ_PIPELINED); 8018 8019 /* WaDisableRenderCachePipelinedFlush:ilk */ 8020 I915_WRITE(CACHE_MODE_0, 8021 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); 8022 8023 /* WaDisable_RenderCache_OperationalFlush:ilk */ 8024 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 8025 8026 g4x_disable_trickle_feed(dev_priv); 8027 8028 ibx_init_clock_gating(dev_priv); 8029} 8030 8031static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) 8032{ 8033 int pipe; 8034 uint32_t val; 8035 8036 /* 8037 * On Ibex Peak and Cougar Point, we need to disable clock 8038 * gating for the panel power sequencer or it will fail to 8039 * start up when no ports are active. 8040 */ 8041 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | 8042 PCH_DPLUNIT_CLOCK_GATE_DISABLE | 8043 PCH_CPUNIT_CLOCK_GATE_DISABLE); 8044 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 8045 DPLS_EDP_PPS_FIX_DIS); 8046 /* The below fixes the weird display corruption, a few pixels shifted 8047 * downward, on (only) LVDS of some HP laptops with IVY. 8048 */ 8049 for_each_pipe(dev_priv, pipe) { 8050 val = I915_READ(TRANS_CHICKEN2(pipe)); 8051 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 8052 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 8053 if (dev_priv->vbt.fdi_rx_polarity_inverted) 8054 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 8055 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 8056 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; 8057 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; 8058 I915_WRITE(TRANS_CHICKEN2(pipe), val); 8059 } 8060 /* WADP0ClockGatingDisable */ 8061 for_each_pipe(dev_priv, pipe) { 8062 I915_WRITE(TRANS_CHICKEN1(pipe), 8063 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 8064 } 8065} 8066 8067static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) 8068{ 8069 uint32_t tmp; 8070 8071 tmp = I915_READ(MCH_SSKPD); 8072 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) 8073 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x. This can cause underruns.\n", 8074 tmp); 8075} 8076 8077static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) 8078{ 8079 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 8080 8081 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 8082 8083 I915_WRITE(ILK_DISPLAY_CHICKEN2, 8084 I915_READ(ILK_DISPLAY_CHICKEN2) | 8085 ILK_ELPIN_409_SELECT); 8086 8087 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ 8088 I915_WRITE(_3D_CHICKEN, 8089 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); 8090 8091 /* WaDisable_RenderCache_OperationalFlush:snb */ 8092 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 8093 8094 /* 8095 * BSpec recommends 8x4 when MSAA is used, 8096 * however in practice 16x4 seems fastest. 8097 * 8098 * Note that PS/WM thread counts depend on the WIZ hashing 8099 * disable bit, which we don't touch here, but it's good 8100 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
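 *
 * GEN6_GT_MODE is a masked register: the upper 16 bits of the written
 * value select which of the lower 16 bits are updated, so the
 * _MASKED_FIELD() write below only touches the WIZ hashing field.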
8101 */ 8102 I915_WRITE(GEN6_GT_MODE, 8103 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 8104 8105 ilk_init_lp_watermarks(dev_priv); 8106 8107 I915_WRITE(CACHE_MODE_0, 8108 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 8109 8110 I915_WRITE(GEN6_UCGCTL1, 8111 I915_READ(GEN6_UCGCTL1) | 8112 GEN6_BLBUNIT_CLOCK_GATE_DISABLE | 8113 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 8114 8115 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 8116 * gating disable must be set. Failure to set it results in 8117 * flickering pixels due to Z write ordering failures after 8118 * some amount of runtime in the Mesa "fire" demo, and Unigine 8119 * Sanctuary and Tropics, and apparently anything else with 8120 * alpha test or pixel discard. 8121 * 8122 * According to the spec, bit 11 (RCCUNIT) must also be set, 8123 * but we didn't debug actual testcases to find it out. 8124 * 8125 * WaDisableRCCUnitClockGating:snb 8126 * WaDisableRCPBUnitClockGating:snb 8127 */ 8128 I915_WRITE(GEN6_UCGCTL2, 8129 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 8130 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 8131 8132 /* WaStripsFansDisableFastClipPerformanceFix:snb */ 8133 I915_WRITE(_3D_CHICKEN3, 8134 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL)); 8135 8136 /* 8137 * Bspec says: 8138 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and 8139 * 3DSTATE_SF number of SF output attributes is more than 16." 8140 */ 8141 I915_WRITE(_3D_CHICKEN3, 8142 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH)); 8143 8144 /* 8145 * According to the spec the following bits should be 8146 * set in order to enable memory self-refresh and fbc: 8147 * The bit21 and bit22 of 0x42000 8148 * The bit21 and bit22 of 0x42004 8149 * The bit5 and bit7 of 0x42020 8150 * The bit14 of 0x70180 8151 * The bit14 of 0x71180 8152 * 8153 * WaFbcAsynchFlipDisableFbcQueue:snb 8154 */ 8155 I915_WRITE(ILK_DISPLAY_CHICKEN1, 8156 I915_READ(ILK_DISPLAY_CHICKEN1) | 8157 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 8158 I915_WRITE(ILK_DISPLAY_CHICKEN2, 8159 I915_READ(ILK_DISPLAY_CHICKEN2) | 8160 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 8161 I915_WRITE(ILK_DSPCLK_GATE_D, 8162 I915_READ(ILK_DSPCLK_GATE_D) | 8163 ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 8164 ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 8165 8166 g4x_disable_trickle_feed(dev_priv); 8167 8168 cpt_init_clock_gating(dev_priv); 8169 8170 gen6_check_mch_setup(dev_priv); 8171} 8172 8173static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) 8174{ 8175 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); 8176 8177 /* 8178 * WaVSThreadDispatchOverride:ivb,vlv 8179 * 8180 * This actually overrides the dispatch 8181 * mode for all thread types. 8182 */ 8183 reg &= ~GEN7_FF_SCHED_MASK; 8184 reg |= GEN7_FF_TS_SCHED_HW; 8185 reg |= GEN7_FF_VS_SCHED_HW; 8186 reg |= GEN7_FF_DS_SCHED_HW; 8187 8188 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 8189} 8190 8191static void lpt_init_clock_gating(struct drm_i915_private *dev_priv) 8192{ 8193 /* 8194 * TODO: this bit should only be enabled when really needed, then 8195 * disabled when not needed anymore in order to save power. 
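 *
 * The bit in question is PCH_LP_PARTITION_LEVEL_DISABLE; it is only set
 * on LPT:LP parts and is cleared again in lpt_suspend_hw() below.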
8196 */ 8197 if (HAS_PCH_LPT_LP(dev_priv)) 8198 I915_WRITE(SOUTH_DSPCLK_GATE_D, 8199 I915_READ(SOUTH_DSPCLK_GATE_D) | 8200 PCH_LP_PARTITION_LEVEL_DISABLE); 8201 8202 /* WADPOClockGatingDisable:hsw */ 8203 I915_WRITE(TRANS_CHICKEN1(PIPE_A), 8204 I915_READ(TRANS_CHICKEN1(PIPE_A)) | 8205 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 8206} 8207 8208static void lpt_suspend_hw(struct drm_i915_private *dev_priv) 8209{ 8210 if (HAS_PCH_LPT_LP(dev_priv)) { 8211 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); 8212 8213 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 8214 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 8215 } 8216} 8217 8218static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, 8219 int general_prio_credits, 8220 int high_prio_credits) 8221{ 8222 u32 misccpctl; 8223 8224 /* WaTempDisableDOPClkGating:bdw */ 8225 misccpctl = I915_READ(GEN7_MISCCPCTL); 8226 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 8227 8228 I915_WRITE(GEN8_L3SQCREG1, 8229 L3_GENERAL_PRIO_CREDITS(general_prio_credits) | 8230 L3_HIGH_PRIO_CREDITS(high_prio_credits)); 8231 8232 /* 8233 * Wait at least 100 clocks before re-enabling clock gating. 8234 * See the definition of L3SQCREG1 in BSpec. 8235 */ 8236 POSTING_READ(GEN8_L3SQCREG1); 8237 udelay(1); 8238 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 8239} 8240 8241static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv) 8242{ 8243 gen9_init_clock_gating(dev_priv); 8244 8245 /* WaDisableSDEUnitClockGating:kbl */ 8246 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 8247 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 8248 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 8249 8250 /* WaDisableGamClockGating:kbl */ 8251 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 8252 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | 8253 GEN6_GAMUNIT_CLOCK_GATE_DISABLE); 8254 8255 /* WaFbcNukeOnHostModify:kbl,cfl */ 8256 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 8257 ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 8258} 8259 8260static void skylake_init_clock_gating(struct drm_i915_private *dev_priv) 8261{ 8262 gen9_init_clock_gating(dev_priv); 8263 8264 /* WAC6entrylatency:skl */ 8265 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | 8266 FBC_LLC_FULLY_OPEN); 8267 8268 /* WaFbcNukeOnHostModify:skl */ 8269 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 8270 ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 8271} 8272 8273static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv) 8274{ 8275 enum pipe pipe; 8276 8277 ilk_init_lp_watermarks(dev_priv); 8278 8279 /* WaSwitchSolVfFArbitrationPriority:bdw */ 8280 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 8281 8282 /* WaPsrDPAMaskVBlankInSRD:bdw */ 8283 I915_WRITE(CHICKEN_PAR1_1, 8284 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 8285 8286 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 8287 for_each_pipe(dev_priv, pipe) { 8288 I915_WRITE(CHICKEN_PIPESL_1(pipe), 8289 I915_READ(CHICKEN_PIPESL_1(pipe)) | 8290 BDW_DPRS_MASK_VBLANK_SRD); 8291 } 8292 8293 /* WaVSRefCountFullforceMissDisable:bdw */ 8294 /* WaDSRefCountFullforceMissDisable:bdw */ 8295 I915_WRITE(GEN7_FF_THREAD_MODE, 8296 I915_READ(GEN7_FF_THREAD_MODE) & 8297 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 8298 8299 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 8300 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 8301 8302 /* WaDisableSDEUnitClockGating:bdw */ 8303 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 8304 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 8305 8306 /* WaProgramL3SqcReg1Default:bdw */ 8307 
gen8_set_l3sqc_credits(dev_priv, 30, 2); 8308 8309 /* 8310 * WaGttCachingOffByDefault:bdw 8311 * GTT cache may not work with big pages, so if those 8312 * are ever enabled GTT cache may need to be disabled. 8313 */ 8314 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); 8315 8316 /* WaKVMNotificationOnConfigChange:bdw */ 8317 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1) 8318 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); 8319 8320 lpt_init_clock_gating(dev_priv); 8321 8322 /* WaDisableDopClockGating:bdw 8323 * 8324 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP 8325 * clock gating. 8326 */ 8327 I915_WRITE(GEN6_UCGCTL1, 8328 I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); 8329} 8330 8331static void haswell_init_clock_gating(struct drm_i915_private *dev_priv) 8332{ 8333 ilk_init_lp_watermarks(dev_priv); 8334 8335 /* L3 caching of data atomics doesn't work -- disable it. */ 8336 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); 8337 I915_WRITE(HSW_ROW_CHICKEN3, 8338 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE)); 8339 8340 /* This is required by WaCatErrorRejectionIssue:hsw */ 8341 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 8342 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 8343 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 8344 8345 /* WaVSRefCountFullforceMissDisable:hsw */ 8346 I915_WRITE(GEN7_FF_THREAD_MODE, 8347 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME); 8348 8349 /* WaDisable_RenderCache_OperationalFlush:hsw */ 8350 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 8351 8352 /* enable HiZ Raw Stall Optimization */ 8353 I915_WRITE(CACHE_MODE_0_GEN7, 8354 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 8355 8356 /* WaDisable4x2SubspanOptimization:hsw */ 8357 I915_WRITE(CACHE_MODE_1, 8358 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 8359 8360 /* 8361 * BSpec recommends 8x4 when MSAA is used, 8362 * however in practice 16x4 seems fastest. 8363 * 8364 * Note that PS/WM thread counts depend on the WIZ hashing 8365 * disable bit, which we don't touch here, but it's good 8366 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 
8367 */ 8368 I915_WRITE(GEN7_GT_MODE, 8369 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 8370 8371 /* WaSampleCChickenBitEnable:hsw */ 8372 I915_WRITE(HALF_SLICE_CHICKEN3, 8373 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE)); 8374 8375 /* WaSwitchSolVfFArbitrationPriority:hsw */ 8376 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 8377 8378 /* WaRsPkgCStateDisplayPMReq:hsw */ 8379 I915_WRITE(CHICKEN_PAR1_1, 8380 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); 8381 8382 lpt_init_clock_gating(dev_priv); 8383} 8384 8385static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv) 8386{ 8387 uint32_t snpcr; 8388 8389 ilk_init_lp_watermarks(dev_priv); 8390 8391 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 8392 8393 /* WaDisableEarlyCull:ivb */ 8394 I915_WRITE(_3D_CHICKEN3, 8395 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 8396 8397 /* WaDisableBackToBackFlipFix:ivb */ 8398 I915_WRITE(IVB_CHICKEN3, 8399 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 8400 CHICKEN3_DGMG_DONE_FIX_DISABLE); 8401 8402 /* WaDisablePSDDualDispatchEnable:ivb */ 8403 if (IS_IVB_GT1(dev_priv)) 8404 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 8405 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 8406 8407 /* WaDisable_RenderCache_OperationalFlush:ivb */ 8408 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 8409 8410 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ 8411 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 8412 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 8413 8414 /* WaApplyL3ControlAndL3ChickenMode:ivb */ 8415 I915_WRITE(GEN7_L3CNTLREG1, 8416 GEN7_WA_FOR_GEN7_L3_CONTROL); 8417 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 8418 GEN7_WA_L3_CHICKEN_MODE); 8419 if (IS_IVB_GT1(dev_priv)) 8420 I915_WRITE(GEN7_ROW_CHICKEN2, 8421 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 8422 else { 8423 /* must write both registers */ 8424 I915_WRITE(GEN7_ROW_CHICKEN2, 8425 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 8426 I915_WRITE(GEN7_ROW_CHICKEN2_GT2, 8427 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 8428 } 8429 8430 /* WaForceL3Serialization:ivb */ 8431 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 8432 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 8433 8434 /* 8435 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 8436 * This implements the WaDisableRCZUnitClockGating:ivb workaround. 8437 */ 8438 I915_WRITE(GEN6_UCGCTL2, 8439 GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 8440 8441 /* This is required by WaCatErrorRejectionIssue:ivb */ 8442 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 8443 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 8444 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 8445 8446 g4x_disable_trickle_feed(dev_priv); 8447 8448 gen7_setup_fixed_func_scheduler(dev_priv); 8449 8450 if (0) { /* causes HiZ corruption on ivb:gt1 */ 8451 /* enable HiZ Raw Stall Optimization */ 8452 I915_WRITE(CACHE_MODE_0_GEN7, 8453 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 8454 } 8455 8456 /* WaDisable4x2SubspanOptimization:ivb */ 8457 I915_WRITE(CACHE_MODE_1, 8458 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 8459 8460 /* 8461 * BSpec recommends 8x4 when MSAA is used, 8462 * however in practice 16x4 seems fastest. 8463 * 8464 * Note that PS/WM thread counts depend on the WIZ hashing 8465 * disable bit, which we don't touch here, but it's good 8466 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 
8467 */ 8468 I915_WRITE(GEN7_GT_MODE, 8469 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 8470 8471 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 8472 snpcr &= ~GEN6_MBC_SNPCR_MASK; 8473 snpcr |= GEN6_MBC_SNPCR_MED; 8474 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 8475 8476 if (!HAS_PCH_NOP(dev_priv)) 8477 cpt_init_clock_gating(dev_priv); 8478 8479 gen6_check_mch_setup(dev_priv); 8480} 8481 8482static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv) 8483{ 8484 /* WaDisableEarlyCull:vlv */ 8485 I915_WRITE(_3D_CHICKEN3, 8486 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 8487 8488 /* WaDisableBackToBackFlipFix:vlv */ 8489 I915_WRITE(IVB_CHICKEN3, 8490 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 8491 CHICKEN3_DGMG_DONE_FIX_DISABLE); 8492 8493 /* WaPsdDispatchEnable:vlv */ 8494 /* WaDisablePSDDualDispatchEnable:vlv */ 8495 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 8496 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | 8497 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 8498 8499 /* WaDisable_RenderCache_OperationalFlush:vlv */ 8500 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 8501 8502 /* WaForceL3Serialization:vlv */ 8503 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 8504 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 8505 8506 /* WaDisableDopClockGating:vlv */ 8507 I915_WRITE(GEN7_ROW_CHICKEN2, 8508 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 8509 8510 /* This is required by WaCatErrorRejectionIssue:vlv */ 8511 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 8512 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 8513 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 8514 8515 gen7_setup_fixed_func_scheduler(dev_priv); 8516 8517 /* 8518 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 8519 * This implements the WaDisableRCZUnitClockGating:vlv workaround. 8520 */ 8521 I915_WRITE(GEN6_UCGCTL2, 8522 GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 8523 8524 /* WaDisableL3Bank2xClockGate:vlv 8525 * Disabling L3 clock gating- MMIO 940c[25] = 1 8526 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */ 8527 I915_WRITE(GEN7_UCGCTL4, 8528 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 8529 8530 /* 8531 * BSpec says this must be set, even though 8532 * WaDisable4x2SubspanOptimization isn't listed for VLV. 8533 */ 8534 I915_WRITE(CACHE_MODE_1, 8535 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 8536 8537 /* 8538 * BSpec recommends 8x4 when MSAA is used, 8539 * however in practice 16x4 seems fastest. 8540 * 8541 * Note that PS/WM thread counts depend on the WIZ hashing 8542 * disable bit, which we don't touch here, but it's good 8543 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 8544 */ 8545 I915_WRITE(GEN7_GT_MODE, 8546 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 8547 8548 /* 8549 * WaIncreaseL3CreditsForVLVB0:vlv 8550 * This is the hardware default actually. 8551 */ 8552 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE); 8553 8554 /* 8555 * WaDisableVLVClockGating_VBIIssue:vlv 8556 * Disable clock gating on th GCFG unit to prevent a delay 8557 * in the reporting of vblank events. 
8558 */ 8559 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); 8560} 8561 8562static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv) 8563{ 8564 /* WaVSRefCountFullforceMissDisable:chv */ 8565 /* WaDSRefCountFullforceMissDisable:chv */ 8566 I915_WRITE(GEN7_FF_THREAD_MODE, 8567 I915_READ(GEN7_FF_THREAD_MODE) & 8568 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 8569 8570 /* WaDisableSemaphoreAndSyncFlipWait:chv */ 8571 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 8572 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 8573 8574 /* WaDisableCSUnitClockGating:chv */ 8575 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | 8576 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 8577 8578 /* WaDisableSDEUnitClockGating:chv */ 8579 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 8580 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 8581 8582 /* 8583 * WaProgramL3SqcReg1Default:chv 8584 * See gfxspecs/Related Documents/Performance Guide/ 8585 * LSQC Setting Recommendations. 8586 */ 8587 gen8_set_l3sqc_credits(dev_priv, 38, 2); 8588 8589 /* 8590 * GTT cache may not work with big pages, so if those 8591 * are ever enabled GTT cache may need to be disabled. 8592 */ 8593 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); 8594} 8595 8596static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) 8597{ 8598 uint32_t dspclk_gate; 8599 8600 I915_WRITE(RENCLK_GATE_D1, 0); 8601 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 8602 GS_UNIT_CLOCK_GATE_DISABLE | 8603 CL_UNIT_CLOCK_GATE_DISABLE); 8604 I915_WRITE(RAMCLK_GATE_D, 0); 8605 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | 8606 OVRUNIT_CLOCK_GATE_DISABLE | 8607 OVCUNIT_CLOCK_GATE_DISABLE; 8608 if (IS_GM45(dev_priv)) 8609 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 8610 I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 8611 8612 /* WaDisableRenderCachePipelinedFlush */ 8613 I915_WRITE(CACHE_MODE_0, 8614 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); 8615 8616 /* WaDisable_RenderCache_OperationalFlush:g4x */ 8617 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 8618 8619 g4x_disable_trickle_feed(dev_priv); 8620} 8621 8622static void crestline_init_clock_gating(struct drm_i915_private *dev_priv) 8623{ 8624 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 8625 I915_WRITE(RENCLK_GATE_D2, 0); 8626 I915_WRITE(DSPCLK_GATE_D, 0); 8627 I915_WRITE(RAMCLK_GATE_D, 0); 8628 I915_WRITE16(DEUC, 0); 8629 I915_WRITE(MI_ARB_STATE, 8630 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 8631 8632 /* WaDisable_RenderCache_OperationalFlush:gen4 */ 8633 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 8634} 8635 8636static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv) 8637{ 8638 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 8639 I965_RCC_CLOCK_GATE_DISABLE | 8640 I965_RCPB_CLOCK_GATE_DISABLE | 8641 I965_ISC_CLOCK_GATE_DISABLE | 8642 I965_FBC_CLOCK_GATE_DISABLE); 8643 I915_WRITE(RENCLK_GATE_D2, 0); 8644 I915_WRITE(MI_ARB_STATE, 8645 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 8646 8647 /* WaDisable_RenderCache_OperationalFlush:gen4 */ 8648 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 8649} 8650 8651static void gen3_init_clock_gating(struct drm_i915_private *dev_priv) 8652{ 8653 u32 dstate = I915_READ(D_STATE); 8654 8655 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 8656 DSTATE_DOT_CLOCK_GATING; 8657 I915_WRITE(D_STATE, dstate); 8658 8659 if (IS_PINEVIEW(dev_priv)) 8660 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); 8661 8662 /* IIR "flip 
pending" means done if this bit is set */ 8663 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); 8664 8665 /* interrupts should cause a wake up from C3 */ 8666 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); 8667 8668 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 8669 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); 8670 8671 I915_WRITE(MI_ARB_STATE, 8672 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 8673} 8674 8675static void i85x_init_clock_gating(struct drm_i915_private *dev_priv) 8676{ 8677 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 8678 8679 /* interrupts should cause a wake up from C3 */ 8680 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | 8681 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); 8682 8683 I915_WRITE(MEM_MODE, 8684 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE)); 8685} 8686 8687static void i830_init_clock_gating(struct drm_i915_private *dev_priv) 8688{ 8689 I915_WRITE(MEM_MODE, 8690 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | 8691 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); 8692} 8693 8694void intel_init_clock_gating(struct drm_i915_private *dev_priv) 8695{ 8696 dev_priv->display.init_clock_gating(dev_priv); 8697} 8698 8699void intel_suspend_hw(struct drm_i915_private *dev_priv) 8700{ 8701 if (HAS_PCH_LPT(dev_priv)) 8702 lpt_suspend_hw(dev_priv); 8703} 8704 8705static void nop_init_clock_gating(struct drm_i915_private *dev_priv) 8706{ 8707 DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n"); 8708} 8709 8710/** 8711 * intel_init_clock_gating_hooks - setup the clock gating hooks 8712 * @dev_priv: device private 8713 * 8714 * Setup the hooks that configure which clocks of a given platform can be 8715 * gated and also apply various GT and display specific workarounds for these 8716 * platforms. Note that some GT specific workarounds are applied separately 8717 * when GPU contexts or batchbuffers start their execution. 
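 *
 * Every platform must end up with a hook assigned: unknown platforms fall
 * back to nop_init_clock_gating() (with a MISSING_CASE warning), so that
 * intel_init_clock_gating() can always call the hook unconditionally.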
8718 */ 8719void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) 8720{ 8721 if (IS_SKYLAKE(dev_priv)) 8722 dev_priv->display.init_clock_gating = skylake_init_clock_gating; 8723 else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) 8724 dev_priv->display.init_clock_gating = kabylake_init_clock_gating; 8725 else if (IS_BROXTON(dev_priv)) 8726 dev_priv->display.init_clock_gating = bxt_init_clock_gating; 8727 else if (IS_GEMINILAKE(dev_priv)) 8728 dev_priv->display.init_clock_gating = glk_init_clock_gating; 8729 else if (IS_BROADWELL(dev_priv)) 8730 dev_priv->display.init_clock_gating = broadwell_init_clock_gating; 8731 else if (IS_CHERRYVIEW(dev_priv)) 8732 dev_priv->display.init_clock_gating = cherryview_init_clock_gating; 8733 else if (IS_HASWELL(dev_priv)) 8734 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 8735 else if (IS_IVYBRIDGE(dev_priv)) 8736 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 8737 else if (IS_VALLEYVIEW(dev_priv)) 8738 dev_priv->display.init_clock_gating = valleyview_init_clock_gating; 8739 else if (IS_GEN6(dev_priv)) 8740 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 8741 else if (IS_GEN5(dev_priv)) 8742 dev_priv->display.init_clock_gating = ironlake_init_clock_gating; 8743 else if (IS_G4X(dev_priv)) 8744 dev_priv->display.init_clock_gating = g4x_init_clock_gating; 8745 else if (IS_I965GM(dev_priv)) 8746 dev_priv->display.init_clock_gating = crestline_init_clock_gating; 8747 else if (IS_I965G(dev_priv)) 8748 dev_priv->display.init_clock_gating = broadwater_init_clock_gating; 8749 else if (IS_GEN3(dev_priv)) 8750 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 8751 else if (IS_I85X(dev_priv) || IS_I865G(dev_priv)) 8752 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 8753 else if (IS_GEN2(dev_priv)) 8754 dev_priv->display.init_clock_gating = i830_init_clock_gating; 8755 else { 8756 MISSING_CASE(INTEL_DEVID(dev_priv)); 8757 dev_priv->display.init_clock_gating = nop_init_clock_gating; 8758 } 8759} 8760 8761/* Set up chip specific power management-related functions */ 8762void intel_init_pm(struct drm_i915_private *dev_priv) 8763{ 8764 intel_fbc_init(dev_priv); 8765 8766 /* For cxsr */ 8767 if (IS_PINEVIEW(dev_priv)) 8768 i915_pineview_get_mem_freq(dev_priv); 8769 else if (IS_GEN5(dev_priv)) 8770 i915_ironlake_get_mem_freq(dev_priv); 8771 8772 /* For FIFO watermark updates */ 8773 if (INTEL_GEN(dev_priv) >= 9) { 8774 skl_setup_wm_latency(dev_priv); 8775 dev_priv->display.initial_watermarks = skl_initial_wm; 8776 dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm; 8777 dev_priv->display.compute_global_watermarks = skl_compute_wm; 8778 } else if (HAS_PCH_SPLIT(dev_priv)) { 8779 ilk_setup_wm_latency(dev_priv); 8780 8781 if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] && 8782 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || 8783 (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] && 8784 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { 8785 dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm; 8786 dev_priv->display.compute_intermediate_wm = 8787 ilk_compute_intermediate_wm; 8788 dev_priv->display.initial_watermarks = 8789 ilk_initial_watermarks; 8790 dev_priv->display.optimize_watermarks = 8791 ilk_optimize_watermarks; 8792 } else { 8793 DRM_DEBUG_KMS("Failed to read display plane latency. 
" 8794 "Disable CxSR\n"); 8795 } 8796 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 8797 vlv_setup_wm_latency(dev_priv); 8798 dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm; 8799 dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm; 8800 dev_priv->display.initial_watermarks = vlv_initial_watermarks; 8801 dev_priv->display.optimize_watermarks = vlv_optimize_watermarks; 8802 dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo; 8803 } else if (IS_G4X(dev_priv)) { 8804 g4x_setup_wm_latency(dev_priv); 8805 dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm; 8806 dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm; 8807 dev_priv->display.initial_watermarks = g4x_initial_watermarks; 8808 dev_priv->display.optimize_watermarks = g4x_optimize_watermarks; 8809 } else if (IS_PINEVIEW(dev_priv)) { 8810 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv), 8811 dev_priv->is_ddr3, 8812 dev_priv->fsb_freq, 8813 dev_priv->mem_freq)) { 8814 DRM_INFO("failed to find known CxSR latency " 8815 "(found ddr%s fsb freq %d, mem freq %d), " 8816 "disabling CxSR\n", 8817 (dev_priv->is_ddr3 == 1) ? "3" : "2", 8818 dev_priv->fsb_freq, dev_priv->mem_freq); 8819 /* Disable CxSR and never update its watermark again */ 8820 intel_set_memory_cxsr(dev_priv, false); 8821 dev_priv->display.update_wm = NULL; 8822 } else 8823 dev_priv->display.update_wm = pineview_update_wm; 8824 } else if (IS_GEN4(dev_priv)) { 8825 dev_priv->display.update_wm = i965_update_wm; 8826 } else if (IS_GEN3(dev_priv)) { 8827 dev_priv->display.update_wm = i9xx_update_wm; 8828 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 8829 } else if (IS_GEN2(dev_priv)) { 8830 if (INTEL_INFO(dev_priv)->num_pipes == 1) { 8831 dev_priv->display.update_wm = i845_update_wm; 8832 dev_priv->display.get_fifo_size = i845_get_fifo_size; 8833 } else { 8834 dev_priv->display.update_wm = i9xx_update_wm; 8835 dev_priv->display.get_fifo_size = i830_get_fifo_size; 8836 } 8837 } else { 8838 DRM_ERROR("unexpected fall-through in intel_init_pm\n"); 8839 } 8840} 8841 8842static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv) 8843{ 8844 uint32_t flags = 8845 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; 8846 8847 switch (flags) { 8848 case GEN6_PCODE_SUCCESS: 8849 return 0; 8850 case GEN6_PCODE_UNIMPLEMENTED_CMD: 8851 case GEN6_PCODE_ILLEGAL_CMD: 8852 return -ENXIO; 8853 case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: 8854 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: 8855 return -EOVERFLOW; 8856 case GEN6_PCODE_TIMEOUT: 8857 return -ETIMEDOUT; 8858 default: 8859 MISSING_CASE(flags); 8860 return 0; 8861 } 8862} 8863 8864static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv) 8865{ 8866 uint32_t flags = 8867 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK; 8868 8869 switch (flags) { 8870 case GEN6_PCODE_SUCCESS: 8871 return 0; 8872 case GEN6_PCODE_ILLEGAL_CMD: 8873 return -ENXIO; 8874 case GEN7_PCODE_TIMEOUT: 8875 return -ETIMEDOUT; 8876 case GEN7_PCODE_ILLEGAL_DATA: 8877 return -EINVAL; 8878 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE: 8879 return -EOVERFLOW; 8880 default: 8881 MISSING_CASE(flags); 8882 return 0; 8883 } 8884} 8885 8886int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) 8887{ 8888 int status; 8889 8890 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 8891 8892 /* GEN6_PCODE_* are outside of the forcewake domain, we can 8893 * use te fw I915_READ variants 
to reduce the amount of work 8894 * required when reading/writing. 8895 */ 8896 8897 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { 8898 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); 8899 return -EAGAIN; 8900 } 8901 8902 I915_WRITE_FW(GEN6_PCODE_DATA, *val); 8903 I915_WRITE_FW(GEN6_PCODE_DATA1, 0); 8904 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 8905 8906 if (__intel_wait_for_register_fw(dev_priv, 8907 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, 8908 500, 0, NULL)) { 8909 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); 8910 return -ETIMEDOUT; 8911 } 8912 8913 *val = I915_READ_FW(GEN6_PCODE_DATA); 8914 I915_WRITE_FW(GEN6_PCODE_DATA, 0); 8915 8916 if (INTEL_GEN(dev_priv) > 6) 8917 status = gen7_check_mailbox_status(dev_priv); 8918 else 8919 status = gen6_check_mailbox_status(dev_priv); 8920 8921 if (status) { 8922 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n", 8923 status); 8924 return status; 8925 } 8926 8927 return 0; 8928} 8929 8930int sandybridge_pcode_write(struct drm_i915_private *dev_priv, 8931 u32 mbox, u32 val) 8932{ 8933 int status; 8934 8935 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 8936 8937 /* GEN6_PCODE_* are outside of the forcewake domain, we can 8938 * use te fw I915_READ variants to reduce the amount of work 8939 * required when reading/writing. 8940 */ 8941 8942 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { 8943 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); 8944 return -EAGAIN; 8945 } 8946 8947 I915_WRITE_FW(GEN6_PCODE_DATA, val); 8948 I915_WRITE_FW(GEN6_PCODE_DATA1, 0); 8949 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 8950 8951 if (__intel_wait_for_register_fw(dev_priv, 8952 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, 8953 500, 0, NULL)) { 8954 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); 8955 return -ETIMEDOUT; 8956 } 8957 8958 I915_WRITE_FW(GEN6_PCODE_DATA, 0); 8959 8960 if (INTEL_GEN(dev_priv) > 6) 8961 status = gen7_check_mailbox_status(dev_priv); 8962 else 8963 status = gen6_check_mailbox_status(dev_priv); 8964 8965 if (status) { 8966 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n", 8967 status); 8968 return status; 8969 } 8970 8971 return 0; 8972} 8973 8974static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox, 8975 u32 request, u32 reply_mask, u32 reply, 8976 u32 *status) 8977{ 8978 u32 val = request; 8979 8980 *status = sandybridge_pcode_read(dev_priv, mbox, &val); 8981 8982 return *status || ((val & reply_mask) == reply); 8983} 8984 8985/** 8986 * skl_pcode_request - send PCODE request until acknowledgment 8987 * @dev_priv: device private 8988 * @mbox: PCODE mailbox ID the request is targeted for 8989 * @request: request ID 8990 * @reply_mask: mask used to check for request acknowledgment 8991 * @reply: value used to check for request acknowledgment 8992 * @timeout_base_ms: timeout for polling with preemption enabled 8993 * 8994 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE 8995 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires. 8996 * The request is acknowledged once the PCODE reply dword equals @reply after 8997 * applying @reply_mask. Polling is first attempted with preemption enabled 8998 * for @timeout_base_ms and if this times out for another 50 ms with 8999 * preemption disabled. 
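 *
 * As an illustration only (ACK_BIT is a made-up name, real callers use the
 * mailbox-specific definitions), a caller polling for a single
 * acknowledgment bit passes the same mask and value:
 *
 *   ret = skl_pcode_request(dev_priv, mbox, request, ACK_BIT, ACK_BIT, 3);
 *
 * which succeeds once (reply & ACK_BIT) == ACK_BIT.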
9000 * 9001 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some 9002 * other error as reported by PCODE. 9003 */ 9004int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, 9005 u32 reply_mask, u32 reply, int timeout_base_ms) 9006{ 9007 u32 status; 9008 int ret; 9009 9010 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 9011 9012#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \ 9013 &status) 9014 9015 /* 9016 * Prime the PCODE by doing a request first. Normally it guarantees 9017 * that a subsequent request, at most @timeout_base_ms later, succeeds. 9018 * _wait_for() doesn't guarantee when its passed condition is evaluated 9019 * first, so send the first request explicitly. 9020 */ 9021 if (COND) { 9022 ret = 0; 9023 goto out; 9024 } 9025 ret = _wait_for(COND, timeout_base_ms * 1000, 10); 9026 if (!ret) 9027 goto out; 9028 9029 /* 9030 * The above can time out if the number of requests was low (2 in the 9031 * worst case) _and_ PCODE was busy for some reason even after a 9032 * (queued) request and @timeout_base_ms delay. As a workaround retry 9033 * the poll with preemption disabled to maximize the number of 9034 * requests. Increase the timeout from @timeout_base_ms to 50ms to 9035 * account for interrupts that could reduce the number of these 9036 * requests, and for any quirks of the PCODE firmware that delays 9037 * the request completion. 9038 */ 9039 DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); 9040 WARN_ON_ONCE(timeout_base_ms > 3); 9041 preempt_disable(); 9042 ret = wait_for_atomic(COND, 50); 9043 preempt_enable(); 9044 9045out: 9046 return ret ? ret : status; 9047#undef COND 9048} 9049 9050static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) 9051{ 9052 /* 9053 * N = val - 0xb7 9054 * Slow = Fast = GPLL ref * N 9055 */ 9056 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000); 9057} 9058 9059static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) 9060{ 9061 return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7; 9062} 9063 9064static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) 9065{ 9066 /* 9067 * N = val / 2 9068 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 9069 */ 9070 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000); 9071} 9072 9073static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) 9074{ 9075 /* CHV needs even values */ 9076 return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2; 9077} 9078 9079int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) 9080{ 9081 if (IS_GEN9(dev_priv)) 9082 return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, 9083 GEN9_FREQ_SCALER); 9084 else if (IS_CHERRYVIEW(dev_priv)) 9085 return chv_gpu_freq(dev_priv, val); 9086 else if (IS_VALLEYVIEW(dev_priv)) 9087 return byt_gpu_freq(dev_priv, val); 9088 else 9089 return val * GT_FREQUENCY_MULTIPLIER; 9090} 9091 9092int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) 9093{ 9094 if (IS_GEN9(dev_priv)) 9095 return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, 9096 GT_FREQUENCY_MULTIPLIER); 9097 else if (IS_CHERRYVIEW(dev_priv)) 9098 return chv_freq_opcode(dev_priv, val); 9099 else if (IS_VALLEYVIEW(dev_priv)) 9100 return byt_freq_opcode(dev_priv, val); 9101 else 9102 return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); 9103} 9104 9105struct request_boost { 9106 struct work_struct work; 9107 struct drm_i915_gem_request *req; 
9108}; 9109 9110static void __intel_rps_boost_work(struct work_struct *work) 9111{ 9112 struct request_boost *boost = container_of(work, struct request_boost, work); 9113 struct drm_i915_gem_request *req = boost->req; 9114 9115 if (!i915_gem_request_completed(req)) 9116 gen6_rps_boost(req->i915, NULL, req->emitted_jiffies); 9117 9118 i915_gem_request_put(req); 9119 kfree(boost); 9120} 9121 9122void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) 9123{ 9124 struct request_boost *boost; 9125 9126 if (req == NULL || INTEL_GEN(req->i915) < 6) 9127 return; 9128 9129 if (i915_gem_request_completed(req)) 9130 return; 9131 9132 boost = kmalloc(sizeof(*boost), GFP_ATOMIC); 9133 if (boost == NULL) 9134 return; 9135 9136 boost->req = i915_gem_request_get(req); 9137 9138 INIT_WORK(&boost->work, __intel_rps_boost_work); 9139 queue_work(req->i915->wq, &boost->work); 9140} 9141 9142void intel_pm_setup(struct drm_i915_private *dev_priv) 9143{ 9144 mutex_init(&dev_priv->rps.hw_lock); 9145 spin_lock_init(&dev_priv->rps.client_lock); 9146 9147 INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work, 9148 __intel_autoenable_gt_powersave); 9149 INIT_LIST_HEAD(&dev_priv->rps.clients); 9150 9151 dev_priv->pm.suspended = false; 9152 atomic_set(&dev_priv->pm.wakeref_count, 0); 9153} 9154 9155static u64 vlv_residency_raw(struct drm_i915_private *dev_priv, 9156 const i915_reg_t reg) 9157{ 9158 u32 lower, upper, tmp; 9159 int loop = 2; 9160 9161 /* The registers accessed do not need forcewake. We borrow the 9162 * uncore lock to prevent concurrent access to this range of registers. 9163 */ 9164 spin_lock_irq(&dev_priv->uncore.lock); 9165 9166 /* vlv and chv residency counters are 40 bits in width. 9167 * With a control bit, we can choose between the upper or lower 9168 * 32 bit window into this counter. 9169 * 9170 * Although we always use the counter in high-range mode elsewhere, 9171 * userspace may attempt to read the value before rc6 is initialised, 9172 * before we have set the default VLV_COUNTER_CONTROL value. So always 9173 * set the high bit to be safe. 9174 */ 9175 I915_WRITE_FW(VLV_COUNTER_CONTROL, 9176 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); 9177 upper = I915_READ_FW(reg); 9178 do { 9179 tmp = upper; 9180 9181 I915_WRITE_FW(VLV_COUNTER_CONTROL, 9182 _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH)); 9183 lower = I915_READ_FW(reg); 9184 9185 I915_WRITE_FW(VLV_COUNTER_CONTROL, 9186 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); 9187 upper = I915_READ_FW(reg); 9188 } while (upper != tmp && --loop); 9189 9190 /* Everywhere else we always use VLV_COUNTER_CONTROL with the 9191 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set 9192 * now.
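 *
 * The combine below (lower | upper << 8) relies on the high-range window
 * exposing bits [39:8] of the 40 bit counter while the low window exposes
 * bits [31:0].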
9193 */ 9194 9195 spin_unlock_irq(&dev_priv->uncore.lock); 9196 9197 return lower | (u64)upper << 8; 9198} 9199 9200u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, 9201 const i915_reg_t reg) 9202{ 9203 u64 time_hw, units, div; 9204 9205 if (!intel_enable_rc6()) 9206 return 0; 9207 9208 intel_runtime_pm_get(dev_priv); 9209 9210 /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ 9211 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 9212 units = 1000; 9213 div = dev_priv->czclk_freq; 9214 9215 time_hw = vlv_residency_raw(dev_priv, reg); 9216 } else if (IS_GEN9_LP(dev_priv)) { 9217 units = 1000; 9218 div = 1200; /* 833.33ns */ 9219 9220 time_hw = I915_READ(reg); 9221 } else { 9222 units = 128000; /* 1.28us */ 9223 div = 100000; 9224 9225 time_hw = I915_READ(reg); 9226 } 9227 9228 intel_runtime_pm_put(dev_priv); 9229 return DIV_ROUND_UP_ULL(time_hw * units, div); 9230}
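/*
 * Worked example for the conversion above, for the 1.28us case (units =
 * 128000, div = 100000): a raw counter value of 78125 yields
 * DIV_ROUND_UP_ULL(78125 * 128000, 100000) = 100000us, i.e. 100ms of RC6
 * residency.
 */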