Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2023-03-07' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Cross-subsystem Changes:
- MEI patches to fix suspend/resume issues with the i915's PXP. (Alexander)

Driver Changes:
- Registers helpers and clean-ups. (Lucas)
- PXP fixes and clean-ups. (Alan, Alexander)
- CDCLK related fixes and w/a (Chaitanya, Stanislav)
- Move display code to use RMW whenever possible (Andrzej)
- PSR fixes (Jouni, Ville)
- Implement async_flip mode per plane tracking (Andrzej)
- Remove pre-production Workarounds (Matt)
- HDMI related fixes (Ankit)
- LVDS cleanup (Ville)
- Watermark fixes and cleanups (Ville, Jani, Stanislav)
- DMC code related fixes, cleanups and improvements (Jani)
- Implement fb_dirty for PSR,FBC,DRRS fixes (Jouni)
- Initial DSB improvements targeting LUTs loading (Ville)
- HWMON related fixes (Ashutosh)
- PCI ID updates (Jonathan, Matt Roper)
- Fix leak in scatterlist (Matt Atwood)
- Fix eDP+DSI dual panel systems (Ville)
- Cast iomem to avoid sparse warnings (Jani)
- Set default backlight controller index (Jani)
- More MTL enabling (RK)
- Conversion of display dev_priv towards i915 (Nirmoy)
- Improvements in log/debug messages (Ville)
- Increase slice_height for DP VDSC (Suraj)
- VBT ports improvements (Ville)
- Fix platforms without Display (Imre)
- Other generic display code clean-ups (Ville, Jani, Rodrigo)
- Add RPL-U sub platform (Chaitanya)
- Add inverted backlight quirk for HP 14-r206nv (Mavroudis)
- Transcoder timing improvements (Ville)
- Track audio state per-transcoder (Ville)
- Error/underrun interrupt fixes (Ville)
- Update combo PHY init sequence (Matt Roper)
- Get HDR DPCD refresh timeout (Ville)
- Vblank improvements (Ville)
- DSS fixes and cleanups (Jani)
- PM code cleanup (Jani)
- Split display parts related to RPS (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZAez4aekcob8fTeh@intel.com

+8121 -7891
+3
drivers/gpu/drm/i915/Makefile
··· 239 239 display/intel_display_power.o \ 240 240 display/intel_display_power_map.o \ 241 241 display/intel_display_power_well.o \ 242 + display/intel_display_rps.o \ 242 243 display/intel_dmc.o \ 243 244 display/intel_dpio_phy.o \ 244 245 display/intel_dpll.o \ ··· 270 269 display/intel_tc.o \ 271 270 display/intel_vblank.o \ 272 271 display/intel_vga.o \ 272 + display/intel_wm.o \ 273 273 display/i9xx_plane.o \ 274 + display/i9xx_wm.o \ 274 275 display/skl_scaler.o \ 275 276 display/skl_universal_plane.o \ 276 277 display/skl_watermark.o
+17 -36
drivers/gpu/drm/i915/display/g4x_dp.c
··· 17 17 #include "intel_display_power.h" 18 18 #include "intel_display_types.h" 19 19 #include "intel_dp.h" 20 + #include "intel_dp_aux.h" 20 21 #include "intel_dp_link_training.h" 21 22 #include "intel_dpio_phy.h" 22 23 #include "intel_fifo_underrun.h" ··· 137 136 138 137 intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); 139 138 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { 140 - u32 trans_dp; 141 - 142 139 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 143 140 144 - trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); 145 - if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) 146 - trans_dp |= TRANS_DP_ENH_FRAMING; 147 - else 148 - trans_dp &= ~TRANS_DP_ENH_FRAMING; 149 - intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); 141 + intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe), 142 + TRANS_DP_ENH_FRAMING, 143 + drm_dp_enhanced_frame_cap(intel_dp->dpcd) ? 144 + TRANS_DP_ENH_FRAMING : 0); 150 145 } else { 151 146 if (IS_G4X(dev_priv) && pipe_config->limited_color_range) 152 147 intel_dp->DP |= DP_COLOR_RANGE_16_235; ··· 1197 1200 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 1198 1201 } 1199 1202 1200 - static bool gm45_digital_port_connected(struct intel_encoder *encoder) 1201 - { 1202 - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1203 - u32 bit; 1204 - 1205 - switch (encoder->hpd_pin) { 1206 - case HPD_PORT_B: 1207 - bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; 1208 - break; 1209 - case HPD_PORT_C: 1210 - bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; 1211 - break; 1212 - case HPD_PORT_D: 1213 - bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; 1214 - break; 1215 - default: 1216 - MISSING_CASE(encoder->hpd_pin); 1217 - return false; 1218 - } 1219 - 1220 - return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; 1221 - } 1222 - 1223 1203 static bool ilk_digital_port_connected(struct intel_encoder *encoder) 1224 1204 { 1225 1205 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); ··· 1253 1279 bool g4x_dp_init(struct drm_i915_private 
*dev_priv, 1254 1280 i915_reg_t output_reg, enum port port) 1255 1281 { 1282 + const struct intel_bios_encoder_data *devdata; 1256 1283 struct intel_digital_port *dig_port; 1257 1284 struct intel_encoder *intel_encoder; 1258 1285 struct drm_encoder *encoder; 1259 1286 struct intel_connector *intel_connector; 1287 + 1288 + devdata = intel_bios_encoder_data_lookup(dev_priv, port); 1289 + 1290 + /* FIXME bail? */ 1291 + if (!devdata) 1292 + drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n", 1293 + port_name(port)); 1260 1294 1261 1295 dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); 1262 1296 if (!dig_port) ··· 1276 1294 1277 1295 intel_encoder = &dig_port->base; 1278 1296 encoder = &intel_encoder->base; 1297 + 1298 + intel_encoder->devdata = devdata; 1279 1299 1280 1300 mutex_init(&dig_port->hdcp_mutex); 1281 1301 ··· 1361 1377 dig_port->hpd_pulse = intel_dp_hpd_pulse; 1362 1378 1363 1379 if (HAS_GMCH(dev_priv)) { 1364 - if (IS_GM45(dev_priv)) 1365 - dig_port->connected = gm45_digital_port_connected; 1366 - else 1367 - dig_port->connected = g4x_digital_port_connected; 1380 + dig_port->connected = g4x_digital_port_connected; 1368 1381 } else { 1369 1382 if (port == PORT_A) 1370 1383 dig_port->connected = ilk_digital_port_connected; ··· 1372 1391 if (port != PORT_A) 1373 1392 intel_infoframe_init(dig_port); 1374 1393 1375 - dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); 1394 + dig_port->aux_ch = intel_dp_aux_ch(intel_encoder); 1376 1395 if (!intel_dp_init_connector(dig_port, intel_connector)) 1377 1396 goto err_init_connector; 1378 1397
+16 -5
drivers/gpu/drm/i915/display/g4x_hdmi.c
··· 13 13 #include "intel_de.h" 14 14 #include "intel_display_power.h" 15 15 #include "intel_display_types.h" 16 + #include "intel_dp_aux.h" 16 17 #include "intel_dpio_phy.h" 17 18 #include "intel_fifo_underrun.h" 18 19 #include "intel_hdmi.h" ··· 274 273 */ 275 274 276 275 if (pipe_config->pipe_bpp > 24) { 277 - intel_de_write(dev_priv, TRANS_CHICKEN1(pipe), 278 - intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); 276 + intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe), 277 + 0, TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); 279 278 280 279 temp &= ~SDVO_COLOR_FORMAT_MASK; 281 280 temp |= SDVO_COLOR_FORMAT_8bpc; ··· 291 290 intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); 292 291 intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); 293 292 294 - intel_de_write(dev_priv, TRANS_CHICKEN1(pipe), 295 - intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); 293 + intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe), 294 + TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0); 296 295 } 297 296 298 297 drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && ··· 549 548 void g4x_hdmi_init(struct drm_i915_private *dev_priv, 550 549 i915_reg_t hdmi_reg, enum port port) 551 550 { 551 + const struct intel_bios_encoder_data *devdata; 552 552 struct intel_digital_port *dig_port; 553 553 struct intel_encoder *intel_encoder; 554 554 struct intel_connector *intel_connector; 555 + 556 + devdata = intel_bios_encoder_data_lookup(dev_priv, port); 557 + 558 + /* FIXME bail? 
*/ 559 + if (!devdata) 560 + drm_dbg_kms(&dev_priv->drm, "No VBT child device for HDMI-%c\n", 561 + port_name(port)); 555 562 556 563 dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); 557 564 if (!dig_port) ··· 572 563 } 573 564 574 565 intel_encoder = &dig_port->base; 566 + 567 + intel_encoder->devdata = devdata; 575 568 576 569 mutex_init(&dig_port->hdcp_mutex); 577 570 ··· 640 629 641 630 intel_infoframe_init(dig_port); 642 631 643 - dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); 632 + dig_port->aux_ch = intel_dp_aux_ch(intel_encoder); 644 633 intel_hdmi_init_connector(dig_port, intel_connector); 645 634 }
+4047
drivers/gpu/drm/i915/display/i9xx_wm.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "i915_drv.h" 7 + #include "i9xx_wm.h" 8 + #include "intel_atomic.h" 9 + #include "intel_display.h" 10 + #include "intel_display_trace.h" 11 + #include "intel_mchbar_regs.h" 12 + #include "intel_wm.h" 13 + #include "skl_watermark.h" 14 + #include "vlv_sideband.h" 15 + 16 + /* used in computing the new watermarks state */ 17 + struct intel_wm_config { 18 + unsigned int num_pipes_active; 19 + bool sprites_enabled; 20 + bool sprites_scaled; 21 + }; 22 + 23 + struct cxsr_latency { 24 + bool is_desktop : 1; 25 + bool is_ddr3 : 1; 26 + u16 fsb_freq; 27 + u16 mem_freq; 28 + u16 display_sr; 29 + u16 display_hpll_disable; 30 + u16 cursor_sr; 31 + u16 cursor_hpll_disable; 32 + }; 33 + 34 + static const struct cxsr_latency cxsr_latency_table[] = { 35 + {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ 36 + {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ 37 + {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ 38 + {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ 39 + {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ 40 + 41 + {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ 42 + {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ 43 + {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ 44 + {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ 45 + {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ 46 + 47 + {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ 48 + {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ 49 + {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ 50 + {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ 51 + {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ 52 + 53 + {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ 54 + {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* 
DDR2-667 SC */ 55 + {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ 56 + {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ 57 + {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ 58 + 59 + {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ 60 + {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ 61 + {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ 62 + {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ 63 + {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ 64 + 65 + {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ 66 + {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ 67 + {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ 68 + {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ 69 + {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ 70 + }; 71 + 72 + static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop, 73 + bool is_ddr3, 74 + int fsb, 75 + int mem) 76 + { 77 + const struct cxsr_latency *latency; 78 + int i; 79 + 80 + if (fsb == 0 || mem == 0) 81 + return NULL; 82 + 83 + for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { 84 + latency = &cxsr_latency_table[i]; 85 + if (is_desktop == latency->is_desktop && 86 + is_ddr3 == latency->is_ddr3 && 87 + fsb == latency->fsb_freq && mem == latency->mem_freq) 88 + return latency; 89 + } 90 + 91 + DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 92 + 93 + return NULL; 94 + } 95 + 96 + static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) 97 + { 98 + u32 val; 99 + 100 + vlv_punit_get(dev_priv); 101 + 102 + val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 103 + if (enable) 104 + val &= ~FORCE_DDR_HIGH_FREQ; 105 + else 106 + val |= FORCE_DDR_HIGH_FREQ; 107 + val &= ~FORCE_DDR_LOW_FREQ; 108 + val |= FORCE_DDR_FREQ_REQ_ACK; 109 + vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); 110 + 111 + if 
(wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & 112 + FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) 113 + drm_err(&dev_priv->drm, 114 + "timed out waiting for Punit DDR DVFS request\n"); 115 + 116 + vlv_punit_put(dev_priv); 117 + } 118 + 119 + static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) 120 + { 121 + u32 val; 122 + 123 + vlv_punit_get(dev_priv); 124 + 125 + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 126 + if (enable) 127 + val |= DSP_MAXFIFO_PM5_ENABLE; 128 + else 129 + val &= ~DSP_MAXFIFO_PM5_ENABLE; 130 + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); 131 + 132 + vlv_punit_put(dev_priv); 133 + } 134 + 135 + #define FW_WM(value, plane) \ 136 + (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) 137 + 138 + static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) 139 + { 140 + bool was_enabled; 141 + u32 val; 142 + 143 + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 144 + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 145 + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); 146 + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV); 147 + } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { 148 + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; 149 + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? 
FW_BLC_SELF_EN : 0); 150 + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); 151 + } else if (IS_PINEVIEW(dev_priv)) { 152 + val = intel_uncore_read(&dev_priv->uncore, DSPFW3); 153 + was_enabled = val & PINEVIEW_SELF_REFRESH_EN; 154 + if (enable) 155 + val |= PINEVIEW_SELF_REFRESH_EN; 156 + else 157 + val &= ~PINEVIEW_SELF_REFRESH_EN; 158 + intel_uncore_write(&dev_priv->uncore, DSPFW3, val); 159 + intel_uncore_posting_read(&dev_priv->uncore, DSPFW3); 160 + } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { 161 + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; 162 + val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : 163 + _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); 164 + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val); 165 + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); 166 + } else if (IS_I915GM(dev_priv)) { 167 + /* 168 + * FIXME can't find a bit like this for 915G, and 169 + * yet it does have the related watermark in 170 + * FW_BLC_SELF. What's going on? 171 + */ 172 + was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN; 173 + val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : 174 + _MASKED_BIT_DISABLE(INSTPM_SELF_EN); 175 + intel_uncore_write(&dev_priv->uncore, INSTPM, val); 176 + intel_uncore_posting_read(&dev_priv->uncore, INSTPM); 177 + } else { 178 + return false; 179 + } 180 + 181 + trace_intel_memory_cxsr(dev_priv, was_enabled, enable); 182 + 183 + drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", 184 + str_enabled_disabled(enable), 185 + str_enabled_disabled(was_enabled)); 186 + 187 + return was_enabled; 188 + } 189 + 190 + /** 191 + * intel_set_memory_cxsr - Configure CxSR state 192 + * @dev_priv: i915 device 193 + * @enable: Allow vs. disallow CxSR 194 + * 195 + * Allow or disallow the system to enter a special CxSR 196 + * (C-state self refresh) state. 
What typically happens in CxSR mode 197 + * is that several display FIFOs may get combined into a single larger 198 + * FIFO for a particular plane (so called max FIFO mode) to allow the 199 + * system to defer memory fetches longer, and the memory will enter 200 + * self refresh. 201 + * 202 + * Note that enabling CxSR does not guarantee that the system enter 203 + * this special mode, nor does it guarantee that the system stays 204 + * in that mode once entered. So this just allows/disallows the system 205 + * to autonomously utilize the CxSR mode. Other factors such as core 206 + * C-states will affect when/if the system actually enters/exits the 207 + * CxSR mode. 208 + * 209 + * Note that on VLV/CHV this actually only controls the max FIFO mode, 210 + * and the system is free to enter/exit memory self refresh at any time 211 + * even when the use of CxSR has been disallowed. 212 + * 213 + * While the system is actually in the CxSR/max FIFO mode, some plane 214 + * control registers will not get latched on vblank. Thus in order to 215 + * guarantee the system will respond to changes in the plane registers 216 + * we must always disallow CxSR prior to making changes to those registers. 217 + * Unfortunately the system will re-evaluate the CxSR conditions at 218 + * frame start which happens after vblank start (which is when the plane 219 + * registers would get latched), so we can't proceed with the plane update 220 + * during the same frame where we disallowed CxSR. 221 + * 222 + * Certain platforms also have a deeper HPLL SR mode. Fortunately the 223 + * HPLL SR mode depends on CxSR itself, so we don't have to hand hold 224 + * the hardware w.r.t. HPLL SR when writing to plane registers. 225 + * Disallowing just CxSR is sufficient. 
226 + */ 227 + bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) 228 + { 229 + bool ret; 230 + 231 + mutex_lock(&dev_priv->display.wm.wm_mutex); 232 + ret = _intel_set_memory_cxsr(dev_priv, enable); 233 + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 234 + dev_priv->display.wm.vlv.cxsr = enable; 235 + else if (IS_G4X(dev_priv)) 236 + dev_priv->display.wm.g4x.cxsr = enable; 237 + mutex_unlock(&dev_priv->display.wm.wm_mutex); 238 + 239 + return ret; 240 + } 241 + 242 + /* 243 + * Latency for FIFO fetches is dependent on several factors: 244 + * - memory configuration (speed, channels) 245 + * - chipset 246 + * - current MCH state 247 + * It can be fairly high in some situations, so here we assume a fairly 248 + * pessimal value. It's a tradeoff between extra memory fetches (if we 249 + * set this value too high, the FIFO will fetch frequently to stay full) 250 + * and power consumption (set it too low to save power and we might see 251 + * FIFO underruns and display "flicker"). 252 + * 253 + * A value of 5us seems to be a good balance; safe for very low end 254 + * platforms but not overly aggressive on lower latency configs. 
255 + */ 256 + static const int pessimal_latency_ns = 5000; 257 + 258 + #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ 259 + ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) 260 + 261 + static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) 262 + { 263 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 264 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 265 + struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; 266 + enum pipe pipe = crtc->pipe; 267 + int sprite0_start, sprite1_start; 268 + u32 dsparb, dsparb2, dsparb3; 269 + 270 + switch (pipe) { 271 + case PIPE_A: 272 + dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 273 + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); 274 + sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); 275 + sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); 276 + break; 277 + case PIPE_B: 278 + dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 279 + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); 280 + sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); 281 + sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); 282 + break; 283 + case PIPE_C: 284 + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); 285 + dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3); 286 + sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); 287 + sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); 288 + break; 289 + default: 290 + MISSING_CASE(pipe); 291 + return; 292 + } 293 + 294 + fifo_state->plane[PLANE_PRIMARY] = sprite0_start; 295 + fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start; 296 + fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start; 297 + fifo_state->plane[PLANE_CURSOR] = 63; 298 + } 299 + 300 + static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, 301 + enum i9xx_plane_id i9xx_plane) 302 + { 303 + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 
304 + int size; 305 + 306 + size = dsparb & 0x7f; 307 + if (i9xx_plane == PLANE_B) 308 + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; 309 + 310 + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", 311 + dsparb, plane_name(i9xx_plane), size); 312 + 313 + return size; 314 + } 315 + 316 + static int i830_get_fifo_size(struct drm_i915_private *dev_priv, 317 + enum i9xx_plane_id i9xx_plane) 318 + { 319 + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 320 + int size; 321 + 322 + size = dsparb & 0x1ff; 323 + if (i9xx_plane == PLANE_B) 324 + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; 325 + size >>= 1; /* Convert to cachelines */ 326 + 327 + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", 328 + dsparb, plane_name(i9xx_plane), size); 329 + 330 + return size; 331 + } 332 + 333 + static int i845_get_fifo_size(struct drm_i915_private *dev_priv, 334 + enum i9xx_plane_id i9xx_plane) 335 + { 336 + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 337 + int size; 338 + 339 + size = dsparb & 0x7f; 340 + size >>= 2; /* Convert to cachelines */ 341 + 342 + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", 343 + dsparb, plane_name(i9xx_plane), size); 344 + 345 + return size; 346 + } 347 + 348 + /* Pineview has different values for various configs */ 349 + static const struct intel_watermark_params pnv_display_wm = { 350 + .fifo_size = PINEVIEW_DISPLAY_FIFO, 351 + .max_wm = PINEVIEW_MAX_WM, 352 + .default_wm = PINEVIEW_DFT_WM, 353 + .guard_size = PINEVIEW_GUARD_WM, 354 + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 355 + }; 356 + 357 + static const struct intel_watermark_params pnv_display_hplloff_wm = { 358 + .fifo_size = PINEVIEW_DISPLAY_FIFO, 359 + .max_wm = PINEVIEW_MAX_WM, 360 + .default_wm = PINEVIEW_DFT_HPLLOFF_WM, 361 + .guard_size = PINEVIEW_GUARD_WM, 362 + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 363 + }; 364 + 365 + static const struct intel_watermark_params pnv_cursor_wm = { 366 + .fifo_size = 
PINEVIEW_CURSOR_FIFO, 367 + .max_wm = PINEVIEW_CURSOR_MAX_WM, 368 + .default_wm = PINEVIEW_CURSOR_DFT_WM, 369 + .guard_size = PINEVIEW_CURSOR_GUARD_WM, 370 + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 371 + }; 372 + 373 + static const struct intel_watermark_params pnv_cursor_hplloff_wm = { 374 + .fifo_size = PINEVIEW_CURSOR_FIFO, 375 + .max_wm = PINEVIEW_CURSOR_MAX_WM, 376 + .default_wm = PINEVIEW_CURSOR_DFT_WM, 377 + .guard_size = PINEVIEW_CURSOR_GUARD_WM, 378 + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 379 + }; 380 + 381 + static const struct intel_watermark_params i965_cursor_wm_info = { 382 + .fifo_size = I965_CURSOR_FIFO, 383 + .max_wm = I965_CURSOR_MAX_WM, 384 + .default_wm = I965_CURSOR_DFT_WM, 385 + .guard_size = 2, 386 + .cacheline_size = I915_FIFO_LINE_SIZE, 387 + }; 388 + 389 + static const struct intel_watermark_params i945_wm_info = { 390 + .fifo_size = I945_FIFO_SIZE, 391 + .max_wm = I915_MAX_WM, 392 + .default_wm = 1, 393 + .guard_size = 2, 394 + .cacheline_size = I915_FIFO_LINE_SIZE, 395 + }; 396 + 397 + static const struct intel_watermark_params i915_wm_info = { 398 + .fifo_size = I915_FIFO_SIZE, 399 + .max_wm = I915_MAX_WM, 400 + .default_wm = 1, 401 + .guard_size = 2, 402 + .cacheline_size = I915_FIFO_LINE_SIZE, 403 + }; 404 + 405 + static const struct intel_watermark_params i830_a_wm_info = { 406 + .fifo_size = I855GM_FIFO_SIZE, 407 + .max_wm = I915_MAX_WM, 408 + .default_wm = 1, 409 + .guard_size = 2, 410 + .cacheline_size = I830_FIFO_LINE_SIZE, 411 + }; 412 + 413 + static const struct intel_watermark_params i830_bc_wm_info = { 414 + .fifo_size = I855GM_FIFO_SIZE, 415 + .max_wm = I915_MAX_WM / 2, 416 + .default_wm = 1, 417 + .guard_size = 2, 418 + .cacheline_size = I830_FIFO_LINE_SIZE, 419 + }; 420 + 421 + static const struct intel_watermark_params i845_wm_info = { 422 + .fifo_size = I830_FIFO_SIZE, 423 + .max_wm = I915_MAX_WM, 424 + .default_wm = 1, 425 + .guard_size = 2, 426 + .cacheline_size = I830_FIFO_LINE_SIZE, 427 + }; 428 + 429 + /** 
430 + * intel_wm_method1 - Method 1 / "small buffer" watermark formula 431 + * @pixel_rate: Pipe pixel rate in kHz 432 + * @cpp: Plane bytes per pixel 433 + * @latency: Memory wakeup latency in 0.1us units 434 + * 435 + * Compute the watermark using the method 1 or "small buffer" 436 + * formula. The caller may additonally add extra cachelines 437 + * to account for TLB misses and clock crossings. 438 + * 439 + * This method is concerned with the short term drain rate 440 + * of the FIFO, ie. it does not account for blanking periods 441 + * which would effectively reduce the average drain rate across 442 + * a longer period. The name "small" refers to the fact the 443 + * FIFO is relatively small compared to the amount of data 444 + * fetched. 445 + * 446 + * The FIFO level vs. time graph might look something like: 447 + * 448 + * |\ |\ 449 + * | \ | \ 450 + * __---__---__ (- plane active, _ blanking) 451 + * -> time 452 + * 453 + * or perhaps like this: 454 + * 455 + * |\|\ |\|\ 456 + * __----__----__ (- plane active, _ blanking) 457 + * -> time 458 + * 459 + * Returns: 460 + * The watermark in bytes 461 + */ 462 + static unsigned int intel_wm_method1(unsigned int pixel_rate, 463 + unsigned int cpp, 464 + unsigned int latency) 465 + { 466 + u64 ret; 467 + 468 + ret = mul_u32_u32(pixel_rate, cpp * latency); 469 + ret = DIV_ROUND_UP_ULL(ret, 10000); 470 + 471 + return ret; 472 + } 473 + 474 + /** 475 + * intel_wm_method2 - Method 2 / "large buffer" watermark formula 476 + * @pixel_rate: Pipe pixel rate in kHz 477 + * @htotal: Pipe horizontal total 478 + * @width: Plane width in pixels 479 + * @cpp: Plane bytes per pixel 480 + * @latency: Memory wakeup latency in 0.1us units 481 + * 482 + * Compute the watermark using the method 2 or "large buffer" 483 + * formula. The caller may additonally add extra cachelines 484 + * to account for TLB misses and clock crossings. 485 + * 486 + * This method is concerned with the long term drain rate 487 + * of the FIFO, ie. 
it does account for blanking periods 488 + * which effectively reduce the average drain rate across 489 + * a longer period. The name "large" refers to the fact the 490 + * FIFO is relatively large compared to the amount of data 491 + * fetched. 492 + * 493 + * The FIFO level vs. time graph might look something like: 494 + * 495 + * |\___ |\___ 496 + * | \___ | \___ 497 + * | \ | \ 498 + * __ --__--__--__--__--__--__ (- plane active, _ blanking) 499 + * -> time 500 + * 501 + * Returns: 502 + * The watermark in bytes 503 + */ 504 + static unsigned int intel_wm_method2(unsigned int pixel_rate, 505 + unsigned int htotal, 506 + unsigned int width, 507 + unsigned int cpp, 508 + unsigned int latency) 509 + { 510 + unsigned int ret; 511 + 512 + /* 513 + * FIXME remove once all users are computing 514 + * watermarks in the correct place. 515 + */ 516 + if (WARN_ON_ONCE(htotal == 0)) 517 + htotal = 1; 518 + 519 + ret = (latency * pixel_rate) / (htotal * 10000); 520 + ret = (ret + 1) * width * cpp; 521 + 522 + return ret; 523 + } 524 + 525 + /** 526 + * intel_calculate_wm - calculate watermark level 527 + * @pixel_rate: pixel clock 528 + * @wm: chip FIFO params 529 + * @fifo_size: size of the FIFO buffer 530 + * @cpp: bytes per pixel 531 + * @latency_ns: memory latency for the platform 532 + * 533 + * Calculate the watermark level (the level at which the display plane will 534 + * start fetching from memory again). Each chip has a different display 535 + * FIFO size and allocation, so the caller needs to figure that out and pass 536 + * in the correct intel_watermark_params structure. 537 + * 538 + * As the pixel clock runs, the FIFO will be drained at a rate that depends 539 + * on the pixel size. When it reaches the watermark level, it'll start 540 + * fetching FIFO line sized based chunks from memory until the FIFO fills 541 + * past the watermark point. If the FIFO drains completely, a FIFO underrun 542 + * will occur, and a display engine hang could result. 
543 + */ 544 + static unsigned int intel_calculate_wm(int pixel_rate, 545 + const struct intel_watermark_params *wm, 546 + int fifo_size, int cpp, 547 + unsigned int latency_ns) 548 + { 549 + int entries, wm_size; 550 + 551 + /* 552 + * Note: we need to make sure we don't overflow for various clock & 553 + * latency values. 554 + * clocks go from a few thousand to several hundred thousand. 555 + * latency is usually a few thousand 556 + */ 557 + entries = intel_wm_method1(pixel_rate, cpp, 558 + latency_ns / 100); 559 + entries = DIV_ROUND_UP(entries, wm->cacheline_size) + 560 + wm->guard_size; 561 + DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries); 562 + 563 + wm_size = fifo_size - entries; 564 + DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); 565 + 566 + /* Don't promote wm_size to unsigned... */ 567 + if (wm_size > wm->max_wm) 568 + wm_size = wm->max_wm; 569 + if (wm_size <= 0) 570 + wm_size = wm->default_wm; 571 + 572 + /* 573 + * Bspec seems to indicate that the value shouldn't be lower than 574 + * 'burst size + 1'. Certainly 830 is quite unhappy with low values. 575 + * Lets go for 8 which is the burst size since certain platforms 576 + * already use a hardcoded 8 (which is what the spec says should be 577 + * done). 578 + */ 579 + if (wm_size <= 8) 580 + wm_size = 8; 581 + 582 + return wm_size; 583 + } 584 + 585 + static bool is_disabling(int old, int new, int threshold) 586 + { 587 + return old >= threshold && new < threshold; 588 + } 589 + 590 + static bool is_enabling(int old, int new, int threshold) 591 + { 592 + return old < threshold && new >= threshold; 593 + } 594 + 595 + static bool intel_crtc_active(struct intel_crtc *crtc) 596 + { 597 + /* Be paranoid as we can arrive here with only partial 598 + * state retrieved from the hardware during setup. 599 + * 600 + * We can ditch the adjusted_mode.crtc_clock check as soon 601 + * as Haswell has gained clock readout/fastboot support. 
 *
 * We can ditch the crtc->primary->state->fb check as soon as we can
 * properly reconstruct framebuffers.
 *
 * FIXME: The intel_crtc->active here should be switched to
 * crtc->state->active once we have proper CRTC states wired up
 * for atomic.
 */
	return crtc && crtc->active && crtc->base.primary->state->fb &&
		crtc->config->hw.adjusted_mode.crtc_clock;
}

/*
 * Return the one and only active CRTC, or NULL if zero or more than
 * one CRTC is active.
 */
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc, *enabled = NULL;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

/*
 * Program the Pineview self-refresh watermarks (DSPFW1/DSPFW3) and
 * enable CxSR, but only when the memory latency table is known and
 * exactly one CRTC is active; otherwise CxSR is disabled.
 */
static void pnv_update_wm(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned int wm;

	latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
					 dev_priv->is_ddr3,
					 dev_priv->fsb_freq,
					 dev_priv->mem_freq);
	if (!latency) {
		/* no latency data for this FSB/memory configuration */
		drm_dbg_kms(&dev_priv->drm,
			    "Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int pixel_rate = crtc->config->pixel_rate;
		int cpp = fb->format->cpp[0];

		/* Display SR */
		wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
					pnv_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		intel_uncore_write(&dev_priv->uncore, DSPFW1, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);

		/* cursor SR (cursor always uses 4 bytes per pixel here) */
		wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
					pnv_display_wm.fifo_size,
					4, latency->cursor_sr);
		intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
				 FW_WM(wm, CURSOR_SR));

		/* Display HPLL off SR */
		wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
					pnv_display_hplloff_wm.fifo_size,
					4, latency->cursor_hpll_disable);
		reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		intel_uncore_write(&dev_priv->uncore, DSPFW3, reg);
		drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

/*
 * Documentation says:
 * "If the line size is small, the TLB fetches can get in the way of the
 *  data fetches, causing some lag in the pixel data return which is not
 *  accounted for in the above formulas. The following adjustment only
 *  needs to be applied if eight whole lines fit in the buffer at once.
 *  The WM is adjusted upwards by the difference between the FIFO size
 *  and the size of 8 whole lines. This adjustment is always performed
 *  in the actual pixel depth regardless of whether FBC is enabled or not."
 */
static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
{
	/* negative means fewer than 8 lines fit -> no adjustment needed */
	int tlb_miss = fifo_size * 64 - width * cpp * 8;

	return max(0, tlb_miss);
}

/*
 * Write a precomputed set of g4x watermark values to the DSPFW1-3
 * registers; the trailing posting read flushes the writes.
 */
static void g4x_write_wm_values(struct drm_i915_private *dev_priv,
				const struct g4x_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe)
		trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);

	intel_uncore_write(&dev_priv->uncore, DSPFW1,
			   FW_WM(wm->sr.plane, SR) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW2,
			   (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) |
			   FW_WM(wm->sr.fbc, FBC_SR) |
			   FW_WM(wm->hpll.fbc, FBC_HPLL_SR) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW3,
			   (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) |
			   FW_WM(wm->sr.cursor, CURSOR_SR) |
			   FW_WM(wm->hpll.cursor, HPLL_CURSOR) |
			   FW_WM(wm->hpll.plane, HPLL_SR));

	intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}

/* VLV variant of FW_WM(): uses the wider _MASK_VLV field definitions */
#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

/*
 * Write a precomputed set of VLV/CHV watermark values to the DDL and
 * DSPFW registers. The high order bits (DSPHOWM*) are cleared first so
 * no out of bounds values are visible while reprogramming.
 */
static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
				const struct vlv_wm_values *wm)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm);

		intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe),
				   (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
				   (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
				   (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
				   (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
	}

	/*
	 * Zero the (unused) WM1 watermarks, and also clear all the
	 * high order bits so that there are no out of bounds values
	 * present in the registers during the reprogramming.
	 */
	intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0);
	intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0);
	intel_uncore_write(&dev_priv->uncore, DSPFW4, 0);
	intel_uncore_write(&dev_priv->uncore, DSPFW5, 0);
	intel_uncore_write(&dev_priv->uncore, DSPFW6, 0);

	intel_uncore_write(&dev_priv->uncore, DSPFW1,
			   FW_WM(wm->sr.plane, SR) |
			   FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
			   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
			   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW2,
			   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
			   FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
			   FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW3,
			   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		/* CHV has a third pipe, hence the extra registers */
		intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV,
				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV,
				   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
				   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
		intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV,
				   FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
				   FW_WM(wm->sr.plane >> 9, SR_HI) |
				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
				   FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	} else {
		intel_uncore_write(&dev_priv->uncore, DSPFW7,
				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
				   FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
		intel_uncore_write(&dev_priv->uncore, DSPHOWM,
				   FW_WM(wm->sr.plane >> 9, SR_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
				   FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
				   FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
	}

	intel_uncore_posting_read(&dev_priv->uncore, DSPFW1);
}

#undef FW_WM_VLV

/* Initialize the fixed g4x watermark latency table (NORMAL/SR/HPLL). */
static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
	dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;

	dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
}

/* FIFO size (in cachelines) available to the given plane at the given level. */
static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
{
	/*
	 * DSPCNTR[13] supposedly controls whether the
	 * primary plane can use the FIFO space otherwise
	 * reserved for the sprite plane. It's not 100% clear
	 * what the actual FIFO size is, but it looks like we
	 * can happily set both primary and sprite watermarks
	 * up to 127 cachelines. So that would seem to mean
	 * that either DSPCNTR[13] doesn't do anything, or that
	 * the total FIFO is >= 256 cachelines in size. Either
	 * way, we don't seem to have to worry about this
	 * repartitioning as the maximum watermark value the
	 * register can hold for each plane is lower than the
	 * minimum FIFO size.
	 */
	switch (plane_id) {
	case PLANE_CURSOR:
		return 63;
	case PLANE_PRIMARY:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 511;
	case PLANE_SPRITE0:
		return level == G4X_WM_LEVEL_NORMAL ? 127 : 0;
	default:
		MISSING_CASE(plane_id);
		return 0;
	}
}

/* FIFO size available to the FBC watermark at the given (SR/HPLL) level. */
static int g4x_fbc_fifo_size(int level)
{
	switch (level) {
	case G4X_WM_LEVEL_SR:
		return 7;
	case G4X_WM_LEVEL_HPLL:
		return 15;
	default:
		MISSING_CASE(level);
		return 0;
	}
}

/*
 * Compute the g4x watermark (in cachelines) for one plane at one level.
 * Returns USHRT_MAX when the level has no latency defined (i.e. the
 * level is unusable), and 0 for invisible planes.
 */
static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state,
			  int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	/* latency table is in usec, the wm methods take 0.1 usec units */
	unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10;
	unsigned int pixel_rate, htotal, cpp, width, wm;

	if (latency == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	/*
	 * WaUse32BppForSRWM:ctg,elk
	 *
	 * The spec fails to list this restriction for the
	 * HPLL watermark, which seems a little strange.
	 * Let's use 32bpp for the HPLL watermark as well.
	 */
	if (plane->id == PLANE_PRIMARY &&
	    level != G4X_WM_LEVEL_NORMAL)
		cpp = max(cpp, 4u);

	pixel_rate = crtc_state->pixel_rate;
	htotal = pipe_mode->crtc_htotal;
	width = drm_rect_width(&plane_state->uapi.src) >> 16;

	if (plane->id == PLANE_CURSOR) {
		wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);
	} else if (plane->id == PLANE_PRIMARY &&
		   level == G4X_WM_LEVEL_NORMAL) {
		wm = intel_wm_method1(pixel_rate, cpp, latency);
	} else {
		unsigned int small, large;

		small = intel_wm_method1(pixel_rate, cpp, latency);
		large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency);

		wm = min(small, large);
	}

	/* TLB miss adjustment, see comment above g4x_tlb_miss_wa() */
	wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level),
			      width, cpp);

	/* bytes -> cachelines, plus fixed margin of 2 */
	wm = DIV_ROUND_UP(wm, 64) + 2;

	return min_t(unsigned int, wm, USHRT_MAX);
}

/*
 * Set the "raw" plane watermark to 'value' for 'level' and every higher
 * level. Returns true if anything actually changed.
 */
static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	for (; level < dev_priv->display.wm.num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

/*
 * Set the "raw" FBC watermark to 'value' for 'level' (clamped to SR and
 * above) and every higher level. Returns true if anything changed.
 */
static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
			       int level, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	/* NORMAL level doesn't have an FBC watermark */
	level = max(level, G4X_WM_LEVEL_SR);

	for (; level < dev_priv->display.wm.num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

		dirty |= raw->fbc != value;
		raw->fbc = value;
	}

	return dirty;
}

/* Forward declaration; defined later in this file. */
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 pri_val);

/*
 * Recompute the "raw" watermarks for one plane at all levels, stopping
 * at (and invalidating) the first level that exceeds the FIFO size.
 * Returns true if any raw value changed.
 */
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum plane_id plane_id = plane->id;
	bool dirty = false;
	int level;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		/* invisible plane needs no FIFO space at any level */
		dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		if (plane_id == PLANE_PRIMARY)
			dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0);
		goto out;
	}

	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
		int wm, max_wm;

		wm = g4x_compute_wm(crtc_state, plane_state, level);
		max_wm = g4x_plane_fifo_size(plane_id, level);

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;

		/* FBC watermark only exists for the primary plane at SR/HPLL */
		if (plane_id != PLANE_PRIMARY ||
		    level == G4X_WM_LEVEL_NORMAL)
			continue;

		wm = ilk_compute_fbc_wm(crtc_state, plane_state,
					raw->plane[plane_id]);
		max_wm = g4x_fbc_fifo_size(level);

		/*
		 * FBC wm is not mandatory as we
		 * can always just disable its use.
		 */
		if (wm > max_wm)
			wm = USHRT_MAX;

		dirty |= raw->fbc != wm;
		raw->fbc = wm;
	}

	/* mark watermarks as invalid */
	dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

	if (plane_id == PLANE_PRIMARY)
		dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX);

out:
	if (dirty) {
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: normal=%d, SR=%d, HPLL=%d\n",
			    plane->base.name,
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id],
			    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]);

		if (plane_id == PLANE_PRIMARY)
			drm_dbg_kms(&dev_priv->drm,
				    "FBC watermarks: SR=%d, HPLL=%d\n",
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc,
				    crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc);
	}

	return dirty;
}

/* Does the raw watermark for this plane fit its FIFO at this level? */
static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];

	return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level);
}

/* Are all planes' raw watermarks valid for this level on this CRTC? */
static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (level >= dev_priv->display.wm.num_levels)
		return false;

	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

/* mark all levels starting from 'level' as invalid */
static void g4x_invalidate_wms(struct intel_crtc *crtc,
			       struct g4x_wm_state *wm_state, int level)
{
	if (level <= G4X_WM_LEVEL_NORMAL) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm.plane[plane_id] = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_SR) {
		wm_state->cxsr = false;
		wm_state->sr.cursor = USHRT_MAX;
		wm_state->sr.plane = USHRT_MAX;
		wm_state->sr.fbc = USHRT_MAX;
	}

	if (level <= G4X_WM_LEVEL_HPLL) {
		wm_state->hpll_en = false;
		wm_state->hpll.cursor = USHRT_MAX;
		wm_state->hpll.plane = USHRT_MAX;
		wm_state->hpll.fbc = USHRT_MAX;
	}
}

/*
 * Can FBC be used with these watermarks at 'level'? The FBC watermark
 * must fit its FIFO at every enabled SR/HPLL level.
 */
static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state,
			       int level)
{
	if (level < G4X_WM_LEVEL_SR)
		return false;

	if (level >= G4X_WM_LEVEL_SR &&
	    wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR))
		return false;

	if (level >= G4X_WM_LEVEL_HPLL &&
	    wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL))
		return false;

	return true;
}

/*
 * Build the optimal g4x watermark state for one CRTC from the raw
 * per-level watermarks, enabling each successive level (NORMAL -> SR ->
 * HPLL) only while the raw values still fit the FIFO. Fails (-EINVAL)
 * if not even the NORMAL level is usable.
 */
static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	const struct g4x_pipe_wm *raw;
	enum plane_id plane_id;
	int level;

	level = G4X_WM_LEVEL_NORMAL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm_state->wm.plane[plane_id] = raw->plane[plane_id];

	level = G4X_WM_LEVEL_SR;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->sr.plane = raw->plane[PLANE_PRIMARY];
	wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
	wm_state->sr.fbc = raw->fbc;

	/* cxsr only with the primary plane as the sole non-cursor plane */
	wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);

	level = G4X_WM_LEVEL_HPLL;
	if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
		goto out;

	raw = &crtc_state->wm.g4x.raw[level];
	wm_state->hpll.plane = raw->plane[PLANE_PRIMARY];
	wm_state->hpll.cursor = raw->plane[PLANE_CURSOR];
	wm_state->hpll.fbc = raw->fbc;

	wm_state->hpll_en = wm_state->cxsr;

	level++;

out:
	if (level == G4X_WM_LEVEL_NORMAL)
		return -EINVAL;

	/* invalidate the higher levels */
	g4x_invalidate_wms(crtc, wm_state, level);

	/*
	 * Determine if the FBC watermark(s) can be used. IF
	 * this isn't the case we prefer to disable the FBC
	 * watermark(s) rather than disable the SR/HPLL
	 * level(s) entirely. 'level-1' is the highest valid
	 * level here.
	 */
	wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1);

	return 0;
}

/*
 * Atomic-check hook: recompute raw plane watermarks for every plane
 * touching this CRTC and, if anything changed, rebuild the optimal
 * watermark state.
 */
static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	unsigned int dirty = 0;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	if (!dirty)
		return 0;

	return _g4x_compute_pipe_wm(crtc_state);
}

/*
 * Compute the intermediate watermark state: a state that is safe both
 * before and after the plane update, i.e. the max of the old ("active")
 * and new ("optimal") watermarks, with cxsr/hpll/fbc only enabled when
 * both states allow it.
 */
static int g4x_compute_intermediate_wm(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
	const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
	const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
	enum plane_id plane_id;

	if (!new_crtc_state->hw.active ||
	    intel_crtc_needs_modeset(new_crtc_state)) {
		/* full modeset: no merging needed, but keep cxsr/hpll off */
		*intermediate = *optimal;

		intermediate->cxsr = false;
		intermediate->hpll_en = false;
		goto out;
	}

	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;
	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
		!new_crtc_state->disable_cxsr;
	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm.plane[plane_id] =
			max(optimal->wm.plane[plane_id],
			    active->wm.plane[plane_id]);

		drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] >
			    g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL));
	}

	intermediate->sr.plane = max(optimal->sr.plane,
				     active->sr.plane);
	intermediate->sr.cursor = max(optimal->sr.cursor,
				      active->sr.cursor);
	intermediate->sr.fbc = max(optimal->sr.fbc,
				   active->sr.fbc);

	intermediate->hpll.plane = max(optimal->hpll.plane,
				       active->hpll.plane);
	intermediate->hpll.cursor = max(optimal->hpll.cursor,
					active->hpll.cursor);
	intermediate->hpll.fbc = max(optimal->hpll.fbc,
				     active->hpll.fbc);

	drm_WARN_ON(&dev_priv->drm,
		    (intermediate->sr.plane >
		     g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) ||
		     intermediate->sr.cursor >
		     g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) &&
		    intermediate->cxsr);
	drm_WARN_ON(&dev_priv->drm,
		    (intermediate->sr.plane >
		     g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) ||
		     intermediate->sr.cursor >
		     g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) &&
		    intermediate->hpll_en);

	drm_WARN_ON(&dev_priv->drm,
		    intermediate->sr.fbc > g4x_fbc_fifo_size(1) &&
		    intermediate->fbc_en && intermediate->cxsr);
	drm_WARN_ON(&dev_priv->drm,
		    intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
		    intermediate->fbc_en && intermediate->hpll_en);

out:
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}

/*
 * Merge the per-CRTC active watermark states into one device-wide set
 * of register values; cxsr/hpll/fbc only stay enabled with exactly one
 * active pipe and every pipe agreeing.
 */
static void g4x_merge_wm(struct drm_i915_private *dev_priv,
			 struct g4x_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_pipes = 0;

	wm->cxsr = true;
	wm->hpll_en = true;
	wm->fbc_en = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;
		if (!wm_state->hpll_en)
			wm->hpll_en = false;
		if (!wm_state->fbc_en)
			wm->fbc_en = false;

		num_active_pipes++;
	}

	if (num_active_pipes != 1) {
		wm->cxsr = false;
		wm->hpll_en = false;
		wm->fbc_en = false;
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm;
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr;
		if (crtc->active && wm->hpll_en)
			wm->hpll = wm_state->hpll;
	}
}

/*
 * Merge and program the g4x watermarks if they changed, disabling cxsr
 * before the register writes when it is being turned off, and enabling
 * it after when it is being turned on.
 */
static void g4x_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x;
	struct g4x_wm_values new_wm = {};

	g4x_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	g4x_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	*old_wm = new_wm;
}

/* Pre-plane-update hook: program the intermediate watermarks. */
static void g4x_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}

/* Post-vblank hook: program the optimal watermarks if still needed. */
static void g4x_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
	g4x_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	/* bytes -> cachelines */
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}

/* Initialize the VLV/CHV watermark latency table (PM2, and on CHV PM5/DDR DVFS). */
static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	/* all latencies in usec */
	dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
	}
}

/*
 * Compute the VLV/CHV watermark (in cachelines) for one plane at one
 * level. USHRT_MAX marks an unusable level, 0 an invisible plane.
 */
static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
				const struct intel_plane_state *plane_state,
				int level)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_display_mode *pipe_mode =
		&crtc_state->hw.pipe_mode;
	unsigned int pixel_rate, htotal, cpp, width, wm;

	if (dev_priv->display.wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];
	pixel_rate = crtc_state->pixel_rate;
	htotal = pipe_mode->crtc_htotal;
	width = drm_rect_width(&plane_state->uapi.src) >> 16;

	if (plane->id == PLANE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(pixel_rate, htotal, width, cpp,
				    dev_priv->display.wm.pri_latency[level] * 10);
	}

	return min_t(unsigned int, wm, USHRT_MAX);
}

/* True when sprite1 is enabled but sprite0 is not. */
static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
	return (active_planes & (BIT(PLANE_SPRITE0) |
				 BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
}

/*
 * Partition the display FIFO between the planes of this CRTC in
 * proportion to their PM2-level raw watermarks, then spread the
 * remainder evenly over the active planes.
 */
static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
	struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight8(active_planes);
	const int fifo_size = 511;
	int fifo_extra, fifo_left = fifo_size;
	int sprite0_fifo_extra = 0;
	unsigned int total_rate;
	enum plane_id plane_id;

	/*
	 * When enabling sprite0 after sprite1 has already been enabled
	 * we tend to get an underrun unless sprite0 already has some
	 * FIFO space allocated. Hence we always allocate at least one
	 * cacheline for sprite0 whenever sprite1 is enabled.
	 *
	 * All other plane enable sequences appear immune to this problem.
	 */
	if (vlv_need_sprite0_fifo_workaround(active_planes))
		sprite0_fifo_extra = 1;

	total_rate = raw->plane[PLANE_PRIMARY] +
		raw->plane[PLANE_SPRITE0] +
		raw->plane[PLANE_SPRITE1] +
		sprite0_fifo_extra;

	if (total_rate > fifo_size)
		return -EINVAL;

	/* avoid division by zero below */
	if (total_rate == 0)
		total_rate = 1;

	for_each_plane_id_on_crtc(crtc, plane_id) {
		unsigned int rate;

		if ((active_planes & BIT(plane_id)) == 0) {
			fifo_state->plane[plane_id] = 0;
			continue;
		}

		rate = raw->plane[plane_id];
		fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
		fifo_left -= fifo_state->plane[plane_id];
	}

	fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
	fifo_left -= sprite0_fifo_extra;

	/* cursor FIFO is fixed */
	fifo_state->plane[PLANE_CURSOR] = 63;

	fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_plane_id_on_crtc(crtc, plane_id) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if ((active_planes & BIT(plane_id)) == 0)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		fifo_state->plane[plane_id] += plane_extra;
		fifo_left -= plane_extra;
	}

	drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0);

	/* give it all to the first plane if none are active */
	if (active_planes == 0) {
		drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size);
		fifo_state->plane[PLANE_PRIMARY] = fifo_left;
	}

	return 0;
}

/* mark all levels starting from 'level' as invalid */
static void vlv_invalidate_wms(struct intel_crtc *crtc,
			       struct vlv_wm_state *wm_state, int level)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	for (; level < dev_priv->display.wm.num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id)
			wm_state->wm[level].plane[plane_id] = USHRT_MAX;

		wm_state->sr[level].cursor = USHRT_MAX;
		wm_state->sr[level].plane = USHRT_MAX;
	}
}

/*
 * Convert a "fill level" style watermark into the inverted form the
 * hardware expects; USHRT_MAX when the value doesn't fit the FIFO.
 */
static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;
	else
		return fifo_size - wm;
}

/*
 * Starting from 'level' set all higher
 * levels to 'value' in the "raw" watermarks.
 */
static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
				 int level, enum plane_id plane_id, u16 value)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	bool dirty = false;

	for (; level < dev_priv->display.wm.num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];

		dirty |= raw->plane[plane_id] != value;
		raw->plane[plane_id] = value;
	}

	return dirty;
}

/*
 * Recompute the VLV/CHV "raw" watermarks for one plane at all levels,
 * stopping at (and invalidating) the first level whose value exceeds
 * the register maximum. Returns true if any raw value changed.
 */
static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	enum plane_id plane_id = plane->id;
	int level;
	bool dirty = false;

	if (!intel_wm_plane_visible(crtc_state, plane_state)) {
		dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
		goto out;
	}

	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
		struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
		int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;

		if (wm > max_wm)
			break;

		dirty |= raw->plane[plane_id] != wm;
		raw->plane[plane_id] = wm;
	}

	/* mark all higher levels as invalid */
	dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);

out:
	if (dirty)
		drm_dbg_kms(&dev_priv->drm,
			    "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n",
			    plane->base.name,
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
			    crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);

	return dirty;
}

/* Does the raw watermark fit the FIFO space allocated to this plane? */
static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
				      enum plane_id plane_id, int level)
{
	const struct g4x_pipe_wm *raw =
		&crtc_state->wm.vlv.raw[level];
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;

	return raw->plane[plane_id] <= fifo_state->plane[plane_id];
}

/* Are all planes' raw watermarks valid for this level on this CRTC? */
static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
{
	return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
		vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
}

/*
 * Build the optimal VLV/CHV watermark state for one CRTC from the raw
 * per-level watermarks, keeping only the levels whose values fit the
 * current FIFO split. Fails (-EINVAL) if not even level 0 is usable.
 */
static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
	int num_active_planes = hweight8(active_planes);
	enum plane_id plane_id;
	int level;

	/* initially allow all levels */
	wm_state->num_levels = dev_priv->display.wm.num_levels;
	/*
	 * Note that enabling cxsr with no primary/sprite planes
	 * enabled can wedge the pipe. Hence we only allow cxsr
	 * with exactly one enabled primary/sprite plane.
	 */
	wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;

	for (level = 0; level < wm_state->num_levels; level++) {
		const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
		const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1;

		if (!vlv_raw_crtc_wm_is_valid(crtc_state, level))
			break;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			wm_state->wm[level].plane[plane_id] =
				vlv_invert_wm_value(raw->plane[plane_id],
						    fifo_state->plane[plane_id]);
		}

		wm_state->sr[level].plane =
			vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
						 raw->plane[PLANE_SPRITE0],
						 raw->plane[PLANE_SPRITE1]),
					    sr_fifo_size);

		wm_state->sr[level].cursor =
			vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
					    63);
	}

	if (level == 0)
		return -EINVAL;

	/* limit to only levels we can actually handle */
	wm_state->num_levels = level;

	/* invalidate the higher levels */
	vlv_invalidate_wms(crtc, wm_state, level);

	return 0;
}

/*
 * Atomic-check hook: recompute raw plane watermarks for every plane
 * touching this CRTC, recompute the FIFO split when non-cursor planes
 * changed (or on a modeset), and rebuild the optimal watermark state.
 */
static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_plane_state *old_plane_state;
	const struct intel_plane_state *new_plane_state;
	struct intel_plane *plane;
	unsigned int dirty = 0;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane,
					     old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->hw.crtc != &crtc->base &&
		    old_plane_state->hw.crtc != &crtc->base)
			continue;

		if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
			dirty |= BIT(plane->id);
	}

	/*
	 * DSPARB registers may have been reset due to the
	 * power well being turned off. Make sure we restore
	 * them to a consistent state even if no primary/sprite
	 * planes are initially active. We also force a FIFO
	 * recomputation so that we are sure to sanitize the
	 * FIFO setting we took over from the BIOS even if there
	 * are no active planes on the crtc.
	 */
	if (intel_crtc_needs_modeset(crtc_state))
		dirty = ~0;

	if (!dirty)
		return 0;

	/* cursor changes don't warrant a FIFO recompute */
	if (dirty & ~BIT(PLANE_CURSOR)) {
		const struct intel_crtc_state *old_crtc_state =
			intel_atomic_get_old_crtc_state(state, crtc);
		const struct vlv_fifo_state *old_fifo_state =
			&old_crtc_state->wm.vlv.fifo_state;
		const struct vlv_fifo_state *new_fifo_state =
			&crtc_state->wm.vlv.fifo_state;
		int ret;

		ret = vlv_compute_fifo(crtc_state);
		if (ret)
			return ret;

		if (intel_crtc_needs_modeset(crtc_state) ||
		    memcmp(old_fifo_state, new_fifo_state,
			   sizeof(*new_fifo_state)) != 0)
			crtc_state->fifo_changed = true;
	}

	return _vlv_compute_pipe_wm(crtc_state);
}

/* Build a DSPARB field value for one plane's FIFO split point. */
#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_uncore *uncore = &dev_priv->uncore;
	const struct intel_crtc_state
*crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct vlv_fifo_state *fifo_state =
		&crtc_state->wm.vlv.fifo_state;
	int sprite0_start, sprite1_start, fifo_size;
	u32 dsparb, dsparb2, dsparb3;

	if (!crtc_state->fifo_changed)
		return;

	/* FIFO split points: primary | sprite0 | sprite1 (cursor is fixed) */
	sprite0_start = fifo_state->plane[PLANE_PRIMARY];
	sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
	fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;

	drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
	drm_WARN_ON(&dev_priv->drm, fifo_size != 511);

	trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);

	/*
	 * uncore.lock serves a double purpose here. It allows us to
	 * use the less expensive I915_{READ,WRITE}_FW() functions, and
	 * it protects the DSPARB registers from getting clobbered by
	 * parallel updates from multiple pipes.
	 *
	 * intel_pipe_update_start() has already disabled interrupts
	 * for us, so a plain spin_lock() is sufficient here.
	 */
	spin_lock(&uncore->lock);

	switch (crtc->pipe) {
	case PIPE_A:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		/*
		 * NOTE(review): 0x1 here vs 0xff in the other pipes; the
		 * VLV_FIFO() macro clamps to the field mask either way.
		 */
		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = intel_uncore_read_fw(uncore, DSPARB);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB, dsparb);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = intel_uncore_read_fw(uncore, DSPARB3);
		dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		intel_uncore_write_fw(uncore, DSPARB3, dsparb3);
		intel_uncore_write_fw(uncore, DSPARB2, dsparb2);
		break;
	default:
		break;
	}

	intel_uncore_posting_read_fw(uncore, DSPARB);

	spin_unlock(&uncore->lock);
}

#undef VLV_FIFO

/*
 * Compute the intermediate watermark state: safe for both the old and
 * new plane configuration (per-field min of old active and new optimal),
 * used between the atomic commit and the next vblank.
 */
static int vlv_compute_intermediate_wm(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
	int level;

	if (!new_crtc_state->hw.active ||
	    intel_crtc_needs_modeset(new_crtc_state)) {
		/* no old state to merge with — use the optimal, sans cxsr */
		*intermediate = *optimal;

		intermediate->cxsr = false;
		goto out;
	}

	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
	intermediate->cxsr = optimal->cxsr && active->cxsr &&
		!new_crtc_state->disable_cxsr;

	for (level = 0; level < intermediate->num_levels; level++) {
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			intermediate->wm[level].plane[plane_id] =
				min(optimal->wm[level].plane[plane_id],
				    active->wm[level].plane[plane_id]);
		}

		intermediate->sr[level].plane = min(optimal->sr[level].plane,
						    active->sr[level].plane);
		intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
						     active->sr[level].cursor);
	}

	vlv_invalidate_wms(crtc, intermediate, level);

out:
	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}

/*
 * Merge the active per-crtc watermark states into one device-wide
 * state: pick the lowest common level, decide whether cxsr is allowed
 * (exactly one active pipe), and fill in per-pipe wm/sr/ddl values.
 */
static void vlv_merge_wm(struct drm_i915_private *dev_priv,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_pipes = 0;

	wm->level = dev_priv->display.wm.num_levels - 1;
	wm->cxsr = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_pipes++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_pipes != 1)
		wm->cxsr = false;

	if (num_active_pipes > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
		enum pipe pipe = crtc->pipe;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (crtc->active && wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
	}
}

/*
 * Program the merged watermarks to the hardware. Features being
 * disabled (DVFS, PM5, cxsr) are turned off before writing the new
 * values; features being enabled are turned on only afterwards.
 */
static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv;
	struct vlv_wm_values new_wm = {};

	vlv_merge_wm(dev_priv, &new_wm);

	if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
		return;

	if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, false);

	if
(is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, false);

	if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, false);

	vlv_write_wm_values(dev_priv, &new_wm);

	if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
		_intel_set_memory_cxsr(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
		chv_set_memory_pm5(dev_priv, true);

	if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
		chv_set_memory_dvfs(dev_priv, true);

	*old_wm = new_wm;
}

/* Commit the pre-vblank (intermediate) watermarks for this crtc. */
static void vlv_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}

/* Commit the post-vblank (optimal) watermarks, if they differ. */
static void vlv_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
	vlv_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}

/*
 * i965: fixed FIFO watermarks (8) for the planes, plus a computed
 * self-refresh watermark when exactly one crtc is enabled.
 */
static void i965_update_wm(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *pipe_mode =
			&crtc->config->hw.pipe_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int pixel_rate = crtc->config->pixel_rate;
		int htotal = pipe_mode->crtc_htotal;
		int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
		int cpp = fb->format->cpp[0];
		int entries;

		entries = intel_wm_method2(pixel_rate, htotal,
					   width, cpp, sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d, wm: %d\n",
			    entries, srwm);

		/* cursor SR: 4 bytes/pixel assumed */
		entries = intel_wm_method2(pixel_rate, htotal,
					   crtc->base.cursor->state->crtc_w, 4,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size) +
			i965_cursor_wm_info.guard_size;

		cursor_sr = i965_cursor_wm_info.fifo_size - entries;
		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh watermark: display plane %d "
			    "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		    srwm);

	/* 965 has limitations... */
	intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) |
			   FW_WM(8, CURSORB) |
			   FW_WM(8, PLANEB) |
			   FW_WM(8, PLANEA));
	intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) |
			   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

#undef FW_WM

/* Find the crtc whose primary plane is mapped to the given i9xx plane. */
static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915,
					       enum i9xx_plane_id i9xx_plane)
{
	struct intel_plane *plane;

	for_each_intel_plane(&i915->drm, plane) {
		if (plane->id == PLANE_PRIMARY &&
		    plane->i9xx_plane == i9xx_plane)
			return intel_crtc_for_pipe(i915, plane->pipe);
	}

	return NULL;
}

/*
 * i9xx/i830: compute FIFO watermarks for planes A/B plus an optional
 * self-refresh watermark, and program FW_BLC/FW_BLC2.
 */
static void i9xx_update_wm(struct drm_i915_private *dev_priv)
{
	const struct intel_watermark_params *wm_info;
	u32 fwater_lo;
	u32 fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct intel_crtc *crtc;

	if (IS_I945GM(dev_priv))
		wm_info = &i945_wm_info;
	else if (DISPLAY_VER(dev_priv) != 2)
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	if (DISPLAY_VER(dev_priv) == 2)
		fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
	else
		fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
	crtc = intel_crtc_for_plane(dev_priv, PLANE_A);
	if (intel_crtc_active(crtc)) {
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		/* DISPLAY_VER 2 always uses 4 bytes/pixel here */
		if (DISPLAY_VER(dev_priv) == 2)
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);

	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (DISPLAY_VER(dev_priv) == 2)
		wm_info = &i830_bc_wm_info;

	if (DISPLAY_VER(dev_priv) == 2)
		fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
	else
		fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
	crtc = intel_crtc_for_plane(dev_priv, PLANE_B);
	if (intel_crtc_active(crtc)) {
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int cpp;

		if (DISPLAY_VER(dev_priv) == 2)
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	crtc = single_enabled_crtc(dev_priv);
	if (IS_I915GM(dev_priv) && crtc) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(crtc->base.primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (!i915_gem_object_is_tiled(obj))
			crtc = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev_priv) && crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *pipe_mode =
			&crtc->config->hw.pipe_mode;
		const struct drm_framebuffer *fb =
			crtc->base.primary->state->fb;
		int pixel_rate = crtc->config->pixel_rate;
		int htotal = pipe_mode->crtc_htotal;
		int width = drm_rect_width(&crtc->base.primary->state->src) >> 16;
		int cpp;
		int entries;

		if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
			cpp = 4;
		else
			cpp = fb->format->cpp[0];

		entries = intel_wm_method2(pixel_rate, htotal, width, cpp,
					   sr_latency_ns / 100);
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		drm_dbg_kms(&dev_priv->drm,
			    "self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev_priv) || IS_I945GM(dev_priv))
			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF,
					   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else
			intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f);
	}

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		    planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
	intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi);

	if (crtc)
		intel_set_memory_cxsr(dev_priv, true);
}

static void
i845_update_wm(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	u32 fwater_lo;
	int planea_wm;

	/* i845: only a single plane A watermark, and only with one crtc */
	crtc = single_enabled_crtc(dev_priv);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
				       &i845_wm_info,
				       i845_get_fifo_size(dev_priv, PLANE_A),
				       4, pessimal_latency_ns);
	fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	drm_dbg_kms(&dev_priv->drm,
		    "Setting FIFO watermarks - A: %d\n", planea_wm);

	intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo);
}

/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method1(unsigned int pixel_rate,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method1(pixel_rate, cpp, latency);
	/* round up to 64-byte units and add 2 of headroom */
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static unsigned int ilk_wm_method2(unsigned int pixel_rate,
				   unsigned int htotal,
				   unsigned int width,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = intel_wm_method2(pixel_rate, htotal,
			       width, cpp, latency);
	/* round up to 64-byte units and add 2 of headroom */
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}

/* FBC watermark derived from the primary watermark and line length. */
static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible. But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}

/* Per-level register maximums for ILK-style watermark fields. */
struct ilk_wm_maximums {
	u16 pri;
	u16 spr;
	u16 cur;
	u16 fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value, bool is_lp)
{
	u32 method1, method2;
	int cpp;

	/* zero latency means the level is unusable */
	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(crtc_state->pixel_rate,
				 crtc_state->hw.pipe_mode.crtc_htotal,
				 drm_rect_width(&plane_state->uapi.src) >> 16,
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value)
{
	u32 method1, method2;
	int cpp;

	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
	method2 = ilk_wm_method2(crtc_state->pixel_rate,
				 crtc_state->hw.pipe_mode.crtc_htotal,
				 drm_rect_width(&plane_state->uapi.src) >> 16,
				 cpp, mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 mem_value)
{
	int cpp;

	if (mem_value == 0)
		return U32_MAX;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	return ilk_wm_method2(crtc_state->pixel_rate,
			      crtc_state->hw.pipe_mode.crtc_htotal,
			      drm_rect_width(&plane_state->uapi.src) >> 16,
			      cpp, mem_value);
}

/* Only for WM_LP. */
static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state,
			      u32 pri_val)
{
	int cpp;

	if (!intel_wm_plane_visible(crtc_state, plane_state))
		return 0;

	cpp = plane_state->hw.fb->format->cpp[0];

	return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16,
			  cpp);
}

/* Total display FIFO size per display generation. */
static unsigned int
ilk_display_fifo_size(const struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return 3072;
	else if (DISPLAY_VER(dev_priv) >= 7)
		return 768;
	else
		return 512;
}

/* Register field limit for primary/sprite watermarks per generation. */
static unsigned int
ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv,
		     int level, bool is_sprite)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (DISPLAY_VER(dev_priv) >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

/* Register field limit for cursor watermarks per generation. */
static unsigned int
ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level)
{
	if (DISPLAY_VER(dev_priv) >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

/* Register field limit for the FBC watermark. */
static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev_priv);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_NUM_PIPES(dev_priv);

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
2478 + */ 2479 + if (DISPLAY_VER(dev_priv) <= 6) 2480 + fifo_size /= 2; 2481 + } 2482 + 2483 + if (config->sprites_enabled) { 2484 + /* level 0 is always calculated with 1:1 split */ 2485 + if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { 2486 + if (is_sprite) 2487 + fifo_size *= 5; 2488 + fifo_size /= 6; 2489 + } else { 2490 + fifo_size /= 2; 2491 + } 2492 + } 2493 + 2494 + /* clamp to max that the registers can hold */ 2495 + return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); 2496 + } 2497 + 2498 + /* Calculate the maximum cursor plane watermark */ 2499 + static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, 2500 + int level, 2501 + const struct intel_wm_config *config) 2502 + { 2503 + /* HSW LP1+ watermarks w/ multiple pipes */ 2504 + if (level > 0 && config->num_pipes_active > 1) 2505 + return 64; 2506 + 2507 + /* otherwise just report max that registers can hold */ 2508 + return ilk_cursor_wm_reg_max(dev_priv, level); 2509 + } 2510 + 2511 + static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv, 2512 + int level, 2513 + const struct intel_wm_config *config, 2514 + enum intel_ddb_partitioning ddb_partitioning, 2515 + struct ilk_wm_maximums *max) 2516 + { 2517 + max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false); 2518 + max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true); 2519 + max->cur = ilk_cursor_wm_max(dev_priv, level, config); 2520 + max->fbc = ilk_fbc_wm_reg_max(dev_priv); 2521 + } 2522 + 2523 + static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, 2524 + int level, 2525 + struct ilk_wm_maximums *max) 2526 + { 2527 + max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); 2528 + max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); 2529 + max->cur = ilk_cursor_wm_reg_max(dev_priv, level); 2530 + max->fbc = ilk_fbc_wm_reg_max(dev_priv); 2531 + } 2532 + 2533 + static bool ilk_validate_wm_level(int 
level, 2534 + const struct ilk_wm_maximums *max, 2535 + struct intel_wm_level *result) 2536 + { 2537 + bool ret; 2538 + 2539 + /* already determined to be invalid? */ 2540 + if (!result->enable) 2541 + return false; 2542 + 2543 + result->enable = result->pri_val <= max->pri && 2544 + result->spr_val <= max->spr && 2545 + result->cur_val <= max->cur; 2546 + 2547 + ret = result->enable; 2548 + 2549 + /* 2550 + * HACK until we can pre-compute everything, 2551 + * and thus fail gracefully if LP0 watermarks 2552 + * are exceeded... 2553 + */ 2554 + if (level == 0 && !result->enable) { 2555 + if (result->pri_val > max->pri) 2556 + DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", 2557 + level, result->pri_val, max->pri); 2558 + if (result->spr_val > max->spr) 2559 + DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", 2560 + level, result->spr_val, max->spr); 2561 + if (result->cur_val > max->cur) 2562 + DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", 2563 + level, result->cur_val, max->cur); 2564 + 2565 + result->pri_val = min_t(u32, result->pri_val, max->pri); 2566 + result->spr_val = min_t(u32, result->spr_val, max->spr); 2567 + result->cur_val = min_t(u32, result->cur_val, max->cur); 2568 + result->enable = true; 2569 + } 2570 + 2571 + return ret; 2572 + } 2573 + 2574 + static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, 2575 + const struct intel_crtc *crtc, 2576 + int level, 2577 + struct intel_crtc_state *crtc_state, 2578 + const struct intel_plane_state *pristate, 2579 + const struct intel_plane_state *sprstate, 2580 + const struct intel_plane_state *curstate, 2581 + struct intel_wm_level *result) 2582 + { 2583 + u16 pri_latency = dev_priv->display.wm.pri_latency[level]; 2584 + u16 spr_latency = dev_priv->display.wm.spr_latency[level]; 2585 + u16 cur_latency = dev_priv->display.wm.cur_latency[level]; 2586 + 2587 + /* WM1+ latency values stored in 0.5us units */ 2588 + if (level > 0) { 2589 + pri_latency *= 5; 2590 + spr_latency *= 5; 
2591 + cur_latency *= 5; 2592 + } 2593 + 2594 + if (pristate) { 2595 + result->pri_val = ilk_compute_pri_wm(crtc_state, pristate, 2596 + pri_latency, level); 2597 + result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val); 2598 + } 2599 + 2600 + if (sprstate) 2601 + result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency); 2602 + 2603 + if (curstate) 2604 + result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency); 2605 + 2606 + result->enable = true; 2607 + } 2608 + 2609 + static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) 2610 + { 2611 + u64 sskpd; 2612 + 2613 + i915->display.wm.num_levels = 5; 2614 + 2615 + sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD); 2616 + 2617 + wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); 2618 + if (wm[0] == 0) 2619 + wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); 2620 + wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); 2621 + wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); 2622 + wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); 2623 + wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); 2624 + } 2625 + 2626 + static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) 2627 + { 2628 + u32 sskpd; 2629 + 2630 + i915->display.wm.num_levels = 4; 2631 + 2632 + sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD); 2633 + 2634 + wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); 2635 + wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); 2636 + wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); 2637 + wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); 2638 + } 2639 + 2640 + static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) 2641 + { 2642 + u32 mltr; 2643 + 2644 + i915->display.wm.num_levels = 3; 2645 + 2646 + mltr = intel_uncore_read(&i915->uncore, MLTR_ILK); 2647 + 2648 + /* ILK primary LP0 latency is 700 ns */ 2649 + wm[0] = 7; 2650 + wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); 2651 + wm[2] = 
		REG_FIELD_GET(MLTR_WM2_MASK, mltr);
}

static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
				       u16 wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (DISPLAY_VER(dev_priv) == 5)
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
				       u16 wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (DISPLAY_VER(dev_priv) == 5)
		wm[0] = 13;
}

/*
 * Raise the LP0 latency to at least @min, and the WM1+ latencies to
 * the equivalent value in their 0.5us units. Returns true if any
 * latency value was actually changed.
 */
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    u16 wm[5], u16 min)
{
	int level;

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level < dev_priv->display.wm.num_levels; level++)
		wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}

static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
{
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12);
	changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12);
	changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12);

	if (!changed)
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
}

static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
{
	/*
	 * On some SNB machines (Thinkpad X220 Tablet at least)
	 * LP3 usage can cause vblank interrupts to be lost.
	 * The DEIIR bit will go high but it looks like the CPU
	 * never gets interrupted.
	 *
	 * It's not clear whether other interrupt source could
	 * be affected or if this is somehow limited to vblank
	 * interrupts only. To play it safe we disable LP3
	 * watermarks entirely.
	 */
	if (dev_priv->display.wm.pri_latency[3] == 0 &&
	    dev_priv->display.wm.spr_latency[3] == 0 &&
	    dev_priv->display.wm.cur_latency[3] == 0)
		return;

	dev_priv->display.wm.pri_latency[3] = 0;
	dev_priv->display.wm.spr_latency[3] = 0;
	dev_priv->display.wm.cur_latency[3] = 0;

	drm_dbg_kms(&dev_priv->drm,
		    "LP3 watermarks disabled due to potential for lost interrupts\n");
	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);
}

/*
 * Read the memory latency values for the platform, derive the sprite
 * and cursor latencies from the primary ones, apply the per-platform
 * fixups/quirks, and log the final values.
 */
static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
{
	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
	else if (DISPLAY_VER(dev_priv) >= 6)
		snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);
	else
		ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency);

	/* sprite/cursor latencies start out identical to the primary ones */
	memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency,
	       sizeof(dev_priv->display.wm.pri_latency));
	memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency,
	       sizeof(dev_priv->display.wm.pri_latency));

	intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency);
	intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency);

	intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency);
	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency);
	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency);

	if (DISPLAY_VER(dev_priv) == 6) {
		snb_wm_latency_quirk(dev_priv);
		snb_wm_lp3_irq_quirk(dev_priv);
	}
}

static
bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
			  struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
		return false;
	}

	return true;
}

/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_pipe_wm *pipe_wm;
	struct intel_plane *plane;
	const struct intel_plane_state *plane_state;
	const struct intel_plane_state *pristate = NULL;
	const struct intel_plane_state *sprstate = NULL;
	const struct intel_plane_state *curstate = NULL;
	struct ilk_wm_maximums max;
	int level, usable_level;

	pipe_wm = &crtc_state->wm.ilk.optimal;

	/* pick out the primary/sprite/cursor plane states for this crtc */
	intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = plane_state;
		else if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = plane_state;
	}

	pipe_wm->pipe_enabled = crtc_state->hw.active;
	pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
	pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);

	usable_level = dev_priv->display.wm.num_levels - 1;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
			     pristate, sprstate, curstate, &pipe_wm->wm[0]);

	/* LP0 must fit within the register maximums or the config is invalid */
	if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev_priv, 1, &max);

	for (level = 1; level <= usable_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->wm[level];

		ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (!ilk_validate_wm_level(level, &max, wm)) {
			memset(wm, 0, sizeof(*wm));
			break;
		}
	}

	return 0;
}

/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state.  These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate;
	const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal;
	int level;

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = new_crtc_state->wm.ilk.optimal;
	if (!new_crtc_state->hw.active ||
	    intel_crtc_needs_modeset(new_crtc_state) ||
	    state->skip_intermediate_wm)
		return 0;

	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	/* per level: enabled only if both enabled, values are the max of both */
	for (level = 0; level < dev_priv->display.wm.num_levels; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves.  If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev_priv, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0)
		new_crtc_state->wm.need_postvbl_update = true;

	return 0;
}

/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct intel_pipe_wm *active = &crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}

/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_i915_private *dev_priv,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, num_levels = dev_priv->display.wm.num_levels;
	int last_enabled_level = num_levels - 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6;

	/* merge each WM1+ level */
	for (level = 1; level < num_levels; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev_priv, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
	    dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
		for (level = 2; level < num_levels; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}

static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}

/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
				      int level)
{
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return 2 * level;
	else
		return dev_priv->display.wm.pri_latency[level];
}

/* Translate the merged watermarks into the register values to program */
static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
			WM_LP_PRIMARY(r->pri_val) |
			WM_LP_CURSOR(r->cur_val);

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;

		if (DISPLAY_VER(dev_priv) >= 8)
			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
		else
			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);

		results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);

		/*
		 * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
			drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
		}
	}

	/* LP0 register values */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		enum pipe pipe = crtc->pipe;
		const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk;
		const struct intel_wm_level *r = &pipe_wm->wm[0];

		if (drm_WARN_ON(&dev_priv->drm, !r->enable))
			continue;

		results->wm_pipe[pipe] =
			WM0_PIPE_PRIMARY(r->pri_val) |
			WM0_PIPE_SPRITE(r->spr_val) |
			WM0_PIPE_CURSOR(r->cur_val);
	}
}

/*
 * Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same.
 */
static struct intel_pipe_wm *
ilk_find_best_result(struct drm_i915_private *dev_priv,
		     struct intel_pipe_wm *r1,
		     struct intel_pipe_wm *r2)
{
	int level, level1 = 0, level2 = 0;

	/* find the highest enabled watermark level of each result */
	for (level = 1; level < dev_priv->display.wm.num_levels; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)

/* Work out which watermark registers differ between @old and @new */
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}

/*
 * Disable the dirty LP1+ watermarks in the hardware, highest first.
 * Returns true if any register was actually written.
 */
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
		previous->wm_lp[2] &= ~WM_LP_ENABLE;
		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
		previous->wm_lp[1] &= ~WM_LP_ENABLE;
		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
		previous->wm_lp[0] &= ~WM_LP_ENABLE;
		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM_LP_SPRITE_ENABLE here.
	 * Doing so could cause underruns.
	 */

	return changed;
}

/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct ilk_wm_values *previous = &dev_priv->display.wm.hw;
	unsigned int dirty;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	/* LP1+ watermarks must be disabled before touching anything else */
	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6,
					 results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
					 WM_MISC_DATA_PARTITION_5_6);
		else
			intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6,
					 results->partitioning == INTEL_DDB_PART_1_2 ? 0 :
					 DISP_DATA_PARTITION_5_6);
	}

	if (dirty & WM_DIRTY_FBC)
		intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS,
				 results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS);

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (DISPLAY_VER(dev_priv) >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]);

	/* remember what we just programmed as the new hardware state */
	dev_priv->display.wm.hw = *results;
}

bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
{
	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

/* Collect the watermark config of all currently active pipes */
static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
				  struct intel_wm_config *config)
{
	struct intel_crtc *crtc;

	/* Compute the currently _active_ config */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}

/* Merge all pipes, pick the best DDB partitioning and program the result */
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct ilk_wm_maximums max;
	struct intel_wm_config config = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;

	ilk_compute_wm_config(dev_priv, &config);

	ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (DISPLAY_VER(dev_priv) >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}

/* Program the pre-vblank 'intermediate' watermarks */
static void ilk_initial_watermarks(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}

/* Program the post-vblank 'optimal' watermarks, if they differ */
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->wm.need_postvbl_update)
		return;

	mutex_lock(&dev_priv->display.wm.wm_mutex);
	crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
	ilk_program_watermarks(dev_priv);
	mutex_unlock(&dev_priv->display.wm.wm_mutex);
}

/* Read out the current WM0 hardware state for one pipe */
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct ilk_wm_values *hw = &dev_priv->display.wm.hw;
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
	struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
	enum pipe pipe = crtc->pipe;

	hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe));

	memset(active, 0, sizeof(*active));

	active->pipe_enabled = crtc->active;

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
		active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
		active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
	} else {
		int level;

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level < dev_priv->display.wm.num_levels; level++)
			active->wm[level].enable = true;
	}

	crtc->wm.active.ilk = *active;
}

/* Add every CRTC and plane state the sanitizing commit needs to recompute */
static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_crtc(state->dev, crtc) {
		struct intel_crtc_state *crtc_state;

		crtc_state = intel_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (crtc_state->hw.active) {
			/*
			 * Preserve the inherited flag to avoid
			 * taking the full modeset path.
			 */
			crtc_state->inherited = true;
		}
	}

	drm_for_each_plane(plane, state->dev) {
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
void ilk_wm_sanitize(struct drm_i915_private *dev_priv)
{
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct intel_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.funcs.wm->optimize_watermarks)
		return;

	if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9))
		return;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (drm_WARN_ON(&dev_priv->drm, !state))
		return;

	intel_state = to_intel_atomic_state(state);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	state->acquire_ctx = &ctx;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = ilk_sanitize_watermarks_add_affected(state);
	if (ret)
		goto fail;

	ret = intel_atomic_check(&dev_priv->drm, state);
	if (ret)
		goto fail;

	/* Write calculated watermark values back */
	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
		crtc_state->wm.need_postvbl_update = true;
		intel_optimize_watermarks(intel_state, crtc);

		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
	}

fail:
	if (ret == -EDEADLK) {
		/* lock contention: back off and restart the whole sequence */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/*
	 * If we fail here, it means that the hardware appears to be
	 * programmed in a way that shouldn't be possible, given our
	 * understanding of watermark requirements.  This might mean a
	 * mistake in the hardware readout code or a mistake in the
	 * watermark calculations for a given platform.  Raise a WARN
	 * so that this is noticeable.
	 *
	 * If this actually happens, we'll have to just leave the
	 * BIOS-programmed watermarks untouched and hope for the best.
	 */
	drm_WARN(&dev_priv->drm, ret,
		 "Could not determine valid watermarks for inherited state\n");

	drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

/* extract one watermark field from a DSPFW register value */
#define _FW_WM(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
	(((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)

/* Read the current G4X watermark values from the DSPFW registers */
static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
			       struct g4x_wm_values *wm)
{
	u32 tmp;

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1);
	wm->sr.plane = _FW_WM(tmp, SR);
	wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
	wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB);
	wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA);

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2);
	wm->fbc_en = tmp & DSPFW_FBC_SR_EN;
	wm->sr.fbc = _FW_WM(tmp, FBC_SR);
	wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR);
	wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB);
	wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
	wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA);

	tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3);
	wm->hpll_en = tmp & DSPFW_HPLL_SR_EN;
	wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
	wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR);
	wm->hpll.plane = _FW_WM(tmp, HPLL_SR);
}

/* Read the current VLV/CHV watermark values from the DDL/DSPFW registers */
static void vlv_read_wm_values(struct
drm_i915_private *dev_priv, 3536 + struct vlv_wm_values *wm) 3537 + { 3538 + enum pipe pipe; 3539 + u32 tmp; 3540 + 3541 + for_each_pipe(dev_priv, pipe) { 3542 + tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe)); 3543 + 3544 + wm->ddl[pipe].plane[PLANE_PRIMARY] = 3545 + (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3546 + wm->ddl[pipe].plane[PLANE_CURSOR] = 3547 + (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3548 + wm->ddl[pipe].plane[PLANE_SPRITE0] = 3549 + (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3550 + wm->ddl[pipe].plane[PLANE_SPRITE1] = 3551 + (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3552 + } 3553 + 3554 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); 3555 + wm->sr.plane = _FW_WM(tmp, SR); 3556 + wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); 3557 + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); 3558 + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); 3559 + 3560 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); 3561 + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); 3562 + wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); 3563 + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); 3564 + 3565 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); 3566 + wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); 3567 + 3568 + if (IS_CHERRYVIEW(dev_priv)) { 3569 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV); 3570 + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); 3571 + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); 3572 + 3573 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV); 3574 + wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); 3575 + wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); 3576 + 3577 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV); 3578 + 
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); 3579 + wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); 3580 + 3581 + tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); 3582 + wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 3583 + wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; 3584 + wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; 3585 + wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8; 3586 + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; 3587 + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 3588 + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; 3589 + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 3590 + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 3591 + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; 3592 + } else { 3593 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7); 3594 + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); 3595 + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); 3596 + 3597 + tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); 3598 + wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 3599 + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; 3600 + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 3601 + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; 3602 + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 3603 + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 3604 + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; 3605 + } 3606 + } 3607 + 3608 + #undef _FW_WM 3609 + #undef _FW_WM_VLV 3610 + 3611 + static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) 3612 + { 3613 + struct g4x_wm_values *wm = &dev_priv->display.wm.g4x; 3614 + struct intel_crtc 
*crtc; 3615 + 3616 + g4x_read_wm_values(dev_priv, wm); 3617 + 3618 + wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; 3619 + 3620 + for_each_intel_crtc(&dev_priv->drm, crtc) { 3621 + struct intel_crtc_state *crtc_state = 3622 + to_intel_crtc_state(crtc->base.state); 3623 + struct g4x_wm_state *active = &crtc->wm.active.g4x; 3624 + struct g4x_pipe_wm *raw; 3625 + enum pipe pipe = crtc->pipe; 3626 + enum plane_id plane_id; 3627 + int level, max_level; 3628 + 3629 + active->cxsr = wm->cxsr; 3630 + active->hpll_en = wm->hpll_en; 3631 + active->fbc_en = wm->fbc_en; 3632 + 3633 + active->sr = wm->sr; 3634 + active->hpll = wm->hpll; 3635 + 3636 + for_each_plane_id_on_crtc(crtc, plane_id) { 3637 + active->wm.plane[plane_id] = 3638 + wm->pipe[pipe].plane[plane_id]; 3639 + } 3640 + 3641 + if (wm->cxsr && wm->hpll_en) 3642 + max_level = G4X_WM_LEVEL_HPLL; 3643 + else if (wm->cxsr) 3644 + max_level = G4X_WM_LEVEL_SR; 3645 + else 3646 + max_level = G4X_WM_LEVEL_NORMAL; 3647 + 3648 + level = G4X_WM_LEVEL_NORMAL; 3649 + raw = &crtc_state->wm.g4x.raw[level]; 3650 + for_each_plane_id_on_crtc(crtc, plane_id) 3651 + raw->plane[plane_id] = active->wm.plane[plane_id]; 3652 + 3653 + level = G4X_WM_LEVEL_SR; 3654 + if (level > max_level) 3655 + goto out; 3656 + 3657 + raw = &crtc_state->wm.g4x.raw[level]; 3658 + raw->plane[PLANE_PRIMARY] = active->sr.plane; 3659 + raw->plane[PLANE_CURSOR] = active->sr.cursor; 3660 + raw->plane[PLANE_SPRITE0] = 0; 3661 + raw->fbc = active->sr.fbc; 3662 + 3663 + level = G4X_WM_LEVEL_HPLL; 3664 + if (level > max_level) 3665 + goto out; 3666 + 3667 + raw = &crtc_state->wm.g4x.raw[level]; 3668 + raw->plane[PLANE_PRIMARY] = active->hpll.plane; 3669 + raw->plane[PLANE_CURSOR] = active->hpll.cursor; 3670 + raw->plane[PLANE_SPRITE0] = 0; 3671 + raw->fbc = active->hpll.fbc; 3672 + 3673 + level++; 3674 + out: 3675 + for_each_plane_id_on_crtc(crtc, plane_id) 3676 + g4x_raw_plane_wm_set(crtc_state, level, 3677 + plane_id, USHRT_MAX); 
3678 + g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); 3679 + 3680 + g4x_invalidate_wms(crtc, active, level); 3681 + 3682 + crtc_state->wm.g4x.optimal = *active; 3683 + crtc_state->wm.g4x.intermediate = *active; 3684 + 3685 + drm_dbg_kms(&dev_priv->drm, 3686 + "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", 3687 + pipe_name(pipe), 3688 + wm->pipe[pipe].plane[PLANE_PRIMARY], 3689 + wm->pipe[pipe].plane[PLANE_CURSOR], 3690 + wm->pipe[pipe].plane[PLANE_SPRITE0]); 3691 + } 3692 + 3693 + drm_dbg_kms(&dev_priv->drm, 3694 + "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n", 3695 + wm->sr.plane, wm->sr.cursor, wm->sr.fbc); 3696 + drm_dbg_kms(&dev_priv->drm, 3697 + "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", 3698 + wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); 3699 + drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n", 3700 + str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en), 3701 + str_yes_no(wm->fbc_en)); 3702 + } 3703 + 3704 + static void g4x_wm_sanitize(struct drm_i915_private *dev_priv) 3705 + { 3706 + struct intel_plane *plane; 3707 + struct intel_crtc *crtc; 3708 + 3709 + mutex_lock(&dev_priv->display.wm.wm_mutex); 3710 + 3711 + for_each_intel_plane(&dev_priv->drm, plane) { 3712 + struct intel_crtc *crtc = 3713 + intel_crtc_for_pipe(dev_priv, plane->pipe); 3714 + struct intel_crtc_state *crtc_state = 3715 + to_intel_crtc_state(crtc->base.state); 3716 + struct intel_plane_state *plane_state = 3717 + to_intel_plane_state(plane->base.state); 3718 + enum plane_id plane_id = plane->id; 3719 + int level; 3720 + 3721 + if (plane_state->uapi.visible) 3722 + continue; 3723 + 3724 + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { 3725 + struct g4x_pipe_wm *raw = 3726 + &crtc_state->wm.g4x.raw[level]; 3727 + 3728 + raw->plane[plane_id] = 0; 3729 + 3730 + if (plane_id == PLANE_PRIMARY) 3731 + raw->fbc = 0; 3732 + } 3733 + } 3734 + 3735 + for_each_intel_crtc(&dev_priv->drm, crtc) { 3736 + struct intel_crtc_state 
*crtc_state = 3737 + to_intel_crtc_state(crtc->base.state); 3738 + int ret; 3739 + 3740 + ret = _g4x_compute_pipe_wm(crtc_state); 3741 + drm_WARN_ON(&dev_priv->drm, ret); 3742 + 3743 + crtc_state->wm.g4x.intermediate = 3744 + crtc_state->wm.g4x.optimal; 3745 + crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; 3746 + } 3747 + 3748 + g4x_program_watermarks(dev_priv); 3749 + 3750 + mutex_unlock(&dev_priv->display.wm.wm_mutex); 3751 + } 3752 + 3753 + static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) 3754 + { 3755 + g4x_wm_get_hw_state(i915); 3756 + g4x_wm_sanitize(i915); 3757 + } 3758 + 3759 + static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) 3760 + { 3761 + struct vlv_wm_values *wm = &dev_priv->display.wm.vlv; 3762 + struct intel_crtc *crtc; 3763 + u32 val; 3764 + 3765 + vlv_read_wm_values(dev_priv, wm); 3766 + 3767 + wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 3768 + wm->level = VLV_WM_LEVEL_PM2; 3769 + 3770 + if (IS_CHERRYVIEW(dev_priv)) { 3771 + vlv_punit_get(dev_priv); 3772 + 3773 + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 3774 + if (val & DSP_MAXFIFO_PM5_ENABLE) 3775 + wm->level = VLV_WM_LEVEL_PM5; 3776 + 3777 + /* 3778 + * If DDR DVFS is disabled in the BIOS, Punit 3779 + * will never ack the request. So if that happens 3780 + * assume we don't have to enable/disable DDR DVFS 3781 + * dynamically. To test that just set the REQ_ACK 3782 + * bit to poke the Punit, but don't change the 3783 + * HIGH/LOW bits so that we don't actually change 3784 + * the current state. 
3785 + */ 3786 + val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 3787 + val |= FORCE_DDR_FREQ_REQ_ACK; 3788 + vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); 3789 + 3790 + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & 3791 + FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { 3792 + drm_dbg_kms(&dev_priv->drm, 3793 + "Punit not acking DDR DVFS request, " 3794 + "assuming DDR DVFS is disabled\n"); 3795 + dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1; 3796 + } else { 3797 + val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 3798 + if ((val & FORCE_DDR_HIGH_FREQ) == 0) 3799 + wm->level = VLV_WM_LEVEL_DDR_DVFS; 3800 + } 3801 + 3802 + vlv_punit_put(dev_priv); 3803 + } 3804 + 3805 + for_each_intel_crtc(&dev_priv->drm, crtc) { 3806 + struct intel_crtc_state *crtc_state = 3807 + to_intel_crtc_state(crtc->base.state); 3808 + struct vlv_wm_state *active = &crtc->wm.active.vlv; 3809 + const struct vlv_fifo_state *fifo_state = 3810 + &crtc_state->wm.vlv.fifo_state; 3811 + enum pipe pipe = crtc->pipe; 3812 + enum plane_id plane_id; 3813 + int level; 3814 + 3815 + vlv_get_fifo_size(crtc_state); 3816 + 3817 + active->num_levels = wm->level + 1; 3818 + active->cxsr = wm->cxsr; 3819 + 3820 + for (level = 0; level < active->num_levels; level++) { 3821 + struct g4x_pipe_wm *raw = 3822 + &crtc_state->wm.vlv.raw[level]; 3823 + 3824 + active->sr[level].plane = wm->sr.plane; 3825 + active->sr[level].cursor = wm->sr.cursor; 3826 + 3827 + for_each_plane_id_on_crtc(crtc, plane_id) { 3828 + active->wm[level].plane[plane_id] = 3829 + wm->pipe[pipe].plane[plane_id]; 3830 + 3831 + raw->plane[plane_id] = 3832 + vlv_invert_wm_value(active->wm[level].plane[plane_id], 3833 + fifo_state->plane[plane_id]); 3834 + } 3835 + } 3836 + 3837 + for_each_plane_id_on_crtc(crtc, plane_id) 3838 + vlv_raw_plane_wm_set(crtc_state, level, 3839 + plane_id, USHRT_MAX); 3840 + vlv_invalidate_wms(crtc, active, level); 3841 + 3842 + crtc_state->wm.vlv.optimal = *active; 3843 + 
crtc_state->wm.vlv.intermediate = *active; 3844 + 3845 + drm_dbg_kms(&dev_priv->drm, 3846 + "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", 3847 + pipe_name(pipe), 3848 + wm->pipe[pipe].plane[PLANE_PRIMARY], 3849 + wm->pipe[pipe].plane[PLANE_CURSOR], 3850 + wm->pipe[pipe].plane[PLANE_SPRITE0], 3851 + wm->pipe[pipe].plane[PLANE_SPRITE1]); 3852 + } 3853 + 3854 + drm_dbg_kms(&dev_priv->drm, 3855 + "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", 3856 + wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); 3857 + } 3858 + 3859 + static void vlv_wm_sanitize(struct drm_i915_private *dev_priv) 3860 + { 3861 + struct intel_plane *plane; 3862 + struct intel_crtc *crtc; 3863 + 3864 + mutex_lock(&dev_priv->display.wm.wm_mutex); 3865 + 3866 + for_each_intel_plane(&dev_priv->drm, plane) { 3867 + struct intel_crtc *crtc = 3868 + intel_crtc_for_pipe(dev_priv, plane->pipe); 3869 + struct intel_crtc_state *crtc_state = 3870 + to_intel_crtc_state(crtc->base.state); 3871 + struct intel_plane_state *plane_state = 3872 + to_intel_plane_state(plane->base.state); 3873 + enum plane_id plane_id = plane->id; 3874 + int level; 3875 + 3876 + if (plane_state->uapi.visible) 3877 + continue; 3878 + 3879 + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { 3880 + struct g4x_pipe_wm *raw = 3881 + &crtc_state->wm.vlv.raw[level]; 3882 + 3883 + raw->plane[plane_id] = 0; 3884 + } 3885 + } 3886 + 3887 + for_each_intel_crtc(&dev_priv->drm, crtc) { 3888 + struct intel_crtc_state *crtc_state = 3889 + to_intel_crtc_state(crtc->base.state); 3890 + int ret; 3891 + 3892 + ret = _vlv_compute_pipe_wm(crtc_state); 3893 + drm_WARN_ON(&dev_priv->drm, ret); 3894 + 3895 + crtc_state->wm.vlv.intermediate = 3896 + crtc_state->wm.vlv.optimal; 3897 + crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; 3898 + } 3899 + 3900 + vlv_program_watermarks(dev_priv); 3901 + 3902 + mutex_unlock(&dev_priv->display.wm.wm_mutex); 3903 + } 3904 + 3905 + static void 
vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) 3906 + { 3907 + vlv_wm_get_hw_state(i915); 3908 + vlv_wm_sanitize(i915); 3909 + } 3910 + 3911 + /* 3912 + * FIXME should probably kill this and improve 3913 + * the real watermark readout/sanitation instead 3914 + */ 3915 + static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) 3916 + { 3917 + intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0); 3918 + intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0); 3919 + intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0); 3920 + 3921 + /* 3922 + * Don't touch WM_LP_SPRITE_ENABLE here. 3923 + * Doing so could cause underruns. 3924 + */ 3925 + } 3926 + 3927 + static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) 3928 + { 3929 + struct ilk_wm_values *hw = &dev_priv->display.wm.hw; 3930 + struct intel_crtc *crtc; 3931 + 3932 + ilk_init_lp_watermarks(dev_priv); 3933 + 3934 + for_each_intel_crtc(&dev_priv->drm, crtc) 3935 + ilk_pipe_wm_get_hw_state(crtc); 3936 + 3937 + hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK); 3938 + hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK); 3939 + hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK); 3940 + 3941 + hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK); 3942 + if (DISPLAY_VER(dev_priv) >= 7) { 3943 + hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB); 3944 + hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB); 3945 + } 3946 + 3947 + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3948 + hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & 3949 + WM_MISC_DATA_PARTITION_5_6) ? 3950 + INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 3951 + else if (IS_IVYBRIDGE(dev_priv)) 3952 + hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & 3953 + DISP_DATA_PARTITION_5_6) ? 
3954 + INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 3955 + 3956 + hw->enable_fbc_wm = 3957 + !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); 3958 + } 3959 + 3960 + static const struct intel_wm_funcs ilk_wm_funcs = { 3961 + .compute_pipe_wm = ilk_compute_pipe_wm, 3962 + .compute_intermediate_wm = ilk_compute_intermediate_wm, 3963 + .initial_watermarks = ilk_initial_watermarks, 3964 + .optimize_watermarks = ilk_optimize_watermarks, 3965 + .get_hw_state = ilk_wm_get_hw_state, 3966 + }; 3967 + 3968 + static const struct intel_wm_funcs vlv_wm_funcs = { 3969 + .compute_pipe_wm = vlv_compute_pipe_wm, 3970 + .compute_intermediate_wm = vlv_compute_intermediate_wm, 3971 + .initial_watermarks = vlv_initial_watermarks, 3972 + .optimize_watermarks = vlv_optimize_watermarks, 3973 + .atomic_update_watermarks = vlv_atomic_update_fifo, 3974 + .get_hw_state = vlv_wm_get_hw_state_and_sanitize, 3975 + }; 3976 + 3977 + static const struct intel_wm_funcs g4x_wm_funcs = { 3978 + .compute_pipe_wm = g4x_compute_pipe_wm, 3979 + .compute_intermediate_wm = g4x_compute_intermediate_wm, 3980 + .initial_watermarks = g4x_initial_watermarks, 3981 + .optimize_watermarks = g4x_optimize_watermarks, 3982 + .get_hw_state = g4x_wm_get_hw_state_and_sanitize, 3983 + }; 3984 + 3985 + static const struct intel_wm_funcs pnv_wm_funcs = { 3986 + .update_wm = pnv_update_wm, 3987 + }; 3988 + 3989 + static const struct intel_wm_funcs i965_wm_funcs = { 3990 + .update_wm = i965_update_wm, 3991 + }; 3992 + 3993 + static const struct intel_wm_funcs i9xx_wm_funcs = { 3994 + .update_wm = i9xx_update_wm, 3995 + }; 3996 + 3997 + static const struct intel_wm_funcs i845_wm_funcs = { 3998 + .update_wm = i845_update_wm, 3999 + }; 4000 + 4001 + static const struct intel_wm_funcs nop_funcs = { 4002 + }; 4003 + 4004 + void i9xx_wm_init(struct drm_i915_private *dev_priv) 4005 + { 4006 + /* For FIFO watermark updates */ 4007 + if (HAS_PCH_SPLIT(dev_priv)) { 4008 + ilk_setup_wm_latency(dev_priv); 4009 + 
dev_priv->display.funcs.wm = &ilk_wm_funcs; 4010 + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4011 + vlv_setup_wm_latency(dev_priv); 4012 + dev_priv->display.funcs.wm = &vlv_wm_funcs; 4013 + } else if (IS_G4X(dev_priv)) { 4014 + g4x_setup_wm_latency(dev_priv); 4015 + dev_priv->display.funcs.wm = &g4x_wm_funcs; 4016 + } else if (IS_PINEVIEW(dev_priv)) { 4017 + if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), 4018 + dev_priv->is_ddr3, 4019 + dev_priv->fsb_freq, 4020 + dev_priv->mem_freq)) { 4021 + drm_info(&dev_priv->drm, 4022 + "failed to find known CxSR latency " 4023 + "(found ddr%s fsb freq %d, mem freq %d), " 4024 + "disabling CxSR\n", 4025 + (dev_priv->is_ddr3 == 1) ? "3" : "2", 4026 + dev_priv->fsb_freq, dev_priv->mem_freq); 4027 + /* Disable CxSR and never update its watermark again */ 4028 + intel_set_memory_cxsr(dev_priv, false); 4029 + dev_priv->display.funcs.wm = &nop_funcs; 4030 + } else { 4031 + dev_priv->display.funcs.wm = &pnv_wm_funcs; 4032 + } 4033 + } else if (DISPLAY_VER(dev_priv) == 4) { 4034 + dev_priv->display.funcs.wm = &i965_wm_funcs; 4035 + } else if (DISPLAY_VER(dev_priv) == 3) { 4036 + dev_priv->display.funcs.wm = &i9xx_wm_funcs; 4037 + } else if (DISPLAY_VER(dev_priv) == 2) { 4038 + if (INTEL_NUM_PIPES(dev_priv) == 1) 4039 + dev_priv->display.funcs.wm = &i845_wm_funcs; 4040 + else 4041 + dev_priv->display.funcs.wm = &i9xx_wm_funcs; 4042 + } else { 4043 + drm_err(&dev_priv->drm, 4044 + "unexpected fall-through in %s\n", __func__); 4045 + dev_priv->display.funcs.wm = &nop_funcs; 4046 + } 4047 + }
+21
drivers/gpu/drm/i915/display/i9xx_wm.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __I9XX_WM_H__ 7 + #define __I9XX_WM_H__ 8 + 9 + #include <linux/types.h> 10 + 11 + struct drm_i915_private; 12 + struct intel_crtc_state; 13 + struct intel_plane_state; 14 + 15 + int ilk_wm_max_level(const struct drm_i915_private *i915); 16 + bool ilk_disable_lp_wm(struct drm_i915_private *i915); 17 + void ilk_wm_sanitize(struct drm_i915_private *i915); 18 + bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable); 19 + void i9xx_wm_init(struct drm_i915_private *i915); 20 + 21 + #endif /* __I9XX_WM_H__ */
+119 -193
drivers/gpu/drm/i915/display/icl_dsi.c
··· 45 45 #include "intel_dsi_vbt.h" 46 46 #include "intel_panel.h" 47 47 #include "intel_vdsc.h" 48 + #include "intel_vdsc_regs.h" 48 49 #include "skl_scaler.h" 49 50 #include "skl_universal_plane.h" 50 51 ··· 208 207 { 209 208 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 210 209 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 211 - u32 tmp, mode_flags; 210 + u32 mode_flags; 212 211 enum port port; 213 212 214 213 mode_flags = crtc_state->mode_flags; ··· 225 224 else 226 225 return; 227 226 228 - tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port)); 229 - tmp |= DSI_FRAME_UPDATE_REQUEST; 230 - intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp); 227 + intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST); 231 228 } 232 229 233 230 static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) ··· 233 234 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 234 235 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 235 236 enum phy phy; 236 - u32 tmp; 237 + u32 tmp, mask, val; 237 238 int lane; 238 239 239 240 for_each_dsi_phy(phy, intel_dsi->phys) { ··· 241 242 * Program voltage swing and pre-emphasis level values as per 242 243 * table in BSPEC under DDI buffer programing 243 244 */ 245 + mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK; 246 + val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE | 247 + RTERM_SELECT(0x6); 244 248 tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); 245 - tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); 246 - tmp |= SCALING_MODE_SEL(0x2); 247 - tmp |= TAP2_DISABLE | TAP3_DISABLE; 248 - tmp |= RTERM_SELECT(0x6); 249 + tmp &= ~mask; 250 + tmp |= val; 249 251 intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); 252 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val); 250 253 251 - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); 252 - tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); 253 - tmp |= 
SCALING_MODE_SEL(0x2); 254 - tmp |= TAP2_DISABLE | TAP3_DISABLE; 255 - tmp |= RTERM_SELECT(0x6); 256 - intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); 257 - 254 + mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | 255 + RCOMP_SCALAR_MASK; 256 + val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) | 257 + RCOMP_SCALAR(0x98); 258 258 tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); 259 - tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | 260 - RCOMP_SCALAR_MASK); 261 - tmp |= SWING_SEL_UPPER(0x2); 262 - tmp |= SWING_SEL_LOWER(0x2); 263 - tmp |= RCOMP_SCALAR(0x98); 259 + tmp &= ~mask; 260 + tmp |= val; 264 261 intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); 262 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val); 265 263 266 - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy)); 267 - tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | 268 - RCOMP_SCALAR_MASK); 269 - tmp |= SWING_SEL_UPPER(0x2); 270 - tmp |= SWING_SEL_LOWER(0x2); 271 - tmp |= RCOMP_SCALAR(0x98); 272 - intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp); 264 + mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | 265 + CURSOR_COEFF_MASK; 266 + val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) | 267 + CURSOR_COEFF(0x3f); 268 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val); 273 269 274 - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy)); 275 - tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | 276 - CURSOR_COEFF_MASK); 277 - tmp |= POST_CURSOR_1(0x0); 278 - tmp |= POST_CURSOR_2(0x0); 279 - tmp |= CURSOR_COEFF(0x3f); 280 - intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp); 281 - 282 - for (lane = 0; lane <= 3; lane++) { 283 - /* Bspec: must not use GRP register for write */ 284 - tmp = intel_de_read(dev_priv, 285 - ICL_PORT_TX_DW4_LN(lane, phy)); 286 - tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | 287 - CURSOR_COEFF_MASK); 288 - tmp |= POST_CURSOR_1(0x0); 289 - tmp |= POST_CURSOR_2(0x0); 290 - tmp |= CURSOR_COEFF(0x3f); 291 - 
intel_de_write(dev_priv, 292 - ICL_PORT_TX_DW4_LN(lane, phy), tmp); 293 - } 270 + /* Bspec: must not use GRP register for write */ 271 + for (lane = 0; lane <= 3; lane++) 272 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), 273 + mask, val); 294 274 } 295 275 } 296 276 ··· 278 300 { 279 301 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 280 302 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 303 + i915_reg_t dss_ctl1_reg, dss_ctl2_reg; 281 304 u32 dss_ctl1; 282 305 283 - dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1); 306 + /* FIXME: Move all DSS handling to intel_vdsc.c */ 307 + if (DISPLAY_VER(dev_priv) >= 12) { 308 + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 309 + 310 + dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe); 311 + dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe); 312 + } else { 313 + dss_ctl1_reg = DSS_CTL1; 314 + dss_ctl2_reg = DSS_CTL2; 315 + } 316 + 317 + dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg); 284 318 dss_ctl1 |= SPLITTER_ENABLE; 285 319 dss_ctl1 &= ~OVERLAP_PIXELS_MASK; 286 320 dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); ··· 300 310 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { 301 311 const struct drm_display_mode *adjusted_mode = 302 312 &pipe_config->hw.adjusted_mode; 303 - u32 dss_ctl2; 304 313 u16 hactive = adjusted_mode->crtc_hdisplay; 305 314 u16 dl_buffer_depth; 306 315 ··· 312 323 313 324 dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK; 314 325 dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); 315 - dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2); 316 - dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK; 317 - dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); 318 - intel_de_write(dev_priv, DSS_CTL2, dss_ctl2); 326 + intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK, 327 + RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth)); 319 328 } else { 320 329 /* Interleave */ 321 330 dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; 322 331 } 323 332 324 - 
intel_de_write(dev_priv, DSS_CTL1, dss_ctl1); 333 + intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1); 325 334 } 326 335 327 336 /* aka DSI 8X clock */ ··· 399 412 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 400 413 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 401 414 enum port port; 402 - u32 tmp; 403 415 404 - for_each_dsi_port(port, intel_dsi->ports) { 405 - tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port)); 406 - tmp |= COMBO_PHY_MODE_DSI; 407 - intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp); 408 - } 416 + for_each_dsi_port(port, intel_dsi->ports) 417 + intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), 418 + 0, COMBO_PHY_MODE_DSI); 409 419 410 420 get_dsi_io_power_domains(dev_priv, intel_dsi); 411 421 } ··· 428 444 429 445 /* Step 4b(i) set loadgen select for transmit and aux lanes */ 430 446 for_each_dsi_phy(phy, intel_dsi->phys) { 431 - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy)); 432 - tmp &= ~LOADGEN_SELECT; 433 - intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp); 434 - for (lane = 0; lane <= 3; lane++) { 435 - tmp = intel_de_read(dev_priv, 436 - ICL_PORT_TX_DW4_LN(lane, phy)); 437 - tmp &= ~LOADGEN_SELECT; 438 - if (lane != 2) 439 - tmp |= LOADGEN_SELECT; 440 - intel_de_write(dev_priv, 441 - ICL_PORT_TX_DW4_LN(lane, phy), tmp); 442 - } 447 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0); 448 + for (lane = 0; lane <= 3; lane++) 449 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), 450 + LOADGEN_SELECT, lane != 2 ? 
LOADGEN_SELECT : 0); 443 451 } 444 452 445 453 /* Step 4b(ii) set latency optimization for transmit and aux lanes */ 446 454 for_each_dsi_phy(phy, intel_dsi->phys) { 447 - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy)); 448 - tmp &= ~FRC_LATENCY_OPTIM_MASK; 449 - tmp |= FRC_LATENCY_OPTIM_VAL(0x5); 450 - intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp); 455 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), 456 + FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5)); 451 457 tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); 452 458 tmp &= ~FRC_LATENCY_OPTIM_MASK; 453 459 tmp |= FRC_LATENCY_OPTIM_VAL(0x5); ··· 445 471 446 472 /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */ 447 473 if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) { 448 - tmp = intel_de_read(dev_priv, 449 - ICL_PORT_PCS_DW1_AUX(phy)); 450 - tmp &= ~LATENCY_OPTIM_MASK; 451 - tmp |= LATENCY_OPTIM_VAL(0); 452 - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), 453 - tmp); 474 + intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), 475 + LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0)); 454 476 455 477 tmp = intel_de_read(dev_priv, 456 478 ICL_PORT_PCS_DW1_LN(0, phy)); ··· 471 501 tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); 472 502 tmp &= ~COMMON_KEEPER_EN; 473 503 intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp); 474 - tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy)); 475 - tmp &= ~COMMON_KEEPER_EN; 476 - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp); 504 + intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0); 477 505 } 478 506 479 507 /* ··· 479 511 * Note: loadgen select program is done 480 512 * as part of lane phy sequence configuration 481 513 */ 482 - for_each_dsi_phy(phy, intel_dsi->phys) { 483 - tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); 484 - tmp |= SUS_CLOCK_CONFIG; 485 - intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp); 486 - } 514 + for_each_dsi_phy(phy, intel_dsi->phys) 515 + 
intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG); 487 516 488 517 /* Clear training enable to change swing values */ 489 518 for_each_dsi_phy(phy, intel_dsi->phys) { 490 519 tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); 491 520 tmp &= ~TX_TRAINING_EN; 492 521 intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); 493 - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); 494 - tmp &= ~TX_TRAINING_EN; 495 - intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); 522 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0); 496 523 } 497 524 498 525 /* Program swing and de-emphasis */ ··· 498 535 tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); 499 536 tmp |= TX_TRAINING_EN; 500 537 intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); 501 - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); 502 - tmp |= TX_TRAINING_EN; 503 - intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); 538 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN); 504 539 } 505 540 } 506 541 ··· 506 545 { 507 546 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 508 547 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 509 - u32 tmp; 510 548 enum port port; 511 549 512 550 for_each_dsi_port(port, intel_dsi->ports) { 513 - tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); 514 - tmp |= DDI_BUF_CTL_ENABLE; 515 - intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp); 551 + intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); 516 552 517 553 if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & 518 554 DDI_BUF_IS_IDLE), ··· 525 567 { 526 568 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 527 569 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 528 - u32 tmp; 529 570 enum port port; 530 571 enum phy phy; 531 572 532 573 /* Program T-INIT master registers */ 533 - for_each_dsi_port(port, intel_dsi->ports) { 534 - tmp = intel_de_read(dev_priv, 
ICL_DSI_T_INIT_MASTER(port)); 535 - tmp &= ~DSI_T_INIT_MASTER_MASK; 536 - tmp |= intel_dsi->init_count; 537 - intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp); 538 - } 574 + for_each_dsi_port(port, intel_dsi->ports) 575 + intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port), 576 + DSI_T_INIT_MASTER_MASK, intel_dsi->init_count); 539 577 540 578 /* Program DPHY clock lanes timings */ 541 579 for_each_dsi_port(port, intel_dsi->ports) { ··· 562 608 if (DISPLAY_VER(dev_priv) == 11) { 563 609 if (afe_clk(encoder, crtc_state) <= 800000) { 564 610 for_each_dsi_port(port, intel_dsi->ports) { 565 - tmp = intel_de_read(dev_priv, 566 - DPHY_TA_TIMING_PARAM(port)); 567 - tmp &= ~TA_SURE_MASK; 568 - tmp |= TA_SURE_OVERRIDE | TA_SURE(0); 569 - intel_de_write(dev_priv, 570 - DPHY_TA_TIMING_PARAM(port), 571 - tmp); 611 + intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port), 612 + TA_SURE_MASK, 613 + TA_SURE_OVERRIDE | TA_SURE(0)); 572 614 573 615 /* shadow register inside display core */ 574 - tmp = intel_de_read(dev_priv, 575 - DSI_TA_TIMING_PARAM(port)); 576 - tmp &= ~TA_SURE_MASK; 577 - tmp |= TA_SURE_OVERRIDE | TA_SURE(0); 578 - intel_de_write(dev_priv, 579 - DSI_TA_TIMING_PARAM(port), tmp); 616 + intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port), 617 + TA_SURE_MASK, 618 + TA_SURE_OVERRIDE | TA_SURE(0)); 580 619 } 581 620 } 582 621 } 583 622 584 623 if (IS_JSL_EHL(dev_priv)) { 585 - for_each_dsi_phy(phy, intel_dsi->phys) { 586 - tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy)); 587 - tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP; 588 - intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp); 589 - } 624 + for_each_dsi_phy(phy, intel_dsi->phys) 625 + intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy), 626 + 0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP); 590 627 } 591 628 } 592 629 ··· 769 824 if (intel_dsi->dual_link) { 770 825 for_each_dsi_port(port, intel_dsi->ports) { 771 826 dsi_trans = dsi_port_to_transcoder(port); 772 - tmp = intel_de_read(dev_priv, 773 - TRANS_DDI_FUNC_CTL2(dsi_trans)); 774 - tmp 
|= PORT_SYNC_MODE_ENABLE; 775 - intel_de_write(dev_priv, 776 - TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); 827 + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans), 828 + 0, PORT_SYNC_MODE_ENABLE); 777 829 } 778 830 779 831 /* configure stream splitting */ ··· 900 958 /* program TRANS_HTOTAL register */ 901 959 for_each_dsi_port(port, intel_dsi->ports) { 902 960 dsi_trans = dsi_port_to_transcoder(port); 903 - intel_de_write(dev_priv, HTOTAL(dsi_trans), 904 - (hactive - 1) | ((htotal - 1) << 16)); 961 + intel_de_write(dev_priv, TRANS_HTOTAL(dsi_trans), 962 + HACTIVE(hactive - 1) | HTOTAL(htotal - 1)); 905 963 } 906 964 907 965 /* TRANS_HSYNC register to be programmed only for video mode */ ··· 923 981 924 982 for_each_dsi_port(port, intel_dsi->ports) { 925 983 dsi_trans = dsi_port_to_transcoder(port); 926 - intel_de_write(dev_priv, HSYNC(dsi_trans), 927 - (hsync_start - 1) | ((hsync_end - 1) << 16)); 984 + intel_de_write(dev_priv, TRANS_HSYNC(dsi_trans), 985 + HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1)); 928 986 } 929 987 } 930 988 ··· 937 995 * struct drm_display_mode. 
938 996 * For interlace mode: program required pixel minus 2 939 997 */ 940 - intel_de_write(dev_priv, VTOTAL(dsi_trans), 941 - (vactive - 1) | ((vtotal - 1) << 16)); 998 + intel_de_write(dev_priv, TRANS_VTOTAL(dsi_trans), 999 + VACTIVE(vactive - 1) | VTOTAL(vtotal - 1)); 942 1000 } 943 1001 944 1002 if (vsync_end < vsync_start || vsync_end > vtotal) ··· 951 1009 if (is_vid_mode(intel_dsi)) { 952 1010 for_each_dsi_port(port, intel_dsi->ports) { 953 1011 dsi_trans = dsi_port_to_transcoder(port); 954 - intel_de_write(dev_priv, VSYNC(dsi_trans), 955 - (vsync_start - 1) | ((vsync_end - 1) << 16)); 1012 + intel_de_write(dev_priv, TRANS_VSYNC(dsi_trans), 1013 + VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1)); 956 1014 } 957 1015 } 958 1016 ··· 965 1023 if (is_vid_mode(intel_dsi)) { 966 1024 for_each_dsi_port(port, intel_dsi->ports) { 967 1025 dsi_trans = dsi_port_to_transcoder(port); 968 - intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), 1026 + intel_de_write(dev_priv, TRANS_VSYNCSHIFT(dsi_trans), 969 1027 vsync_shift); 970 1028 } 971 1029 } 972 1030 973 - /* program TRANS_VBLANK register, should be same as vtotal programmed */ 1031 + /* 1032 + * program TRANS_VBLANK register, should be same as vtotal programmed 1033 + * 1034 + * FIXME get rid of these local hacks and do it right, 1035 + * this will not handle eg. delayed vblank correctly. 
1036 + */ 974 1037 if (DISPLAY_VER(dev_priv) >= 12) { 975 1038 for_each_dsi_port(port, intel_dsi->ports) { 976 1039 dsi_trans = dsi_port_to_transcoder(port); 977 - intel_de_write(dev_priv, VBLANK(dsi_trans), 978 - (vactive - 1) | ((vtotal - 1) << 16)); 1040 + intel_de_write(dev_priv, TRANS_VBLANK(dsi_trans), 1041 + VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1)); 979 1042 } 980 1043 } 981 1044 } ··· 991 1044 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 992 1045 enum port port; 993 1046 enum transcoder dsi_trans; 994 - u32 tmp; 995 1047 996 1048 for_each_dsi_port(port, intel_dsi->ports) { 997 1049 dsi_trans = dsi_port_to_transcoder(port); 998 - tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); 999 - tmp |= PIPECONF_ENABLE; 1000 - intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp); 1050 + intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), 0, TRANSCONF_ENABLE); 1001 1051 1002 1052 /* wait for transcoder to be enabled */ 1003 - if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans), 1004 - PIPECONF_STATE_ENABLE, 10)) 1053 + if (intel_de_wait_for_set(dev_priv, TRANSCONF(dsi_trans), 1054 + TRANSCONF_STATE_ENABLE, 10)) 1005 1055 drm_err(&dev_priv->drm, 1006 1056 "DSI transcoder not enabled\n"); 1007 1057 } ··· 1011 1067 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1012 1068 enum port port; 1013 1069 enum transcoder dsi_trans; 1014 - u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; 1070 + u32 hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; 1015 1071 1016 1072 /* 1017 1073 * escape clock count calculation: ··· 1031 1087 dsi_trans = dsi_port_to_transcoder(port); 1032 1088 1033 1089 /* program hst_tx_timeout */ 1034 - tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans)); 1035 - tmp &= ~HSTX_TIMEOUT_VALUE_MASK; 1036 - tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout); 1037 - intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp); 1090 + intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans), 1091 + HSTX_TIMEOUT_VALUE_MASK, 1092 + 
HSTX_TIMEOUT_VALUE(hs_tx_timeout)); 1038 1093 1039 1094 /* FIXME: DSI_CALIB_TO */ 1040 1095 1041 1096 /* program lp_rx_host timeout */ 1042 - tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans)); 1043 - tmp &= ~LPRX_TIMEOUT_VALUE_MASK; 1044 - tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout); 1045 - intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp); 1097 + intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), 1098 + LPRX_TIMEOUT_VALUE_MASK, 1099 + LPRX_TIMEOUT_VALUE(lp_rx_timeout)); 1046 1100 1047 1101 /* FIXME: DSI_PWAIT_TO */ 1048 1102 1049 1103 /* program turn around timeout */ 1050 - tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans)); 1051 - tmp &= ~TA_TIMEOUT_VALUE_MASK; 1052 - tmp |= TA_TIMEOUT_VALUE(ta_timeout); 1053 - intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp); 1104 + intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans), 1105 + TA_TIMEOUT_VALUE_MASK, 1106 + TA_TIMEOUT_VALUE(ta_timeout)); 1054 1107 } 1055 1108 } 1056 1109 ··· 1251 1310 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1252 1311 enum port port; 1253 1312 enum transcoder dsi_trans; 1254 - u32 tmp; 1255 1313 1256 1314 for_each_dsi_port(port, intel_dsi->ports) { 1257 1315 dsi_trans = dsi_port_to_transcoder(port); 1258 1316 1259 1317 /* disable transcoder */ 1260 - tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); 1261 - tmp &= ~PIPECONF_ENABLE; 1262 - intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp); 1318 + intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), TRANSCONF_ENABLE, 0); 1263 1319 1264 1320 /* wait for transcoder to be disabled */ 1265 - if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans), 1266 - PIPECONF_STATE_ENABLE, 50)) 1321 + if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dsi_trans), 1322 + TRANSCONF_STATE_ENABLE, 50)) 1267 1323 drm_err(&dev_priv->drm, 1268 1324 "DSI trancoder not disabled\n"); 1269 1325 } ··· 1288 1350 1289 1351 /* disable periodic update mode */ 1290 1352 if (is_cmd_mode(intel_dsi)) { 1291 - for_each_dsi_port(port, intel_dsi->ports) { 1292 - tmp = 
intel_de_read(dev_priv, DSI_CMD_FRMCTL(port)); 1293 - tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE; 1294 - intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp); 1295 - } 1353 + for_each_dsi_port(port, intel_dsi->ports) 1354 + intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 1355 + DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0); 1296 1356 } 1297 1357 1298 1358 /* put dsi link in ULPS */ ··· 1310 1374 /* disable ddi function */ 1311 1375 for_each_dsi_port(port, intel_dsi->ports) { 1312 1376 dsi_trans = dsi_port_to_transcoder(port); 1313 - tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)); 1314 - tmp &= ~TRANS_DDI_FUNC_ENABLE; 1315 - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp); 1377 + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), 1378 + TRANS_DDI_FUNC_ENABLE, 0); 1316 1379 } 1317 1380 1318 1381 /* disable port sync mode if dual link */ 1319 1382 if (intel_dsi->dual_link) { 1320 1383 for_each_dsi_port(port, intel_dsi->ports) { 1321 1384 dsi_trans = dsi_port_to_transcoder(port); 1322 - tmp = intel_de_read(dev_priv, 1323 - TRANS_DDI_FUNC_CTL2(dsi_trans)); 1324 - tmp &= ~PORT_SYNC_MODE_ENABLE; 1325 - intel_de_write(dev_priv, 1326 - TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); 1385 + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans), 1386 + PORT_SYNC_MODE_ENABLE, 0); 1327 1387 } 1328 1388 } 1329 1389 } ··· 1328 1396 { 1329 1397 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1330 1398 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1331 - u32 tmp; 1332 1399 enum port port; 1333 1400 1334 1401 gen11_dsi_ungate_clocks(encoder); 1335 1402 for_each_dsi_port(port, intel_dsi->ports) { 1336 - tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); 1337 - tmp &= ~DDI_BUF_CTL_ENABLE; 1338 - intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp); 1403 + intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); 1339 1404 1340 1405 if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) & 1341 1406 DDI_BUF_IS_IDLE), ··· 1349 1420 struct 
drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1350 1421 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1351 1422 enum port port; 1352 - u32 tmp; 1353 1423 1354 1424 for_each_dsi_port(port, intel_dsi->ports) { 1355 1425 intel_wakeref_t wakeref; ··· 1362 1434 } 1363 1435 1364 1436 /* set mode to DDI */ 1365 - for_each_dsi_port(port, intel_dsi->ports) { 1366 - tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port)); 1367 - tmp &= ~COMBO_PHY_MODE_DSI; 1368 - intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp); 1369 - } 1437 + for_each_dsi_port(port, intel_dsi->ports) 1438 + intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), 1439 + COMBO_PHY_MODE_DSI, 0); 1370 1440 } 1371 1441 1372 1442 static void gen11_dsi_disable(struct intel_atomic_state *state, ··· 1680 1754 goto out; 1681 1755 } 1682 1756 1683 - tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); 1684 - ret = tmp & PIPECONF_ENABLE; 1757 + tmp = intel_de_read(dev_priv, TRANSCONF(dsi_trans)); 1758 + ret = tmp & TRANSCONF_ENABLE; 1685 1759 } 1686 1760 out: 1687 1761 intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
+11 -73
drivers/gpu/drm/i915/display/intel_atomic_plane.c
··· 34 34 #include <drm/drm_atomic_helper.h> 35 35 #include <drm/drm_fourcc.h> 36 36 37 - #include "gt/intel_rps.h" 38 - 39 37 #include "i915_config.h" 40 38 #include "intel_atomic_plane.h" 41 39 #include "intel_cdclk.h" 40 + #include "intel_display_rps.h" 42 41 #include "intel_display_trace.h" 43 42 #include "intel_display_types.h" 44 43 #include "intel_fb.h" ··· 362 363 crtc_state->scaled_planes &= ~BIT(plane->id); 363 364 crtc_state->nv12_planes &= ~BIT(plane->id); 364 365 crtc_state->c8_planes &= ~BIT(plane->id); 366 + crtc_state->async_flip_planes &= ~BIT(plane->id); 365 367 crtc_state->data_rate[plane->id] = 0; 366 368 crtc_state->data_rate_y[plane->id] = 0; 367 369 crtc_state->rel_data_rate[plane->id] = 0; ··· 582 582 intel_plane_is_scaled(new_plane_state)))) 583 583 new_crtc_state->disable_lp_wm = true; 584 584 585 - if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) 585 + if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) { 586 586 new_crtc_state->do_async_flip = true; 587 + new_crtc_state->async_flip_planes |= BIT(plane->id); 588 + } 587 589 588 590 return 0; 589 591 } ··· 940 938 return 0; 941 939 } 942 940 943 - struct wait_rps_boost { 944 - struct wait_queue_entry wait; 945 - 946 - struct drm_crtc *crtc; 947 - struct i915_request *request; 948 - }; 949 - 950 - static int do_rps_boost(struct wait_queue_entry *_wait, 951 - unsigned mode, int sync, void *key) 952 - { 953 - struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 954 - struct i915_request *rq = wait->request; 955 - 956 - /* 957 - * If we missed the vblank, but the request is already running it 958 - * is reasonable to assume that it will complete before the next 959 - * vblank without our intervention, so leave RPS alone. 
960 - */ 961 - if (!i915_request_started(rq)) 962 - intel_rps_boost(rq); 963 - i915_request_put(rq); 964 - 965 - drm_crtc_vblank_put(wait->crtc); 966 - 967 - list_del(&wait->wait.entry); 968 - kfree(wait); 969 - return 1; 970 - } 971 - 972 - static void add_rps_boost_after_vblank(struct drm_crtc *crtc, 973 - struct dma_fence *fence) 974 - { 975 - struct wait_rps_boost *wait; 976 - 977 - if (!dma_fence_is_i915(fence)) 978 - return; 979 - 980 - if (DISPLAY_VER(to_i915(crtc->dev)) < 6) 981 - return; 982 - 983 - if (drm_crtc_vblank_get(crtc)) 984 - return; 985 - 986 - wait = kmalloc(sizeof(*wait), GFP_KERNEL); 987 - if (!wait) { 988 - drm_crtc_vblank_put(crtc); 989 - return; 990 - } 991 - 992 - wait->request = to_request(dma_fence_get(fence)); 993 - wait->crtc = crtc; 994 - 995 - wait->wait.func = do_rps_boost; 996 - wait->wait.flags = 0; 997 - 998 - add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 999 - } 1000 - 1001 941 /** 1002 942 * intel_prepare_plane_fb - Prepare fb for usage on plane 1003 943 * @_plane: drm plane to prepare for ··· 1030 1086 dma_resv_iter_begin(&cursor, obj->base.resv, 1031 1087 DMA_RESV_USAGE_WRITE); 1032 1088 dma_resv_for_each_fence_unlocked(&cursor, fence) { 1033 - add_rps_boost_after_vblank(new_plane_state->hw.crtc, 1034 - fence); 1089 + intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, 1090 + fence); 1035 1091 } 1036 1092 dma_resv_iter_end(&cursor); 1037 1093 } else { 1038 - add_rps_boost_after_vblank(new_plane_state->hw.crtc, 1039 - new_plane_state->uapi.fence); 1094 + intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, 1095 + new_plane_state->uapi.fence); 1040 1096 } 1041 1097 1042 1098 /* ··· 1047 1103 * that are not quite steady state without resorting to forcing 1048 1104 * maximum clocks following a vblank miss (see do_rps_boost()). 
1049 1105 */ 1050 - if (!state->rps_interactive) { 1051 - intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true); 1052 - state->rps_interactive = true; 1053 - } 1106 + intel_display_rps_mark_interactive(dev_priv, state, true); 1054 1107 1055 1108 return 0; 1056 1109 ··· 1078 1137 if (!obj) 1079 1138 return; 1080 1139 1081 - if (state->rps_interactive) { 1082 - intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false); 1083 - state->rps_interactive = false; 1084 - } 1140 + intel_display_rps_mark_interactive(dev_priv, state, false); 1085 1141 1086 1142 /* Should only be called after a successful intel_prepare_plane_fb()! */ 1087 1143 intel_plane_unpin_fb(old_plane_state);
+42 -44
drivers/gpu/drm/i915/display/intel_audio.c
··· 581 581 const struct intel_crtc_state *crtc_state) 582 582 { 583 583 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 584 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 585 - enum pipe pipe = crtc->pipe; 584 + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 586 585 unsigned int hblank_early_prog, samples_room; 587 586 unsigned int val; 588 587 ··· 591 592 val = intel_de_read(i915, AUD_CONFIG_BE); 592 593 593 594 if (DISPLAY_VER(i915) == 11) 594 - val |= HBLANK_EARLY_ENABLE_ICL(pipe); 595 + val |= HBLANK_EARLY_ENABLE_ICL(cpu_transcoder); 595 596 else if (DISPLAY_VER(i915) >= 12) 596 - val |= HBLANK_EARLY_ENABLE_TGL(pipe); 597 + val |= HBLANK_EARLY_ENABLE_TGL(cpu_transcoder); 597 598 598 599 if (crtc_state->dsc.compression_enable && 599 600 crtc_state->hw.adjusted_mode.hdisplay >= 3840 && 600 601 crtc_state->hw.adjusted_mode.vdisplay >= 2160) { 601 602 /* Get hblank early enable value required */ 602 - val &= ~HBLANK_START_COUNT_MASK(pipe); 603 + val &= ~HBLANK_START_COUNT_MASK(cpu_transcoder); 603 604 hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state); 604 605 if (hblank_early_prog < 32) 605 - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32); 606 + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_32); 606 607 else if (hblank_early_prog < 64) 607 - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64); 608 + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_64); 608 609 else if (hblank_early_prog < 96) 609 - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96); 610 + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_96); 610 611 else 611 - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128); 612 + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_128); 612 613 613 614 /* Get samples room value required */ 614 - val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe); 615 + val &= ~NUMBER_SAMPLES_PER_LINE_MASK(cpu_transcoder); 615 616 samples_room = 
calc_samples_room(crtc_state); 616 617 if (samples_room < 3) 617 - val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room); 618 + val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, samples_room); 618 619 else /* Program 0 i.e "All Samples available in buffer" */ 619 - val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0); 620 + val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, 0x0); 620 621 } 621 622 622 623 intel_de_write(i915, AUD_CONFIG_BE, val); ··· 811 812 struct i915_audio_component *acomp = i915->display.audio.component; 812 813 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 813 814 struct intel_connector *connector = to_intel_connector(conn_state->connector); 815 + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 814 816 struct intel_audio_state *audio_state; 815 817 enum port port = encoder->port; 816 - enum pipe pipe = crtc->pipe; 817 818 818 819 if (!crtc_state->has_audio) 819 820 return; ··· 831 832 832 833 mutex_lock(&i915->display.audio.mutex); 833 834 834 - audio_state = &i915->display.audio.state[pipe]; 835 + audio_state = &i915->display.audio.state[cpu_transcoder]; 835 836 836 837 audio_state->encoder = encoder; 837 838 BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld)); ··· 841 842 842 843 if (acomp && acomp->base.audio_ops && 843 844 acomp->base.audio_ops->pin_eld_notify) { 844 - /* audio drivers expect pipe = -1 to indicate Non-MST cases */ 845 + /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */ 845 846 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) 846 - pipe = -1; 847 + cpu_transcoder = -1; 847 848 acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr, 848 - (int)port, (int)pipe); 849 + (int)port, (int)cpu_transcoder); 849 850 } 850 851 851 - intel_lpe_audio_notify(i915, pipe, port, crtc_state->eld, 852 + intel_lpe_audio_notify(i915, cpu_transcoder, port, crtc_state->eld, 852 853 crtc_state->port_clock, 853 854 intel_crtc_has_dp_encoder(crtc_state)); 854 855 } ··· 870 871 
struct i915_audio_component *acomp = i915->display.audio.component; 871 872 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 872 873 struct intel_connector *connector = to_intel_connector(old_conn_state->connector); 874 + enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 873 875 struct intel_audio_state *audio_state; 874 876 enum port port = encoder->port; 875 - enum pipe pipe = crtc->pipe; 876 877 877 878 if (!old_crtc_state->has_audio) 878 879 return; ··· 889 890 890 891 mutex_lock(&i915->display.audio.mutex); 891 892 892 - audio_state = &i915->display.audio.state[pipe]; 893 + audio_state = &i915->display.audio.state[cpu_transcoder]; 893 894 894 895 audio_state->encoder = NULL; 895 896 memset(audio_state->eld, 0, sizeof(audio_state->eld)); ··· 898 899 899 900 if (acomp && acomp->base.audio_ops && 900 901 acomp->base.audio_ops->pin_eld_notify) { 901 - /* audio drivers expect pipe = -1 to indicate Non-MST cases */ 902 + /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */ 902 903 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) 903 - pipe = -1; 904 + cpu_transcoder = -1; 904 905 acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr, 905 - (int)port, (int)pipe); 906 + (int)port, (int)cpu_transcoder); 906 907 } 907 908 908 - intel_lpe_audio_notify(i915, pipe, port, NULL, 0, false); 909 + intel_lpe_audio_notify(i915, cpu_transcoder, port, NULL, 0, false); 909 910 } 910 911 911 912 static void intel_acomp_get_config(struct intel_encoder *encoder, 912 913 struct intel_crtc_state *crtc_state) 913 914 { 914 915 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 915 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 916 + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 916 917 struct intel_audio_state *audio_state; 917 - enum pipe pipe = crtc->pipe; 918 918 919 919 mutex_lock(&i915->display.audio.mutex); 920 920 921 - audio_state = 
&i915->display.audio.state[pipe]; 921 + audio_state = &i915->display.audio.state[cpu_transcoder]; 922 922 923 923 if (audio_state->encoder) 924 924 memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld)); ··· 1145 1147 } 1146 1148 1147 1149 /* 1148 - * get the intel audio state according to the parameter port and pipe 1149 - * MST & (pipe >= 0): return the audio.state[pipe].encoder], 1150 + * get the intel audio state according to the parameter port and cpu_transcoder 1151 + * MST & (cpu_transcoder >= 0): return the audio.state[cpu_transcoder].encoder], 1150 1152 * when port is matched 1151 - * MST & (pipe < 0): this is invalid 1152 - * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry) 1153 + * MST & (cpu_transcoder < 0): this is invalid 1154 + * Non-MST & (cpu_transcoder >= 0): only cpu_transcoder = 0 (the first device entry) 1153 1155 * will get the right intel_encoder with port matched 1154 - * Non-MST & (pipe < 0): get the right intel_encoder with port matched 1156 + * Non-MST & (cpu_transcoder < 0): get the right intel_encoder with port matched 1155 1157 */ 1156 1158 static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915, 1157 - int port, int pipe) 1159 + int port, int cpu_transcoder) 1158 1160 { 1159 1161 /* MST */ 1160 - if (pipe >= 0) { 1162 + if (cpu_transcoder >= 0) { 1161 1163 struct intel_audio_state *audio_state; 1162 1164 struct intel_encoder *encoder; 1163 1165 1164 1166 if (drm_WARN_ON(&i915->drm, 1165 - pipe >= ARRAY_SIZE(i915->display.audio.state))) 1167 + cpu_transcoder >= ARRAY_SIZE(i915->display.audio.state))) 1166 1168 return NULL; 1167 1169 1168 - audio_state = &i915->display.audio.state[pipe]; 1170 + audio_state = &i915->display.audio.state[cpu_transcoder]; 1169 1171 encoder = audio_state->encoder; 1170 1172 1171 1173 if (encoder && encoder->port == port && ··· 1174 1176 } 1175 1177 1176 1178 /* Non-MST */ 1177 - if (pipe > 0) 1179 + if (cpu_transcoder > 0) 1178 1180 return NULL; 1179 1181 
1180 - for_each_pipe(i915, pipe) { 1182 + for_each_cpu_transcoder(i915, cpu_transcoder) { 1181 1183 struct intel_audio_state *audio_state; 1182 1184 struct intel_encoder *encoder; 1183 1185 1184 - audio_state = &i915->display.audio.state[pipe]; 1186 + audio_state = &i915->display.audio.state[cpu_transcoder]; 1185 1187 encoder = audio_state->encoder; 1186 1188 1187 1189 if (encoder && encoder->port == port && ··· 1193 1195 } 1194 1196 1195 1197 static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, 1196 - int pipe, int rate) 1198 + int cpu_transcoder, int rate) 1197 1199 { 1198 1200 struct drm_i915_private *i915 = kdev_to_i915(kdev); 1199 1201 struct i915_audio_component *acomp = i915->display.audio.component; ··· 1209 1211 cookie = i915_audio_component_get_power(kdev); 1210 1212 mutex_lock(&i915->display.audio.mutex); 1211 1213 1212 - audio_state = find_audio_state(i915, port, pipe); 1214 + audio_state = find_audio_state(i915, port, cpu_transcoder); 1213 1215 if (!audio_state) { 1214 1216 drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port)); 1215 1217 err = -ENODEV; ··· 1221 1223 /* FIXME stop using the legacy crtc pointer */ 1222 1224 crtc = to_intel_crtc(encoder->base.crtc); 1223 1225 1224 - /* port must be valid now, otherwise the pipe will be invalid */ 1226 + /* port must be valid now, otherwise the cpu_transcoder will be invalid */ 1225 1227 acomp->aud_sample_rate[port] = rate; 1226 1228 1227 1229 /* FIXME get rid of the crtc->config stuff */ ··· 1234 1236 } 1235 1237 1236 1238 static int i915_audio_component_get_eld(struct device *kdev, int port, 1237 - int pipe, bool *enabled, 1239 + int cpu_transcoder, bool *enabled, 1238 1240 unsigned char *buf, int max_bytes) 1239 1241 { 1240 1242 struct drm_i915_private *i915 = kdev_to_i915(kdev); ··· 1243 1245 1244 1246 mutex_lock(&i915->display.audio.mutex); 1245 1247 1246 - audio_state = find_audio_state(i915, port, pipe); 1248 + audio_state = find_audio_state(i915, port, 
cpu_transcoder); 1247 1249 if (!audio_state) { 1248 1250 drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port)); 1249 1251 mutex_unlock(&i915->display.audio.mutex);
+94 -67
drivers/gpu/drm/i915/display/intel_backlight.c
··· 105 105 struct drm_i915_private *i915 = to_i915(connector->base.dev); 106 106 struct intel_panel *panel = &connector->panel; 107 107 108 - drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val); 108 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight PWM = %d\n", 109 + connector->base.base.id, connector->base.name, val); 109 110 panel->backlight.pwm_funcs->set(conn_state, val); 110 111 } 111 112 ··· 284 283 struct drm_i915_private *i915 = to_i915(connector->base.dev); 285 284 struct intel_panel *panel = &connector->panel; 286 285 287 - drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level); 286 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n", 287 + connector->base.base.id, connector->base.name, level); 288 288 289 289 panel->backlight.funcs->set(conn_state, level); 290 290 } ··· 347 345 */ 348 346 tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2); 349 347 if (tmp & BLM_PWM_ENABLE) { 350 - drm_dbg_kms(&i915->drm, "cpu backlight was enabled, disabling\n"); 348 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n", 349 + connector->base.base.id, connector->base.name); 351 350 intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); 352 351 } 353 352 354 - tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1); 355 - intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); 353 + intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0); 356 354 } 357 355 358 356 static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) 359 357 { 360 358 struct intel_connector *connector = to_intel_connector(old_conn_state->connector); 361 359 struct drm_i915_private *i915 = to_i915(connector->base.dev); 362 - u32 tmp; 363 360 364 361 intel_backlight_set_pwm_level(old_conn_state, val); 365 362 366 - tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2); 367 - intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); 363 + intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0); 368 
364 369 - tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1); 370 - intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); 365 + intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0); 371 366 } 372 367 373 368 static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) ··· 375 376 static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) 376 377 { 377 378 struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev); 378 - u32 tmp; 379 379 380 380 intel_backlight_set_pwm_level(old_conn_state, val); 381 381 382 - tmp = intel_de_read(i915, BLC_PWM_CTL2); 383 - intel_de_write(i915, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE); 382 + intel_de_rmw(i915, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0); 384 383 } 385 384 386 385 static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) ··· 386 389 struct intel_connector *connector = to_intel_connector(old_conn_state->connector); 387 390 struct drm_i915_private *i915 = to_i915(connector->base.dev); 388 391 enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe; 389 - u32 tmp; 390 392 391 393 intel_backlight_set_pwm_level(old_conn_state, val); 392 394 393 - tmp = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe)); 394 - intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE); 395 + intel_de_rmw(i915, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0); 395 396 } 396 397 397 398 static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) ··· 397 402 struct intel_connector *connector = to_intel_connector(old_conn_state->connector); 398 403 struct drm_i915_private *i915 = to_i915(connector->base.dev); 399 404 struct intel_panel *panel = &connector->panel; 400 - u32 tmp; 401 405 402 406 intel_backlight_set_pwm_level(old_conn_state, val); 403 407 404 - tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); 405 - intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), 
406 - tmp & ~BXT_BLC_PWM_ENABLE); 408 + intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), 409 + BXT_BLC_PWM_ENABLE, 0); 407 410 408 - if (panel->backlight.controller == 1) { 409 - val = intel_de_read(i915, UTIL_PIN_CTL); 410 - val &= ~UTIL_PIN_ENABLE; 411 - intel_de_write(i915, UTIL_PIN_CTL, val); 412 - } 411 + if (panel->backlight.controller == 1) 412 + intel_de_rmw(i915, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0); 413 413 } 414 414 415 415 static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) ··· 412 422 struct intel_connector *connector = to_intel_connector(old_conn_state->connector); 413 423 struct drm_i915_private *i915 = to_i915(connector->base.dev); 414 424 struct intel_panel *panel = &connector->panel; 415 - u32 tmp; 416 425 417 426 intel_backlight_set_pwm_level(old_conn_state, val); 418 427 419 - tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); 420 - intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), 421 - tmp & ~BXT_BLC_PWM_ENABLE); 428 + intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), 429 + BXT_BLC_PWM_ENABLE, 0); 422 430 } 423 431 424 432 static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) ··· 446 458 * another client is not activated. 
447 459 */ 448 460 if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) { 449 - drm_dbg_kms(&i915->drm, "Skipping backlight disable on vga switch\n"); 461 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n", 462 + connector->base.base.id, connector->base.name); 450 463 return; 451 464 } 452 465 ··· 467 478 struct intel_connector *connector = to_intel_connector(conn_state->connector); 468 479 struct drm_i915_private *i915 = to_i915(connector->base.dev); 469 480 struct intel_panel *panel = &connector->panel; 470 - u32 pch_ctl1, pch_ctl2, schicken; 481 + u32 pch_ctl1, pch_ctl2; 471 482 472 483 pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1); 473 484 if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { 474 - drm_dbg_kms(&i915->drm, "pch backlight already enabled\n"); 485 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n", 486 + connector->base.base.id, connector->base.name); 475 487 pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; 476 488 intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1); 477 489 } 478 490 479 - if (HAS_PCH_LPT(i915)) { 480 - schicken = intel_de_read(i915, SOUTH_CHICKEN2); 481 - if (panel->backlight.alternate_pwm_increment) 482 - schicken |= LPT_PWM_GRANULARITY; 483 - else 484 - schicken &= ~LPT_PWM_GRANULARITY; 485 - intel_de_write(i915, SOUTH_CHICKEN2, schicken); 486 - } else { 487 - schicken = intel_de_read(i915, SOUTH_CHICKEN1); 488 - if (panel->backlight.alternate_pwm_increment) 489 - schicken |= SPT_PWM_GRANULARITY; 490 - else 491 - schicken &= ~SPT_PWM_GRANULARITY; 492 - intel_de_write(i915, SOUTH_CHICKEN1, schicken); 493 - } 491 + if (HAS_PCH_LPT(i915)) 492 + intel_de_rmw(i915, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY, 493 + panel->backlight.alternate_pwm_increment ? 494 + LPT_PWM_GRANULARITY : 0); 495 + else 496 + intel_de_rmw(i915, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY, 497 + panel->backlight.alternate_pwm_increment ? 
498 + SPT_PWM_GRANULARITY : 0); 494 499 495 500 pch_ctl2 = panel->backlight.pwm_level_max << 16; 496 501 intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2); ··· 516 533 517 534 cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2); 518 535 if (cpu_ctl2 & BLM_PWM_ENABLE) { 519 - drm_dbg_kms(&i915->drm, "cpu backlight already enabled\n"); 536 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight already enabled\n", 537 + connector->base.base.id, connector->base.name); 520 538 cpu_ctl2 &= ~BLM_PWM_ENABLE; 521 539 intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2); 522 540 } 523 541 524 542 pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1); 525 543 if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { 526 - drm_dbg_kms(&i915->drm, "pch backlight already enabled\n"); 544 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n", 545 + connector->base.base.id, connector->base.name); 527 546 pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; 528 547 intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1); 529 548 } ··· 563 578 564 579 ctl = intel_de_read(i915, BLC_PWM_CTL); 565 580 if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { 566 - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); 581 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", 582 + connector->base.base.id, connector->base.name); 567 583 intel_de_write(i915, BLC_PWM_CTL, 0); 568 584 } 569 585 ··· 604 618 605 619 ctl2 = intel_de_read(i915, BLC_PWM_CTL2); 606 620 if (ctl2 & BLM_PWM_ENABLE) { 607 - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); 621 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", 622 + connector->base.base.id, connector->base.name); 608 623 ctl2 &= ~BLM_PWM_ENABLE; 609 624 intel_de_write(i915, BLC_PWM_CTL2, ctl2); 610 625 } ··· 640 653 641 654 ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe)); 642 655 if (ctl2 & BLM_PWM_ENABLE) { 643 - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); 656 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already 
enabled\n", 657 + connector->base.base.id, connector->base.name); 644 658 ctl2 &= ~BLM_PWM_ENABLE; 645 659 intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2); 646 660 } ··· 673 685 if (panel->backlight.controller == 1) { 674 686 val = intel_de_read(i915, UTIL_PIN_CTL); 675 687 if (val & UTIL_PIN_ENABLE) { 676 - drm_dbg_kms(&i915->drm, "util pin already enabled\n"); 688 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] utility pin already enabled\n", 689 + connector->base.base.id, connector->base.name); 677 690 val &= ~UTIL_PIN_ENABLE; 678 691 intel_de_write(i915, UTIL_PIN_CTL, val); 679 692 } ··· 688 699 689 700 pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); 690 701 if (pwm_ctl & BXT_BLC_PWM_ENABLE) { 691 - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); 702 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", 703 + connector->base.base.id, connector->base.name); 692 704 pwm_ctl &= ~BXT_BLC_PWM_ENABLE; 693 705 intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), 694 706 pwm_ctl); ··· 1260 1270 cpu_ctl2 & ~BLM_PWM_ENABLE); 1261 1271 } 1262 1272 1273 + drm_dbg_kms(&i915->drm, 1274 + "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n", 1275 + connector->base.base.id, connector->base.name); 1276 + 1263 1277 return 0; 1264 1278 } 1265 1279 ··· 1290 1296 cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2); 1291 1297 panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) && 1292 1298 (pch_ctl1 & BLM_PCH_PWM_ENABLE); 1299 + 1300 + drm_dbg_kms(&i915->drm, 1301 + "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n", 1302 + connector->base.base.id, connector->base.name); 1293 1303 1294 1304 return 0; 1295 1305 } ··· 1333 1335 1334 1336 panel->backlight.pwm_enabled = val != 0; 1335 1337 1338 + drm_dbg_kms(&i915->drm, 1339 + "[CONNECTOR:%d:%s] Using native PWM for backlight control\n", 1340 + connector->base.base.id, connector->base.name); 1341 + 1336 1342 return 0; 1337 1343 } 1338 
1344 ··· 1366 1364 1367 1365 panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; 1368 1366 1367 + drm_dbg_kms(&i915->drm, 1368 + "[CONNECTOR:%d:%s] Using native PWM for backlight control\n", 1369 + connector->base.base.id, connector->base.name); 1370 + 1369 1371 return 0; 1370 1372 } 1371 1373 ··· 1397 1391 panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); 1398 1392 1399 1393 panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; 1394 + 1395 + drm_dbg_kms(&i915->drm, 1396 + "[CONNECTOR:%d:%s] Using native PWM for backlight control (on pipe %c)\n", 1397 + connector->base.base.id, connector->base.name, pipe_name(pipe)); 1400 1398 1401 1399 return 0; 1402 1400 } ··· 1437 1427 panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); 1438 1428 1439 1429 panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; 1430 + 1431 + drm_dbg_kms(&i915->drm, 1432 + "[CONNECTOR:%d:%s] Using native PWM for backlight control (controller=%d)\n", 1433 + connector->base.base.id, connector->base.name, 1434 + panel->backlight.controller); 1440 1435 1441 1436 return 0; 1442 1437 } ··· 1483 1468 */ 1484 1469 panel->backlight.controller = connector->panel.vbt.backlight.controller; 1485 1470 if (!cnp_backlight_controller_is_valid(i915, panel->backlight.controller)) { 1486 - drm_dbg_kms(&i915->drm, "Invalid backlight controller %d, assuming 0\n", 1471 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n", 1472 + connector->base.base.id, connector->base.name, 1487 1473 panel->backlight.controller); 1488 1474 panel->backlight.controller = 0; 1489 1475 } ··· 1505 1489 panel->backlight.pwm_level_min = get_backlight_min_vbt(connector); 1506 1490 1507 1491 panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; 1492 + 1493 + drm_dbg_kms(&i915->drm, 1494 + "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control (controller=%d)\n", 1495 + connector->base.base.id, connector->base.name, 1496 + panel->backlight.controller); 
1508 1497 1509 1498 return 0; 1510 1499 } ··· 1532 1511 } 1533 1512 1534 1513 if (IS_ERR(panel->backlight.pwm)) { 1535 - drm_err(&i915->drm, "Failed to get the %s PWM chip\n", 1536 - desc); 1514 + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n", 1515 + connector->base.base.id, connector->base.name, desc); 1537 1516 panel->backlight.pwm = NULL; 1538 1517 return -ENODEV; 1539 1518 } ··· 1550 1529 level = intel_backlight_invert_pwm_level(connector, level); 1551 1530 panel->backlight.pwm_enabled = true; 1552 1531 1553 - drm_dbg_kms(&i915->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n", 1532 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n", 1533 + connector->base.base.id, connector->base.name, 1554 1534 NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period, 1555 1535 get_vbt_pwm_freq(connector), level); 1556 1536 } else { ··· 1560 1538 NSEC_PER_SEC / get_vbt_pwm_freq(connector); 1561 1539 } 1562 1540 1563 - drm_info(&i915->drm, "Using %s PWM for LCD backlight control\n", 1564 - desc); 1541 + drm_dbg_kms(&i915->drm, 1542 + "[CONNECTOR:%d:%s] Using %s PWM for backlight control\n", 1543 + connector->base.base.id, connector->base.name, desc); 1544 + 1565 1545 return 0; 1566 1546 } 1567 1547 ··· 1606 1582 static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe) 1607 1583 { 1608 1584 struct intel_panel *panel = &connector->panel; 1609 - int ret = panel->backlight.pwm_funcs->setup(connector, pipe); 1585 + int ret; 1610 1586 1587 + ret = panel->backlight.pwm_funcs->setup(connector, pipe); 1611 1588 if (ret < 0) 1612 1589 return ret; 1613 1590 ··· 1648 1623 if (!connector->panel.vbt.backlight.present) { 1649 1624 if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) { 1650 1625 drm_dbg_kms(&i915->drm, 1651 - "no backlight present per VBT, but present per quirk\n"); 1626 + "[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n", 
1627 + connector->base.base.id, connector->base.name); 1652 1628 } else { 1653 1629 drm_dbg_kms(&i915->drm, 1654 - "no backlight present per VBT\n"); 1630 + "[CONNECTOR:%d:%s] no backlight present per VBT\n", 1631 + connector->base.base.id, connector->base.name); 1655 1632 return 0; 1656 1633 } 1657 1634 } ··· 1669 1642 1670 1643 if (ret) { 1671 1644 drm_dbg_kms(&i915->drm, 1672 - "failed to setup backlight for connector %s\n", 1673 - connector->base.name); 1645 + "[CONNECTOR:%d:%s] failed to setup backlight\n", 1646 + connector->base.base.id, connector->base.name); 1674 1647 return ret; 1675 1648 } 1676 1649 1677 1650 panel->backlight.present = true; 1678 1651 1679 1652 drm_dbg_kms(&i915->drm, 1680 - "Connector %s backlight initialized, %s, brightness %u/%u\n", 1681 - connector->base.name, 1653 + "[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n", 1654 + connector->base.base.id, connector->base.name, 1682 1655 str_enabled_disabled(panel->backlight.enabled), 1683 1656 panel->backlight.level, panel->backlight.max); 1684 1657
+69 -151
drivers/gpu/drm/i915/display/intel_bios.c
··· 1084 1084 panel->vbt.backlight.min_brightness = entry->min_brightness; 1085 1085 } 1086 1086 1087 + if (i915->display.vbt.version >= 239) 1088 + panel->vbt.backlight.hdr_dpcd_refresh_timeout = 1089 + DIV_ROUND_UP(backlight_data->hdr_dpcd_refresh_timeout[panel_type], 100); 1090 + else 1091 + panel->vbt.backlight.hdr_dpcd_refresh_timeout = 30; 1092 + 1087 1093 drm_dbg_kms(&i915->drm, 1088 1094 "VBT backlight PWM modulation frequency %u Hz, " 1089 1095 "active %s, min brightness %u, level %u, controller %u\n", ··· 1208 1202 static void 1209 1203 parse_sdvo_device_mapping(struct drm_i915_private *i915) 1210 1204 { 1211 - struct sdvo_device_mapping *mapping; 1212 1205 const struct intel_bios_encoder_data *devdata; 1213 - const struct child_device_config *child; 1214 1206 int count = 0; 1215 1207 1216 1208 /* ··· 1221 1217 } 1222 1218 1223 1219 list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { 1224 - child = &devdata->child; 1220 + const struct child_device_config *child = &devdata->child; 1221 + struct sdvo_device_mapping *mapping; 1225 1222 1226 1223 if (child->slave_addr != SLAVE_ADDR1 && 1227 1224 child->slave_addr != SLAVE_ADDR2) { ··· 2080 2075 { 2081 2076 const struct bdb_compression_parameters *params; 2082 2077 struct intel_bios_encoder_data *devdata; 2083 - const struct child_device_config *child; 2084 2078 u16 block_size; 2085 2079 int index; 2086 2080 ··· 2104 2100 } 2105 2101 2106 2102 list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { 2107 - child = &devdata->child; 2103 + const struct child_device_config *child = &devdata->child; 2108 2104 2109 2105 if (!child->compression_enable) 2110 2106 continue; ··· 2230 2226 2231 2227 static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin) 2232 2228 { 2233 - const struct intel_bios_encoder_data *devdata; 2234 2229 enum port port; 2235 2230 2236 2231 if (!ddc_pin) 2237 2232 return PORT_NONE; 2238 2233 2239 2234 for_each_port(port) { 2240 - 
devdata = i915->display.vbt.ports[port]; 2235 + const struct intel_bios_encoder_data *devdata = 2236 + i915->display.vbt.ports[port]; 2241 2237 2242 2238 if (devdata && ddc_pin == devdata->child.ddc_pin) 2243 2239 return port; ··· 2296 2292 2297 2293 static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch) 2298 2294 { 2299 - const struct intel_bios_encoder_data *devdata; 2300 2295 enum port port; 2301 2296 2302 2297 if (!aux_ch) 2303 2298 return PORT_NONE; 2304 2299 2305 2300 for_each_port(port) { 2306 - devdata = i915->display.vbt.ports[port]; 2301 + const struct intel_bios_encoder_data *devdata = 2302 + i915->display.vbt.ports[port]; 2307 2303 2308 2304 if (devdata && aux_ch == devdata->child.aux_channel) 2309 2305 return port; ··· 2526 2522 } 2527 2523 } 2528 2524 2529 - static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) 2525 + int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) 2530 2526 { 2531 2527 if (!devdata || devdata->i915->display.vbt.version < 216) 2532 2528 return 0; ··· 2537 2533 return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate); 2538 2534 } 2539 2535 2540 - static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) 2536 + int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) 2541 2537 { 2542 2538 if (!devdata || devdata->i915->display.vbt.version < 244) 2543 2539 return 0; ··· 2591 2587 return devdata->child.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; 2592 2588 } 2593 2589 2594 - static bool 2590 + bool 2595 2591 intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata) 2596 2592 { 2597 2593 return intel_bios_encoder_supports_dp(devdata) && ··· 2604 2600 return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT; 2605 2601 } 2606 2602 2607 - static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) 2603 + bool 2604 + 
intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata) 2605 + { 2606 + return devdata && HAS_LSPCON(devdata->i915) && devdata->child.lspcon; 2607 + } 2608 + 2609 + /* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */ 2610 + int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) 2608 2611 { 2609 2612 if (!devdata || devdata->i915->display.vbt.version < 158) 2610 2613 return -1; ··· 2619 2608 return devdata->child.hdmi_level_shifter_value; 2620 2609 } 2621 2610 2622 - static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata) 2611 + int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata) 2623 2612 { 2624 2613 if (!devdata || devdata->i915->display.vbt.version < 204) 2625 2614 return 0; ··· 2677 2666 drm_dbg_kms(&i915->drm, 2678 2667 "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n", 2679 2668 port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi, 2680 - HAS_LSPCON(i915) && child->lspcon, 2669 + intel_bios_encoder_is_lspcon(devdata), 2681 2670 supports_typec_usb, supports_tbt, 2682 2671 devdata->dsc != NULL); 2683 2672 2684 - hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata); 2673 + hdmi_level_shift = intel_bios_hdmi_level_shift(devdata); 2685 2674 if (hdmi_level_shift >= 0) { 2686 2675 drm_dbg_kms(&i915->drm, 2687 2676 "Port %c VBT HDMI level shift: %d\n", 2688 2677 port_name(port), hdmi_level_shift); 2689 2678 } 2690 2679 2691 - max_tmds_clock = _intel_bios_max_tmds_clock(devdata); 2680 + max_tmds_clock = intel_bios_hdmi_max_tmds_clock(devdata); 2692 2681 if (max_tmds_clock) 2693 2682 drm_dbg_kms(&i915->drm, 2694 2683 "Port %c VBT HDMI max TMDS clock: %d kHz\n", 2695 2684 port_name(port), max_tmds_clock); 2696 2685 2697 2686 /* I_boost config for SKL and above */ 2698 - dp_boost_level = intel_bios_encoder_dp_boost_level(devdata); 2687 + dp_boost_level = 
intel_bios_dp_boost_level(devdata); 2699 2688 if (dp_boost_level) 2700 2689 drm_dbg_kms(&i915->drm, 2701 2690 "Port %c VBT (e)DP boost level: %d\n", 2702 2691 port_name(port), dp_boost_level); 2703 2692 2704 - hdmi_boost_level = intel_bios_encoder_hdmi_boost_level(devdata); 2693 + hdmi_boost_level = intel_bios_hdmi_boost_level(devdata); 2705 2694 if (hdmi_boost_level) 2706 2695 drm_dbg_kms(&i915->drm, 2707 2696 "Port %c VBT HDMI boost level: %d\n", 2708 2697 port_name(port), hdmi_boost_level); 2709 2698 2710 - dp_max_link_rate = _intel_bios_dp_max_link_rate(devdata); 2699 + dp_max_link_rate = intel_bios_dp_max_link_rate(devdata); 2711 2700 if (dp_max_link_rate) 2712 2701 drm_dbg_kms(&i915->drm, 2713 2702 "Port %c VBT DP max link rate: %d\n", ··· 2822 2811 expected_size = 37; 2823 2812 } else if (i915->display.vbt.version <= 215) { 2824 2813 expected_size = 38; 2825 - } else if (i915->display.vbt.version <= 237) { 2814 + } else if (i915->display.vbt.version <= 250) { 2826 2815 expected_size = 39; 2827 2816 } else { 2828 2817 expected_size = sizeof(*child); ··· 3317 3306 bool intel_bios_is_tv_present(struct drm_i915_private *i915) 3318 3307 { 3319 3308 const struct intel_bios_encoder_data *devdata; 3320 - const struct child_device_config *child; 3321 3309 3322 3310 if (!i915->display.vbt.int_tv_support) 3323 3311 return false; ··· 3325 3315 return true; 3326 3316 3327 3317 list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { 3328 - child = &devdata->child; 3318 + const struct child_device_config *child = &devdata->child; 3329 3319 3330 3320 /* 3331 3321 * If the device type is not TV, continue. 
··· 3359 3349 bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) 3360 3350 { 3361 3351 const struct intel_bios_encoder_data *devdata; 3362 - const struct child_device_config *child; 3363 3352 3364 3353 if (list_empty(&i915->display.vbt.display_devices)) 3365 3354 return true; 3366 3355 3367 3356 list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { 3368 - child = &devdata->child; 3357 + const struct child_device_config *child = &devdata->child; 3369 3358 3370 3359 /* If the device type is not LFP, continue. 3371 3360 * We have to check both the new identifiers as well as the ··· 3406 3397 */ 3407 3398 bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) 3408 3399 { 3400 + const struct intel_bios_encoder_data *devdata; 3401 + 3409 3402 if (WARN_ON(!has_ddi_port_info(i915))) 3410 3403 return true; 3411 3404 3412 - return i915->display.vbt.ports[port]; 3413 - } 3405 + if (!is_port_valid(i915, port)) 3406 + return false; 3414 3407 3415 - /** 3416 - * intel_bios_is_port_edp - is the device in given port eDP 3417 - * @i915: i915 device instance 3418 - * @port: port to check 3419 - * 3420 - * Return true if the device in %port is eDP. 
3421 - */ 3422 - bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port) 3423 - { 3424 - const struct intel_bios_encoder_data *devdata = 3425 - intel_bios_encoder_data_lookup(i915, port); 3408 + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { 3409 + const struct child_device_config *child = &devdata->child; 3426 3410 3427 - return devdata && intel_bios_encoder_supports_edp(devdata); 3411 + if (dvo_port_to_port(i915, child->dvo_port) == port) 3412 + return true; 3413 + } 3414 + 3415 + return false; 3428 3416 } 3429 3417 3430 3418 static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata) ··· 3463 3457 enum port *port) 3464 3458 { 3465 3459 const struct intel_bios_encoder_data *devdata; 3466 - const struct child_device_config *child; 3467 - u8 dvo_port; 3468 3460 3469 3461 list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { 3470 - child = &devdata->child; 3462 + const struct child_device_config *child = &devdata->child; 3463 + u8 dvo_port = child->dvo_port; 3471 3464 3472 3465 if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) 3473 3466 continue; 3474 - 3475 - dvo_port = child->dvo_port; 3476 3467 3477 3468 if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) { 3478 3469 drm_dbg_kms(&i915->drm, ··· 3557 3554 { 3558 3555 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3559 3556 const struct intel_bios_encoder_data *devdata; 3560 - const struct child_device_config *child; 3561 3557 3562 3558 list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { 3563 - child = &devdata->child; 3559 + const struct child_device_config *child = &devdata->child; 3564 3560 3565 3561 if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) 3566 3562 continue; ··· 3578 3576 return false; 3579 3577 } 3580 3578 3581 - /** 3582 - * intel_bios_is_port_hpd_inverted - is HPD inverted for %port 3583 - * @i915: i915 device instance 3584 - * @port: port to check 3585 
- * 3586 - * Return true if HPD should be inverted for %port. 3587 - */ 3588 - bool 3589 - intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, 3590 - enum port port) 3579 + static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel) 3591 3580 { 3592 - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; 3593 - 3594 - if (drm_WARN_ON_ONCE(&i915->drm, 3595 - !IS_GEMINILAKE(i915) && !IS_BROXTON(i915))) 3596 - return false; 3597 - 3598 - return devdata && devdata->child.hpd_invert; 3599 - } 3600 - 3601 - /** 3602 - * intel_bios_is_lspcon_present - if LSPCON is attached on %port 3603 - * @i915: i915 device instance 3604 - * @port: port to check 3605 - * 3606 - * Return true if LSPCON is present on this port 3607 - */ 3608 - bool 3609 - intel_bios_is_lspcon_present(const struct drm_i915_private *i915, 3610 - enum port port) 3611 - { 3612 - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; 3613 - 3614 - return HAS_LSPCON(i915) && devdata && devdata->child.lspcon; 3615 - } 3616 - 3617 - /** 3618 - * intel_bios_is_lane_reversal_needed - if lane reversal needed on port 3619 - * @i915: i915 device instance 3620 - * @port: port to check 3621 - * 3622 - * Return true if port requires lane reversal 3623 - */ 3624 - bool 3625 - intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, 3626 - enum port port) 3627 - { 3628 - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; 3629 - 3630 - return devdata && devdata->child.lane_reversal; 3631 - } 3632 - 3633 - enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, 3634 - enum port port) 3635 - { 3636 - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; 3637 3581 enum aux_ch aux_ch; 3638 - 3639 - if (!devdata || !devdata->child.aux_channel) { 3640 - aux_ch = (enum aux_ch)port; 3641 - 3642 - drm_dbg_kms(&i915->drm, 3643 - "using AUX %c for port %c (platform 
default)\n", 3644 - aux_ch_name(aux_ch), port_name(port)); 3645 - return aux_ch; 3646 - } 3647 3582 3648 3583 /* 3649 3584 * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D ··· 3589 3650 * ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E 3590 3651 * map to DDI A,TC1,TC2,TC3,TC4 respectively. 3591 3652 */ 3592 - switch (devdata->child.aux_channel) { 3653 + switch (aux_channel) { 3593 3654 case DP_AUX_A: 3594 3655 aux_ch = AUX_CH_A; 3595 3656 break; ··· 3650 3711 aux_ch = AUX_CH_I; 3651 3712 break; 3652 3713 default: 3653 - MISSING_CASE(devdata->child.aux_channel); 3714 + MISSING_CASE(aux_channel); 3654 3715 aux_ch = AUX_CH_A; 3655 3716 break; 3656 3717 } 3657 3718 3658 - drm_dbg_kms(&i915->drm, "using AUX %c for port %c (VBT)\n", 3659 - aux_ch_name(aux_ch), port_name(port)); 3660 - 3661 3719 return aux_ch; 3662 3720 } 3663 3721 3664 - int intel_bios_max_tmds_clock(struct intel_encoder *encoder) 3722 + enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata) 3665 3723 { 3666 - struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3667 - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; 3724 + if (!devdata || !devdata->child.aux_channel) 3725 + return AUX_CH_NONE; 3668 3726 3669 - return _intel_bios_max_tmds_clock(devdata); 3727 + return map_aux_ch(devdata->i915, devdata->child.aux_channel); 3670 3728 } 3671 3729 3672 - /* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */ 3673 - int intel_bios_hdmi_level_shift(struct intel_encoder *encoder) 3674 - { 3675 - struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3676 - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; 3677 - 3678 - return _intel_bios_hdmi_level_shift(devdata); 3679 - } 3680 - 3681 - int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata) 3730 + int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata) 3682 3731 { 
3683 3732 if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) 3684 3733 return 0; ··· 3674 3747 return translate_iboost(devdata->child.dp_iboost_level); 3675 3748 } 3676 3749 3677 - int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) 3750 + int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) 3678 3751 { 3679 3752 if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) 3680 3753 return 0; ··· 3682 3755 return translate_iboost(devdata->child.hdmi_iboost_level); 3683 3756 } 3684 3757 3685 - int intel_bios_dp_max_link_rate(struct intel_encoder *encoder) 3758 + int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata) 3686 3759 { 3687 - struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3688 - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; 3689 - 3690 - return _intel_bios_dp_max_link_rate(devdata); 3691 - } 3692 - 3693 - int intel_bios_dp_max_lane_count(struct intel_encoder *encoder) 3694 - { 3695 - struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3696 - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; 3697 - 3698 - return _intel_bios_dp_max_lane_count(devdata); 3699 - } 3700 - 3701 - int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder) 3702 - { 3703 - struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3704 - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; 3705 - 3706 3760 if (!devdata || !devdata->child.ddc_pin) 3707 3761 return 0; 3708 3762 3709 - return map_ddc_pin(i915, devdata->child.ddc_pin); 3763 + return map_ddc_pin(devdata->i915, devdata->child.ddc_pin); 3710 3764 } 3711 3765 3712 3766 bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata) ··· 3698 3790 bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data 
*devdata) 3699 3791 { 3700 3792 return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt; 3793 + } 3794 + 3795 + bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata) 3796 + { 3797 + return devdata && devdata->child.lane_reversal; 3798 + } 3799 + 3800 + bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata) 3801 + { 3802 + return devdata && devdata->child.hpd_invert; 3701 3803 } 3702 3804 3703 3805 const struct intel_bios_encoder_data *
+13 -14
drivers/gpu/drm/i915/display/intel_bios.h
··· 38 38 struct intel_crtc_state; 39 39 struct intel_encoder; 40 40 struct intel_panel; 41 + enum aux_ch; 41 42 enum port; 42 43 43 44 enum intel_backlight_type { ··· 249 248 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 250 249 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); 251 250 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); 252 - bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, 253 - enum port port); 254 - bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915, 255 - enum port port); 256 - bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, 257 - enum port port); 258 - enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port); 259 251 bool intel_bios_get_dsc_params(struct intel_encoder *encoder, 260 252 struct intel_crtc_state *crtc_state, 261 253 int dsc_max_bpc); 262 - int intel_bios_max_tmds_clock(struct intel_encoder *encoder); 263 - int intel_bios_hdmi_level_shift(struct intel_encoder *encoder); 264 - int intel_bios_dp_max_link_rate(struct intel_encoder *encoder); 265 - int intel_bios_dp_max_lane_count(struct intel_encoder *encoder); 266 - int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder); 267 254 bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port); 268 255 bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port); 269 256 ··· 261 272 bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata); 262 273 bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata); 263 274 bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata); 275 + bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata); 264 276 bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data 
*devdata); 265 277 bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata); 266 - int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata); 267 - int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata); 278 + bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata); 279 + bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata); 280 + bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata); 281 + enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata); 282 + int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata); 283 + int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata); 284 + int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata); 285 + int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata); 286 + int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata); 287 + int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata); 288 + int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata); 268 289 269 290 #endif /* _INTEL_BIOS_H_ */
+29 -20
drivers/gpu/drm/i915/display/intel_bw.c
··· 119 119 return 0; 120 120 } 121 121 122 + static u16 icl_qgv_points_mask(struct drm_i915_private *i915) 123 + { 124 + unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; 125 + unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; 126 + u16 qgv_points = 0, psf_points = 0; 127 + 128 + /* 129 + * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects 130 + * it with failure if we try masking any unadvertised points. 131 + * So need to operate only with those returned from PCode. 132 + */ 133 + if (num_qgv_points > 0) 134 + qgv_points = GENMASK(num_qgv_points - 1, 0); 135 + 136 + if (num_psf_gv_points > 0) 137 + psf_points = GENMASK(num_psf_gv_points - 1, 0); 138 + 139 + return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points); 140 + } 141 + 142 + static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask) 143 + { 144 + return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) & 145 + ICL_PCODE_REQ_QGV_PT_MASK); 146 + } 147 + 122 148 int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, 123 149 u32 points_mask) 124 150 { ··· 161 135 drm_err(&dev_priv->drm, "Failed to disable qgv points (%d) points: 0x%x\n", ret, points_mask); 162 136 return ret; 163 137 } 138 + 139 + dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ? 140 + I915_SAGV_ENABLED : I915_SAGV_DISABLED; 164 141 165 142 return 0; 166 143 } ··· 992 963 *need_cdclk_calc = true; 993 964 994 965 return 0; 995 - } 996 - 997 - static u16 icl_qgv_points_mask(struct drm_i915_private *i915) 998 - { 999 - unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; 1000 - unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; 1001 - u16 qgv_points = 0, psf_points = 0; 1002 - 1003 - /* 1004 - * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects 1005 - * it with failure if we try masking any unadvertised points. 
1006 - * So need to operate only with those returned from PCode. 1007 - */ 1008 - if (num_qgv_points > 0) 1009 - qgv_points = GENMASK(num_qgv_points - 1, 0); 1010 - 1011 - if (num_psf_gv_points > 0) 1012 - psf_points = GENMASK(num_psf_gv_points - 1, 0); 1013 - 1014 - return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points); 1015 966 } 1016 967 1017 968 static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
+39 -2
drivers/gpu/drm/i915/display/intel_cdclk.c
··· 1329 1329 {} 1330 1330 }; 1331 1331 1332 + static const struct intel_cdclk_vals rplu_cdclk_table[] = { 1333 + { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 }, 1334 + { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 }, 1335 + { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 }, 1336 + { .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 }, 1337 + { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 }, 1338 + { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 }, 1339 + 1340 + { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 }, 1341 + { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 }, 1342 + { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 }, 1343 + { .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 }, 1344 + { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 }, 1345 + { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 }, 1346 + 1347 + { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 }, 1348 + { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 }, 1349 + { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, 1350 + { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 }, 1351 + { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, 1352 + { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, 1353 + {} 1354 + }; 1355 + 1332 1356 static const struct intel_cdclk_vals dg2_cdclk_table[] = { 1333 1357 { .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 }, 1334 1358 { .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 }, ··· 1825 1801 return true; 1826 1802 } 1827 1803 1804 + static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv) 1805 + { 1806 + return ((IS_DG2(dev_priv) || IS_METEORLAKE(dev_priv)) && 1807 + dev_priv->display.cdclk.hw.vco > 0 && 1808 + HAS_CDCLK_SQUASH(dev_priv)); 1809 + } 1810 + 
1828 1811 static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, 1829 1812 const struct intel_cdclk_config *cdclk_config, 1830 1813 enum pipe pipe) ··· 1846 1815 !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) { 1847 1816 if (dev_priv->display.cdclk.hw.vco != vco) 1848 1817 adlp_cdclk_pll_crawl(dev_priv, vco); 1849 - } else if (DISPLAY_VER(dev_priv) >= 11) 1818 + } else if (DISPLAY_VER(dev_priv) >= 11) { 1819 + /* wa_15010685871: dg2, mtl */ 1820 + if (pll_enable_wa_needed(dev_priv)) 1821 + dg2_cdclk_squash_program(dev_priv, 0); 1822 + 1850 1823 icl_cdclk_pll_update(dev_priv, vco); 1851 - else 1824 + } else 1852 1825 bxt_cdclk_pll_update(dev_priv, vco); 1853 1826 1854 1827 waveform = cdclk_squash_waveform(dev_priv, cdclk); ··· 3388 3353 /* Wa_22011320316:adl-p[a0] */ 3389 3354 if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 3390 3355 dev_priv->display.cdclk.table = adlp_a_step_cdclk_table; 3356 + else if (IS_ADLP_RPLU(dev_priv)) 3357 + dev_priv->display.cdclk.table = rplu_cdclk_table; 3391 3358 else 3392 3359 dev_priv->display.cdclk.table = adlp_cdclk_table; 3393 3360 } else if (IS_ROCKETLAKE(dev_priv)) {
+13 -5
drivers/gpu/drm/i915/display/intel_color.c
··· 257 257 if (DISPLAY_VER(i915) >= 11) 258 258 return false; 259 259 260 - /* pre-hsw have PIPECONF_COLOR_RANGE_SELECT */ 260 + /* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */ 261 261 if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915)) 262 262 return false; 263 263 ··· 624 624 625 625 static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state) 626 626 { 627 - /* update PIPECONF GAMMA_MODE */ 627 + /* update TRANSCONF GAMMA_MODE */ 628 628 i9xx_set_pipeconf(crtc_state); 629 629 } 630 630 ··· 633 633 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 634 634 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 635 635 636 - /* update PIPECONF GAMMA_MODE */ 636 + /* update TRANSCONF GAMMA_MODE */ 637 637 ilk_set_pipeconf(crtc_state); 638 638 639 639 intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), ··· 1256 1256 break; 1257 1257 } 1258 1258 1259 - if (crtc_state->dsb) 1260 - intel_dsb_commit(crtc_state->dsb); 1259 + if (crtc_state->dsb) { 1260 + intel_dsb_finish(crtc_state->dsb); 1261 + intel_dsb_commit(crtc_state->dsb, false); 1262 + intel_dsb_wait(crtc_state->dsb); 1263 + } 1261 1264 } 1262 1265 1263 1266 static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color) ··· 1383 1380 /* FIXME DSB has issues loading LUTs, disable it for now */ 1384 1381 return; 1385 1382 1383 + if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut) 1384 + return; 1385 + 1386 1386 crtc_state->dsb = intel_dsb_prepare(crtc, 1024); 1387 1387 } 1388 1388 ··· 1506 1500 return PTR_ERR(plane_state); 1507 1501 1508 1502 new_crtc_state->update_planes |= BIT(plane->id); 1503 + new_crtc_state->async_flip_planes = 0; 1504 + new_crtc_state->do_async_flip = false; 1509 1505 1510 1506 /* plane control register changes blocked by CxSR */ 1511 1507 if (HAS_GMCH(i915))
+15 -33
drivers/gpu/drm/i915/display/intel_combo_phy.c
··· 78 78 enum phy phy) 79 79 { 80 80 const struct icl_procmon *procmon; 81 - u32 val; 82 81 83 82 procmon = icl_get_procmon_ref_values(dev_priv, phy); 84 83 85 - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy)); 86 - val &= ~((0xff << 16) | 0xff); 87 - val |= procmon->dw1; 88 - intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val); 84 + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW1(phy), 85 + (0xff << 16) | 0xff, procmon->dw1); 89 86 90 87 intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9); 91 88 intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10); ··· 233 236 ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2); 234 237 235 238 ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy), 236 - DCC_MODE_SELECT_MASK, 237 - DCC_MODE_SELECT_CONTINUOSLY); 239 + DCC_MODE_SELECT_MASK, RUN_DCC_ONCE); 238 240 } 239 241 240 242 ret &= icl_verify_procmon_ref_values(dev_priv, phy); ··· 263 267 int lane_count, bool lane_reversal) 264 268 { 265 269 u8 lane_mask; 266 - u32 val; 267 270 268 271 if (is_dsi) { 269 272 drm_WARN_ON(&dev_priv->drm, lane_reversal); ··· 303 308 } 304 309 } 305 310 306 - val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy)); 307 - val &= ~PWR_DOWN_LN_MASK; 308 - val |= lane_mask; 309 - intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val); 311 + intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), 312 + PWR_DOWN_LN_MASK, lane_mask); 310 313 } 311 314 312 315 static void icl_combo_phys_init(struct drm_i915_private *dev_priv) ··· 353 360 354 361 val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); 355 362 val &= ~DCC_MODE_SELECT_MASK; 356 - val |= DCC_MODE_SELECT_CONTINUOSLY; 363 + val |= RUN_DCC_ONCE; 357 364 intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val); 358 365 } 359 366 360 367 icl_set_procmon_ref_values(dev_priv, phy); 361 368 362 - if (phy_is_master(dev_priv, phy)) { 363 - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy)); 364 - val |= IREFGEN; 365 - intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val); 366 - } 369 + if 
(phy_is_master(dev_priv, phy)) 370 + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW8(phy), 371 + 0, IREFGEN); 367 372 368 - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)); 369 - val |= COMP_INIT; 370 - intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val); 371 - 372 - val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); 373 - val |= CL_POWER_DOWN_ENABLE; 374 - intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val); 373 + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT); 374 + intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 375 + 0, CL_POWER_DOWN_ENABLE); 375 376 } 376 377 } 377 378 ··· 374 387 enum phy phy; 375 388 376 389 for_each_combo_phy_reverse(dev_priv, phy) { 377 - u32 val; 378 - 379 390 if (phy == PHY_A && 380 391 !icl_combo_phy_verify_state(dev_priv, phy)) { 381 392 if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) { ··· 395 410 if (!has_phy_misc(dev_priv, phy)) 396 411 goto skip_phy_misc; 397 412 398 - val = intel_de_read(dev_priv, ICL_PHY_MISC(phy)); 399 - val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; 400 - intel_de_write(dev_priv, ICL_PHY_MISC(phy), val); 413 + intel_de_rmw(dev_priv, ICL_PHY_MISC(phy), 0, 414 + ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN); 401 415 402 416 skip_phy_misc: 403 - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)); 404 - val &= ~COMP_INIT; 405 - intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val); 417 + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0); 406 418 } 407 419 } 408 420
+2 -2
drivers/gpu/drm/i915/display/intel_combo_phy_regs.h
··· 90 90 #define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy)) 91 91 #define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy)) 92 92 #define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy)) 93 - #define DCC_MODE_SELECT_MASK (0x3 << 20) 94 - #define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20) 93 + #define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20) 94 + #define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0) 95 95 #define COMMON_KEEPER_EN (1 << 26) 96 96 #define LATENCY_OPTIM_MASK (0x3 << 2) 97 97 #define LATENCY_OPTIM_VAL(x) ((x) << 2)
+24 -22
drivers/gpu/drm/i915/display/intel_crt.c
··· 260 260 261 261 ilk_pfit_disable(old_crtc_state); 262 262 263 - intel_ddi_disable_pipe_clock(old_crtc_state); 263 + intel_ddi_disable_transcoder_clock(old_crtc_state); 264 264 265 265 pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state); 266 266 ··· 300 300 301 301 hsw_fdi_link_train(encoder, crtc_state); 302 302 303 - intel_ddi_enable_pipe_clock(encoder, crtc_state); 303 + intel_ddi_enable_transcoder_clock(encoder, crtc_state); 304 304 } 305 305 306 306 static void hsw_enable_crt(struct intel_atomic_state *state, ··· 678 678 } 679 679 680 680 static enum drm_connector_status 681 - intel_crt_load_detect(struct intel_crt *crt, u32 pipe) 681 + intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe) 682 682 { 683 683 struct drm_device *dev = crt->base.base.dev; 684 684 struct drm_i915_private *dev_priv = to_i915(dev); 685 + enum transcoder cpu_transcoder = (enum transcoder)pipe; 685 686 u32 save_bclrpat; 686 687 u32 save_vtotal; 687 688 u32 vtotal, vactive; ··· 694 693 695 694 drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n"); 696 695 697 - save_bclrpat = intel_de_read(dev_priv, BCLRPAT(pipe)); 698 - save_vtotal = intel_de_read(dev_priv, VTOTAL(pipe)); 699 - vblank = intel_de_read(dev_priv, VBLANK(pipe)); 696 + save_bclrpat = intel_de_read(dev_priv, BCLRPAT(cpu_transcoder)); 697 + save_vtotal = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)); 698 + vblank = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)); 700 699 701 - vtotal = ((save_vtotal >> 16) & 0xfff) + 1; 702 - vactive = (save_vtotal & 0x7ff) + 1; 700 + vtotal = REG_FIELD_GET(VTOTAL_MASK, save_vtotal) + 1; 701 + vactive = REG_FIELD_GET(VACTIVE_MASK, save_vtotal) + 1; 703 702 704 - vblank_start = (vblank & 0xfff) + 1; 705 - vblank_end = ((vblank >> 16) & 0xfff) + 1; 703 + vblank_start = REG_FIELD_GET(VBLANK_START_MASK, vblank) + 1; 704 + vblank_end = REG_FIELD_GET(VBLANK_END_MASK, vblank) + 1; 706 705 707 706 /* Set the border color to purple. 
*/ 708 - intel_de_write(dev_priv, BCLRPAT(pipe), 0x500050); 707 + intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), 0x500050); 709 708 710 709 if (DISPLAY_VER(dev_priv) != 2) { 711 - u32 pipeconf = intel_de_read(dev_priv, PIPECONF(pipe)); 710 + u32 transconf = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); 712 711 713 - intel_de_write(dev_priv, PIPECONF(pipe), 714 - pipeconf | PIPECONF_FORCE_BORDER); 715 - intel_de_posting_read(dev_priv, PIPECONF(pipe)); 712 + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), 713 + transconf | TRANSCONF_FORCE_BORDER); 714 + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 716 715 /* Wait for next Vblank to substitue 717 716 * border color for Color info */ 718 717 intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); ··· 721 720 connector_status_connected : 722 721 connector_status_disconnected; 723 722 724 - intel_de_write(dev_priv, PIPECONF(pipe), pipeconf); 723 + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), transconf); 725 724 } else { 726 725 bool restore_vblank = false; 727 726 int count, detect; ··· 731 730 * Yes, this will flicker 732 731 */ 733 732 if (vblank_start <= vactive && vblank_end >= vtotal) { 734 - u32 vsync = intel_de_read(dev_priv, VSYNC(pipe)); 735 - u32 vsync_start = (vsync & 0xffff) + 1; 733 + u32 vsync = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)); 734 + u32 vsync_start = REG_FIELD_GET(VSYNC_START_MASK, vsync) + 1; 736 735 737 736 vblank_start = vsync_start; 738 - intel_de_write(dev_priv, VBLANK(pipe), 739 - (vblank_start - 1) | ((vblank_end - 1) << 16)); 737 + intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), 738 + VBLANK_START(vblank_start - 1) | 739 + VBLANK_END(vblank_end - 1)); 740 740 restore_vblank = true; 741 741 } 742 742 /* sample in the vertical border, selecting the larger one */ ··· 768 766 769 767 /* restore vblank if necessary */ 770 768 if (restore_vblank) 771 - intel_de_write(dev_priv, VBLANK(pipe), vblank); 769 + 
intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), vblank); 772 770 /* 773 771 * If more than 3/4 of the scanline detected a monitor, 774 772 * then it is assumed to be present. This works even on i830, ··· 781 779 } 782 780 783 781 /* Restore previous settings */ 784 - intel_de_write(dev_priv, BCLRPAT(pipe), save_bclrpat); 782 + intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), save_bclrpat); 785 783 786 784 return status; 787 785 }
+3
drivers/gpu/drm/i915/display/intel_crtc.c
··· 25 25 #include "intel_display_types.h" 26 26 #include "intel_drrs.h" 27 27 #include "intel_dsi.h" 28 + #include "intel_fifo_underrun.h" 28 29 #include "intel_pipe_crc.h" 29 30 #include "intel_psr.h" 30 31 #include "intel_sprite.h" ··· 314 313 goto fail; 315 314 } 316 315 crtc->plane_ids_mask |= BIT(primary->id); 316 + 317 + intel_init_fifo_underrun_reporting(dev_priv, crtc, false); 317 318 318 319 for_each_sprite(dev_priv, pipe, sprite) { 319 320 struct intel_plane *plane;
+9 -7
drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
··· 14 14 static void intel_dump_crtc_timings(struct drm_i915_private *i915, 15 15 const struct drm_display_mode *mode) 16 16 { 17 - drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, " 18 - "type: 0x%x flags: 0x%x\n", 17 + drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, " 18 + "hd=%d hb=%d-%d hs=%d-%d ht=%d, " 19 + "vd=%d vb=%d-%d vs=%d-%d vt=%d, " 20 + "flags=0x%x\n", 19 21 mode->crtc_clock, 20 - mode->crtc_hdisplay, mode->crtc_hsync_start, 21 - mode->crtc_hsync_end, mode->crtc_htotal, 22 - mode->crtc_vdisplay, mode->crtc_vsync_start, 23 - mode->crtc_vsync_end, mode->crtc_vtotal, 24 - mode->type, mode->flags); 22 + mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end, 23 + mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal, 24 + mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end, 25 + mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal, 26 + mode->flags); 25 27 } 26 28 27 29 static void
+3 -2
drivers/gpu/drm/i915/display/intel_cursor.c
··· 532 532 skl_write_cursor_wm(plane, crtc_state); 533 533 534 534 if (plane_state) 535 - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0); 535 + intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, 536 + plane_state); 536 537 else 537 - intel_psr2_disable_plane_sel_fetch(plane, crtc_state); 538 + intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state); 538 539 539 540 if (plane->cursor.base != base || 540 541 plane->cursor.size != fbc_ctl ||
+82 -93
drivers/gpu/drm/i915/display/intel_ddi.c
··· 47 47 #include "intel_dkl_phy.h" 48 48 #include "intel_dkl_phy_regs.h" 49 49 #include "intel_dp.h" 50 + #include "intel_dp_aux.h" 50 51 #include "intel_dp_link_training.h" 51 52 #include "intel_dp_mst.h" 52 53 #include "intel_dpio_phy.h" ··· 68 67 #include "intel_sprite.h" 69 68 #include "intel_tc.h" 70 69 #include "intel_vdsc.h" 70 + #include "intel_vdsc_regs.h" 71 71 #include "intel_vrr.h" 72 72 #include "skl_scaler.h" 73 73 #include "skl_universal_plane.h" ··· 91 89 { 92 90 int level; 93 91 94 - level = intel_bios_hdmi_level_shift(encoder); 92 + level = intel_bios_hdmi_level_shift(encoder->devdata); 95 93 if (level < 0) 96 94 level = trans->hdmi_default_entry; 97 95 ··· 128 126 129 127 /* If we're boosting the current, set bit 31 of trans1 */ 130 128 if (has_iboost(dev_priv) && 131 - intel_bios_encoder_dp_boost_level(encoder->devdata)) 129 + intel_bios_dp_boost_level(encoder->devdata)) 132 130 iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; 133 131 134 132 for (i = 0; i < n_entries; i++) { ··· 160 158 161 159 /* If we're boosting the current, set bit 31 of trans1 */ 162 160 if (has_iboost(dev_priv) && 163 - intel_bios_encoder_hdmi_boost_level(encoder->devdata)) 161 + intel_bios_hdmi_boost_level(encoder->devdata)) 164 162 iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; 165 163 166 164 /* Entry 9 is for HDMI: */ ··· 646 644 struct drm_i915_private *dev_priv = to_i915(dev); 647 645 intel_wakeref_t wakeref; 648 646 int ret = 0; 649 - u32 tmp; 650 647 651 648 wakeref = intel_display_power_get_if_enabled(dev_priv, 652 649 intel_encoder->power_domain); 653 650 if (drm_WARN_ON(dev, !wakeref)) 654 651 return -ENXIO; 655 652 656 - tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); 657 - if (enable) 658 - tmp |= hdcp_mask; 659 - else 660 - tmp &= ~hdcp_mask; 661 - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp); 653 + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), 654 + hdcp_mask, enable ? 
hdcp_mask : 0); 662 655 intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); 663 656 return ret; 664 657 } ··· 945 948 main_link_aux_power_domain_get(dig_port, crtc_state); 946 949 } 947 950 948 - void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, 949 - const struct intel_crtc_state *crtc_state) 951 + void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, 952 + const struct intel_crtc_state *crtc_state) 950 953 { 951 954 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 952 955 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); ··· 954 957 enum phy phy = intel_port_to_phy(dev_priv, encoder->port); 955 958 u32 val; 956 959 957 - if (cpu_transcoder != TRANSCODER_EDP) { 958 - if (DISPLAY_VER(dev_priv) >= 13) 959 - val = TGL_TRANS_CLK_SEL_PORT(phy); 960 - else if (DISPLAY_VER(dev_priv) >= 12) 961 - val = TGL_TRANS_CLK_SEL_PORT(encoder->port); 962 - else 963 - val = TRANS_CLK_SEL_PORT(encoder->port); 960 + if (cpu_transcoder == TRANSCODER_EDP) 961 + return; 964 962 965 - intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); 966 - } 963 + if (DISPLAY_VER(dev_priv) >= 13) 964 + val = TGL_TRANS_CLK_SEL_PORT(phy); 965 + else if (DISPLAY_VER(dev_priv) >= 12) 966 + val = TGL_TRANS_CLK_SEL_PORT(encoder->port); 967 + else 968 + val = TRANS_CLK_SEL_PORT(encoder->port); 969 + 970 + intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); 967 971 } 968 972 969 - void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state) 973 + void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state) 970 974 { 971 975 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 972 976 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 977 + u32 val; 973 978 974 - if (cpu_transcoder != TRANSCODER_EDP) { 975 - if (DISPLAY_VER(dev_priv) >= 12) 976 - intel_de_write(dev_priv, 977 - TRANS_CLK_SEL(cpu_transcoder), 978 - TGL_TRANS_CLK_SEL_DISABLED); 979 - 
else 980 - intel_de_write(dev_priv, 981 - TRANS_CLK_SEL(cpu_transcoder), 982 - TRANS_CLK_SEL_DISABLED); 983 - } 979 + if (cpu_transcoder == TRANSCODER_EDP) 980 + return; 981 + 982 + if (DISPLAY_VER(dev_priv) >= 12) 983 + val = TGL_TRANS_CLK_SEL_DISABLED; 984 + else 985 + val = TRANS_CLK_SEL_DISABLED; 986 + 987 + intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); 984 988 } 985 989 986 990 static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, ··· 1007 1009 u8 iboost; 1008 1010 1009 1011 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 1010 - iboost = intel_bios_encoder_hdmi_boost_level(encoder->devdata); 1012 + iboost = intel_bios_hdmi_boost_level(encoder->devdata); 1011 1013 else 1012 - iboost = intel_bios_encoder_dp_boost_level(encoder->devdata); 1014 + iboost = intel_bios_dp_boost_level(encoder->devdata); 1013 1015 1014 1016 if (iboost == 0) { 1015 1017 const struct intel_ddi_buf_trans *trans; ··· 2198 2200 { 2199 2201 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2200 2202 struct intel_dp *intel_dp; 2201 - u32 val; 2202 2203 2203 2204 if (!crtc_state->fec_enable) 2204 2205 return; 2205 2206 2206 2207 intel_dp = enc_to_intel_dp(encoder); 2207 - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); 2208 - val |= DP_TP_CTL_FEC_ENABLE; 2209 - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); 2208 + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), 2209 + 0, DP_TP_CTL_FEC_ENABLE); 2210 2210 } 2211 2211 2212 2212 static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, ··· 2212 2216 { 2213 2217 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2214 2218 struct intel_dp *intel_dp; 2215 - u32 val; 2216 2219 2217 2220 if (!crtc_state->fec_enable) 2218 2221 return; 2219 2222 2220 2223 intel_dp = enc_to_intel_dp(encoder); 2221 - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); 2222 - val &= ~DP_TP_CTL_FEC_ENABLE; 2223 - 
intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); 2224 + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), 2225 + DP_TP_CTL_FEC_ENABLE, 0); 2224 2226 intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); 2225 2227 } 2226 2228 ··· 2381 2387 * 7.a Configure Transcoder Clock Select to direct the Port clock to the 2382 2388 * Transcoder. 2383 2389 */ 2384 - intel_ddi_enable_pipe_clock(encoder, crtc_state); 2390 + intel_ddi_enable_transcoder_clock(encoder, crtc_state); 2385 2391 2386 2392 if (HAS_DP20(dev_priv)) 2387 2393 intel_ddi_config_transcoder_dp2(encoder, crtc_state); ··· 2508 2514 intel_ddi_enable_fec(encoder, crtc_state); 2509 2515 2510 2516 if (!is_mst) 2511 - intel_ddi_enable_pipe_clock(encoder, crtc_state); 2517 + intel_ddi_enable_transcoder_clock(encoder, crtc_state); 2512 2518 2513 2519 intel_dsc_dp_pps_write(encoder, crtc_state); 2514 2520 } ··· 2550 2556 2551 2557 icl_program_mg_dp_mode(dig_port, crtc_state); 2552 2558 2553 - intel_ddi_enable_pipe_clock(encoder, crtc_state); 2559 + intel_ddi_enable_transcoder_clock(encoder, crtc_state); 2554 2560 2555 2561 dig_port->set_infoframes(encoder, 2556 2562 crtc_state->has_infoframe, ··· 2616 2622 wait = true; 2617 2623 } 2618 2624 2619 - if (intel_crtc_has_dp_encoder(crtc_state)) { 2620 - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); 2621 - val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); 2622 - val |= DP_TP_CTL_LINK_TRAIN_PAT1; 2623 - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); 2624 - } 2625 + if (intel_crtc_has_dp_encoder(crtc_state)) 2626 + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), 2627 + DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK, 2628 + DP_TP_CTL_LINK_TRAIN_PAT1); 2625 2629 2626 2630 /* Disable FEC in DP Sink */ 2627 2631 intel_ddi_disable_fec_state(encoder, crtc_state); ··· 2652 2660 if (DISPLAY_VER(dev_priv) >= 12) { 2653 2661 if (is_mst) { 2654 2662 enum transcoder cpu_transcoder = 
old_crtc_state->cpu_transcoder; 2655 - u32 val; 2656 2663 2657 - val = intel_de_read(dev_priv, 2658 - TRANS_DDI_FUNC_CTL(cpu_transcoder)); 2659 - val &= ~(TGL_TRANS_DDI_PORT_MASK | 2660 - TRANS_DDI_MODE_SELECT_MASK); 2661 - intel_de_write(dev_priv, 2662 - TRANS_DDI_FUNC_CTL(cpu_transcoder), 2663 - val); 2664 + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), 2665 + TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK, 2666 + 0); 2664 2667 } 2665 2668 } else { 2666 2669 if (!is_mst) 2667 - intel_ddi_disable_pipe_clock(old_crtc_state); 2670 + intel_ddi_disable_transcoder_clock(old_crtc_state); 2668 2671 } 2669 2672 2670 2673 intel_disable_ddi_buf(encoder, old_crtc_state); ··· 2670 2683 * transcoder" 2671 2684 */ 2672 2685 if (DISPLAY_VER(dev_priv) >= 12) 2673 - intel_ddi_disable_pipe_clock(old_crtc_state); 2686 + intel_ddi_disable_transcoder_clock(old_crtc_state); 2674 2687 2675 2688 intel_pps_vdd_on(intel_dp); 2676 2689 intel_pps_off(intel_dp); ··· 2696 2709 old_crtc_state, old_conn_state); 2697 2710 2698 2711 if (DISPLAY_VER(dev_priv) < 12) 2699 - intel_ddi_disable_pipe_clock(old_crtc_state); 2712 + intel_ddi_disable_transcoder_clock(old_crtc_state); 2700 2713 2701 2714 intel_disable_ddi_buf(encoder, old_crtc_state); 2702 2715 2703 2716 if (DISPLAY_VER(dev_priv) >= 12) 2704 - intel_ddi_disable_pipe_clock(old_crtc_state); 2717 + intel_ddi_disable_transcoder_clock(old_crtc_state); 2705 2718 2706 2719 intel_display_power_put(dev_priv, 2707 2720 dig_port->ddi_io_power_domain, ··· 3209 3222 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 3210 3223 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3211 3224 enum port port = encoder->port; 3212 - u32 val; 3213 3225 3214 - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); 3215 - val &= ~DP_TP_CTL_LINK_TRAIN_MASK; 3216 - val |= DP_TP_CTL_LINK_TRAIN_IDLE; 3217 - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); 3226 + intel_de_rmw(dev_priv, 
dp_tp_ctl_reg(encoder, crtc_state), 3227 + DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE); 3218 3228 3219 3229 /* 3220 3230 * Until TGL on PORT_A we can have only eDP in SST mode. There the only ··· 4289 4305 intel_bios_encoder_supports_hdmi(devdata); 4290 4306 init_dp = intel_bios_encoder_supports_dp(devdata); 4291 4307 4292 - if (intel_bios_is_lspcon_present(dev_priv, port)) { 4308 + if (intel_bios_encoder_is_lspcon(devdata)) { 4293 4309 /* 4294 4310 * Lspcon device needs to be driven with DP connector 4295 4311 * with special detection sequence. So make sure DP ··· 4484 4500 intel_de_read(dev_priv, DDI_BUF_CTL(port)) 4485 4501 & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); 4486 4502 4487 - if (intel_bios_is_lane_reversal_needed(dev_priv, port)) 4503 + if (intel_bios_encoder_lane_reversal(devdata)) 4488 4504 dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL; 4489 4505 4490 4506 dig_port->dp.output_reg = INVALID_MMIO_REG; 4491 4507 dig_port->max_lanes = intel_ddi_max_lanes(dig_port); 4492 - dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); 4508 + dig_port->aux_ch = intel_dp_aux_ch(encoder); 4493 4509 4494 4510 if (intel_phy_is_tc(dev_priv, phy)) { 4495 4511 bool is_legacy = ··· 4505 4521 drm_WARN_ON(&dev_priv->drm, port > PORT_I); 4506 4522 dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port); 4507 4523 4524 + if (DISPLAY_VER(dev_priv) >= 11) { 4525 + if (intel_phy_is_tc(dev_priv, phy)) 4526 + dig_port->connected = intel_tc_port_connected; 4527 + else 4528 + dig_port->connected = lpt_digital_port_connected; 4529 + } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 4530 + dig_port->connected = bdw_digital_port_connected; 4531 + } else if (DISPLAY_VER(dev_priv) == 9) { 4532 + dig_port->connected = lpt_digital_port_connected; 4533 + } else if (IS_BROADWELL(dev_priv)) { 4534 + if (port == PORT_A) 4535 + dig_port->connected = bdw_digital_port_connected; 4536 + else 4537 + dig_port->connected = 
lpt_digital_port_connected; 4538 + } else if (IS_HASWELL(dev_priv)) { 4539 + if (port == PORT_A) 4540 + dig_port->connected = hsw_digital_port_connected; 4541 + else 4542 + dig_port->connected = lpt_digital_port_connected; 4543 + } 4544 + 4545 + intel_infoframe_init(dig_port); 4546 + 4508 4547 if (init_dp) { 4509 4548 if (!intel_ddi_init_dp_connector(dig_port)) 4510 4549 goto err; ··· 4538 4531 encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv); 4539 4532 } 4540 4533 4541 - /* In theory we don't need the encoder->type check, but leave it just in 4542 - * case we have some really bad VBTs... */ 4534 + /* 4535 + * In theory we don't need the encoder->type check, 4536 + * but leave it just in case we have some really bad VBTs... 4537 + */ 4543 4538 if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { 4544 4539 if (!intel_ddi_init_hdmi_connector(dig_port)) 4545 4540 goto err; 4546 4541 } 4547 - 4548 - if (DISPLAY_VER(dev_priv) >= 11) { 4549 - if (intel_phy_is_tc(dev_priv, phy)) 4550 - dig_port->connected = intel_tc_port_connected; 4551 - else 4552 - dig_port->connected = lpt_digital_port_connected; 4553 - } else if (DISPLAY_VER(dev_priv) >= 8) { 4554 - if (port == PORT_A || IS_GEMINILAKE(dev_priv) || 4555 - IS_BROXTON(dev_priv)) 4556 - dig_port->connected = bdw_digital_port_connected; 4557 - else 4558 - dig_port->connected = lpt_digital_port_connected; 4559 - } else { 4560 - if (port == PORT_A) 4561 - dig_port->connected = hsw_digital_port_connected; 4562 - else 4563 - dig_port->connected = lpt_digital_port_connected; 4564 - } 4565 - 4566 - intel_infoframe_init(dig_port); 4567 4542 4568 4543 return; 4569 4544
+3 -3
drivers/gpu/drm/i915/display/intel_ddi.h
··· 52 52 void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, 53 53 const struct intel_crtc_state *crtc_state); 54 54 void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state); 55 - void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, 56 - const struct intel_crtc_state *crtc_state); 57 - void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); 55 + void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, 56 + const struct intel_crtc_state *crtc_state); 57 + void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state); 58 58 void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, 59 59 const struct drm_connector_state *conn_state); 60 60 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+225 -374
drivers/gpu/drm/i915/display/intel_display.c
··· 55 55 #include "i915_reg.h" 56 56 #include "i915_utils.h" 57 57 #include "i9xx_plane.h" 58 + #include "i9xx_wm.h" 58 59 #include "icl_dsi.h" 59 60 #include "intel_acpi.h" 60 61 #include "intel_atomic.h" ··· 95 94 #include "intel_hotplug.h" 96 95 #include "intel_hti.h" 97 96 #include "intel_lvds.h" 97 + #include "intel_lvds_regs.h" 98 98 #include "intel_modeset_setup.h" 99 99 #include "intel_modeset_verify.h" 100 100 #include "intel_overlay.h" ··· 116 114 #include "intel_tv.h" 117 115 #include "intel_vblank.h" 118 116 #include "intel_vdsc.h" 117 + #include "intel_vdsc_regs.h" 119 118 #include "intel_vga.h" 120 119 #include "intel_vrr.h" 120 + #include "intel_wm.h" 121 121 #include "skl_scaler.h" 122 122 #include "skl_universal_plane.h" 123 123 #include "skl_watermark.h" ··· 133 129 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); 134 130 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state); 135 131 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); 136 - 137 - /** 138 - * intel_update_watermarks - update FIFO watermark values based on current modes 139 - * @dev_priv: i915 device 140 - * 141 - * Calculate watermark values for the various WM regs based on current mode 142 - * and plane configuration. 143 - * 144 - * There are several cases to deal with here: 145 - * - normal (i.e. non-self-refresh) 146 - * - self-refresh (SR) mode 147 - * - lines are large relative to FIFO size (buffer can hold up to 2) 148 - * - lines are small relative to FIFO size (buffer can hold more than 2 149 - * lines), so need to account for TLB latency 150 - * 151 - * The normal calculation is: 152 - * watermark = dotclock * bytes per pixel * latency 153 - * where latency is platform & configuration dependent (we assume pessimal 154 - * values here). 
155 - * 156 - * The SR calculation is: 157 - * watermark = (trunc(latency/line time)+1) * surface width * 158 - * bytes per pixel 159 - * where 160 - * line time = htotal / dotclock 161 - * surface width = hdisplay for normal plane and 64 for cursor 162 - * and latency is assumed to be high, as above. 163 - * 164 - * The final value programmed to the register should always be rounded up, 165 - * and include an extra 2 entries to account for clock crossings. 166 - * 167 - * We don't use the sprite, so we can ignore that. And on Crestline we have 168 - * to set the non-SR watermarks to 8. 169 - */ 170 - void intel_update_watermarks(struct drm_i915_private *dev_priv) 171 - { 172 - if (dev_priv->display.funcs.wm->update_wm) 173 - dev_priv->display.funcs.wm->update_wm(dev_priv); 174 - } 175 - 176 - static int intel_compute_pipe_wm(struct intel_atomic_state *state, 177 - struct intel_crtc *crtc) 178 - { 179 - struct drm_i915_private *dev_priv = to_i915(state->base.dev); 180 - if (dev_priv->display.funcs.wm->compute_pipe_wm) 181 - return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc); 182 - return 0; 183 - } 184 - 185 - static int intel_compute_intermediate_wm(struct intel_atomic_state *state, 186 - struct intel_crtc *crtc) 187 - { 188 - struct drm_i915_private *dev_priv = to_i915(state->base.dev); 189 - if (!dev_priv->display.funcs.wm->compute_intermediate_wm) 190 - return 0; 191 - if (drm_WARN_ON(&dev_priv->drm, 192 - !dev_priv->display.funcs.wm->compute_pipe_wm)) 193 - return 0; 194 - return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc); 195 - } 196 - 197 - static bool intel_initial_watermarks(struct intel_atomic_state *state, 198 - struct intel_crtc *crtc) 199 - { 200 - struct drm_i915_private *dev_priv = to_i915(state->base.dev); 201 - if (dev_priv->display.funcs.wm->initial_watermarks) { 202 - dev_priv->display.funcs.wm->initial_watermarks(state, crtc); 203 - return true; 204 - } 205 - return false; 206 - } 207 - 208 - static void 
intel_atomic_update_watermarks(struct intel_atomic_state *state, 209 - struct intel_crtc *crtc) 210 - { 211 - struct drm_i915_private *dev_priv = to_i915(state->base.dev); 212 - if (dev_priv->display.funcs.wm->atomic_update_watermarks) 213 - dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc); 214 - } 215 - 216 - static void intel_optimize_watermarks(struct intel_atomic_state *state, 217 - struct intel_crtc *crtc) 218 - { 219 - struct drm_i915_private *dev_priv = to_i915(state->base.dev); 220 - if (dev_priv->display.funcs.wm->optimize_watermarks) 221 - dev_priv->display.funcs.wm->optimize_watermarks(state, crtc); 222 - } 223 - 224 - static int intel_compute_global_watermarks(struct intel_atomic_state *state) 225 - { 226 - struct drm_i915_private *dev_priv = to_i915(state->base.dev); 227 - if (dev_priv->display.funcs.wm->compute_global_watermarks) 228 - return dev_priv->display.funcs.wm->compute_global_watermarks(state); 229 - return 0; 230 - } 231 132 232 133 /* returns HPLL frequency in kHz */ 233 134 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) ··· 202 293 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) 203 294 { 204 295 if (enable) 205 - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 206 - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS); 296 + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 297 + 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS); 207 298 else 208 - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 209 - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS)); 299 + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 300 + DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0); 210 301 } 211 302 212 303 /* Wa_2006604312:icl,ehl */ ··· 215 306 bool enable) 216 307 { 217 308 if (enable) 218 - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 219 - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS); 309 + intel_de_rmw(dev_priv, 
CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS); 220 310 else 221 - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 222 - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS); 311 + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0); 223 312 } 224 313 225 314 /* Wa_1604331009:icl,jsl,ehl */ ··· 302 395 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 303 396 304 397 /* Wait for the Pipe State to go off */ 305 - if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder), 306 - PIPECONF_STATE_ENABLE, 100)) 398 + if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder), 399 + TRANSCONF_STATE_ENABLE, 100)) 307 400 drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n"); 308 401 } else { 309 402 intel_wait_for_pipe_scanline_stopped(crtc); ··· 324 417 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 325 418 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 326 419 if (wakeref) { 327 - u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); 328 - cur_state = !!(val & PIPECONF_ENABLE); 420 + u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); 421 + cur_state = !!(val & TRANSCONF_ENABLE); 329 422 330 423 intel_display_power_put(dev_priv, power_domain, wakeref); 331 424 } else { ··· 437 530 intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe), 438 531 0, PIPE_ARB_USE_PROG_SLOTS); 439 532 440 - reg = PIPECONF(cpu_transcoder); 533 + reg = TRANSCONF(cpu_transcoder); 441 534 val = intel_de_read(dev_priv, reg); 442 - if (val & PIPECONF_ENABLE) { 535 + if (val & TRANSCONF_ENABLE) { 443 536 /* we keep both pipes enabled on 830 */ 444 537 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv)); 445 538 return; 446 539 } 447 540 448 - intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE); 541 + intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE); 449 542 intel_de_posting_read(dev_priv, reg); 450 543 451 544 /* ··· 476 569 */ 477 570 assert_planes_disabled(crtc); 478 571 479 - reg = PIPECONF(cpu_transcoder); 
572 + reg = TRANSCONF(cpu_transcoder); 480 573 val = intel_de_read(dev_priv, reg); 481 - if ((val & PIPECONF_ENABLE) == 0) 574 + if ((val & TRANSCONF_ENABLE) == 0) 482 575 return; 483 576 484 577 /* ··· 486 579 * so best keep it disabled when not needed. 487 580 */ 488 581 if (old_crtc_state->double_wide) 489 - val &= ~PIPECONF_DOUBLE_WIDE; 582 + val &= ~TRANSCONF_DOUBLE_WIDE; 490 583 491 584 /* Don't disable pipe or pipe PLLs if needed */ 492 585 if (!IS_I830(dev_priv)) 493 - val &= ~PIPECONF_ENABLE; 586 + val &= ~TRANSCONF_ENABLE; 494 587 495 588 if (DISPLAY_VER(dev_priv) >= 14) 496 589 intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), ··· 500 593 FECSTALL_DIS_DPTSTREAM_DPTTG, 0); 501 594 502 595 intel_de_write(dev_priv, reg, val); 503 - if ((val & PIPECONF_ENABLE) == 0) 596 + if ((val & TRANSCONF_ENABLE) == 0) 504 597 intel_wait_for_pipe_off(old_crtc_state); 505 598 } 506 599 ··· 1159 1252 intel_atomic_get_old_crtc_state(state, crtc); 1160 1253 const struct intel_crtc_state *new_crtc_state = 1161 1254 intel_atomic_get_new_crtc_state(state, crtc); 1162 - u8 update_planes = new_crtc_state->update_planes; 1255 + u8 disable_async_flip_planes = old_crtc_state->async_flip_planes & 1256 + ~new_crtc_state->async_flip_planes; 1163 1257 const struct intel_plane_state *old_plane_state; 1164 1258 struct intel_plane *plane; 1165 1259 bool need_vbl_wait = false; ··· 1169 1261 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { 1170 1262 if (plane->need_async_flip_disable_wa && 1171 1263 plane->pipe == crtc->pipe && 1172 - update_planes & BIT(plane->id)) { 1264 + disable_async_flip_planes & BIT(plane->id)) { 1173 1265 /* 1174 1266 * Apart from the async flip bit we want to 1175 1267 * preserve the old state for the plane. ··· 1286 1378 * WA for platforms where async address update enable bit 1287 1379 * is double buffered and only latched at start of vblank. 
1288 1380 */ 1289 - if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip) 1381 + if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes) 1290 1382 intel_crtc_async_flip_disable_wa(state, crtc); 1291 1383 } 1292 1384 ··· 1709 1801 enum transcoder transcoder = crtc_state->cpu_transcoder; 1710 1802 i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) : 1711 1803 CHICKEN_TRANS(transcoder); 1712 - u32 val; 1713 1804 1714 - val = intel_de_read(dev_priv, reg); 1715 - val &= ~HSW_FRAME_START_DELAY_MASK; 1716 - val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 1717 - intel_de_write(dev_priv, reg, val); 1805 + intel_de_rmw(dev_priv, reg, 1806 + HSW_FRAME_START_DELAY_MASK, 1807 + HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); 1718 1808 } 1719 1809 1720 1810 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, ··· 1752 1846 intel_set_transcoder_timings(crtc_state); 1753 1847 1754 1848 if (cpu_transcoder != TRANSCODER_EDP) 1755 - intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), 1849 + intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder), 1756 1850 crtc_state->pixel_multiplier - 1); 1757 1851 1758 1852 hsw_set_frame_start_delay(crtc_state); ··· 2725 2819 enum pipe pipe = crtc->pipe; 2726 2820 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2727 2821 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2728 - u32 crtc_vtotal, crtc_vblank_end; 2822 + u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; 2729 2823 int vsyncshift = 0; 2730 2824 2731 2825 /* We need to be careful not to changed the adjusted mode, for otherwise 2732 2826 * the hw state checker will get angry at the mismatch. 
*/ 2827 + crtc_vdisplay = adjusted_mode->crtc_vdisplay; 2733 2828 crtc_vtotal = adjusted_mode->crtc_vtotal; 2829 + crtc_vblank_start = adjusted_mode->crtc_vblank_start; 2734 2830 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 2735 2831 2736 2832 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { ··· 2749 2841 vsyncshift += adjusted_mode->crtc_htotal; 2750 2842 } 2751 2843 2844 + /* 2845 + * VBLANK_START no longer works on ADL+, instead we must use 2846 + * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start. 2847 + */ 2848 + if (DISPLAY_VER(dev_priv) >= 13) { 2849 + intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder), 2850 + crtc_vblank_start - crtc_vdisplay); 2851 + 2852 + /* 2853 + * VBLANK_START not used by hw, just clear it 2854 + * to make it stand out in register dumps. 2855 + */ 2856 + crtc_vblank_start = 1; 2857 + } 2858 + 2752 2859 if (DISPLAY_VER(dev_priv) > 3) 2753 - intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder), 2754 - vsyncshift); 2860 + intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder), 2861 + vsyncshift); 2755 2862 2756 - intel_de_write(dev_priv, HTOTAL(cpu_transcoder), 2757 - (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); 2758 - intel_de_write(dev_priv, HBLANK(cpu_transcoder), 2759 - (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); 2760 - intel_de_write(dev_priv, HSYNC(cpu_transcoder), 2761 - (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); 2863 + intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder), 2864 + HACTIVE(adjusted_mode->crtc_hdisplay - 1) | 2865 + HTOTAL(adjusted_mode->crtc_htotal - 1)); 2866 + intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder), 2867 + HBLANK_START(adjusted_mode->crtc_hblank_start - 1) | 2868 + HBLANK_END(adjusted_mode->crtc_hblank_end - 1)); 2869 + intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder), 2870 + HSYNC_START(adjusted_mode->crtc_hsync_start - 1) | 2871 + 
HSYNC_END(adjusted_mode->crtc_hsync_end - 1)); 2762 2872 2763 - intel_de_write(dev_priv, VTOTAL(cpu_transcoder), 2764 - (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16)); 2765 - intel_de_write(dev_priv, VBLANK(cpu_transcoder), 2766 - (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16)); 2767 - intel_de_write(dev_priv, VSYNC(cpu_transcoder), 2768 - (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); 2873 + intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), 2874 + VACTIVE(crtc_vdisplay - 1) | 2875 + VTOTAL(crtc_vtotal - 1)); 2876 + intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), 2877 + VBLANK_START(crtc_vblank_start - 1) | 2878 + VBLANK_END(crtc_vblank_end - 1)); 2879 + intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder), 2880 + VSYNC_START(adjusted_mode->crtc_vsync_start - 1) | 2881 + VSYNC_END(adjusted_mode->crtc_vsync_end - 1)); 2769 2882 2770 2883 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 2771 2884 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is ··· 2794 2865 * bits. 
*/ 2795 2866 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 2796 2867 (pipe == PIPE_B || pipe == PIPE_C)) 2797 - intel_de_write(dev_priv, VTOTAL(pipe), 2798 - intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); 2799 - 2868 + intel_de_write(dev_priv, TRANS_VTOTAL(pipe), 2869 + VACTIVE(crtc_vdisplay - 1) | 2870 + VTOTAL(crtc_vtotal - 1)); 2800 2871 } 2801 2872 2802 2873 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) ··· 2824 2895 2825 2896 if (DISPLAY_VER(dev_priv) >= 9 || 2826 2897 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2827 - return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; 2898 + return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW; 2828 2899 else 2829 - return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; 2900 + return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK; 2830 2901 } 2831 2902 2832 2903 static void intel_get_transcoder_timings(struct intel_crtc *crtc, ··· 2835 2906 struct drm_device *dev = crtc->base.dev; 2836 2907 struct drm_i915_private *dev_priv = to_i915(dev); 2837 2908 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 2909 + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 2838 2910 u32 tmp; 2839 2911 2840 - tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder)); 2841 - pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 2842 - pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 2912 + tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder)); 2913 + adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1; 2914 + adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1; 2843 2915 2844 2916 if (!transcoder_is_dsi(cpu_transcoder)) { 2845 - tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder)); 2846 - pipe_config->hw.adjusted_mode.crtc_hblank_start = 2847 - (tmp & 0xffff) 
+ 1; 2848 - pipe_config->hw.adjusted_mode.crtc_hblank_end = 2849 - ((tmp >> 16) & 0xffff) + 1; 2917 + tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder)); 2918 + adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1; 2919 + adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1; 2850 2920 } 2851 - tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder)); 2852 - pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 2853 - pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 2854 2921 2855 - tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder)); 2856 - pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 2857 - pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 2922 + tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder)); 2923 + adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1; 2924 + adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1; 2858 2925 2926 + tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)); 2927 + adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1; 2928 + adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1; 2929 + 2930 + /* FIXME TGL+ DSI transcoders have this! 
*/ 2859 2931 if (!transcoder_is_dsi(cpu_transcoder)) { 2860 - tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder)); 2861 - pipe_config->hw.adjusted_mode.crtc_vblank_start = 2862 - (tmp & 0xffff) + 1; 2863 - pipe_config->hw.adjusted_mode.crtc_vblank_end = 2864 - ((tmp >> 16) & 0xffff) + 1; 2932 + tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)); 2933 + adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1; 2934 + adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1; 2865 2935 } 2866 - tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder)); 2867 - pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 2868 - pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 2936 + tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)); 2937 + adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1; 2938 + adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1; 2869 2939 2870 2940 if (intel_pipe_is_interlaced(pipe_config)) { 2871 - pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 2872 - pipe_config->hw.adjusted_mode.crtc_vtotal += 1; 2873 - pipe_config->hw.adjusted_mode.crtc_vblank_end += 1; 2941 + adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE; 2942 + adjusted_mode->crtc_vtotal += 1; 2943 + adjusted_mode->crtc_vblank_end += 1; 2874 2944 } 2945 + 2946 + if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder)) 2947 + adjusted_mode->crtc_vblank_start = 2948 + adjusted_mode->crtc_vdisplay + 2949 + intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder)); 2875 2950 } 2876 2951 2877 2952 static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) ··· 2915 2982 { 2916 2983 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2917 2984 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2918 - u32 pipeconf = 0; 2985 + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 2986 
+ u32 val = 0; 2919 2987 2920 2988 /* 2921 2989 * - We keep both pipes enabled on 830 ··· 2924 2990 * - During fastset the pipe is already enabled and must remain so 2925 2991 */ 2926 2992 if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state)) 2927 - pipeconf |= PIPECONF_ENABLE; 2993 + val |= TRANSCONF_ENABLE; 2928 2994 2929 2995 if (crtc_state->double_wide) 2930 - pipeconf |= PIPECONF_DOUBLE_WIDE; 2996 + val |= TRANSCONF_DOUBLE_WIDE; 2931 2997 2932 2998 /* only g4x and later have fancy bpc/dither controls */ 2933 2999 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2934 3000 IS_CHERRYVIEW(dev_priv)) { 2935 3001 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 2936 3002 if (crtc_state->dither && crtc_state->pipe_bpp != 30) 2937 - pipeconf |= PIPECONF_DITHER_EN | 2938 - PIPECONF_DITHER_TYPE_SP; 3003 + val |= TRANSCONF_DITHER_EN | 3004 + TRANSCONF_DITHER_TYPE_SP; 2939 3005 2940 3006 switch (crtc_state->pipe_bpp) { 2941 3007 default: ··· 2943 3009 MISSING_CASE(crtc_state->pipe_bpp); 2944 3010 fallthrough; 2945 3011 case 18: 2946 - pipeconf |= PIPECONF_BPC_6; 3012 + val |= TRANSCONF_BPC_6; 2947 3013 break; 2948 3014 case 24: 2949 - pipeconf |= PIPECONF_BPC_8; 3015 + val |= TRANSCONF_BPC_8; 2950 3016 break; 2951 3017 case 30: 2952 - pipeconf |= PIPECONF_BPC_10; 3018 + val |= TRANSCONF_BPC_10; 2953 3019 break; 2954 3020 } 2955 3021 } ··· 2957 3023 if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 2958 3024 if (DISPLAY_VER(dev_priv) < 4 || 2959 3025 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 2960 - pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 3026 + val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION; 2961 3027 else 2962 - pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 3028 + val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT; 2963 3029 } else { 2964 - pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE; 3030 + val |= TRANSCONF_INTERLACE_PROGRESSIVE; 2965 3031 } 2966 3032 2967 3033 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 
2968 3034 crtc_state->limited_color_range) 2969 - pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 3035 + val |= TRANSCONF_COLOR_RANGE_SELECT; 2970 3036 2971 - pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 3037 + val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 2972 3038 2973 - pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3039 + val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 2974 3040 2975 - intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf); 2976 - intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe)); 3041 + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); 3042 + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 2977 3043 } 2978 3044 2979 3045 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) ··· 3132 3198 3133 3199 ret = false; 3134 3200 3135 - tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); 3136 - if (!(tmp & PIPECONF_ENABLE)) 3201 + tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); 3202 + if (!(tmp & TRANSCONF_ENABLE)) 3137 3203 goto out; 3138 3204 3139 3205 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 3140 3206 IS_CHERRYVIEW(dev_priv)) { 3141 - switch (tmp & PIPECONF_BPC_MASK) { 3142 - case PIPECONF_BPC_6: 3207 + switch (tmp & TRANSCONF_BPC_MASK) { 3208 + case TRANSCONF_BPC_6: 3143 3209 pipe_config->pipe_bpp = 18; 3144 3210 break; 3145 - case PIPECONF_BPC_8: 3211 + case TRANSCONF_BPC_8: 3146 3212 pipe_config->pipe_bpp = 24; 3147 3213 break; 3148 - case PIPECONF_BPC_10: 3214 + case TRANSCONF_BPC_10: 3149 3215 pipe_config->pipe_bpp = 30; 3150 3216 break; 3151 3217 default: ··· 3155 3221 } 3156 3222 3157 3223 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 3158 - (tmp & PIPECONF_COLOR_RANGE_SELECT)) 3224 + (tmp & TRANSCONF_COLOR_RANGE_SELECT)) 3159 3225 pipe_config->limited_color_range = true; 3160 3226 3161 - pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp); 3227 + pipe_config->gamma_mode = 
REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); 3162 3228 3163 - pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1; 3229 + pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3164 3230 3165 3231 if (IS_CHERRYVIEW(dev_priv)) 3166 3232 pipe_config->cgm_mode = intel_de_read(dev_priv, ··· 3170 3236 intel_color_get_config(pipe_config); 3171 3237 3172 3238 if (DISPLAY_VER(dev_priv) < 4) 3173 - pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 3239 + pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; 3174 3240 3175 3241 intel_get_transcoder_timings(crtc, pipe_config); 3176 3242 intel_get_pipe_src_size(crtc, pipe_config); ··· 3240 3306 { 3241 3307 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3242 3308 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3243 - enum pipe pipe = crtc->pipe; 3309 + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 3244 3310 u32 val = 0; 3245 3311 3246 3312 /* ··· 3248 3314 * - During fastset the pipe is already enabled and must remain so 3249 3315 */ 3250 3316 if (!intel_crtc_needs_modeset(crtc_state)) 3251 - val |= PIPECONF_ENABLE; 3317 + val |= TRANSCONF_ENABLE; 3252 3318 3253 3319 switch (crtc_state->pipe_bpp) { 3254 3320 default: ··· 3256 3322 MISSING_CASE(crtc_state->pipe_bpp); 3257 3323 fallthrough; 3258 3324 case 18: 3259 - val |= PIPECONF_BPC_6; 3325 + val |= TRANSCONF_BPC_6; 3260 3326 break; 3261 3327 case 24: 3262 - val |= PIPECONF_BPC_8; 3328 + val |= TRANSCONF_BPC_8; 3263 3329 break; 3264 3330 case 30: 3265 - val |= PIPECONF_BPC_10; 3331 + val |= TRANSCONF_BPC_10; 3266 3332 break; 3267 3333 case 36: 3268 - val |= PIPECONF_BPC_12; 3334 + val |= TRANSCONF_BPC_12; 3269 3335 break; 3270 3336 } 3271 3337 3272 3338 if (crtc_state->dither) 3273 - val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; 3339 + val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3274 3340 3275 3341 if 
(crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 3276 - val |= PIPECONF_INTERLACE_IF_ID_ILK; 3342 + val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3277 3343 else 3278 - val |= PIPECONF_INTERLACE_PF_PD_ILK; 3344 + val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3279 3345 3280 3346 /* 3281 3347 * This would end up with an odd purple hue over ··· 3286 3352 3287 3353 if (crtc_state->limited_color_range && 3288 3354 !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 3289 - val |= PIPECONF_COLOR_RANGE_SELECT; 3355 + val |= TRANSCONF_COLOR_RANGE_SELECT; 3290 3356 3291 3357 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3292 - val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; 3358 + val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709; 3293 3359 3294 - val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); 3360 + val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); 3295 3361 3296 - val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3297 - val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); 3362 + val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); 3363 + val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); 3298 3364 3299 - intel_de_write(dev_priv, PIPECONF(pipe), val); 3300 - intel_de_posting_read(dev_priv, PIPECONF(pipe)); 3365 + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); 3366 + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 3301 3367 } 3302 3368 3303 3369 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) ··· 3312 3378 * - During fastset the pipe is already enabled and must remain so 3313 3379 */ 3314 3380 if (!intel_crtc_needs_modeset(crtc_state)) 3315 - val |= PIPECONF_ENABLE; 3381 + val |= TRANSCONF_ENABLE; 3316 3382 3317 3383 if (IS_HASWELL(dev_priv) && crtc_state->dither) 3318 - val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; 3384 + val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; 3319 3385 3320 3386 if (crtc_state->hw.adjusted_mode.flags & 
DRM_MODE_FLAG_INTERLACE) 3321 - val |= PIPECONF_INTERLACE_IF_ID_ILK; 3387 + val |= TRANSCONF_INTERLACE_IF_ID_ILK; 3322 3388 else 3323 - val |= PIPECONF_INTERLACE_PF_PD_ILK; 3389 + val |= TRANSCONF_INTERLACE_PF_PD_ILK; 3324 3390 3325 3391 if (IS_HASWELL(dev_priv) && 3326 3392 crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 3327 - val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; 3393 + val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW; 3328 3394 3329 - intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); 3330 - intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder)); 3395 + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); 3396 + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 3331 3397 } 3332 3398 3333 3399 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) ··· 3552 3618 pipe_config->shared_dpll = NULL; 3553 3619 3554 3620 ret = false; 3555 - tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); 3556 - if (!(tmp & PIPECONF_ENABLE)) 3621 + tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); 3622 + if (!(tmp & TRANSCONF_ENABLE)) 3557 3623 goto out; 3558 3624 3559 - switch (tmp & PIPECONF_BPC_MASK) { 3560 - case PIPECONF_BPC_6: 3625 + switch (tmp & TRANSCONF_BPC_MASK) { 3626 + case TRANSCONF_BPC_6: 3561 3627 pipe_config->pipe_bpp = 18; 3562 3628 break; 3563 - case PIPECONF_BPC_8: 3629 + case TRANSCONF_BPC_8: 3564 3630 pipe_config->pipe_bpp = 24; 3565 3631 break; 3566 - case PIPECONF_BPC_10: 3632 + case TRANSCONF_BPC_10: 3567 3633 pipe_config->pipe_bpp = 30; 3568 3634 break; 3569 - case PIPECONF_BPC_12: 3635 + case TRANSCONF_BPC_12: 3570 3636 pipe_config->pipe_bpp = 36; 3571 3637 break; 3572 3638 default: 3573 3639 break; 3574 3640 } 3575 3641 3576 - if (tmp & PIPECONF_COLOR_RANGE_SELECT) 3642 + if (tmp & TRANSCONF_COLOR_RANGE_SELECT) 3577 3643 pipe_config->limited_color_range = true; 3578 3644 3579 - switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { 3580 - case PIPECONF_OUTPUT_COLORSPACE_YUV601: 3581 - case 
PIPECONF_OUTPUT_COLORSPACE_YUV709: 3645 + switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) { 3646 + case TRANSCONF_OUTPUT_COLORSPACE_YUV601: 3647 + case TRANSCONF_OUTPUT_COLORSPACE_YUV709: 3582 3648 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 3583 3649 break; 3584 3650 default: ··· 3586 3652 break; 3587 3653 } 3588 3654 3589 - pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp); 3655 + pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp); 3590 3656 3591 - pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1; 3657 + pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; 3592 3658 3593 - pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp); 3659 + pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp); 3594 3660 3595 3661 pipe_config->csc_mode = intel_de_read(dev_priv, 3596 3662 PIPE_CSC_MODE(crtc->pipe)); ··· 3867 3933 pipe_config->pch_pfit.force_thru = true; 3868 3934 } 3869 3935 3870 - tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder)); 3936 + tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); 3871 3937 3872 - return tmp & PIPECONF_ENABLE; 3938 + return tmp & TRANSCONF_ENABLE; 3873 3939 } 3874 3940 3875 3941 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, ··· 3973 4039 3974 4040 if (IS_HASWELL(dev_priv)) { 3975 4041 u32 tmp = intel_de_read(dev_priv, 3976 - PIPECONF(pipe_config->cpu_transcoder)); 4042 + TRANSCONF(pipe_config->cpu_transcoder)); 3977 4043 3978 - if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) 4044 + if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW) 3979 4045 pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; 3980 4046 else 3981 4047 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; ··· 4024 4090 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 4025 4091 pipe_config->pixel_multiplier = 4026 4092 
intel_de_read(dev_priv, 4027 - PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 4093 + TRANS_MULT(pipe_config->cpu_transcoder)) + 1; 4028 4094 } else { 4029 4095 pipe_config->pixel_multiplier = 1; 4030 4096 } ··· 5373 5439 } 5374 5440 } 5375 5441 5442 + /* Returns the length up to and including the last differing byte */ 5443 + static size_t 5444 + memcmp_diff_len(const u8 *a, const u8 *b, size_t len) 5445 + { 5446 + int i; 5447 + 5448 + for (i = len - 1; i >= 0; i--) { 5449 + if (a[i] != b[i]) 5450 + return i + 1; 5451 + } 5452 + 5453 + return 0; 5454 + } 5455 + 5376 5456 static void 5377 5457 pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, 5378 5458 bool fastset, const char *name, ··· 5396 5448 if (!drm_debug_enabled(DRM_UT_KMS)) 5397 5449 return; 5398 5450 5451 + /* only dump up to the last difference */ 5452 + len = memcmp_diff_len(a, b, len); 5453 + 5399 5454 drm_dbg_kms(&dev_priv->drm, 5400 5455 "fastset mismatch in %s buffer\n", name); 5401 5456 print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE, ··· 5406 5455 print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE, 5407 5456 16, 0, b, len, false); 5408 5457 } else { 5458 + /* only dump up to the last difference */ 5459 + len = memcmp_diff_len(a, b, len); 5460 + 5409 5461 drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name); 5410 5462 print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE, 5411 5463 16, 0, a, len, false); ··· 5897 5943 return ret; 5898 5944 5899 5945 crtc_state->update_planes |= crtc_state->active_planes; 5946 + crtc_state->async_flip_planes = 0; 5947 + crtc_state->do_async_flip = false; 5900 5948 } 5901 5949 5902 5950 return 0; ··· 6651 6695 * @dev: drm device 6652 6696 * @_state: state to validate 6653 6697 */ 6654 - static int intel_atomic_check(struct drm_device *dev, 6655 - struct drm_atomic_state *_state) 6698 + int intel_atomic_check(struct drm_device *dev, 6699 + struct drm_atomic_state *_state) 6656 6700 { 6657 6701 struct drm_i915_private *dev_priv = 
to_i915(dev); 6658 6702 struct intel_atomic_state *state = to_intel_atomic_state(_state); ··· 8312 8356 cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw; 8313 8357 } 8314 8358 8315 - static int sanitize_watermarks_add_affected(struct drm_atomic_state *state) 8316 - { 8317 - struct drm_plane *plane; 8318 - struct intel_crtc *crtc; 8319 - 8320 - for_each_intel_crtc(state->dev, crtc) { 8321 - struct intel_crtc_state *crtc_state; 8322 - 8323 - crtc_state = intel_atomic_get_crtc_state(state, crtc); 8324 - if (IS_ERR(crtc_state)) 8325 - return PTR_ERR(crtc_state); 8326 - 8327 - if (crtc_state->hw.active) { 8328 - /* 8329 - * Preserve the inherited flag to avoid 8330 - * taking the full modeset path. 8331 - */ 8332 - crtc_state->inherited = true; 8333 - } 8334 - } 8335 - 8336 - drm_for_each_plane(plane, state->dev) { 8337 - struct drm_plane_state *plane_state; 8338 - 8339 - plane_state = drm_atomic_get_plane_state(state, plane); 8340 - if (IS_ERR(plane_state)) 8341 - return PTR_ERR(plane_state); 8342 - } 8343 - 8344 - return 0; 8345 - } 8346 - 8347 - /* 8348 - * Calculate what we think the watermarks should be for the state we've read 8349 - * out of the hardware and then immediately program those watermarks so that 8350 - * we ensure the hardware settings match our internal state. 8351 - * 8352 - * We can calculate what we think WM's should be by creating a duplicate of the 8353 - * current state (which was constructed during hardware readout) and running it 8354 - * through the atomic check code to calculate new watermark values in the 8355 - * state object. 
8356 - */ 8357 - static void sanitize_watermarks(struct drm_i915_private *dev_priv) 8358 - { 8359 - struct drm_atomic_state *state; 8360 - struct intel_atomic_state *intel_state; 8361 - struct intel_crtc *crtc; 8362 - struct intel_crtc_state *crtc_state; 8363 - struct drm_modeset_acquire_ctx ctx; 8364 - int ret; 8365 - int i; 8366 - 8367 - /* Only supported on platforms that use atomic watermark design */ 8368 - if (!dev_priv->display.funcs.wm->optimize_watermarks) 8369 - return; 8370 - 8371 - state = drm_atomic_state_alloc(&dev_priv->drm); 8372 - if (drm_WARN_ON(&dev_priv->drm, !state)) 8373 - return; 8374 - 8375 - intel_state = to_intel_atomic_state(state); 8376 - 8377 - drm_modeset_acquire_init(&ctx, 0); 8378 - 8379 - retry: 8380 - state->acquire_ctx = &ctx; 8381 - 8382 - /* 8383 - * Hardware readout is the only time we don't want to calculate 8384 - * intermediate watermarks (since we don't trust the current 8385 - * watermarks). 8386 - */ 8387 - if (!HAS_GMCH(dev_priv)) 8388 - intel_state->skip_intermediate_wm = true; 8389 - 8390 - ret = sanitize_watermarks_add_affected(state); 8391 - if (ret) 8392 - goto fail; 8393 - 8394 - ret = intel_atomic_check(&dev_priv->drm, state); 8395 - if (ret) 8396 - goto fail; 8397 - 8398 - /* Write calculated watermark values back */ 8399 - for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { 8400 - crtc_state->wm.need_postvbl_update = true; 8401 - intel_optimize_watermarks(intel_state, crtc); 8402 - 8403 - to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; 8404 - } 8405 - 8406 - fail: 8407 - if (ret == -EDEADLK) { 8408 - drm_atomic_state_clear(state); 8409 - drm_modeset_backoff(&ctx); 8410 - goto retry; 8411 - } 8412 - 8413 - /* 8414 - * If we fail here, it means that the hardware appears to be 8415 - * programmed in a way that shouldn't be possible, given our 8416 - * understanding of watermark requirements. 
This might mean a 8417 - * mistake in the hardware readout code or a mistake in the 8418 - * watermark calculations for a given platform. Raise a WARN 8419 - * so that this is noticeable. 8420 - * 8421 - * If this actually happens, we'll have to just leave the 8422 - * BIOS-programmed watermarks untouched and hope for the best. 8423 - */ 8424 - drm_WARN(&dev_priv->drm, ret, 8425 - "Could not determine valid watermarks for inherited state\n"); 8426 - 8427 - drm_atomic_state_put(state); 8428 - 8429 - drm_modeset_drop_locks(&ctx); 8430 - drm_modeset_acquire_fini(&ctx); 8431 - } 8432 - 8433 8359 static int intel_initial_commit(struct drm_device *dev) 8434 8360 { 8435 8361 struct drm_atomic_state *state = NULL; ··· 8472 8634 goto cleanup_bios; 8473 8635 8474 8636 /* FIXME: completely on the wrong abstraction layer */ 8637 + ret = intel_power_domains_init(i915); 8638 + if (ret < 0) 8639 + goto cleanup_vga; 8640 + 8475 8641 intel_power_domains_init_hw(i915, false); 8476 8642 8477 8643 if (!HAS_DISPLAY(i915)) 8478 8644 return 0; 8479 8645 8480 - intel_dmc_ucode_init(i915); 8646 + intel_dmc_init(i915); 8481 8647 8482 8648 i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0); 8483 8649 i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI | ··· 8516 8674 return 0; 8517 8675 8518 8676 cleanup_vga_client_pw_domain_dmc: 8519 - intel_dmc_ucode_fini(i915); 8677 + intel_dmc_fini(i915); 8520 8678 intel_power_domains_driver_remove(i915); 8679 + cleanup_vga: 8521 8680 intel_vga_unregister(i915); 8522 8681 cleanup_bios: 8523 8682 intel_bios_driver_remove(i915); ··· 8537 8694 if (!HAS_DISPLAY(i915)) 8538 8695 return 0; 8539 8696 8540 - intel_init_pm(i915); 8697 + intel_wm_init(i915); 8541 8698 8542 8699 intel_panel_sanitize_ssc(i915); 8543 8700 ··· 8593 8750 * since the watermark calculation done here will use pstate->fb. 
8594 8751 */ 8595 8752 if (!HAS_GMCH(i915)) 8596 - sanitize_watermarks(i915); 8753 + ilk_wm_sanitize(i915); 8597 8754 8598 8755 return 0; 8599 8756 } ··· 8634 8791 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 8635 8792 { 8636 8793 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); 8794 + enum transcoder cpu_transcoder = (enum transcoder)pipe; 8637 8795 /* 640x480@60Hz, ~25175 kHz */ 8638 8796 struct dpll clock = { 8639 8797 .m1 = 18, ··· 8661 8817 PLL_REF_INPUT_DREFCLK | 8662 8818 DPLL_VCO_ENABLE; 8663 8819 8664 - intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); 8665 - intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); 8666 - intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); 8667 - intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); 8668 - intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); 8669 - intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); 8670 - intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); 8820 + intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder), 8821 + HACTIVE(640 - 1) | HTOTAL(800 - 1)); 8822 + intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder), 8823 + HBLANK_START(640 - 1) | HBLANK_END(800 - 1)); 8824 + intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder), 8825 + HSYNC_START(656 - 1) | HSYNC_END(752 - 1)); 8826 + intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), 8827 + VACTIVE(480 - 1) | VTOTAL(525 - 1)); 8828 + intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), 8829 + VBLANK_START(480 - 1) | VBLANK_END(525 - 1)); 8830 + intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder), 8831 + VSYNC_START(490 - 1) | VSYNC_END(492 - 1)); 8832 + intel_de_write(dev_priv, PIPESRC(pipe), 8833 + PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1)); 8671 8834 8672 8835 intel_de_write(dev_priv, FP0(pipe), fp); 8673 8836 intel_de_write(dev_priv, FP1(pipe), fp); ··· 8705 8854 
udelay(150); /* wait for warmup */ 8706 8855 } 8707 8856 8708 - intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE); 8709 - intel_de_posting_read(dev_priv, PIPECONF(pipe)); 8857 + intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE); 8858 + intel_de_posting_read(dev_priv, TRANSCONF(pipe)); 8710 8859 8711 8860 intel_wait_for_pipe_scanline_moving(crtc); 8712 8861 } ··· 8729 8878 drm_WARN_ON(&dev_priv->drm, 8730 8879 intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK); 8731 8880 8732 - intel_de_write(dev_priv, PIPECONF(pipe), 0); 8733 - intel_de_posting_read(dev_priv, PIPECONF(pipe)); 8881 + intel_de_write(dev_priv, TRANSCONF(pipe), 0); 8882 + intel_de_posting_read(dev_priv, TRANSCONF(pipe)); 8734 8883 8735 8884 intel_wait_for_pipe_scanline_stopped(crtc); 8736 8885 ··· 8851 9000 /* part #3: call after gem init */ 8852 9001 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915) 8853 9002 { 8854 - intel_dmc_ucode_fini(i915); 9003 + intel_dmc_fini(i915); 8855 9004 8856 9005 intel_power_domains_driver_remove(i915); 8857 9006 ··· 8902 9051 * enabled. We do it last so that the async config cannot run 8903 9052 * before the connectors are registered. 8904 9053 */ 8905 - intel_fbdev_initial_config_async(&i915->drm); 9054 + intel_fbdev_initial_config_async(i915); 8906 9055 8907 9056 /* 8908 9057 * We need to coordinate the hotplugs with the asynchronous
+4
drivers/gpu/drm/i915/display/intel_display.h
··· 32 32 33 33 enum drm_scaling_filter; 34 34 struct dpll; 35 + struct drm_atomic_state; 35 36 struct drm_connector; 36 37 struct drm_device; 37 38 struct drm_display_mode; ··· 172 171 }; 173 172 174 173 enum aux_ch { 174 + AUX_CH_NONE = -1, 175 + 175 176 AUX_CH_A, 176 177 AUX_CH_B, 177 178 AUX_CH_C, ··· 397 394 ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \ 398 395 (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1)) 399 396 397 + int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); 400 398 int intel_atomic_add_affected_planes(struct intel_atomic_state *state, 401 399 struct intel_crtc *crtc); 402 400 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
+10 -5
drivers/gpu/drm/i915/display/intel_display_core.h
··· 19 19 #include "intel_cdclk.h" 20 20 #include "intel_display_limits.h" 21 21 #include "intel_display_power.h" 22 - #include "intel_dmc.h" 23 22 #include "intel_dpll_mgr.h" 24 23 #include "intel_fbc.h" 25 24 #include "intel_global_state.h" 26 25 #include "intel_gmbus.h" 27 26 #include "intel_opregion.h" 28 - #include "intel_pm_types.h" 27 + #include "intel_wm_types.h" 29 28 30 29 struct drm_i915_private; 31 30 struct drm_property; ··· 39 40 struct intel_color_funcs; 40 41 struct intel_crtc; 41 42 struct intel_crtc_state; 43 + struct intel_dmc; 42 44 struct intel_dpll_funcs; 43 45 struct intel_dpll_mgr; 44 46 struct intel_fbdev; ··· 85 85 void (*optimize_watermarks)(struct intel_atomic_state *state, 86 86 struct intel_crtc *crtc); 87 87 int (*compute_global_watermarks)(struct intel_atomic_state *state); 88 + void (*get_hw_state)(struct drm_i915_private *i915); 88 89 }; 89 90 90 91 struct intel_audio_state { ··· 103 102 u32 freq_cntrl; 104 103 105 104 /* current audio state for the audio component hooks */ 106 - struct intel_audio_state state[I915_MAX_PIPES]; 105 + struct intel_audio_state state[I915_MAX_TRANSCODERS]; 107 106 108 107 /* necessary resource sharing with HDMI LPE audio driver. */ 109 108 struct { ··· 244 243 struct g4x_wm_values g4x; 245 244 }; 246 245 247 - u8 max_level; 246 + u8 num_levels; 248 247 249 248 /* 250 249 * Should be held around atomic WM register writing; also ··· 339 338 */ 340 339 spinlock_t phy_lock; 341 340 } dkl; 341 + 342 + struct { 343 + struct intel_dmc *dmc; 344 + intel_wakeref_t wakeref; 345 + } dmc; 342 346 343 347 struct { 344 348 /* VLV/CHV/BXT/GLK DSI MMIO register base address */ ··· 472 466 473 467 /* Grouping using named structs. Keep sorted. */ 474 468 struct intel_audio audio; 475 - struct intel_dmc dmc; 476 469 struct intel_dpll dpll; 477 470 struct intel_fbc *fbc[I915_MAX_FBCS]; 478 471 struct intel_frontbuffer_tracking fb_tracking;
+2 -237
drivers/gpu/drm/i915/display/intel_display_debugfs.c
··· 26 26 #include "intel_hdmi.h" 27 27 #include "intel_hotplug.h" 28 28 #include "intel_panel.h" 29 - #include "intel_pm.h" 30 29 #include "intel_psr.h" 31 30 #include "intel_sprite.h" 32 - #include "skl_watermark.h" 31 + #include "intel_wm.h" 33 32 34 33 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) 35 34 { ··· 1281 1282 } 1282 1283 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); 1283 1284 1284 - static void wm_latency_show(struct seq_file *m, const u16 wm[8]) 1285 - { 1286 - struct drm_i915_private *dev_priv = m->private; 1287 - int level; 1288 - int num_levels; 1289 - 1290 - if (IS_CHERRYVIEW(dev_priv)) 1291 - num_levels = 3; 1292 - else if (IS_VALLEYVIEW(dev_priv)) 1293 - num_levels = 1; 1294 - else if (IS_G4X(dev_priv)) 1295 - num_levels = 3; 1296 - else 1297 - num_levels = ilk_wm_max_level(dev_priv) + 1; 1298 - 1299 - drm_modeset_lock_all(&dev_priv->drm); 1300 - 1301 - for (level = 0; level < num_levels; level++) { 1302 - unsigned int latency = wm[level]; 1303 - 1304 - /* 1305 - * - WM1+ latency values in 0.5us units 1306 - * - latencies are in us on gen9/vlv/chv 1307 - */ 1308 - if (DISPLAY_VER(dev_priv) >= 9 || 1309 - IS_VALLEYVIEW(dev_priv) || 1310 - IS_CHERRYVIEW(dev_priv) || 1311 - IS_G4X(dev_priv)) 1312 - latency *= 10; 1313 - else if (level > 0) 1314 - latency *= 5; 1315 - 1316 - seq_printf(m, "WM%d %u (%u.%u usec)\n", 1317 - level, wm[level], latency / 10, latency % 10); 1318 - } 1319 - 1320 - drm_modeset_unlock_all(&dev_priv->drm); 1321 - } 1322 - 1323 - static int pri_wm_latency_show(struct seq_file *m, void *data) 1324 - { 1325 - struct drm_i915_private *dev_priv = m->private; 1326 - const u16 *latencies; 1327 - 1328 - if (DISPLAY_VER(dev_priv) >= 9) 1329 - latencies = dev_priv->display.wm.skl_latency; 1330 - else 1331 - latencies = dev_priv->display.wm.pri_latency; 1332 - 1333 - wm_latency_show(m, latencies); 1334 - 1335 - return 0; 1336 - } 1337 - 1338 - static int spr_wm_latency_show(struct seq_file *m, void 
*data) 1339 - { 1340 - struct drm_i915_private *dev_priv = m->private; 1341 - const u16 *latencies; 1342 - 1343 - if (DISPLAY_VER(dev_priv) >= 9) 1344 - latencies = dev_priv->display.wm.skl_latency; 1345 - else 1346 - latencies = dev_priv->display.wm.spr_latency; 1347 - 1348 - wm_latency_show(m, latencies); 1349 - 1350 - return 0; 1351 - } 1352 - 1353 - static int cur_wm_latency_show(struct seq_file *m, void *data) 1354 - { 1355 - struct drm_i915_private *dev_priv = m->private; 1356 - const u16 *latencies; 1357 - 1358 - if (DISPLAY_VER(dev_priv) >= 9) 1359 - latencies = dev_priv->display.wm.skl_latency; 1360 - else 1361 - latencies = dev_priv->display.wm.cur_latency; 1362 - 1363 - wm_latency_show(m, latencies); 1364 - 1365 - return 0; 1366 - } 1367 - 1368 - static int pri_wm_latency_open(struct inode *inode, struct file *file) 1369 - { 1370 - struct drm_i915_private *dev_priv = inode->i_private; 1371 - 1372 - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) 1373 - return -ENODEV; 1374 - 1375 - return single_open(file, pri_wm_latency_show, dev_priv); 1376 - } 1377 - 1378 - static int spr_wm_latency_open(struct inode *inode, struct file *file) 1379 - { 1380 - struct drm_i915_private *dev_priv = inode->i_private; 1381 - 1382 - if (HAS_GMCH(dev_priv)) 1383 - return -ENODEV; 1384 - 1385 - return single_open(file, spr_wm_latency_show, dev_priv); 1386 - } 1387 - 1388 - static int cur_wm_latency_open(struct inode *inode, struct file *file) 1389 - { 1390 - struct drm_i915_private *dev_priv = inode->i_private; 1391 - 1392 - if (HAS_GMCH(dev_priv)) 1393 - return -ENODEV; 1394 - 1395 - return single_open(file, cur_wm_latency_show, dev_priv); 1396 - } 1397 - 1398 - static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 1399 - size_t len, loff_t *offp, u16 wm[8]) 1400 - { 1401 - struct seq_file *m = file->private_data; 1402 - struct drm_i915_private *dev_priv = m->private; 1403 - u16 new[8] = { 0 }; 1404 - int num_levels; 1405 - int level; 1406 - int ret; 
1407 - char tmp[32]; 1408 - 1409 - if (IS_CHERRYVIEW(dev_priv)) 1410 - num_levels = 3; 1411 - else if (IS_VALLEYVIEW(dev_priv)) 1412 - num_levels = 1; 1413 - else if (IS_G4X(dev_priv)) 1414 - num_levels = 3; 1415 - else 1416 - num_levels = ilk_wm_max_level(dev_priv) + 1; 1417 - 1418 - if (len >= sizeof(tmp)) 1419 - return -EINVAL; 1420 - 1421 - if (copy_from_user(tmp, ubuf, len)) 1422 - return -EFAULT; 1423 - 1424 - tmp[len] = '\0'; 1425 - 1426 - ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 1427 - &new[0], &new[1], &new[2], &new[3], 1428 - &new[4], &new[5], &new[6], &new[7]); 1429 - if (ret != num_levels) 1430 - return -EINVAL; 1431 - 1432 - drm_modeset_lock_all(&dev_priv->drm); 1433 - 1434 - for (level = 0; level < num_levels; level++) 1435 - wm[level] = new[level]; 1436 - 1437 - drm_modeset_unlock_all(&dev_priv->drm); 1438 - 1439 - return len; 1440 - } 1441 - 1442 - 1443 - static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 1444 - size_t len, loff_t *offp) 1445 - { 1446 - struct seq_file *m = file->private_data; 1447 - struct drm_i915_private *dev_priv = m->private; 1448 - u16 *latencies; 1449 - 1450 - if (DISPLAY_VER(dev_priv) >= 9) 1451 - latencies = dev_priv->display.wm.skl_latency; 1452 - else 1453 - latencies = dev_priv->display.wm.pri_latency; 1454 - 1455 - return wm_latency_write(file, ubuf, len, offp, latencies); 1456 - } 1457 - 1458 - static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 1459 - size_t len, loff_t *offp) 1460 - { 1461 - struct seq_file *m = file->private_data; 1462 - struct drm_i915_private *dev_priv = m->private; 1463 - u16 *latencies; 1464 - 1465 - if (DISPLAY_VER(dev_priv) >= 9) 1466 - latencies = dev_priv->display.wm.skl_latency; 1467 - else 1468 - latencies = dev_priv->display.wm.spr_latency; 1469 - 1470 - return wm_latency_write(file, ubuf, len, offp, latencies); 1471 - } 1472 - 1473 - static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 1474 - 
size_t len, loff_t *offp) 1475 - { 1476 - struct seq_file *m = file->private_data; 1477 - struct drm_i915_private *dev_priv = m->private; 1478 - u16 *latencies; 1479 - 1480 - if (DISPLAY_VER(dev_priv) >= 9) 1481 - latencies = dev_priv->display.wm.skl_latency; 1482 - else 1483 - latencies = dev_priv->display.wm.cur_latency; 1484 - 1485 - return wm_latency_write(file, ubuf, len, offp, latencies); 1486 - } 1487 - 1488 - static const struct file_operations i915_pri_wm_latency_fops = { 1489 - .owner = THIS_MODULE, 1490 - .open = pri_wm_latency_open, 1491 - .read = seq_read, 1492 - .llseek = seq_lseek, 1493 - .release = single_release, 1494 - .write = pri_wm_latency_write 1495 - }; 1496 - 1497 - static const struct file_operations i915_spr_wm_latency_fops = { 1498 - .owner = THIS_MODULE, 1499 - .open = spr_wm_latency_open, 1500 - .read = seq_read, 1501 - .llseek = seq_lseek, 1502 - .release = single_release, 1503 - .write = spr_wm_latency_write 1504 - }; 1505 - 1506 - static const struct file_operations i915_cur_wm_latency_fops = { 1507 - .owner = THIS_MODULE, 1508 - .open = cur_wm_latency_open, 1509 - .read = seq_read, 1510 - .llseek = seq_lseek, 1511 - .release = single_release, 1512 - .write = cur_wm_latency_write 1513 - }; 1514 - 1515 1285 static ssize_t 1516 1286 i915_fifo_underrun_reset_write(struct file *filp, 1517 1287 const char __user *ubuf, ··· 1361 1593 const struct file_operations *fops; 1362 1594 } intel_display_debugfs_files[] = { 1363 1595 {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, 1364 - {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 1365 - {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 1366 - {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 1367 1596 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 1368 1597 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 1369 1598 {"i915_dp_test_active", &i915_displayport_test_active_fops}, ··· 1387 1622 intel_dmc_debugfs_register(i915); 1388 1623 
intel_fbc_debugfs_register(i915); 1389 1624 intel_hpd_debugfs_register(i915); 1390 - skl_watermark_ipc_debugfs_register(i915); 1625 + intel_wm_debugfs_register(i915); 1391 1626 } 1392 1627 1393 1628 static int i915_panel_show(struct seq_file *m, void *data)
+29 -51
drivers/gpu/drm/i915/display/intel_display_power.c
··· 264 264 } 265 265 266 266 static u32 267 - sanitize_target_dc_state(struct drm_i915_private *dev_priv, 267 + sanitize_target_dc_state(struct drm_i915_private *i915, 268 268 u32 target_dc_state) 269 269 { 270 + struct i915_power_domains *power_domains = &i915->display.power.domains; 270 271 static const u32 states[] = { 271 272 DC_STATE_EN_UPTO_DC6, 272 273 DC_STATE_EN_UPTO_DC5, ··· 280 279 if (target_dc_state != states[i]) 281 280 continue; 282 281 283 - if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state) 282 + if (power_domains->allowed_dc_mask & target_dc_state) 284 283 break; 285 284 286 285 target_dc_state = states[i + 1]; ··· 313 312 314 313 state = sanitize_target_dc_state(dev_priv, state); 315 314 316 - if (state == dev_priv->display.dmc.target_dc_state) 315 + if (state == power_domains->target_dc_state) 317 316 goto unlock; 318 317 319 318 dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well); ··· 324 323 if (!dc_off_enabled) 325 324 intel_power_well_enable(dev_priv, power_well); 326 325 327 - dev_priv->display.dmc.target_dc_state = state; 326 + power_domains->target_dc_state = state; 328 327 329 328 if (!dc_off_enabled) 330 329 intel_power_well_disable(dev_priv, power_well); ··· 993 992 dev_priv->params.disable_power_well = 994 993 sanitize_disable_power_well_option(dev_priv, 995 994 dev_priv->params.disable_power_well); 996 - dev_priv->display.dmc.allowed_dc_mask = 995 + power_domains->allowed_dc_mask = 997 996 get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); 998 997 999 - dev_priv->display.dmc.target_dc_state = 998 + power_domains->target_dc_state = 1000 999 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 1001 1000 1002 1001 mutex_init(&power_domains->lock); ··· 1261 1260 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); 1262 1261 1263 1262 if (allow_power_down) { 1264 - val = intel_de_read(dev_priv, LCPLL_CTL); 1265 - val |= LCPLL_POWER_DOWN_ALLOW; 1266 - intel_de_write(dev_priv, LCPLL_CTL, val); 
1263 + intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW); 1267 1264 intel_de_posting_read(dev_priv, LCPLL_CTL); 1268 1265 } 1269 1266 } ··· 1305 1306 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); 1306 1307 1307 1308 if (val & LCPLL_CD_SOURCE_FCLK) { 1308 - val = intel_de_read(dev_priv, LCPLL_CTL); 1309 - val &= ~LCPLL_CD_SOURCE_FCLK; 1310 - intel_de_write(dev_priv, LCPLL_CTL, val); 1309 + intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); 1311 1310 1312 1311 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & 1313 1312 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) ··· 1344 1347 */ 1345 1348 static void hsw_enable_pc8(struct drm_i915_private *dev_priv) 1346 1349 { 1347 - u32 val; 1348 - 1349 1350 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); 1350 1351 1351 - if (HAS_PCH_LPT_LP(dev_priv)) { 1352 - val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 1353 - val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 1354 - intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 1355 - } 1352 + if (HAS_PCH_LPT_LP(dev_priv)) 1353 + intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 1354 + PCH_LP_PARTITION_LEVEL_DISABLE, 0); 1356 1355 1357 1356 lpt_disable_clkout_dp(dev_priv); 1358 1357 hsw_disable_lcpll(dev_priv, true, true); ··· 1356 1363 1357 1364 static void hsw_disable_pc8(struct drm_i915_private *dev_priv) 1358 1365 { 1359 - u32 val; 1360 - 1361 1366 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); 1362 1367 1363 1368 hsw_restore_lcpll(dev_priv); 1364 1369 intel_init_pch_refclk(dev_priv); 1365 1370 1366 - if (HAS_PCH_LPT_LP(dev_priv)) { 1367 - val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 1368 - val |= PCH_LP_PARTITION_LEVEL_DISABLE; 1369 - intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 1370 - } 1371 + if (HAS_PCH_LPT_LP(dev_priv)) 1372 + intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 1373 + 0, PCH_LP_PARTITION_LEVEL_DISABLE); 1371 1374 } 1372 1375 1373 1376 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 1374 1377 bool enable) 
1375 1378 { 1376 1379 i915_reg_t reg; 1377 - u32 reset_bits, val; 1380 + u32 reset_bits; 1378 1381 1379 1382 if (IS_IVYBRIDGE(dev_priv)) { 1380 1383 reg = GEN7_MSG_CTL; ··· 1383 1394 if (DISPLAY_VER(dev_priv) >= 14) 1384 1395 reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN; 1385 1396 1386 - val = intel_de_read(dev_priv, reg); 1387 - 1388 - if (enable) 1389 - val |= reset_bits; 1390 - else 1391 - val &= ~reset_bits; 1392 - 1393 - intel_de_write(dev_priv, reg, val); 1397 + intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0); 1394 1398 } 1395 1399 1396 1400 static void skl_display_core_init(struct drm_i915_private *dev_priv, ··· 1562 1580 return; 1563 1581 1564 1582 if (IS_ALDERLAKE_S(dev_priv) || 1565 - IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || 1566 - IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || 1567 - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) 1568 - /* Wa_1409767108:tgl,dg1,adl-s */ 1583 + IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 1584 + /* Wa_1409767108 */ 1569 1585 table = wa_1409767108_buddy_page_masks; 1570 1586 else 1571 1587 table = tgl_buddy_page_masks; ··· 1598 1618 { 1599 1619 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1600 1620 struct i915_power_well *well; 1601 - u32 val; 1602 1621 1603 1622 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 1604 1623 ··· 1649 1670 intel_dmc_load_program(dev_priv); 1650 1671 1651 1672 /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */ 1652 - if (DISPLAY_VER(dev_priv) >= 12) { 1653 - val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | 1654 - DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR; 1655 - intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, val); 1656 - } 1673 + if (DISPLAY_VER(dev_priv) >= 12) 1674 + intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, 1675 + DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | 1676 + DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR); 1657 1677 1658 1678 /* Wa_14011503030:xelpd */ 1659 1679 if (DISPLAY_VER(dev_priv) >= 13) ··· 2033 
2055 * resources as required and also enable deeper system power states 2034 2056 * that would be blocked if the firmware was inactive. 2035 2057 */ 2036 - if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) && 2058 + if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && 2037 2059 suspend_mode == I915_DRM_SUSPEND_IDLE && 2038 2060 intel_dmc_has_payload(i915)) { 2039 2061 intel_display_power_flush_work(i915); ··· 2222 2244 2223 2245 void intel_display_power_resume(struct drm_i915_private *i915) 2224 2246 { 2247 + struct i915_power_domains *power_domains = &i915->display.power.domains; 2248 + 2225 2249 if (DISPLAY_VER(i915) >= 11) { 2226 2250 bxt_disable_dc9(i915); 2227 2251 icl_display_core_init(i915, true); 2228 2252 if (intel_dmc_has_payload(i915)) { 2229 - if (i915->display.dmc.allowed_dc_mask & 2230 - DC_STATE_EN_UPTO_DC6) 2253 + if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6) 2231 2254 skl_enable_dc6(i915); 2232 - else if (i915->display.dmc.allowed_dc_mask & 2233 - DC_STATE_EN_UPTO_DC5) 2255 + else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5) 2234 2256 gen9_enable_dc5(i915); 2235 2257 } 2236 2258 } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { 2237 2259 bxt_disable_dc9(i915); 2238 2260 bxt_display_core_init(i915, true); 2239 2261 if (intel_dmc_has_payload(i915) && 2240 - (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 2262 + (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 2241 2263 gen9_enable_dc5(i915); 2242 2264 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 2243 2265 hsw_disable_pc8(i915);
+4
drivers/gpu/drm/i915/display/intel_display_power.h
··· 137 137 bool display_core_suspended; 138 138 int power_well_count; 139 139 140 + u32 dc_state; 141 + u32 target_dc_state; 142 + u32 allowed_dc_mask; 143 + 140 144 intel_wakeref_t init_wakeref; 141 145 intel_wakeref_t disable_wakeref; 142 146
+60 -74
drivers/gpu/drm/i915/display/intel_display_power_well.c
··· 333 333 { 334 334 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 335 335 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 336 - u32 val; 337 336 338 337 if (power_well->desc->has_fuses) { 339 338 enum skl_power_gate pg; ··· 355 356 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); 356 357 } 357 358 358 - val = intel_de_read(dev_priv, regs->driver); 359 - intel_de_write(dev_priv, regs->driver, 360 - val | HSW_PWR_WELL_CTL_REQ(pw_idx)); 359 + intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); 361 360 362 361 hsw_wait_for_power_well_enable(dev_priv, power_well, false); 363 362 ··· 377 380 { 378 381 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 379 382 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 380 - u32 val; 381 383 382 384 hsw_power_well_pre_disable(dev_priv, 383 385 power_well->desc->irq_pipe_mask); 384 386 385 - val = intel_de_read(dev_priv, regs->driver); 386 - intel_de_write(dev_priv, regs->driver, 387 - val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); 387 + intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); 388 388 hsw_wait_for_power_well_disable(dev_priv, power_well); 389 + } 390 + 391 + static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port) 392 + { 393 + struct intel_encoder *encoder; 394 + 395 + for_each_intel_encoder(&i915->drm, encoder) { 396 + if (encoder->type == INTEL_OUTPUT_EDP && 397 + encoder->port == port) 398 + return true; 399 + } 400 + 401 + return false; 389 402 } 390 403 391 404 static void ··· 405 398 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 406 399 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 407 400 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); 408 - u32 val; 409 401 410 402 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); 411 403 412 - val = intel_de_read(dev_priv, regs->driver); 413 - intel_de_write(dev_priv, regs->driver, 414 - val | 
HSW_PWR_WELL_CTL_REQ(pw_idx)); 404 + intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); 415 405 416 - if (DISPLAY_VER(dev_priv) < 12) { 417 - val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); 418 - intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), 419 - val | ICL_LANE_ENABLE_AUX); 420 - } 406 + if (DISPLAY_VER(dev_priv) < 12) 407 + intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), 408 + 0, ICL_LANE_ENABLE_AUX); 421 409 422 410 hsw_wait_for_power_well_enable(dev_priv, power_well, false); 423 411 424 412 /* Display WA #1178: icl */ 425 413 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && 426 - !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { 427 - val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)); 428 - val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; 429 - intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val); 430 - } 414 + !intel_port_is_edp(dev_priv, (enum port)phy)) 415 + intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), 416 + 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS); 431 417 } 432 418 433 419 static void ··· 430 430 const struct i915_power_well_regs *regs = power_well->desc->ops->regs; 431 431 int pw_idx = i915_power_well_instance(power_well)->hsw.idx; 432 432 enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); 433 - u32 val; 434 433 435 434 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); 436 435 437 - val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); 438 - intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), 439 - val & ~ICL_LANE_ENABLE_AUX); 436 + intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0); 440 437 441 - val = intel_de_read(dev_priv, regs->driver); 442 - intel_de_write(dev_priv, regs->driver, 443 - val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); 438 + intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); 444 439 445 440 hsw_wait_for_power_well_disable(dev_priv, power_well); 446 441 } ··· 497 502 const struct i915_power_well_regs *regs = 
power_well->desc->ops->regs; 498 503 bool is_tbt = power_well->desc->is_tc_tbt; 499 504 bool timeout_expected; 500 - u32 val; 501 505 502 506 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); 503 507 504 - val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); 505 - val &= ~DP_AUX_CH_CTL_TBT_IO; 506 - if (is_tbt) 507 - val |= DP_AUX_CH_CTL_TBT_IO; 508 - intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); 508 + intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch), 509 + DP_AUX_CH_CTL_TBT_IO, is_tbt ? DP_AUX_CH_CTL_TBT_IO : 0); 509 510 510 - val = intel_de_read(dev_priv, regs->driver); 511 - intel_de_write(dev_priv, regs->driver, 512 - val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx)); 511 + intel_de_rmw(dev_priv, regs->driver, 512 + 0, 513 + HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx)); 513 514 514 515 /* 515 516 * An AUX timeout is expected if the TBT DP tunnel is down, ··· 691 700 return mask; 692 701 } 693 702 694 - void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) 703 + void gen9_sanitize_dc_state(struct drm_i915_private *i915) 695 704 { 705 + struct i915_power_domains *power_domains = &i915->display.power.domains; 696 706 u32 val; 697 707 698 - if (!HAS_DISPLAY(dev_priv)) 708 + if (!HAS_DISPLAY(i915)) 699 709 return; 700 710 701 - val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv); 711 + val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915); 702 712 703 - drm_dbg_kms(&dev_priv->drm, 713 + drm_dbg_kms(&i915->drm, 704 714 "Resetting DC state tracking from %02x to %02x\n", 705 - dev_priv->display.dmc.dc_state, val); 706 - dev_priv->display.dmc.dc_state = val; 715 + power_domains->dc_state, val); 716 + power_domains->dc_state = val; 707 717 } 708 718 709 719 /** ··· 732 740 */ 733 741 void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) 734 742 { 743 + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 735 744 u32 val; 736 745 u32 mask; 
737 746 ··· 740 747 return; 741 748 742 749 if (drm_WARN_ON_ONCE(&dev_priv->drm, 743 - state & ~dev_priv->display.dmc.allowed_dc_mask)) 744 - state &= dev_priv->display.dmc.allowed_dc_mask; 750 + state & ~power_domains->allowed_dc_mask)) 751 + state &= power_domains->allowed_dc_mask; 745 752 746 753 val = intel_de_read(dev_priv, DC_STATE_EN); 747 754 mask = gen9_dc_mask(dev_priv); ··· 749 756 val & mask, state); 750 757 751 758 /* Check if DMC is ignoring our DC state requests */ 752 - if ((val & mask) != dev_priv->display.dmc.dc_state) 759 + if ((val & mask) != power_domains->dc_state) 753 760 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", 754 - dev_priv->display.dmc.dc_state, val & mask); 761 + power_domains->dc_state, val & mask); 755 762 756 763 val &= ~mask; 757 764 val |= state; 758 765 759 766 gen9_write_dc_state(dev_priv, val); 760 767 761 - dev_priv->display.dmc.dc_state = val & mask; 768 + power_domains->dc_state = val & mask; 762 769 } 763 770 764 771 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) ··· 769 776 770 777 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) 771 778 { 772 - u32 val; 773 - 774 779 drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); 775 - val = intel_de_read(dev_priv, DC_STATE_EN); 776 - val &= ~DC_STATE_DC3CO_STATUS; 777 - intel_de_write(dev_priv, DC_STATE_EN, val); 780 + intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0); 778 781 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 779 782 /* 780 783 * Delay of 200us DC3CO Exit time B.Spec 49196 ··· 809 820 810 821 /* Wa Display #1183: skl,kbl,cfl */ 811 822 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) 812 - intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, 813 - intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); 823 + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 824 + 0, SKL_SELECT_ALTERNATE_DC_EXIT); 814 825 815 826 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); 816 827 } ··· 836 847 837 848 
/* Wa Display #1183: skl,kbl,cfl */ 838 849 if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) 839 - intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, 840 - intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); 850 + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 851 + 0, SKL_SELECT_ALTERNATE_DC_EXIT); 841 852 842 853 gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 843 854 } ··· 946 957 947 958 void gen9_disable_dc_states(struct drm_i915_private *dev_priv) 948 959 { 960 + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 949 961 struct intel_cdclk_config cdclk_config = {}; 950 962 951 - if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) { 963 + if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) { 952 964 tgl_disable_dc3co(dev_priv); 953 965 return; 954 966 } ··· 988 998 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, 989 999 struct i915_power_well *power_well) 990 1000 { 1001 + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1002 + 991 1003 if (!intel_dmc_has_payload(dev_priv)) 992 1004 return; 993 1005 994 - switch (dev_priv->display.dmc.target_dc_state) { 1006 + switch (power_domains->target_dc_state) { 995 1007 case DC_STATE_EN_DC3CO: 996 1008 tgl_enable_dc3co(dev_priv); 997 1009 break; ··· 1025 1033 static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, 1026 1034 struct i915_power_well *power_well) 1027 1035 { 1028 - if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) 1036 + if ((intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE) == 0) 1029 1037 i830_enable_pipe(dev_priv, PIPE_A); 1030 - if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) 1038 + if ((intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE) == 0) 1031 1039 i830_enable_pipe(dev_priv, PIPE_B); 1032 1040 } 1033 1041 ··· 1041 1049 static bool i830_pipes_power_well_enabled(struct 
drm_i915_private *dev_priv, 1042 1050 struct i915_power_well *power_well) 1043 1051 { 1044 - return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE && 1045 - intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; 1052 + return intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE && 1053 + intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE; 1046 1054 } 1047 1055 1048 1056 static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, ··· 1141 1149 1142 1150 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) 1143 1151 { 1144 - u32 val; 1145 - 1146 1152 /* 1147 1153 * On driver load, a pipe may be active and driving a DSI display. 1148 1154 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck 1149 1155 * (and never recovering) in this case. intel_dsi_post_disable() will 1150 1156 * clear it when we turn off the display. 1151 1157 */ 1152 - val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); 1153 - val &= DPOUNIT_CLOCK_GATE_DISABLE; 1154 - val |= VRHUNIT_CLOCK_GATE_DISABLE; 1155 - intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); 1158 + intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), 1159 + ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE); 1156 1160 1157 1161 /* 1158 1162 * Disable trickle feed and enable pnd deadline calculation ··· 1264 1276 * both PLLs disabled, or we risk losing DPIO and PLL 1265 1277 * synchronization. 
1266 1278 */ 1267 - intel_de_write(dev_priv, DPIO_CTL, 1268 - intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST); 1279 + intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST); 1269 1280 } 1270 1281 1271 1282 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, ··· 1276 1289 assert_pll_disabled(dev_priv, pipe); 1277 1290 1278 1291 /* Assert common reset */ 1279 - intel_de_write(dev_priv, DPIO_CTL, 1280 - intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST); 1292 + intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0); 1281 1293 1282 1294 vlv_set_power_well(dev_priv, power_well, false); 1283 1295 }
+3 -7
drivers/gpu/drm/i915/display/intel_display_reg_defs.h
··· 13 13 #define VLV_DISPLAY_BASE 0x180000 14 14 15 15 /* 16 - * Named helper wrappers around _PICK_EVEN() and _PICK(). 16 + * Named helper wrappers around _PICK_EVEN() and _PICK_EVEN_2RANGES(). 17 17 */ 18 18 #define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b) 19 19 #define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b) ··· 29 29 #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) 30 30 #define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b)) 31 31 32 - #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) 33 - 34 - #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) 35 - #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) 36 - #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) 37 - #define _MMIO_PLL3(pll, ...) _MMIO(_PICK(pll, __VA_ARGS__)) 32 + #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c)) 33 + #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c)) 38 34 39 35 /* 40 36 * Device info offset array based helpers for groups of registers with unevenly
+81
drivers/gpu/drm/i915/display/intel_display_rps.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include <drm/drm_crtc.h> 7 + #include <drm/drm_vblank.h> 8 + 9 + #include "gt/intel_rps.h" 10 + #include "i915_drv.h" 11 + #include "intel_display_rps.h" 12 + #include "intel_display_types.h" 13 + 14 + struct wait_rps_boost { 15 + struct wait_queue_entry wait; 16 + 17 + struct drm_crtc *crtc; 18 + struct i915_request *request; 19 + }; 20 + 21 + static int do_rps_boost(struct wait_queue_entry *_wait, 22 + unsigned mode, int sync, void *key) 23 + { 24 + struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 25 + struct i915_request *rq = wait->request; 26 + 27 + /* 28 + * If we missed the vblank, but the request is already running it 29 + * is reasonable to assume that it will complete before the next 30 + * vblank without our intervention, so leave RPS alone. 31 + */ 32 + if (!i915_request_started(rq)) 33 + intel_rps_boost(rq); 34 + i915_request_put(rq); 35 + 36 + drm_crtc_vblank_put(wait->crtc); 37 + 38 + list_del(&wait->wait.entry); 39 + kfree(wait); 40 + return 1; 41 + } 42 + 43 + void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc, 44 + struct dma_fence *fence) 45 + { 46 + struct wait_rps_boost *wait; 47 + 48 + if (!dma_fence_is_i915(fence)) 49 + return; 50 + 51 + if (DISPLAY_VER(to_i915(crtc->dev)) < 6) 52 + return; 53 + 54 + if (drm_crtc_vblank_get(crtc)) 55 + return; 56 + 57 + wait = kmalloc(sizeof(*wait), GFP_KERNEL); 58 + if (!wait) { 59 + drm_crtc_vblank_put(crtc); 60 + return; 61 + } 62 + 63 + wait->request = to_request(dma_fence_get(fence)); 64 + wait->crtc = crtc; 65 + 66 + wait->wait.func = do_rps_boost; 67 + wait->wait.flags = 0; 68 + 69 + add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 70 + } 71 + 72 + void intel_display_rps_mark_interactive(struct drm_i915_private *i915, 73 + struct intel_atomic_state *state, 74 + bool interactive) 75 + { 76 + if (state->rps_interactive == interactive) 77 + return; 78 + 
79 + intel_rps_mark_interactive(&to_gt(i915)->rps, interactive); 80 + state->rps_interactive = interactive; 81 + }
+22
drivers/gpu/drm/i915/display/intel_display_rps.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_DISPLAY_RPS_H__ 7 + #define __INTEL_DISPLAY_RPS_H__ 8 + 9 + #include <linux/types.h> 10 + 11 + struct dma_fence; 12 + struct drm_crtc; 13 + struct drm_i915_private; 14 + struct intel_atomic_state; 15 + 16 + void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc, 17 + struct dma_fence *fence); 18 + void intel_display_rps_mark_interactive(struct drm_i915_private *i915, 19 + struct intel_atomic_state *state, 20 + bool interactive); 21 + 22 + #endif /* __INTEL_DISPLAY_RPS_H__ */
+7 -12
drivers/gpu/drm/i915/display/intel_display_types.h
··· 53 53 #include "intel_display_limits.h" 54 54 #include "intel_display_power.h" 55 55 #include "intel_dpll_mgr.h" 56 - #include "intel_pm_types.h" 56 + #include "intel_wm_types.h" 57 57 58 58 struct drm_printer; 59 59 struct __intel_global_objs_state; ··· 326 326 struct { 327 327 u16 pwm_freq_hz; 328 328 u16 brightness_precision_bits; 329 + u16 hdr_dpcd_refresh_timeout; 329 330 bool present; 330 331 bool active_low_pwm; 331 332 u8 min_brightness; /* min_brightness/255 of max */ ··· 1250 1249 /* bitmask of planes that will be updated during the commit */ 1251 1250 u8 update_planes; 1252 1251 1252 + /* bitmask of planes with async flip active */ 1253 + u8 async_flip_planes; 1254 + 1253 1255 u8 framestart_delay; /* 1-4 */ 1254 1256 u8 msa_timing_delay; /* 0-3 */ 1255 1257 ··· 1506 1502 u8 cacheline_size; 1507 1503 }; 1508 1504 1509 - struct cxsr_latency { 1510 - bool is_desktop : 1; 1511 - bool is_ddr3 : 1; 1512 - u16 fsb_freq; 1513 - u16 mem_freq; 1514 - u16 display_sr; 1515 - u16 display_hpll_disable; 1516 - u16 cursor_sr; 1517 - u16 cursor_hpll_disable; 1518 - }; 1519 - 1520 1505 #define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base) 1521 1506 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 1522 1507 #define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi) ··· 1624 1631 bool psr2_sel_fetch_cff_enabled; 1625 1632 bool req_psr2_sdp_prior_scanline; 1626 1633 u8 sink_sync_latency; 1634 + u8 io_wake_lines; 1635 + u8 fast_wake_lines; 1627 1636 ktime_t last_entry_attempt; 1628 1637 ktime_t last_exit; 1629 1638 bool sink_not_reliable;
+227 -168
drivers/gpu/drm/i915/display/intel_dmc.c
··· 38 38 * low-power state and comes back to normal. 39 39 */ 40 40 41 + enum intel_dmc_id { 42 + DMC_FW_MAIN = 0, 43 + DMC_FW_PIPEA, 44 + DMC_FW_PIPEB, 45 + DMC_FW_PIPEC, 46 + DMC_FW_PIPED, 47 + DMC_FW_MAX 48 + }; 49 + 50 + struct intel_dmc { 51 + struct drm_i915_private *i915; 52 + struct work_struct work; 53 + const char *fw_path; 54 + u32 max_fw_size; /* bytes */ 55 + u32 version; 56 + struct dmc_fw_info { 57 + u32 mmio_count; 58 + i915_reg_t mmioaddr[20]; 59 + u32 mmiodata[20]; 60 + u32 dmc_offset; 61 + u32 start_mmioaddr; 62 + u32 dmc_fw_size; /*dwords */ 63 + u32 *payload; 64 + bool present; 65 + } dmc_info[DMC_FW_MAX]; 66 + }; 67 + 68 + /* Note: This may be NULL. */ 69 + static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915) 70 + { 71 + return i915->display.dmc.dmc; 72 + } 73 + 41 74 #define DMC_VERSION(major, minor) ((major) << 16 | (minor)) 42 75 #define DMC_VERSION_MAJOR(version) ((version) >> 16) 43 76 #define DMC_VERSION_MINOR(version) ((version) & 0xffff) ··· 282 249 char substepping; 283 250 }; 284 251 285 - static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id) 252 + #define for_each_dmc_id(__dmc_id) \ 253 + for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++) 254 + 255 + static bool is_valid_dmc_id(enum intel_dmc_id dmc_id) 286 256 { 287 - return i915->display.dmc.dmc_info[dmc_id].payload; 257 + return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX; 258 + } 259 + 260 + static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id) 261 + { 262 + struct intel_dmc *dmc = i915_to_dmc(i915); 263 + 264 + return dmc && dmc->dmc_info[dmc_id].payload; 288 265 } 289 266 290 267 bool intel_dmc_has_payload(struct drm_i915_private *i915) ··· 313 270 return si; 314 271 } 315 272 316 - static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) 273 + static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915) 317 274 { 318 275 /* The below bit doesn't need to be cleared ever 
afterwards */ 319 - intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0, 276 + intel_de_rmw(i915, DC_STATE_DEBUG, 0, 320 277 DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP); 321 - intel_de_posting_read(dev_priv, DC_STATE_DEBUG); 278 + intel_de_posting_read(i915, DC_STATE_DEBUG); 322 279 } 323 280 324 281 static void disable_event_handler(struct drm_i915_private *i915, ··· 358 315 } 359 316 360 317 static bool 361 - get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id, 318 + get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id, 362 319 i915_reg_t *ctl_reg, i915_reg_t *htp_reg) 363 320 { 364 - switch (dmc_id) { 365 - case DMC_FW_MAIN: 321 + if (dmc_id == DMC_FW_MAIN) { 366 322 if (DISPLAY_VER(i915) == 12) { 367 323 *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3); 368 324 *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3); 369 325 370 326 return true; 371 327 } 372 - break; 373 - case DMC_FW_PIPEA ... DMC_FW_PIPED: 328 + } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) { 374 329 if (IS_DG2(i915)) { 375 330 *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2); 376 331 *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2); 377 332 378 333 return true; 379 334 } 380 - break; 381 335 } 382 336 383 337 return false; ··· 383 343 static void 384 344 disable_all_flip_queue_events(struct drm_i915_private *i915) 385 345 { 386 - int dmc_id; 346 + enum intel_dmc_id dmc_id; 387 347 388 348 /* TODO: check if the following applies to all D13+ platforms. 
*/ 389 349 if (!IS_DG2(i915) && !IS_TIGERLAKE(i915)) 390 350 return; 391 351 392 - for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) { 352 + for_each_dmc_id(dmc_id) { 393 353 i915_reg_t ctl_reg; 394 354 i915_reg_t htp_reg; 395 355 ··· 405 365 406 366 static void disable_all_event_handlers(struct drm_i915_private *i915) 407 367 { 408 - int id; 368 + enum intel_dmc_id dmc_id; 409 369 410 370 /* TODO: disable the event handlers on pre-GEN12 platforms as well */ 411 371 if (DISPLAY_VER(i915) < 12) 412 372 return; 413 373 414 - for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) { 374 + for_each_dmc_id(dmc_id) { 415 375 int handler; 416 376 417 - if (!has_dmc_id_fw(i915, id)) 377 + if (!has_dmc_id_fw(i915, dmc_id)) 418 378 continue; 419 379 420 380 for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) 421 381 disable_event_handler(i915, 422 - DMC_EVT_CTL(i915, id, handler), 423 - DMC_EVT_HTP(i915, id, handler)); 382 + DMC_EVT_CTL(i915, dmc_id, handler), 383 + DMC_EVT_HTP(i915, dmc_id, handler)); 424 384 } 425 385 } 426 386 ··· 450 410 451 411 void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) 452 412 { 453 - if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe))) 413 + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); 414 + 415 + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) 454 416 return; 455 417 456 418 if (DISPLAY_VER(i915) >= 14) ··· 463 421 464 422 void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe) 465 423 { 466 - if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe))) 424 + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); 425 + 426 + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) 467 427 return; 468 428 469 429 if (DISPLAY_VER(i915) >= 14) ··· 476 432 477 433 /** 478 434 * intel_dmc_load_program() - write the firmware from memory to register. 479 - * @dev_priv: i915 drm device. 435 + * @i915: i915 drm device. 
480 436 * 481 437 * DMC firmware is read from a .bin file and kept in internal memory one time. 482 438 * Everytime display comes back from low power state this function is called to 483 439 * copy the firmware from internal memory to registers. 484 440 */ 485 - void intel_dmc_load_program(struct drm_i915_private *dev_priv) 441 + void intel_dmc_load_program(struct drm_i915_private *i915) 486 442 { 487 - struct intel_dmc *dmc = &dev_priv->display.dmc; 488 - u32 id, i; 443 + struct i915_power_domains *power_domains = &i915->display.power.domains; 444 + struct intel_dmc *dmc = i915_to_dmc(i915); 445 + enum intel_dmc_id dmc_id; 446 + u32 i; 489 447 490 - if (!intel_dmc_has_payload(dev_priv)) 448 + if (!intel_dmc_has_payload(i915)) 491 449 return; 492 450 493 - pipedmc_clock_gating_wa(dev_priv, true); 451 + pipedmc_clock_gating_wa(i915, true); 494 452 495 - disable_all_event_handlers(dev_priv); 453 + disable_all_event_handlers(i915); 496 454 497 - assert_rpm_wakelock_held(&dev_priv->runtime_pm); 455 + assert_rpm_wakelock_held(&i915->runtime_pm); 498 456 499 457 preempt_disable(); 500 458 501 - for (id = 0; id < DMC_FW_MAX; id++) { 502 - for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) { 503 - intel_de_write_fw(dev_priv, 504 - DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i), 505 - dmc->dmc_info[id].payload[i]); 459 + for_each_dmc_id(dmc_id) { 460 + for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) { 461 + intel_de_write_fw(i915, 462 + DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i), 463 + dmc->dmc_info[dmc_id].payload[i]); 506 464 } 507 465 } 508 466 509 467 preempt_enable(); 510 468 511 - for (id = 0; id < DMC_FW_MAX; id++) { 512 - for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) { 513 - intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i], 514 - dmc->dmc_info[id].mmiodata[i]); 469 + for_each_dmc_id(dmc_id) { 470 + for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { 471 + intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i], 472 + 
dmc->dmc_info[dmc_id].mmiodata[i]); 515 473 } 516 474 } 517 475 518 - dev_priv->display.dmc.dc_state = 0; 476 + power_domains->dc_state = 0; 519 477 520 - gen9_set_dc_state_debugmask(dev_priv); 478 + gen9_set_dc_state_debugmask(i915); 521 479 522 480 /* 523 481 * Flip queue events need to be disabled before enabling DC5/6. 524 482 * i915 doesn't use the flip queue feature, so disable it already 525 483 * here. 526 484 */ 527 - disable_all_flip_queue_events(dev_priv); 485 + disable_all_flip_queue_events(i915); 528 486 529 - pipedmc_clock_gating_wa(dev_priv, false); 487 + pipedmc_clock_gating_wa(i915, false); 530 488 } 531 489 532 490 /** ··· 550 504 551 505 void assert_dmc_loaded(struct drm_i915_private *i915) 552 506 { 553 - drm_WARN_ONCE(&i915->drm, 554 - !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), 507 + struct intel_dmc *dmc = i915_to_dmc(i915); 508 + 509 + drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n"); 510 + drm_WARN_ONCE(&i915->drm, dmc && 511 + !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), 555 512 "DMC program storage start is NULL\n"); 556 513 drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE), 557 514 "DMC SSP Base Not fine\n"); ··· 589 540 const struct stepping_info *si, 590 541 u8 package_ver) 591 542 { 592 - unsigned int i, id; 593 - 594 - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); 543 + struct drm_i915_private *i915 = dmc->i915; 544 + enum intel_dmc_id dmc_id; 545 + unsigned int i; 595 546 596 547 for (i = 0; i < num_entries; i++) { 597 - id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id; 548 + dmc_id = package_ver <= 1 ? 
DMC_FW_MAIN : fw_info[i].dmc_id; 598 549 599 - if (id >= DMC_FW_MAX) { 600 - drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id); 550 + if (!is_valid_dmc_id(dmc_id)) { 551 + drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id); 601 552 continue; 602 553 } 603 554 ··· 605 556 * check for the stepping since we already found a previous FW 606 557 * for this id. 607 558 */ 608 - if (dmc->dmc_info[id].present) 559 + if (dmc->dmc_info[dmc_id].present) 609 560 continue; 610 561 611 562 if (fw_info_matches_stepping(&fw_info[i], si)) { 612 - dmc->dmc_info[id].present = true; 613 - dmc->dmc_info[id].dmc_offset = fw_info[i].offset; 563 + dmc->dmc_info[dmc_id].present = true; 564 + dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset; 614 565 } 615 566 } 616 567 } 617 568 618 569 static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, 619 570 const u32 *mmioaddr, u32 mmio_count, 620 - int header_ver, u8 dmc_id) 571 + int header_ver, enum intel_dmc_id dmc_id) 621 572 { 622 - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); 573 + struct drm_i915_private *i915 = dmc->i915; 623 574 u32 start_range, end_range; 624 575 int i; 625 - 626 - if (dmc_id >= DMC_FW_MAX) { 627 - drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id); 628 - return false; 629 - } 630 576 631 577 if (header_ver == 1) { 632 578 start_range = DMC_MMIO_START_RANGE; ··· 650 606 651 607 static u32 parse_dmc_fw_header(struct intel_dmc *dmc, 652 608 const struct intel_dmc_header_base *dmc_header, 653 - size_t rem_size, u8 dmc_id) 609 + size_t rem_size, enum intel_dmc_id dmc_id) 654 610 { 655 - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); 611 + struct drm_i915_private *i915 = dmc->i915; 656 612 struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id]; 657 613 unsigned int header_len_bytes, dmc_header_size, payload_size, i; 658 614 const u32 *mmioaddr, *mmiodata; ··· 763 719 const struct stepping_info *si, 764 720 size_t rem_size) 765 
721 { 766 - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); 722 + struct drm_i915_private *i915 = dmc->i915; 767 723 u32 package_size = sizeof(struct intel_package_header); 768 724 u32 num_entries, max_entries; 769 725 const struct intel_fw_info *fw_info; ··· 817 773 struct intel_css_header *css_header, 818 774 size_t rem_size) 819 775 { 820 - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); 776 + struct drm_i915_private *i915 = dmc->i915; 821 777 822 778 if (rem_size < sizeof(struct intel_css_header)) { 823 779 drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); ··· 837 793 return sizeof(struct intel_css_header); 838 794 } 839 795 840 - static void parse_dmc_fw(struct drm_i915_private *dev_priv, 841 - const struct firmware *fw) 796 + static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) 842 797 { 798 + struct drm_i915_private *i915 = dmc->i915; 843 799 struct intel_css_header *css_header; 844 800 struct intel_package_header *package_header; 845 801 struct intel_dmc_header_base *dmc_header; 846 - struct intel_dmc *dmc = &dev_priv->display.dmc; 847 802 struct stepping_info display_info = { '*', '*'}; 848 - const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info); 803 + const struct stepping_info *si = intel_get_stepping_info(i915, &display_info); 804 + enum intel_dmc_id dmc_id; 849 805 u32 readcount = 0; 850 806 u32 r, offset; 851 - int id; 852 807 853 808 if (!fw) 854 809 return; ··· 868 825 869 826 readcount += r; 870 827 871 - for (id = 0; id < DMC_FW_MAX; id++) { 872 - if (!dev_priv->display.dmc.dmc_info[id].present) 828 + for_each_dmc_id(dmc_id) { 829 + if (!dmc->dmc_info[dmc_id].present) 873 830 continue; 874 831 875 - offset = readcount + dmc->dmc_info[id].dmc_offset * 4; 832 + offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4; 876 833 if (offset > fw->size) { 877 - drm_err(&dev_priv->drm, "Reading beyond the fw_size\n"); 834 + 
drm_err(&i915->drm, "Reading beyond the fw_size\n"); 878 835 continue; 879 836 } 880 837 881 838 dmc_header = (struct intel_dmc_header_base *)&fw->data[offset]; 882 - parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id); 839 + parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id); 883 840 } 884 841 } 885 842 886 - static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv) 843 + static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915) 887 844 { 888 - drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref); 889 - dev_priv->display.dmc.wakeref = 890 - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 845 + drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); 846 + i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); 891 847 } 892 848 893 - static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv) 849 + static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915) 894 850 { 895 851 intel_wakeref_t wakeref __maybe_unused = 896 - fetch_and_zero(&dev_priv->display.dmc.wakeref); 852 + fetch_and_zero(&i915->display.dmc.wakeref); 897 853 898 - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); 854 + intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 899 855 } 900 856 901 857 static const char *dmc_fallback_path(struct drm_i915_private *i915) ··· 907 865 908 866 static void dmc_load_work_fn(struct work_struct *work) 909 867 { 910 - struct drm_i915_private *dev_priv; 911 - struct intel_dmc *dmc; 868 + struct intel_dmc *dmc = container_of(work, typeof(*dmc), work); 869 + struct drm_i915_private *i915 = dmc->i915; 912 870 const struct firmware *fw = NULL; 913 871 const char *fallback_path; 914 872 int err; 915 873 916 - dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work); 917 - dmc = &dev_priv->display.dmc; 874 + err = request_firmware(&fw, dmc->fw_path, i915->drm.dev); 918 875 919 - err = request_firmware(&fw, dev_priv->display.dmc.fw_path, 
dev_priv->drm.dev); 920 - 921 - if (err == -ENOENT && !dev_priv->params.dmc_firmware_path) { 922 - fallback_path = dmc_fallback_path(dev_priv); 876 + if (err == -ENOENT && !i915->params.dmc_firmware_path) { 877 + fallback_path = dmc_fallback_path(i915); 923 878 if (fallback_path) { 924 - drm_dbg_kms(&dev_priv->drm, 925 - "%s not found, falling back to %s\n", 926 - dmc->fw_path, 927 - fallback_path); 928 - err = request_firmware(&fw, fallback_path, dev_priv->drm.dev); 879 + drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n", 880 + dmc->fw_path, fallback_path); 881 + err = request_firmware(&fw, fallback_path, i915->drm.dev); 929 882 if (err == 0) 930 - dev_priv->display.dmc.fw_path = fallback_path; 883 + dmc->fw_path = fallback_path; 931 884 } 932 885 } 933 886 934 - parse_dmc_fw(dev_priv, fw); 887 + parse_dmc_fw(dmc, fw); 935 888 936 - if (intel_dmc_has_payload(dev_priv)) { 937 - intel_dmc_load_program(dev_priv); 938 - intel_dmc_runtime_pm_put(dev_priv); 889 + if (intel_dmc_has_payload(i915)) { 890 + intel_dmc_load_program(i915); 891 + intel_dmc_runtime_pm_put(i915); 939 892 940 - drm_info(&dev_priv->drm, 941 - "Finished loading DMC firmware %s (v%u.%u)\n", 942 - dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version), 893 + drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n", 894 + dmc->fw_path, DMC_VERSION_MAJOR(dmc->version), 943 895 DMC_VERSION_MINOR(dmc->version)); 944 896 } else { 945 - drm_notice(&dev_priv->drm, 897 + drm_notice(&i915->drm, 946 898 "Failed to load DMC firmware %s." 947 899 " Disabling runtime power management.\n", 948 900 dmc->fw_path); 949 - drm_notice(&dev_priv->drm, "DMC firmware homepage: %s", 901 + drm_notice(&i915->drm, "DMC firmware homepage: %s", 950 902 INTEL_UC_FIRMWARE_URL); 951 903 } 952 904 ··· 948 912 } 949 913 950 914 /** 951 - * intel_dmc_ucode_init() - initialize the firmware loading. 952 - * @dev_priv: i915 drm device. 915 + * intel_dmc_init() - initialize the firmware loading. 
916 + * @i915: i915 drm device. 953 917 * 954 918 * This function is called at the time of loading the display driver to read 955 919 * firmware from a .bin file and copied into a internal memory. 956 920 */ 957 - void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) 921 + void intel_dmc_init(struct drm_i915_private *i915) 958 922 { 959 - struct intel_dmc *dmc = &dev_priv->display.dmc; 923 + struct intel_dmc *dmc; 960 924 961 - INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn); 962 - 963 - if (!HAS_DMC(dev_priv)) 925 + if (!HAS_DMC(i915)) 964 926 return; 965 927 966 928 /* ··· 969 935 * suspend as runtime suspend *requires* a working DMC for whatever 970 936 * reason. 971 937 */ 972 - intel_dmc_runtime_pm_get(dev_priv); 938 + intel_dmc_runtime_pm_get(i915); 973 939 974 - if (IS_DG2(dev_priv)) { 940 + dmc = kzalloc(sizeof(*dmc), GFP_KERNEL); 941 + if (!dmc) 942 + return; 943 + 944 + dmc->i915 = i915; 945 + 946 + INIT_WORK(&dmc->work, dmc_load_work_fn); 947 + 948 + if (IS_DG2(i915)) { 975 949 dmc->fw_path = DG2_DMC_PATH; 976 950 dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; 977 - } else if (IS_ALDERLAKE_P(dev_priv)) { 951 + } else if (IS_ALDERLAKE_P(i915)) { 978 952 dmc->fw_path = ADLP_DMC_PATH; 979 953 dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; 980 - } else if (IS_ALDERLAKE_S(dev_priv)) { 954 + } else if (IS_ALDERLAKE_S(i915)) { 981 955 dmc->fw_path = ADLS_DMC_PATH; 982 956 dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 983 - } else if (IS_DG1(dev_priv)) { 957 + } else if (IS_DG1(i915)) { 984 958 dmc->fw_path = DG1_DMC_PATH; 985 959 dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 986 - } else if (IS_ROCKETLAKE(dev_priv)) { 960 + } else if (IS_ROCKETLAKE(i915)) { 987 961 dmc->fw_path = RKL_DMC_PATH; 988 962 dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 989 - } else if (IS_TIGERLAKE(dev_priv)) { 963 + } else if (IS_TIGERLAKE(i915)) { 990 964 dmc->fw_path = TGL_DMC_PATH; 991 965 dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; 992 - } 
else if (DISPLAY_VER(dev_priv) == 11) { 966 + } else if (DISPLAY_VER(i915) == 11) { 993 967 dmc->fw_path = ICL_DMC_PATH; 994 968 dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE; 995 - } else if (IS_GEMINILAKE(dev_priv)) { 969 + } else if (IS_GEMINILAKE(i915)) { 996 970 dmc->fw_path = GLK_DMC_PATH; 997 971 dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE; 998 - } else if (IS_KABYLAKE(dev_priv) || 999 - IS_COFFEELAKE(dev_priv) || 1000 - IS_COMETLAKE(dev_priv)) { 972 + } else if (IS_KABYLAKE(i915) || 973 + IS_COFFEELAKE(i915) || 974 + IS_COMETLAKE(i915)) { 1001 975 dmc->fw_path = KBL_DMC_PATH; 1002 976 dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE; 1003 - } else if (IS_SKYLAKE(dev_priv)) { 977 + } else if (IS_SKYLAKE(i915)) { 1004 978 dmc->fw_path = SKL_DMC_PATH; 1005 979 dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE; 1006 - } else if (IS_BROXTON(dev_priv)) { 980 + } else if (IS_BROXTON(i915)) { 1007 981 dmc->fw_path = BXT_DMC_PATH; 1008 982 dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE; 1009 983 } 1010 984 1011 - if (dev_priv->params.dmc_firmware_path) { 1012 - if (strlen(dev_priv->params.dmc_firmware_path) == 0) { 1013 - dmc->fw_path = NULL; 1014 - drm_info(&dev_priv->drm, 985 + if (i915->params.dmc_firmware_path) { 986 + if (strlen(i915->params.dmc_firmware_path) == 0) { 987 + drm_info(&i915->drm, 1015 988 "Disabling DMC firmware and runtime PM\n"); 1016 - return; 989 + goto out; 1017 990 } 1018 991 1019 - dmc->fw_path = dev_priv->params.dmc_firmware_path; 992 + dmc->fw_path = i915->params.dmc_firmware_path; 1020 993 } 1021 994 1022 995 if (!dmc->fw_path) { 1023 - drm_dbg_kms(&dev_priv->drm, 996 + drm_dbg_kms(&i915->drm, 1024 997 "No known DMC firmware for platform, disabling runtime PM\n"); 1025 - return; 998 + goto out; 1026 999 } 1027 1000 1028 - drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path); 1029 - schedule_work(&dev_priv->display.dmc.work); 1001 + i915->display.dmc.dmc = dmc; 1002 + 1003 + drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path); 1004 + schedule_work(&dmc->work); 1005 + 
1006 + return; 1007 + 1008 + out: 1009 + kfree(dmc); 1030 1010 } 1031 1011 1032 1012 /** 1033 - * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend 1034 - * @dev_priv: i915 drm device 1013 + * intel_dmc_suspend() - prepare DMC firmware before system suspend 1014 + * @i915: i915 drm device 1035 1015 * 1036 1016 * Prepare the DMC firmware before entering system suspend. This includes 1037 1017 * flushing pending work items and releasing any resources acquired during 1038 1018 * init. 1039 1019 */ 1040 - void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv) 1020 + void intel_dmc_suspend(struct drm_i915_private *i915) 1041 1021 { 1042 - if (!HAS_DMC(dev_priv)) 1022 + struct intel_dmc *dmc = i915_to_dmc(i915); 1023 + 1024 + if (!HAS_DMC(i915)) 1043 1025 return; 1044 1026 1045 - flush_work(&dev_priv->display.dmc.work); 1027 + if (dmc) 1028 + flush_work(&dmc->work); 1046 1029 1047 1030 /* Drop the reference held in case DMC isn't loaded. */ 1048 - if (!intel_dmc_has_payload(dev_priv)) 1049 - intel_dmc_runtime_pm_put(dev_priv); 1031 + if (!intel_dmc_has_payload(i915)) 1032 + intel_dmc_runtime_pm_put(i915); 1050 1033 } 1051 1034 1052 1035 /** 1053 - * intel_dmc_ucode_resume() - init DMC firmware during system resume 1054 - * @dev_priv: i915 drm device 1036 + * intel_dmc_resume() - init DMC firmware during system resume 1037 + * @i915: i915 drm device 1055 1038 * 1056 1039 * Reinitialize the DMC firmware during system resume, reacquiring any 1057 - * resources released in intel_dmc_ucode_suspend(). 1040 + * resources released in intel_dmc_suspend(). 1058 1041 */ 1059 - void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv) 1042 + void intel_dmc_resume(struct drm_i915_private *i915) 1060 1043 { 1061 - if (!HAS_DMC(dev_priv)) 1044 + if (!HAS_DMC(i915)) 1062 1045 return; 1063 1046 1064 1047 /* 1065 1048 * Reacquire the reference to keep RPM disabled in case DMC isn't 1066 1049 * loaded. 
1067 1050 */ 1068 - if (!intel_dmc_has_payload(dev_priv)) 1069 - intel_dmc_runtime_pm_get(dev_priv); 1051 + if (!intel_dmc_has_payload(i915)) 1052 + intel_dmc_runtime_pm_get(i915); 1070 1053 } 1071 1054 1072 1055 /** 1073 - * intel_dmc_ucode_fini() - unload the DMC firmware. 1074 - * @dev_priv: i915 drm device. 1056 + * intel_dmc_fini() - unload the DMC firmware. 1057 + * @i915: i915 drm device. 1075 1058 * 1076 1059 * Firmmware unloading includes freeing the internal memory and reset the 1077 1060 * firmware loading status. 1078 1061 */ 1079 - void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv) 1062 + void intel_dmc_fini(struct drm_i915_private *i915) 1080 1063 { 1081 - int id; 1064 + struct intel_dmc *dmc = i915_to_dmc(i915); 1065 + enum intel_dmc_id dmc_id; 1082 1066 1083 - if (!HAS_DMC(dev_priv)) 1067 + if (!HAS_DMC(i915)) 1084 1068 return; 1085 1069 1086 - intel_dmc_ucode_suspend(dev_priv); 1087 - drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref); 1070 + intel_dmc_suspend(i915); 1071 + drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); 1088 1072 1089 - for (id = 0; id < DMC_FW_MAX; id++) 1090 - kfree(dev_priv->display.dmc.dmc_info[id].payload); 1073 + if (dmc) { 1074 + for_each_dmc_id(dmc_id) 1075 + kfree(dmc->dmc_info[dmc_id].payload); 1076 + 1077 + kfree(dmc); 1078 + i915->display.dmc.dmc = NULL; 1079 + } 1091 1080 } 1092 1081 1093 1082 void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m, 1094 1083 struct drm_i915_private *i915) 1095 1084 { 1096 - struct intel_dmc *dmc = &i915->display.dmc; 1085 + struct intel_dmc *dmc = i915_to_dmc(i915); 1097 1086 1098 1087 if (!HAS_DMC(i915)) 1099 1088 return; 1100 1089 1090 + i915_error_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); 1101 1091 i915_error_printf(m, "DMC loaded: %s\n", 1102 1092 str_yes_no(intel_dmc_has_payload(i915))); 1103 - i915_error_printf(m, "DMC fw version: %d.%d\n", 1104 - DMC_VERSION_MAJOR(dmc->version), 1105 - DMC_VERSION_MINOR(dmc->version)); 1093 + if 
(dmc) 1094 + i915_error_printf(m, "DMC fw version: %d.%d\n", 1095 + DMC_VERSION_MAJOR(dmc->version), 1096 + DMC_VERSION_MINOR(dmc->version)); 1106 1097 } 1107 1098 1108 1099 static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) 1109 1100 { 1110 1101 struct drm_i915_private *i915 = m->private; 1102 + struct intel_dmc *dmc = i915_to_dmc(i915); 1111 1103 intel_wakeref_t wakeref; 1112 - struct intel_dmc *dmc; 1113 1104 i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG; 1114 1105 1115 1106 if (!HAS_DMC(i915)) 1116 1107 return -ENODEV; 1117 1108 1118 - dmc = &i915->display.dmc; 1119 - 1120 1109 wakeref = intel_runtime_pm_get(&i915->runtime_pm); 1121 1110 1111 + seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); 1122 1112 seq_printf(m, "fw loaded: %s\n", 1123 1113 str_yes_no(intel_dmc_has_payload(i915))); 1124 - seq_printf(m, "path: %s\n", dmc->fw_path); 1114 + seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A"); 1125 1115 seq_printf(m, "Pipe A fw needed: %s\n", 1126 1116 str_yes_no(GRAPHICS_VER(i915) >= 12)); 1127 1117 seq_printf(m, "Pipe A fw loaded: %s\n", 1128 - str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload)); 1118 + str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA))); 1129 1119 seq_printf(m, "Pipe B fw needed: %s\n", 1130 1120 str_yes_no(IS_ALDERLAKE_P(i915) || 1131 1121 DISPLAY_VER(i915) >= 14)); 1132 1122 seq_printf(m, "Pipe B fw loaded: %s\n", 1133 - str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload)); 1123 + str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB))); 1134 1124 1135 1125 if (!intel_dmc_has_payload(i915)) 1136 1126 goto out; ··· 1188 1130 seq_printf(m, "DC5 -> DC6 count: %d\n", 1189 1131 intel_de_read(i915, dc6_reg)); 1190 1132 1191 - out: 1192 1133 seq_printf(m, "program base: 0x%08x\n", 1193 1134 intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0))); 1135 + 1136 + out: 1194 1137 seq_printf(m, "ssp base: 0x%08x\n", 1195 1138 intel_de_read(i915, DMC_SSP_BASE)); 1196 1139 seq_printf(m, "htp: 0x%08x\n", 
intel_de_read(i915, DMC_HTP_SKL));
+5 -39
drivers/gpu/drm/i915/display/intel_dmc.h
··· 6 6 #ifndef __INTEL_DMC_H__ 7 7 #define __INTEL_DMC_H__ 8 8 9 - #include "i915_reg_defs.h" 10 - #include "intel_wakeref.h" 11 - #include <linux/workqueue.h> 9 + #include <linux/types.h> 12 10 13 11 struct drm_i915_error_state_buf; 14 12 struct drm_i915_private; 15 - 16 13 enum pipe; 17 14 18 - enum { 19 - DMC_FW_MAIN = 0, 20 - DMC_FW_PIPEA, 21 - DMC_FW_PIPEB, 22 - DMC_FW_PIPEC, 23 - DMC_FW_PIPED, 24 - DMC_FW_MAX 25 - }; 26 - 27 - struct intel_dmc { 28 - struct work_struct work; 29 - const char *fw_path; 30 - u32 max_fw_size; /* bytes */ 31 - u32 version; 32 - struct dmc_fw_info { 33 - u32 mmio_count; 34 - i915_reg_t mmioaddr[20]; 35 - u32 mmiodata[20]; 36 - u32 dmc_offset; 37 - u32 start_mmioaddr; 38 - u32 dmc_fw_size; /*dwords */ 39 - u32 *payload; 40 - bool present; 41 - } dmc_info[DMC_FW_MAX]; 42 - 43 - u32 dc_state; 44 - u32 target_dc_state; 45 - u32 allowed_dc_mask; 46 - intel_wakeref_t wakeref; 47 - }; 48 - 49 - void intel_dmc_ucode_init(struct drm_i915_private *i915); 15 + void intel_dmc_init(struct drm_i915_private *i915); 50 16 void intel_dmc_load_program(struct drm_i915_private *i915); 51 17 void intel_dmc_disable_program(struct drm_i915_private *i915); 52 18 void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe); 53 19 void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe); 54 - void intel_dmc_ucode_fini(struct drm_i915_private *i915); 55 - void intel_dmc_ucode_suspend(struct drm_i915_private *i915); 56 - void intel_dmc_ucode_resume(struct drm_i915_private *i915); 20 + void intel_dmc_fini(struct drm_i915_private *i915); 21 + void intel_dmc_suspend(struct drm_i915_private *i915); 22 + void intel_dmc_resume(struct drm_i915_private *i915); 57 23 bool intel_dmc_has_payload(struct drm_i915_private *i915); 58 24 void intel_dmc_debugfs_register(struct drm_i915_private *i915); 59 25 void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
+56 -33
drivers/gpu/drm/i915/display/intel_dp.c
··· 288 288 289 289 static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) 290 290 { 291 - int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base); 291 + int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata); 292 292 int max_lanes = dig_port->max_lanes; 293 293 294 294 if (vbt_max_lanes) ··· 425 425 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 426 426 int max_rate; 427 427 428 - max_rate = intel_bios_dp_max_link_rate(encoder); 428 + max_rate = intel_bios_dp_max_link_rate(encoder->devdata); 429 429 430 430 if (intel_dp_is_edp(intel_dp)) { 431 431 struct intel_connector *connector = intel_dp->attached_connector; ··· 1415 1415 DP_DSC_MINOR_SHIFT; 1416 1416 } 1417 1417 1418 + static int intel_dp_get_slice_height(int vactive) 1419 + { 1420 + int slice_height; 1421 + 1422 + /* 1423 + * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108 1424 + * lines is an optimal slice height, but any size can be used as long as 1425 + * vertical active integer multiple and maximum vertical slice count 1426 + * requirements are met. 1427 + */ 1428 + for (slice_height = 108; slice_height <= vactive; slice_height += 2) 1429 + if (vactive % slice_height == 0) 1430 + return slice_height; 1431 + 1432 + /* 1433 + * Highly unlikely we reach here as most of the resolutions will end up 1434 + * finding appropriate slice_height in above loop but returning 1435 + * slice_height as 2 here as it should work with all resolutions. 1436 + */ 1437 + return 2; 1438 + } 1439 + 1418 1440 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 1419 1441 struct intel_crtc_state *crtc_state) 1420 1442 { ··· 1455 1433 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1456 1434 vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; 1457 1435 1458 - /* 1459 - * Slice Height of 8 works for all currently available panels. So start 1460 - * with that if pic_height is an integral multiple of 8. 
Eventually add 1461 - * logic to try multiple slice heights. 1462 - */ 1463 - if (vdsc_cfg->pic_height % 8 == 0) 1464 - vdsc_cfg->slice_height = 8; 1465 - else if (vdsc_cfg->pic_height % 4 == 0) 1466 - vdsc_cfg->slice_height = 4; 1467 - else 1468 - vdsc_cfg->slice_height = 2; 1436 + vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height); 1469 1437 1470 1438 ret = intel_dsc_compute_params(crtc_state); 1471 1439 if (ret) ··· 1739 1727 * Our YCbCr output is always limited range. 1740 1728 * crtc_state->limited_color_range only applies to RGB, 1741 1729 * and it must never be set for YCbCr or we risk setting 1742 - * some conflicting bits in PIPECONF which will mess up 1730 + * some conflicting bits in TRANSCONF which will mess up 1743 1731 * the colors on the monitor. 1744 1732 */ 1745 1733 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) ··· 2003 1991 } 2004 1992 2005 1993 static bool intel_dp_has_audio(struct intel_encoder *encoder, 2006 - const struct intel_crtc_state *crtc_state, 2007 1994 const struct drm_connector_state *conn_state) 2008 1995 { 2009 1996 struct drm_i915_private *i915 = to_i915(encoder->base.dev); ··· 2068 2057 struct drm_connector *connector = conn_state->connector; 2069 2058 2070 2059 pipe_config->sdp_split_enable = 2071 - intel_dp_has_audio(encoder, pipe_config, conn_state) && 2060 + intel_dp_has_audio(encoder, conn_state) && 2072 2061 intel_dp_is_uhbr(pipe_config); 2073 2062 2074 2063 drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n", ··· 2092 2081 pipe_config->has_pch_encoder = true; 2093 2082 2094 2083 pipe_config->has_audio = 2095 - intel_dp_has_audio(encoder, pipe_config, conn_state) && 2084 + intel_dp_has_audio(encoder, conn_state) && 2096 2085 intel_audio_compute_config(encoder, pipe_config, conn_state); 2097 2086 2098 2087 fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode); ··· 2292 2281 2293 2282 void intel_dp_wait_source_oui(struct intel_dp *intel_dp) 2294 2283 { 2284 + struct 
intel_connector *connector = intel_dp->attached_connector; 2295 2285 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2296 2286 2297 - drm_dbg_kms(&i915->drm, "Performing OUI wait\n"); 2298 - wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30); 2287 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n", 2288 + connector->base.base.id, connector->base.name, 2289 + connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout); 2290 + 2291 + wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 2292 + connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout); 2299 2293 } 2300 2294 2301 2295 /* If the device supports it, try to set the power state appropriately */ ··· 4867 4851 if (!ret) 4868 4852 drm_dp_cec_register_connector(&intel_dp->aux, connector); 4869 4853 4870 - if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) 4854 + if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata)) 4871 4855 return ret; 4872 4856 4873 4857 /* ··· 5145 5129 return IRQ_HANDLED; 5146 5130 } 5147 5131 5148 - /* check the VBT to see whether the eDP is on another port */ 5149 - bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 5132 + static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv, 5133 + const struct intel_bios_encoder_data *devdata, 5134 + enum port port) 5150 5135 { 5151 5136 /* 5152 5137 * eDP not supported on g4x. 
so bail out early just ··· 5159 5142 if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A) 5160 5143 return true; 5161 5144 5162 - return intel_bios_is_port_edp(dev_priv, port); 5145 + return devdata && intel_bios_encoder_supports_edp(devdata); 5146 + } 5147 + 5148 + bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port) 5149 + { 5150 + const struct intel_bios_encoder_data *devdata = 5151 + intel_bios_encoder_data_lookup(i915, port); 5152 + 5153 + return _intel_dp_is_port_edp(i915, devdata, port); 5163 5154 } 5164 5155 5165 5156 static bool 5166 - has_gamut_metadata_dip(struct drm_i915_private *i915, enum port port) 5157 + has_gamut_metadata_dip(struct intel_encoder *encoder) 5167 5158 { 5168 - if (intel_bios_is_lspcon_present(i915, port)) 5159 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 5160 + enum port port = encoder->port; 5161 + 5162 + if (intel_bios_encoder_is_lspcon(encoder->devdata)) 5169 5163 return false; 5170 5164 5171 5165 if (DISPLAY_VER(i915) >= 11) ··· 5211 5183 drm_connector_attach_max_bpc_property(connector, 6, 12); 5212 5184 5213 5185 /* Register HDMI colorspace for case of lspcon */ 5214 - if (intel_bios_is_lspcon_present(dev_priv, port)) { 5186 + if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) { 5215 5187 drm_connector_attach_content_type_property(connector); 5216 5188 intel_attach_hdmi_colorspace_property(connector); 5217 5189 } else { 5218 5190 intel_attach_dp_colorspace_property(connector); 5219 5191 } 5220 5192 5221 - if (has_gamut_metadata_dip(dev_priv, port)) 5193 + if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base)) 5222 5194 drm_connector_attach_hdr_output_metadata_property(connector); 5223 5195 5224 5196 if (HAS_VRR(dev_priv)) ··· 5260 5232 5261 5233 if (pipe != PIPE_A && pipe != PIPE_B) 5262 5234 pipe = PIPE_A; 5263 - 5264 - drm_dbg_kms(&i915->drm, 5265 - "[CONNECTOR:%d:%s] using pipe %c for initial backlight setup\n", 5266 - connector->base.base.id, connector->base.name, 
5267 - pipe_name(pipe)); 5268 5235 } 5269 5236 5270 5237 intel_backlight_setup(connector, pipe); ··· 5435 5412 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 5436 5413 intel_dp->attached_connector = intel_connector; 5437 5414 5438 - if (intel_dp_is_port_edp(dev_priv, port)) { 5415 + if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) { 5439 5416 /* 5440 5417 * Currently we don't support eDP on TypeC ports, although in 5441 5418 * theory it could work on TypeC legacy ports.
+35
drivers/gpu/drm/i915/display/intel_dp_aux.c
··· 6 6 #include "i915_drv.h" 7 7 #include "i915_reg.h" 8 8 #include "i915_trace.h" 9 + #include "intel_bios.h" 9 10 #include "intel_de.h" 10 11 #include "intel_display_types.h" 11 12 #include "intel_dp_aux.h" ··· 737 736 738 737 intel_dp->aux.transfer = intel_dp_aux_transfer; 739 738 cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE); 739 + } 740 + 741 + static enum aux_ch default_aux_ch(struct intel_encoder *encoder) 742 + { 743 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 744 + 745 + /* SKL has DDI E but no AUX E */ 746 + if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E) 747 + return AUX_CH_A; 748 + 749 + return (enum aux_ch)encoder->port; 750 + } 751 + 752 + enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder) 753 + { 754 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 755 + enum aux_ch aux_ch; 756 + 757 + aux_ch = intel_bios_dp_aux_ch(encoder->devdata); 758 + if (aux_ch != AUX_CH_NONE) { 759 + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] using AUX %c (VBT)\n", 760 + encoder->base.base.id, encoder->base.name, 761 + aux_ch_name(aux_ch)); 762 + return aux_ch; 763 + } 764 + 765 + aux_ch = default_aux_ch(encoder); 766 + 767 + drm_dbg_kms(&i915->drm, 768 + "[ENCODER:%d:%s] using AUX %c (platform default)\n", 769 + encoder->base.base.id, encoder->base.name, 770 + aux_ch_name(aux_ch)); 771 + 772 + return aux_ch; 740 773 }
+4
drivers/gpu/drm/i915/display/intel_dp_aux.h
··· 6 6 #ifndef __INTEL_DP_AUX_H__ 7 7 #define __INTEL_DP_AUX_H__ 8 8 9 + enum aux_ch; 9 10 struct intel_dp; 11 + struct intel_encoder; 10 12 11 13 void intel_dp_aux_fini(struct intel_dp *intel_dp); 12 14 void intel_dp_aux_init(struct intel_dp *intel_dp); 15 + 16 + enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder); 13 17 14 18 #endif /* __INTEL_DP_AUX_H__ */
+57 -27
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
··· 105 105 INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3, 106 106 }; 107 107 108 + static bool is_intel_tcon_cap(const u8 tcon_cap[4]) 109 + { 110 + return tcon_cap[0] >= 1; 111 + } 112 + 108 113 /* Intel EDP backlight callbacks */ 109 114 static bool 110 115 intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) ··· 130 125 if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP)) 131 126 return false; 132 127 133 - if (tcon_cap[0] >= 1) { 134 - drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n", 135 - tcon_cap[0]); 136 - } else { 137 - drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n", 138 - tcon_cap[0]); 128 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n", 129 + connector->base.base.id, connector->base.name, 130 + is_intel_tcon_cap(tcon_cap) ? "Intel" : "unsupported", tcon_cap[0]); 131 + 132 + if (!is_intel_tcon_cap(tcon_cap)) 139 133 return false; 140 - } 141 134 142 135 /* 143 136 * If we don't have HDR static metadata there is no way to ··· 150 147 !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type & 151 148 BIT(HDMI_STATIC_METADATA_TYPE1))) { 152 149 drm_info(&i915->drm, 153 - "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n", 150 + "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. 
needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n", 151 + connector->base.base.id, connector->base.name, 154 152 INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL); 155 153 return false; 156 154 } ··· 172 168 u8 buf[2] = { 0 }; 173 169 174 170 if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) { 175 - drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n"); 171 + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n", 172 + connector->base.base.id, connector->base.name); 176 173 return 0; 177 174 } 178 175 ··· 190 185 191 186 if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, 192 187 sizeof(buf)) != sizeof(buf)) { 193 - drm_err(&i915->drm, "Failed to read brightness from DPCD\n"); 188 + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n", 189 + connector->base.base.id, connector->base.name); 194 190 return 0; 195 191 } 196 192 ··· 211 205 212 206 if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, 213 207 sizeof(buf)) != sizeof(buf)) 214 - drm_err(dev, "Failed to write brightness level to DPCD\n"); 208 + drm_err(dev, "[CONNECTOR:%d:%s] Failed to write brightness level to DPCD\n", 209 + connector->base.base.id, connector->base.name); 215 210 } 216 211 217 212 static void ··· 245 238 246 239 ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl); 247 240 if (ret != 1) { 248 - drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret); 241 + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n", 242 + connector->base.base.id, connector->base.name, ret); 249 243 return; 250 244 } 251 245 ··· 262 254 ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE; 263 255 } 264 256 265 - if (ctrl != old_ctrl) 266 - if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1) 267 - 
drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n"); 257 + if (ctrl != old_ctrl && 258 + drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1) 259 + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n", 260 + connector->base.base.id, connector->base.name); 268 261 } 269 262 270 263 static void ··· 282 273 panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0)); 283 274 } 284 275 276 + static const char *dpcd_vs_pwm_str(bool aux) 277 + { 278 + return aux ? "DPCD" : "PWM"; 279 + } 280 + 285 281 static int 286 282 intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe) 287 283 { ··· 296 282 &connector->base.display_info.luminance_range; 297 283 int ret; 298 284 299 - if (panel->backlight.edp.intel.sdr_uses_aux) { 300 - drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n"); 301 - } else { 302 - drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n"); 285 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n", 286 + connector->base.base.id, connector->base.name, 287 + dpcd_vs_pwm_str(panel->backlight.edp.intel.sdr_uses_aux)); 303 288 289 + if (!panel->backlight.edp.intel.sdr_uses_aux) { 304 290 ret = panel->backlight.pwm_funcs->setup(connector, pipe); 305 291 if (ret < 0) { 306 292 drm_err(&i915->drm, 307 - "Failed to setup SDR backlight controls through PWM: %d\n", ret); 293 + "[CONNECTOR:%d:%s] Failed to setup SDR backlight controls through PWM: %d\n", 294 + connector->base.base.id, connector->base.name, ret); 308 295 return ret; 309 296 } 310 297 } ··· 318 303 panel->backlight.min = 0; 319 304 } 320 305 321 - drm_dbg_kms(&i915->drm, "Using backlight range %d..%d\n", panel->backlight.min, 322 - panel->backlight.max); 306 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n", 307 + connector->base.base.id, 
connector->base.name, 308 + panel->backlight.min, panel->backlight.max); 309 + 323 310 324 311 panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe); 325 312 panel->backlight.enabled = panel->backlight.level != 0; ··· 403 386 if (ret < 0) 404 387 return ret; 405 388 389 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n", 390 + connector->base.base.id, connector->base.name, 391 + dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable)); 392 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n", 393 + connector->base.base.id, connector->base.name, 394 + dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set)); 395 + 406 396 if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) { 407 397 ret = panel->backlight.pwm_funcs->setup(connector, pipe); 408 398 if (ret < 0) { 409 399 drm_err(&i915->drm, 410 - "Failed to setup PWM backlight controls for eDP backlight: %d\n", 411 - ret); 400 + "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n", 401 + connector->base.base.id, connector->base.name, ret); 412 402 return ret; 413 403 } 414 404 } ··· 442 418 } 443 419 } 444 420 421 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n", 422 + connector->base.base.id, connector->base.name); 423 + 445 424 return 0; 446 425 } 447 426 ··· 455 428 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 456 429 457 430 if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) { 458 - drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n"); 431 + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n", 432 + connector->base.base.id, connector->base.name); 459 433 return true; 460 434 } 461 435 return false; ··· 532 504 * interfaces is to probe for Intel's first, and VESA's second. 
533 505 */ 534 506 if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) { 535 - drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n"); 507 + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using Intel proprietary eDP backlight controls\n", 508 + connector->base.base.id, connector->base.name); 536 509 panel->backlight.funcs = &intel_dp_hdr_bl_funcs; 537 510 return 0; 538 511 } 539 512 540 513 if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) { 541 - drm_dbg_kms(dev, "Using VESA eDP backlight controls\n"); 514 + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using VESA eDP backlight controls\n", 515 + connector->base.base.id, connector->base.name); 542 516 panel->backlight.funcs = &intel_dp_vesa_bl_funcs; 543 517 return 0; 544 518 }
+18 -11
drivers/gpu/drm/i915/display/intel_dp_mst.c
··· 265 265 return 0; 266 266 } 267 267 268 + static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state) 269 + { 270 + const struct intel_digital_connector_state *intel_conn_state = 271 + to_intel_digital_connector_state(conn_state); 272 + struct intel_connector *connector = 273 + to_intel_connector(conn_state->connector); 274 + 275 + if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 276 + return connector->port->has_audio; 277 + else 278 + return intel_conn_state->force_audio == HDMI_AUDIO_ON; 279 + } 280 + 268 281 static int intel_dp_mst_compute_config(struct intel_encoder *encoder, 269 282 struct intel_crtc_state *pipe_config, 270 283 struct drm_connector_state *conn_state) ··· 285 272 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 286 273 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); 287 274 struct intel_dp *intel_dp = &intel_mst->primary->dp; 288 - struct intel_connector *connector = 289 - to_intel_connector(conn_state->connector); 290 - struct intel_digital_connector_state *intel_conn_state = 291 - to_intel_digital_connector_state(conn_state); 292 275 const struct drm_display_mode *adjusted_mode = 293 276 &pipe_config->hw.adjusted_mode; 294 277 struct link_config_limits limits; ··· 296 287 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 297 288 pipe_config->has_pch_encoder = false; 298 289 299 - if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 300 - pipe_config->has_audio = connector->port->has_audio; 301 - else 302 - pipe_config->has_audio = 303 - intel_conn_state->force_audio == HDMI_AUDIO_ON; 290 + pipe_config->has_audio = 291 + intel_dp_mst_has_audio(conn_state) && 292 + intel_audio_compute_config(encoder, pipe_config, conn_state); 304 293 305 294 /* 306 295 * for MST we always configure max link bw - the spec doesn't ··· 611 604 * no clock to the transcoder" 612 605 */ 613 606 if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream) 614 - intel_ddi_disable_pipe_clock(old_crtc_state); 607 + 
intel_ddi_disable_transcoder_clock(old_crtc_state); 615 608 616 609 617 610 intel_mst->connector = NULL; ··· 691 684 * here for the following ones. 692 685 */ 693 686 if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream) 694 - intel_ddi_enable_pipe_clock(encoder, pipe_config); 687 + intel_ddi_enable_transcoder_clock(encoder, pipe_config); 695 688 696 689 intel_ddi_set_dp_msa(pipe_config, conn_state); 697 690 }
+15 -34
drivers/gpu/drm/i915/display/intel_dpio_phy.c
··· 389 389 "force reprogramming it\n", phy); 390 390 } 391 391 392 - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); 393 - val |= phy_info->pwron_mask; 394 - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val); 392 + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask); 395 393 396 394 /* 397 395 * The PHY registers start out inaccessible and respond to reads with ··· 408 410 phy); 409 411 410 412 /* Program PLL Rcomp code offset */ 411 - val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy)); 412 - val &= ~IREF0RC_OFFSET_MASK; 413 - val |= 0xE4 << IREF0RC_OFFSET_SHIFT; 414 - intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val); 413 + intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK, 414 + 0xE4 << IREF0RC_OFFSET_SHIFT); 415 415 416 - val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy)); 417 - val &= ~IREF1RC_OFFSET_MASK; 418 - val |= 0xE4 << IREF1RC_OFFSET_SHIFT; 419 - intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val); 416 + intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK, 417 + 0xE4 << IREF1RC_OFFSET_SHIFT); 420 418 421 419 /* Program power gating */ 422 - val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy)); 423 - val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | 424 - SUS_CLK_CONFIG; 425 - intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val); 420 + intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0, 421 + OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG); 426 422 427 - if (phy_info->dual_channel) { 428 - val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy)); 429 - val |= DW6_OLDO_DYN_PWR_DOWN_EN; 430 - intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val); 431 - } 423 + if (phy_info->dual_channel) 424 + intel_de_rmw(dev_priv, BXT_PORT_CL2CM_DW6(phy), 0, 425 + DW6_OLDO_DYN_PWR_DOWN_EN); 432 426 433 427 if (phy_info->rcomp_phy != -1) { 434 428 u32 grc_code; ··· 439 449 val << GRC_CODE_SLOW_SHIFT | 440 450 val; 441 451 intel_de_write(dev_priv, 
BXT_PORT_REF_DW6(phy), grc_code); 442 - 443 - val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy)); 444 - val |= GRC_DIS | GRC_RDY_OVRD; 445 - intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val); 452 + intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy), 453 + 0, GRC_DIS | GRC_RDY_OVRD); 446 454 } 447 455 448 456 if (phy_info->reset_delay) 449 457 udelay(phy_info->reset_delay); 450 458 451 - val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)); 452 - val |= COMMON_RESET_DIS; 453 - intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val); 459 + intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS); 454 460 } 455 461 456 462 void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) 457 463 { 458 464 const struct bxt_ddi_phy_info *phy_info; 459 - u32 val; 460 465 461 466 phy_info = bxt_get_phy_info(dev_priv, phy); 462 467 463 - val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)); 464 - val &= ~COMMON_RESET_DIS; 465 - intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val); 468 + intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0); 466 469 467 - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); 468 - val &= ~phy_info->pwron_mask; 469 - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val); 470 + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, phy_info->pwron_mask, 0); 470 471 } 471 472 472 473 void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+53 -112
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
··· 608 608 struct intel_shared_dpll *pll) 609 609 { 610 610 const enum intel_dpll_id id = pll->info->id; 611 - u32 val; 612 611 613 - val = intel_de_read(dev_priv, WRPLL_CTL(id)); 614 - intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE); 612 + intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0); 615 613 intel_de_posting_read(dev_priv, WRPLL_CTL(id)); 616 614 617 615 /* ··· 624 626 struct intel_shared_dpll *pll) 625 627 { 626 628 enum intel_dpll_id id = pll->info->id; 627 - u32 val; 628 629 629 - val = intel_de_read(dev_priv, SPLL_CTL); 630 - intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE); 630 + intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0); 631 631 intel_de_posting_read(dev_priv, SPLL_CTL); 632 632 633 633 /* ··· 1234 1238 struct intel_shared_dpll *pll) 1235 1239 { 1236 1240 const enum intel_dpll_id id = pll->info->id; 1237 - u32 val; 1238 1241 1239 - val = intel_de_read(dev_priv, DPLL_CTRL1); 1240 - 1241 - val &= ~(DPLL_CTRL1_HDMI_MODE(id) | 1242 - DPLL_CTRL1_SSC(id) | 1243 - DPLL_CTRL1_LINK_RATE_MASK(id)); 1244 - val |= pll->state.hw_state.ctrl1 << (id * 6); 1245 - 1246 - intel_de_write(dev_priv, DPLL_CTRL1, val); 1242 + intel_de_rmw(dev_priv, DPLL_CTRL1, 1243 + DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id), 1244 + pll->state.hw_state.ctrl1 << (id * 6)); 1247 1245 intel_de_posting_read(dev_priv, DPLL_CTRL1); 1248 1246 } 1249 1247 ··· 1255 1265 intel_de_posting_read(dev_priv, regs[id].cfgcr2); 1256 1266 1257 1267 /* the enable bit is always bit 31 */ 1258 - intel_de_write(dev_priv, regs[id].ctl, 1259 - intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE); 1268 + intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE); 1260 1269 1261 1270 if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5)) 1262 1271 drm_err(&dev_priv->drm, "DPLL %d not locked\n", id); ··· 1274 1285 const enum intel_dpll_id id = pll->info->id; 1275 1286 1276 1287 /* the enable bit is always bit 31 */ 1277 
- intel_de_write(dev_priv, regs[id].ctl, 1278 - intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE); 1288 + intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0); 1279 1289 intel_de_posting_read(dev_priv, regs[id].ctl); 1280 1290 } 1281 1291 ··· 1890 1902 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); 1891 1903 1892 1904 /* Non-SSC reference */ 1893 - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); 1894 - temp |= PORT_PLL_REF_SEL; 1895 - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); 1905 + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL); 1896 1906 1897 1907 if (IS_GEMINILAKE(dev_priv)) { 1898 - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); 1899 - temp |= PORT_PLL_POWER_ENABLE; 1900 - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); 1908 + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 1909 + 0, PORT_PLL_POWER_ENABLE); 1901 1910 1902 1911 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & 1903 1912 PORT_PLL_POWER_STATE), 200)) ··· 1903 1918 } 1904 1919 1905 1920 /* Disable 10 bit clock */ 1906 - temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch)); 1907 - temp &= ~PORT_PLL_10BIT_CLK_ENABLE; 1908 - intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); 1921 + intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), 1922 + PORT_PLL_10BIT_CLK_ENABLE, 0); 1909 1923 1910 1924 /* Write P1 & P2 */ 1911 - temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch)); 1912 - temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK); 1913 - temp |= pll->state.hw_state.ebb0; 1914 - intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp); 1925 + intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), 1926 + PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0); 1915 1927 1916 1928 /* Write M2 integer */ 1917 - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0)); 1918 - temp &= ~PORT_PLL_M2_INT_MASK; 1919 - temp |= pll->state.hw_state.pll0; 1920 - 
intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp); 1929 + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0), 1930 + PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0); 1921 1931 1922 1932 /* Write N */ 1923 - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1)); 1924 - temp &= ~PORT_PLL_N_MASK; 1925 - temp |= pll->state.hw_state.pll1; 1926 - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp); 1933 + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1), 1934 + PORT_PLL_N_MASK, pll->state.hw_state.pll1); 1927 1935 1928 1936 /* Write M2 fraction */ 1929 - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2)); 1930 - temp &= ~PORT_PLL_M2_FRAC_MASK; 1931 - temp |= pll->state.hw_state.pll2; 1932 - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp); 1937 + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2), 1938 + PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2); 1933 1939 1934 1940 /* Write M2 fraction enable */ 1935 - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3)); 1936 - temp &= ~PORT_PLL_M2_FRAC_ENABLE; 1937 - temp |= pll->state.hw_state.pll3; 1938 - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp); 1941 + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3), 1942 + PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3); 1939 1943 1940 1944 /* Write coeff */ 1941 1945 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6)); ··· 1935 1961 intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp); 1936 1962 1937 1963 /* Write calibration val */ 1938 - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8)); 1939 - temp &= ~PORT_PLL_TARGET_CNT_MASK; 1940 - temp |= pll->state.hw_state.pll8; 1941 - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp); 1964 + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8), 1965 + PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8); 1942 1966 1943 - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9)); 1944 - temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK; 1945 - temp |= pll->state.hw_state.pll9; 1946 - 
intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp); 1967 + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9), 1968 + PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9); 1947 1969 1948 1970 temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10)); 1949 1971 temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; ··· 1956 1986 intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); 1957 1987 1958 1988 /* Enable PLL */ 1959 - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); 1960 - temp |= PORT_PLL_ENABLE; 1961 - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); 1989 + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE); 1962 1990 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); 1963 1991 1964 1992 if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), ··· 1984 2016 struct intel_shared_dpll *pll) 1985 2017 { 1986 2018 enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ 1987 - u32 temp; 1988 2019 1989 - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); 1990 - temp &= ~PORT_PLL_ENABLE; 1991 - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); 2020 + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0); 1992 2021 intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); 1993 2022 1994 2023 if (IS_GEMINILAKE(dev_priv)) { 1995 - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); 1996 - temp &= ~PORT_PLL_POWER_ENABLE; 1997 - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); 2024 + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 2025 + PORT_PLL_POWER_ENABLE, 0); 1998 2026 1999 2027 if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & 2000 2028 PORT_PLL_POWER_STATE), 200)) ··· 3605 3641 !i915_mmio_reg_valid(div0_reg)); 3606 3642 if (dev_priv->display.vbt.override_afc_startup && 3607 3643 i915_mmio_reg_valid(div0_reg)) 3608 - intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK, 3609 - hw_state->div0); 
3644 + intel_de_rmw(dev_priv, div0_reg, 3645 + TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0); 3610 3646 intel_de_posting_read(dev_priv, cfgcr1_reg); 3611 3647 } 3612 3648 ··· 3615 3651 { 3616 3652 struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; 3617 3653 enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); 3618 - u32 val; 3619 3654 3620 3655 /* 3621 3656 * Some of the following registers have reserved fields, so program ··· 3622 3659 * during the calc/readout phase if the mask depends on some other HW 3623 3660 * state like refclk, see icl_calc_mg_pll_state(). 3624 3661 */ 3625 - val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port)); 3626 - val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; 3627 - val |= hw_state->mg_refclkin_ctl; 3628 - intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val); 3662 + intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port), 3663 + MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl); 3629 3664 3630 - val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port)); 3631 - val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; 3632 - val |= hw_state->mg_clktop2_coreclkctl1; 3633 - intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val); 3665 + intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), 3666 + MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK, 3667 + hw_state->mg_clktop2_coreclkctl1); 3634 3668 3635 - val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port)); 3636 - val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | 3637 - MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | 3638 - MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | 3639 - MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); 3640 - val |= hw_state->mg_clktop2_hsclkctl; 3641 - intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val); 3669 + intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), 3670 + MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | 3671 + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | 3672 + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | 3673 + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK, 3674 + 
hw_state->mg_clktop2_hsclkctl); 3642 3675 3643 3676 intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0); 3644 3677 intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1); ··· 3643 3684 hw_state->mg_pll_frac_lock); 3644 3685 intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc); 3645 3686 3646 - val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port)); 3647 - val &= ~hw_state->mg_pll_bias_mask; 3648 - val |= hw_state->mg_pll_bias; 3649 - intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val); 3687 + intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port), 3688 + hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias); 3650 3689 3651 - val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); 3652 - val &= ~hw_state->mg_pll_tdc_coldst_bias_mask; 3653 - val |= hw_state->mg_pll_tdc_coldst_bias; 3654 - intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val); 3690 + intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), 3691 + hw_state->mg_pll_tdc_coldst_bias_mask, 3692 + hw_state->mg_pll_tdc_coldst_bias); 3655 3693 3656 3694 intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); 3657 3695 } ··· 3722 3766 struct intel_shared_dpll *pll, 3723 3767 i915_reg_t enable_reg) 3724 3768 { 3725 - u32 val; 3726 - 3727 - val = intel_de_read(dev_priv, enable_reg); 3728 - val |= PLL_POWER_ENABLE; 3729 - intel_de_write(dev_priv, enable_reg, val); 3769 + intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE); 3730 3770 3731 3771 /* 3732 3772 * The spec says we need to "wait" but it also says it should be ··· 3737 3785 struct intel_shared_dpll *pll, 3738 3786 i915_reg_t enable_reg) 3739 3787 { 3740 - u32 val; 3741 - 3742 - val = intel_de_read(dev_priv, enable_reg); 3743 - val |= PLL_ENABLE; 3744 - intel_de_write(dev_priv, enable_reg, val); 3788 + intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE); 3745 3789 3746 3790 /* Timeout is actually 600us. 
*/ 3747 3791 if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1)) ··· 3763 3815 * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled. 3764 3816 */ 3765 3817 val = intel_de_read(i915, TRANS_CMTG_CHICKEN); 3766 - val = intel_de_read(i915, TRANS_CMTG_CHICKEN); 3767 - intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING); 3818 + val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING); 3768 3819 if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING)) 3769 3820 drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val); 3770 3821 } ··· 3847 3900 struct intel_shared_dpll *pll, 3848 3901 i915_reg_t enable_reg) 3849 3902 { 3850 - u32 val; 3851 - 3852 3903 /* The first steps are done by intel_ddi_post_disable(). */ 3853 3904 3854 3905 /* ··· 3855 3910 * nothing here. 3856 3911 */ 3857 3912 3858 - val = intel_de_read(dev_priv, enable_reg); 3859 - val &= ~PLL_ENABLE; 3860 - intel_de_write(dev_priv, enable_reg, val); 3913 + intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0); 3861 3914 3862 3915 /* Timeout is actually 1us. */ 3863 3916 if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1)) ··· 3863 3920 3864 3921 /* DVFS post sequence would be here. See the comment above. */ 3865 3922 3866 - val = intel_de_read(dev_priv, enable_reg); 3867 - val &= ~PLL_POWER_ENABLE; 3868 - intel_de_write(dev_priv, enable_reg, val); 3923 + intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0); 3869 3924 3870 3925 /* 3871 3926 * The spec says we need to "wait" but it also says it should be
+5 -11
drivers/gpu/drm/i915/display/intel_drrs.c
··· 68 68 { 69 69 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 70 70 enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder; 71 - u32 val, bit; 71 + u32 bit; 72 72 73 73 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 74 - bit = PIPECONF_REFRESH_RATE_ALT_VLV; 74 + bit = TRANSCONF_REFRESH_RATE_ALT_VLV; 75 75 else 76 - bit = PIPECONF_REFRESH_RATE_ALT_ILK; 76 + bit = TRANSCONF_REFRESH_RATE_ALT_ILK; 77 77 78 - val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); 79 - 80 - if (refresh_rate == DRRS_REFRESH_RATE_LOW) 81 - val |= bit; 82 - else 83 - val &= ~bit; 84 - 85 - intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); 78 + intel_de_rmw(dev_priv, TRANSCONF(cpu_transcoder), 79 + bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0); 86 80 } 87 81 88 82 static void
+26 -15
drivers/gpu/drm/i915/display/intel_dsb.c
··· 88 88 89 89 /* each instruction is 2 dwords */ 90 90 return !drm_WARN(&i915->drm, dsb->free_pos > dsb->size - 2, 91 - "DSB buffer overflow\n"); 91 + "[CRTC:%d:%s] DSB %d buffer overflow\n", 92 + crtc->base.base.id, crtc->base.name, dsb->id); 92 93 } 93 94 94 95 static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe, ··· 199 198 } 200 199 } 201 200 202 - static u32 intel_dsb_align_tail(struct intel_dsb *dsb) 201 + static void intel_dsb_align_tail(struct intel_dsb *dsb) 203 202 { 204 203 u32 aligned_tail, tail; 205 204 ··· 211 210 aligned_tail - tail); 212 211 213 212 dsb->free_pos = aligned_tail / 4; 213 + } 214 214 215 - return aligned_tail; 215 + void intel_dsb_finish(struct intel_dsb *dsb) 216 + { 217 + intel_dsb_align_tail(dsb); 216 218 } 217 219 218 220 /** 219 221 * intel_dsb_commit() - Trigger workload execution of DSB. 220 222 * @dsb: DSB context 223 + * @wait_for_vblank: wait for vblank before executing 221 224 * 222 225 * This function is used to do actual write to hardware using DSB. 223 226 */ 224 - void intel_dsb_commit(struct intel_dsb *dsb) 227 + void intel_dsb_commit(struct intel_dsb *dsb, bool wait_for_vblank) 225 228 { 226 229 struct intel_crtc *crtc = dsb->crtc; 227 230 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 228 231 enum pipe pipe = crtc->pipe; 229 232 u32 tail; 230 233 231 - tail = intel_dsb_align_tail(dsb); 232 - if (tail == 0) 234 + tail = dsb->free_pos * 4; 235 + if (drm_WARN_ON(&dev_priv->drm, !IS_ALIGNED(tail, CACHELINE_BYTES))) 233 236 return; 234 237 235 238 if (is_dsb_busy(dev_priv, pipe, dsb->id)) { 236 - drm_err(&dev_priv->drm, "DSB engine is busy.\n"); 237 - goto reset; 239 + drm_err(&dev_priv->drm, "[CRTC:%d:%s] DSB %d is busy\n", 240 + crtc->base.base.id, crtc->base.name, dsb->id); 241 + return; 238 242 } 239 243 240 244 intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), 245 + (wait_for_vblank ? 
DSB_WAIT_FOR_VBLANK : 0) | 241 246 DSB_ENABLE); 242 247 intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id), 243 248 i915_ggtt_offset(dsb->vma)); 244 249 intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id), 245 250 i915_ggtt_offset(dsb->vma) + tail); 251 + } 246 252 247 - drm_dbg_kms(&dev_priv->drm, 248 - "DSB execution started - head 0x%x, tail 0x%x\n", 249 - i915_ggtt_offset(dsb->vma), 250 - i915_ggtt_offset(dsb->vma) + tail); 253 + void intel_dsb_wait(struct intel_dsb *dsb) 254 + { 255 + struct intel_crtc *crtc = dsb->crtc; 256 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 257 + enum pipe pipe = crtc->pipe; 251 258 252 259 if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) 253 260 drm_err(&dev_priv->drm, 254 - "Timed out waiting for DSB workload completion.\n"); 261 + "[CRTC:%d:%s] DSB %d timed out waiting for idle\n", 262 + crtc->base.base.id, crtc->base.name, dsb->id); 255 263 256 - reset: 264 + /* Attempt to reset it */ 257 265 dsb->free_pos = 0; 258 266 dsb->ins_start_offset = 0; 259 267 intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), 0); ··· 335 325 kfree(dsb); 336 326 out: 337 327 drm_info_once(&i915->drm, 338 - "DSB queue setup failed, will fallback to MMIO for display HW programming\n"); 328 + "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n", 329 + crtc->base.base.id, crtc->base.name, DSB1); 339 330 340 331 return NULL; 341 332 }
+4 -1
drivers/gpu/drm/i915/display/intel_dsb.h
··· 15 15 16 16 struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc, 17 17 unsigned int max_cmds); 18 + void intel_dsb_finish(struct intel_dsb *dsb); 18 19 void intel_dsb_cleanup(struct intel_dsb *dsb); 19 20 void intel_dsb_reg_write(struct intel_dsb *dsb, 20 21 i915_reg_t reg, u32 val); 21 - void intel_dsb_commit(struct intel_dsb *dsb); 22 + void intel_dsb_commit(struct intel_dsb *dsb, 23 + bool wait_for_vblank); 24 + void intel_dsb_wait(struct intel_dsb *dsb); 22 25 23 26 #endif
+5
drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
··· 162 162 static int dcs_setup_backlight(struct intel_connector *connector, 163 163 enum pipe unused) 164 164 { 165 + struct drm_i915_private *i915 = to_i915(connector->base.dev); 165 166 struct intel_panel *panel = &connector->panel; 166 167 167 168 if (panel->vbt.backlight.brightness_precision_bits > 8) ··· 171 170 panel->backlight.max = PANEL_PWM_MAX_VALUE; 172 171 173 172 panel->backlight.level = panel->backlight.max; 173 + 174 + drm_dbg_kms(&i915->drm, 175 + "[CONNECTOR:%d:%s] Using DCS for backlight control\n", 176 + connector->base.base.id, connector->base.name); 174 177 175 178 return 0; 176 179 }
+2 -5
drivers/gpu/drm/i915/display/intel_dvo.c
··· 444 444 * the clock enabled before we attempt to initialize 445 445 * the device. 446 446 */ 447 - for_each_pipe(dev_priv, pipe) { 448 - dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe)); 449 - intel_de_write(dev_priv, DPLL(pipe), 450 - dpll[pipe] | DPLL_DVO_2X_MODE); 451 - } 447 + for_each_pipe(dev_priv, pipe) 448 + dpll[pipe] = intel_de_rmw(dev_priv, DPLL(pipe), 0, DPLL_DVO_2X_MODE); 452 449 453 450 ret = dvo->dev_ops->init(&intel_dvo->dev, i2c); 454 451
+6 -1
drivers/gpu/drm/i915/display/intel_fb.c
··· 2007 2007 2008 2008 vm = intel_dpt_create(intel_fb); 2009 2009 if (IS_ERR(vm)) { 2010 + drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n"); 2010 2011 ret = PTR_ERR(vm); 2011 2012 goto err; 2012 2013 } ··· 2018 2017 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); 2019 2018 if (ret) { 2020 2019 drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret); 2021 - goto err; 2020 + goto err_free_dpt; 2022 2021 } 2023 2022 2024 2023 return 0; 2025 2024 2025 + err_free_dpt: 2026 + if (intel_fb_uses_dpt(fb)) 2027 + intel_dpt_destroy(intel_fb->dpt_vm); 2026 2028 err: 2027 2029 intel_frontbuffer_put(intel_fb->frontbuffer); 2028 2030 return ret; ··· 2050 2046 if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) { 2051 2047 /* object is "remote", not in local memory */ 2052 2048 i915_gem_object_put(obj); 2049 + drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n"); 2053 2050 return ERR_PTR(-EREMOTE); 2054 2051 } 2055 2052
+4 -4
drivers/gpu/drm/i915/display/intel_fbdev.c
··· 561 561 intel_fbdev_unregister(to_i915(ifbdev->helper.dev)); 562 562 } 563 563 564 - void intel_fbdev_initial_config_async(struct drm_device *dev) 564 + void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv) 565 565 { 566 - struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; 566 + struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; 567 567 568 568 if (!ifbdev) 569 569 return; ··· 706 706 drm_fb_helper_hotplug_event(&ifbdev->helper); 707 707 } 708 708 709 - void intel_fbdev_restore_mode(struct drm_device *dev) 709 + void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv) 710 710 { 711 - struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; 711 + struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; 712 712 713 713 if (!ifbdev) 714 714 return;
+4 -4
drivers/gpu/drm/i915/display/intel_fbdev.h
··· 15 15 16 16 #ifdef CONFIG_DRM_FBDEV_EMULATION 17 17 int intel_fbdev_init(struct drm_device *dev); 18 - void intel_fbdev_initial_config_async(struct drm_device *dev); 18 + void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv); 19 19 void intel_fbdev_unregister(struct drm_i915_private *dev_priv); 20 20 void intel_fbdev_fini(struct drm_i915_private *dev_priv); 21 21 void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous); 22 22 void intel_fbdev_output_poll_changed(struct drm_device *dev); 23 - void intel_fbdev_restore_mode(struct drm_device *dev); 23 + void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv); 24 24 struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev); 25 25 #else 26 26 static inline int intel_fbdev_init(struct drm_device *dev) ··· 28 28 return 0; 29 29 } 30 30 31 - static inline void intel_fbdev_initial_config_async(struct drm_device *dev) 31 + static inline void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv) 32 32 { 33 33 } 34 34 ··· 48 48 { 49 49 } 50 50 51 - static inline void intel_fbdev_restore_mode(struct drm_device *dev) 51 + static inline void intel_fbdev_restore_mode(struct drm_i915_private *i915) 52 52 { 53 53 } 54 54 static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
+49 -110
drivers/gpu/drm/i915/display/intel_fdi.c
··· 366 366 367 367 /* IVB wants error correction enabled */ 368 368 if (IS_IVYBRIDGE(dev_priv)) 369 - intel_de_write(dev_priv, reg, 370 - intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); 369 + intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); 371 370 } 372 371 373 372 /* The FDI link training functions for ILK/Ibexpeak. */ ··· 438 439 drm_err(&dev_priv->drm, "FDI train 1 fail!\n"); 439 440 440 441 /* Train 2 */ 441 - reg = FDI_TX_CTL(pipe); 442 - temp = intel_de_read(dev_priv, reg); 443 - temp &= ~FDI_LINK_TRAIN_NONE; 444 - temp |= FDI_LINK_TRAIN_PATTERN_2; 445 - intel_de_write(dev_priv, reg, temp); 446 - 447 - reg = FDI_RX_CTL(pipe); 448 - temp = intel_de_read(dev_priv, reg); 449 - temp &= ~FDI_LINK_TRAIN_NONE; 450 - temp |= FDI_LINK_TRAIN_PATTERN_2; 451 - intel_de_write(dev_priv, reg, temp); 452 - 453 - intel_de_posting_read(dev_priv, reg); 442 + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), 443 + FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2); 444 + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), 445 + FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2); 446 + intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); 454 447 udelay(150); 455 448 456 449 reg = FDI_RX_IIR(pipe); ··· 529 538 udelay(150); 530 539 531 540 for (i = 0; i < 4; i++) { 532 - reg = FDI_TX_CTL(pipe); 533 - temp = intel_de_read(dev_priv, reg); 534 - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 535 - temp |= snb_b_fdi_train_param[i]; 536 - intel_de_write(dev_priv, reg, temp); 537 - 538 - intel_de_posting_read(dev_priv, reg); 541 + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), 542 + FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]); 543 + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); 539 544 udelay(500); 540 545 541 546 for (retry = 0; retry < 5; retry++) { ··· 580 593 udelay(150); 581 594 582 595 for (i = 0; i < 4; i++) { 583 - reg = FDI_TX_CTL(pipe); 584 - temp = intel_de_read(dev_priv, reg); 585 - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 586 - temp |= 
snb_b_fdi_train_param[i]; 587 - intel_de_write(dev_priv, reg, temp); 588 - 589 - intel_de_posting_read(dev_priv, reg); 596 + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), 597 + FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]); 598 + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); 590 599 udelay(500); 591 600 592 601 for (retry = 0; retry < 5; retry++) { ··· 702 719 } 703 720 704 721 /* Train 2 */ 705 - reg = FDI_TX_CTL(pipe); 706 - temp = intel_de_read(dev_priv, reg); 707 - temp &= ~FDI_LINK_TRAIN_NONE_IVB; 708 - temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 709 - intel_de_write(dev_priv, reg, temp); 710 - 711 - reg = FDI_RX_CTL(pipe); 712 - temp = intel_de_read(dev_priv, reg); 713 - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 714 - temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 715 - intel_de_write(dev_priv, reg, temp); 716 - 717 - intel_de_posting_read(dev_priv, reg); 722 + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), 723 + FDI_LINK_TRAIN_NONE_IVB, 724 + FDI_LINK_TRAIN_PATTERN_2_IVB); 725 + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), 726 + FDI_LINK_TRAIN_PATTERN_MASK_CPT, 727 + FDI_LINK_TRAIN_PATTERN_2_CPT); 728 + intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); 718 729 udelay(2); /* should be 1.5us */ 719 730 720 731 for (i = 0; i < 4; i++) { ··· 814 837 udelay(30); 815 838 816 839 /* Unset FDI_RX_MISC pwrdn lanes */ 817 - temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); 818 - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); 819 - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); 840 + intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), 841 + FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0); 820 842 intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); 821 843 822 844 /* Wait for FDI auto training time */ ··· 841 865 intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); 842 866 intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); 843 867 844 - temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); 845 - temp &= ~DDI_BUF_CTL_ENABLE; 846 - intel_de_write(dev_priv, 
DDI_BUF_CTL(PORT_E), temp); 868 + intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0); 847 869 intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); 848 870 849 871 /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ 850 - temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E)); 851 - temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); 852 - temp |= DP_TP_CTL_LINK_TRAIN_PAT1; 853 - intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp); 872 + intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), 873 + DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK, 874 + DP_TP_CTL_LINK_TRAIN_PAT1); 854 875 intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E)); 855 876 856 877 intel_wait_ddi_buf_idle(dev_priv, PORT_E); 857 878 858 879 /* Reset FDI_RX_MISC pwrdn lanes */ 859 - temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); 860 - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); 861 - temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); 862 - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); 880 + intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), 881 + FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 882 + FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2)); 863 883 intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); 864 884 } 865 885 ··· 870 898 void hsw_fdi_disable(struct intel_encoder *encoder) 871 899 { 872 900 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 873 - u32 val; 874 901 875 902 /* 876 903 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) ··· 877 906 * step 13 is the correct place for it. Step 18 is where it was 878 907 * originally before the BUN. 
879 908 */ 880 - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 881 - val &= ~FDI_RX_ENABLE; 882 - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); 883 - 884 - val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); 885 - val &= ~DDI_BUF_CTL_ENABLE; 886 - intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val); 887 - 909 + intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0); 910 + intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0); 888 911 intel_wait_ddi_buf_idle(dev_priv, PORT_E); 889 - 890 912 intel_ddi_disable_clock(encoder); 891 - 892 - val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); 893 - val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); 894 - val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); 895 - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); 896 - 897 - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 898 - val &= ~FDI_PCDCLK; 899 - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); 900 - 901 - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 902 - val &= ~FDI_RX_PLL_ENABLE; 903 - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); 913 + intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), 914 + FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 915 + FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2)); 916 + intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0); 917 + intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0); 904 918 } 905 919 906 920 void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) ··· 901 945 temp = intel_de_read(dev_priv, reg); 902 946 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 903 947 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 904 - temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 948 + temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; 905 949 intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE); 906 950 907 951 intel_de_posting_read(dev_priv, reg); 908 952 udelay(200); 909 953 910 954 
/* Switch from Rawclk to PCDclk */ 911 - temp = intel_de_read(dev_priv, reg); 912 - intel_de_write(dev_priv, reg, temp | FDI_PCDCLK); 913 - 955 + intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK); 914 956 intel_de_posting_read(dev_priv, reg); 915 957 udelay(200); 916 958 ··· 928 974 struct drm_device *dev = crtc->base.dev; 929 975 struct drm_i915_private *dev_priv = to_i915(dev); 930 976 enum pipe pipe = crtc->pipe; 931 - i915_reg_t reg; 932 - u32 temp; 933 977 934 978 /* Switch from PCDclk to Rawclk */ 935 - reg = FDI_RX_CTL(pipe); 936 - temp = intel_de_read(dev_priv, reg); 937 - intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK); 979 + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0); 938 980 939 981 /* Disable CPU FDI TX PLL */ 940 - reg = FDI_TX_CTL(pipe); 941 - temp = intel_de_read(dev_priv, reg); 942 - intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE); 943 - 944 - intel_de_posting_read(dev_priv, reg); 982 + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0); 983 + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); 945 984 udelay(100); 946 985 947 - reg = FDI_RX_CTL(pipe); 948 - temp = intel_de_read(dev_priv, reg); 949 - intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE); 950 - 951 986 /* Wait for the clocks to turn off. 
*/ 952 - intel_de_posting_read(dev_priv, reg); 987 + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0); 988 + intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); 953 989 udelay(100); 954 990 } 955 991 ··· 951 1007 u32 temp; 952 1008 953 1009 /* disable CPU FDI tx and PCH FDI rx */ 954 - reg = FDI_TX_CTL(pipe); 955 - temp = intel_de_read(dev_priv, reg); 956 - intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE); 957 - intel_de_posting_read(dev_priv, reg); 1010 + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0); 1011 + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); 958 1012 959 1013 reg = FDI_RX_CTL(pipe); 960 1014 temp = intel_de_read(dev_priv, reg); 961 1015 temp &= ~(0x7 << 16); 962 - temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 1016 + temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; 963 1017 intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE); 964 1018 965 1019 intel_de_posting_read(dev_priv, reg); ··· 969 1027 FDI_RX_PHASE_SYNC_POINTER_OVR); 970 1028 971 1029 /* still set train pattern 1 */ 972 - reg = FDI_TX_CTL(pipe); 973 - temp = intel_de_read(dev_priv, reg); 974 - temp &= ~FDI_LINK_TRAIN_NONE; 975 - temp |= FDI_LINK_TRAIN_PATTERN_1; 976 - intel_de_write(dev_priv, reg, temp); 1030 + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), 1031 + FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1); 977 1032 978 1033 reg = FDI_RX_CTL(pipe); 979 1034 temp = intel_de_read(dev_priv, reg); ··· 981 1042 temp &= ~FDI_LINK_TRAIN_NONE; 982 1043 temp |= FDI_LINK_TRAIN_PATTERN_1; 983 1044 } 984 - /* BPC in FDI rx is consistent with that in PIPECONF */ 1045 + /* BPC in FDI rx is consistent with that in TRANSCONF */ 985 1046 temp &= ~(0x07 << 16); 986 - temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 1047 + temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; 987 1048 intel_de_write(dev_priv, reg, temp); 988 1049 989 1050 
intel_de_posting_read(dev_priv, reg);
+20
drivers/gpu/drm/i915/display/intel_fifo_underrun.c
··· 31 31 #include "intel_display_types.h" 32 32 #include "intel_fbc.h" 33 33 #include "intel_fifo_underrun.h" 34 + #include "intel_pch_display.h" 34 35 35 36 /** 36 37 * DOC: fifo underrun handling ··· 509 508 } 510 509 511 510 spin_unlock_irq(&dev_priv->irq_lock); 511 + } 512 + 513 + void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915, 514 + struct intel_crtc *crtc, 515 + bool enable) 516 + { 517 + crtc->cpu_fifo_underrun_disabled = !enable; 518 + 519 + /* 520 + * We track the PCH trancoder underrun reporting state 521 + * within the crtc. With crtc for pipe A housing the underrun 522 + * reporting state for PCH transcoder A, crtc for pipe B housing 523 + * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, 524 + * and marking underrun reporting as disabled for the non-existing 525 + * PCH transcoders B and C would prevent enabling the south 526 + * error interrupt (see cpt_can_enable_serr_int()). 527 + */ 528 + if (intel_has_pch_trancoder(i915, crtc->pipe)) 529 + crtc->pch_fifo_underrun_disabled = !enable; 512 530 }
+3
drivers/gpu/drm/i915/display/intel_fifo_underrun.h
··· 9 9 #include <linux/types.h> 10 10 11 11 struct drm_i915_private; 12 + struct intel_crtc; 12 13 enum pipe; 13 14 15 + void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915, 16 + struct intel_crtc *crtc, bool enable); 14 17 bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, 15 18 enum pipe pipe, bool enable); 16 19 bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
+6 -24
drivers/gpu/drm/i915/display/intel_gmbus.c
··· 215 215 static void pnv_gmbus_clock_gating(struct drm_i915_private *i915, 216 216 bool enable) 217 217 { 218 - u32 val; 219 - 220 218 /* When using bit bashing for I2C, this bit needs to be set to 1 */ 221 - val = intel_de_read(i915, DSPCLK_GATE_D(i915)); 222 - if (!enable) 223 - val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; 224 - else 225 - val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; 226 - intel_de_write(i915, DSPCLK_GATE_D(i915), val); 219 + intel_de_rmw(i915, DSPCLK_GATE_D(i915), PNV_GMBUSUNIT_CLOCK_GATE_DISABLE, 220 + !enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); 227 221 } 228 222 229 223 static void pch_gmbus_clock_gating(struct drm_i915_private *i915, 230 224 bool enable) 231 225 { 232 - u32 val; 233 - 234 - val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D); 235 - if (!enable) 236 - val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; 237 - else 238 - val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; 239 - intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val); 226 + intel_de_rmw(i915, SOUTH_DSPCLK_GATE_D, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, 227 + !enable ? PCH_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); 240 228 } 241 229 242 230 static void bxt_gmbus_clock_gating(struct drm_i915_private *i915, 243 231 bool enable) 244 232 { 245 - u32 val; 246 - 247 - val = intel_de_read(i915, GEN9_CLKGATE_DIS_4); 248 - if (!enable) 249 - val |= BXT_GMBUS_GATING_DIS; 250 - else 251 - val &= ~BXT_GMBUS_GATING_DIS; 252 - intel_de_write(i915, GEN9_CLKGATE_DIS_4, val); 233 + intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS, 234 + !enable ? BXT_GMBUS_GATING_DIS : 0); 253 235 } 254 236 255 237 static u32 get_reserved(struct intel_gmbus *bus)
+6 -9
drivers/gpu/drm/i915/display/intel_hdcp.c
··· 943 943 944 944 repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, 945 945 port); 946 - intel_de_write(dev_priv, HDCP_REP_CTL, 947 - intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl); 946 + intel_de_rmw(dev_priv, HDCP_REP_CTL, repeater_ctl, 0); 948 947 949 948 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); 950 949 if (ret) { ··· 1818 1819 } 1819 1820 1820 1821 if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & 1821 - LINK_AUTH_STATUS) { 1822 + LINK_AUTH_STATUS) 1822 1823 /* Link is Authenticated. Now set for Encryption */ 1823 - intel_de_write(dev_priv, 1824 - HDCP2_CTL(dev_priv, cpu_transcoder, port), 1825 - intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ); 1826 - } 1824 + intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), 1825 + 0, CTL_LINK_ENCRYPTION_REQ); 1827 1826 1828 1827 ret = intel_de_wait_for_set(dev_priv, 1829 1828 HDCP2_STATUS(dev_priv, cpu_transcoder, ··· 1845 1848 drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & 1846 1849 LINK_ENCRYPTION_STATUS)); 1847 1850 1848 - intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), 1849 - intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ); 1851 + intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), 1852 + CTL_LINK_ENCRYPTION_REQ, 0); 1850 1853 1851 1854 ret = intel_de_wait_for_clear(dev_priv, 1852 1855 HDCP2_STATUS(dev_priv, cpu_transcoder,
+42 -37
drivers/gpu/drm/i915/display/intel_hdmi.c
··· 238 238 void *frame, ssize_t len) 239 239 { 240 240 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 241 - u32 val, *data = frame; 241 + u32 *data = frame; 242 242 int i; 243 243 244 - val = intel_de_read(dev_priv, VIDEO_DIP_CTL); 245 - 246 - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 247 - val |= g4x_infoframe_index(type); 248 - 249 - intel_de_write(dev_priv, VIDEO_DIP_CTL, val); 244 + intel_de_rmw(dev_priv, VIDEO_DIP_CTL, 245 + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); 250 246 251 247 for (i = 0; i < len; i += 4) 252 248 *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA); ··· 310 314 { 311 315 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 312 316 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 313 - u32 val, *data = frame; 317 + u32 *data = frame; 314 318 int i; 315 319 316 - val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe)); 317 - 318 - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 319 - val |= g4x_infoframe_index(type); 320 - 321 - intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val); 320 + intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), 321 + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); 322 322 323 323 for (i = 0; i < len; i += 4) 324 324 *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); ··· 388 396 { 389 397 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 390 398 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 391 - u32 val, *data = frame; 399 + u32 *data = frame; 392 400 int i; 393 401 394 - val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe)); 395 - 396 - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 397 - val |= g4x_infoframe_index(type); 398 - 399 - intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val); 402 + intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), 403 + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); 400 404 401 405 for (i 
= 0; i < len; i += 4) 402 406 *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); ··· 460 472 { 461 473 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 462 474 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 463 - u32 val, *data = frame; 475 + u32 *data = frame; 464 476 int i; 465 477 466 - val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe)); 467 - 468 - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ 469 - val |= g4x_infoframe_index(type); 470 - 471 - intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val); 478 + intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), 479 + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); 472 480 473 481 for (i = 0; i < len; i += 4) 474 482 *data++ = intel_de_read(dev_priv, ··· 1779 1795 else 1780 1796 max_tmds_clock = 165000; 1781 1797 1782 - vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder); 1798 + vbt_max_tmds_clock = intel_bios_hdmi_max_tmds_clock(encoder->devdata); 1783 1799 if (vbt_max_tmds_clock) 1784 1800 max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock); 1785 1801 ··· 2136 2152 * Our YCbCr output is always limited range. 2137 2153 * crtc_state->limited_color_range only applies to RGB, 2138 2154 * and it must never be set for YCbCr or we risk setting 2139 - * some conflicting bits in PIPECONF which will mess up 2155 + * some conflicting bits in TRANSCONF which will mess up 2140 2156 * the colors on the monitor. 2141 2157 */ 2142 2158 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) ··· 2224 2240 !is_power_of_2(crtc_state->uapi.encoder_mask); 2225 2241 } 2226 2242 2243 + static bool source_supports_scrambling(struct intel_encoder *encoder) 2244 + { 2245 + /* 2246 + * Gen 10+ support HDMI 2.0 : the max tmds clock is 594MHz, and 2247 + * scrambling is supported. 
2248 + * But there seem to be cases where certain platforms that support 2249 + * HDMI 2.0, have an HDMI1.4 retimer chip, and the max tmds clock is 2250 + * capped by VBT to less than 340MHz. 2251 + * 2252 + * In such cases when an HDMI2.0 sink is connected, it creates a 2253 + * problem : the platform and the sink both support scrambling but the 2254 + * HDMI 1.4 retimer chip doesn't. 2255 + * 2256 + * So go for scrambling, based on the max tmds clock taking into account, 2257 + * restrictions coming from VBT. 2258 + */ 2259 + return intel_hdmi_source_max_tmds_clock(encoder) > 340000; 2260 + } 2261 + 2227 2262 int intel_hdmi_compute_config(struct intel_encoder *encoder, 2228 2263 struct intel_crtc_state *pipe_config, 2229 2264 struct drm_connector_state *conn_state) ··· 2305 2302 2306 2303 pipe_config->lane_count = 4; 2307 2304 2308 - if (scdc->scrambling.supported && DISPLAY_VER(dev_priv) >= 10) { 2305 + if (scdc->scrambling.supported && source_supports_scrambling(encoder)) { 2309 2306 if (scdc->scrambling.low_rates) 2310 2307 pipe_config->hdmi_scrambling = true; 2311 2308 ··· 2855 2852 enum port port = encoder->port; 2856 2853 u8 ddc_pin; 2857 2854 2858 - ddc_pin = intel_bios_alternate_ddc_pin(encoder); 2855 + ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata); 2859 2856 if (ddc_pin) { 2860 2857 drm_dbg_kms(&dev_priv->drm, 2861 - "Using DDC pin 0x%x for port %c (VBT)\n", 2862 - ddc_pin, port_name(port)); 2858 + "[ENCODER:%d:%s] Using DDC pin 0x%x (VBT)\n", 2859 + encoder->base.base.id, encoder->base.name, 2860 + ddc_pin); 2863 2861 return ddc_pin; 2864 2862 } 2865 2863 ··· 2886 2882 ddc_pin = g4x_port_to_ddc_pin(dev_priv, port); 2887 2883 2888 2884 drm_dbg_kms(&dev_priv->drm, 2889 - "Using DDC pin 0x%x for port %c (platform default)\n", 2890 - ddc_pin, port_name(port)); 2885 + "[ENCODER:%d:%s] Using DDC pin 0x%x (platform default)\n", 2886 + encoder->base.base.id, encoder->base.name, 2887 + ddc_pin); 2891 2888 2892 2889 return ddc_pin; 2893 2890 } ··· 2909 2904 
dig_port->set_infoframes = g4x_set_infoframes; 2910 2905 dig_port->infoframes_enabled = g4x_infoframes_enabled; 2911 2906 } else if (HAS_DDI(dev_priv)) { 2912 - if (intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) { 2907 + if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) { 2913 2908 dig_port->write_infoframe = lspcon_write_infoframe; 2914 2909 dig_port->read_infoframe = lspcon_read_infoframe; 2915 2910 dig_port->set_infoframes = lspcon_set_infoframes;
+3 -3
drivers/gpu/drm/i915/display/intel_lpe_audio.c
··· 315 315 * intel_lpe_audio_notify() - notify lpe audio event 316 316 * audio driver and i915 317 317 * @dev_priv: the i915 drm device private data 318 - * @pipe: pipe 318 + * @cpu_transcoder: CPU transcoder 319 319 * @port: port 320 320 * @eld : ELD data 321 321 * @ls_clock: Link symbol clock in kHz ··· 324 324 * Notify lpe audio driver of eld change. 325 325 */ 326 326 void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, 327 - enum pipe pipe, enum port port, 327 + enum transcoder cpu_transcoder, enum port port, 328 328 const void *eld, int ls_clock, bool dp_output) 329 329 { 330 330 unsigned long irqflags; ··· 344 344 345 345 if (eld != NULL) { 346 346 memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES); 347 - ppdata->pipe = pipe; 347 + ppdata->pipe = cpu_transcoder; 348 348 ppdata->ls_clock = ls_clock; 349 349 ppdata->dp_output = dp_output; 350 350
+2 -2
drivers/gpu/drm/i915/display/intel_lpe_audio.h
··· 8 8 9 9 #include <linux/types.h> 10 10 11 - enum pipe; 12 11 enum port; 12 + enum transcoder; 13 13 struct drm_i915_private; 14 14 15 15 int intel_lpe_audio_init(struct drm_i915_private *dev_priv); 16 16 void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv); 17 17 void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv); 18 18 void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, 19 - enum pipe pipe, enum port port, 19 + enum transcoder cpu_transcoder, enum port port, 20 20 const void *eld, int ls_clock, bool dp_output); 21 21 22 22 #endif /* __INTEL_LPE_AUDIO_H__ */
+1 -1
drivers/gpu/drm/i915/display/intel_lspcon.c
··· 689 689 struct drm_i915_private *i915 = to_i915(dev); 690 690 enum drm_lspcon_mode expected_mode; 691 691 692 - if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) 692 + if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata)) 693 693 return; 694 694 695 695 if (!lspcon->active) {
+163 -171
drivers/gpu/drm/i915/display/intel_lvds.c
··· 49 49 #include "intel_fdi.h" 50 50 #include "intel_gmbus.h" 51 51 #include "intel_lvds.h" 52 + #include "intel_lvds_regs.h" 52 53 #include "intel_panel.h" 53 54 54 55 /* Private structure for the integrated LVDS support */ ··· 85 84 return container_of(encoder, struct intel_lvds_encoder, base); 86 85 } 87 86 88 - bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, 87 + bool intel_lvds_port_enabled(struct drm_i915_private *i915, 89 88 i915_reg_t lvds_reg, enum pipe *pipe) 90 89 { 91 90 u32 val; 92 91 93 - val = intel_de_read(dev_priv, lvds_reg); 92 + val = intel_de_read(i915, lvds_reg); 94 93 95 94 /* asserts want to know the pipe even if the port is disabled */ 96 - if (HAS_PCH_CPT(dev_priv)) 97 - *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT; 95 + if (HAS_PCH_CPT(i915)) 96 + *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val); 98 97 else 99 - *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT; 98 + *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val); 100 99 101 100 return val & LVDS_PORT_EN; 102 101 } ··· 104 103 static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, 105 104 enum pipe *pipe) 106 105 { 107 - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 106 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 108 107 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); 109 108 intel_wakeref_t wakeref; 110 109 bool ret; 111 110 112 - wakeref = intel_display_power_get_if_enabled(dev_priv, 113 - encoder->power_domain); 111 + wakeref = intel_display_power_get_if_enabled(i915, encoder->power_domain); 114 112 if (!wakeref) 115 113 return false; 116 114 117 - ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe); 115 + ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe); 118 116 119 - intel_display_power_put(dev_priv, encoder->power_domain, wakeref); 117 + intel_display_power_put(i915, encoder->power_domain, wakeref); 120 118 121 119 return ret; 122 120 } 123 121 
124 122 static void intel_lvds_get_config(struct intel_encoder *encoder, 125 - struct intel_crtc_state *pipe_config) 123 + struct intel_crtc_state *crtc_state) 126 124 { 127 125 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 128 126 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); 129 127 u32 tmp, flags = 0; 130 128 131 - pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS); 129 + crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS); 132 130 133 131 tmp = intel_de_read(dev_priv, lvds_encoder->reg); 134 132 if (tmp & LVDS_HSYNC_POLARITY) ··· 139 139 else 140 140 flags |= DRM_MODE_FLAG_PVSYNC; 141 141 142 - pipe_config->hw.adjusted_mode.flags |= flags; 142 + crtc_state->hw.adjusted_mode.flags |= flags; 143 143 144 144 if (DISPLAY_VER(dev_priv) < 5) 145 - pipe_config->gmch_pfit.lvds_border_bits = 145 + crtc_state->gmch_pfit.lvds_border_bits = 146 146 tmp & LVDS_BORDER_ENABLE; 147 147 148 148 /* gen2/3 store dither state in pfit control, needs to match */ 149 149 if (DISPLAY_VER(dev_priv) < 4) { 150 150 tmp = intel_de_read(dev_priv, PFIT_CONTROL); 151 151 152 - pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; 152 + crtc_state->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; 153 153 } 154 154 155 - pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock; 155 + crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock; 156 156 } 157 157 158 158 static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, ··· 216 216 intel_de_write(dev_priv, PP_CONTROL(0), val); 217 217 218 218 intel_de_write(dev_priv, PP_ON_DELAYS(0), 219 - REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)); 219 + REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | 220 + REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | 221 + REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)); 220 222 221 223 
intel_de_write(dev_priv, PP_OFF_DELAYS(0), 222 - REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx)); 224 + REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | 225 + REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx)); 223 226 224 227 intel_de_write(dev_priv, PP_DIVISOR(0), 225 - REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1)); 228 + REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | 229 + REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1)); 226 230 } 227 231 228 232 static void intel_pre_enable_lvds(struct intel_atomic_state *state, 229 233 struct intel_encoder *encoder, 230 - const struct intel_crtc_state *pipe_config, 234 + const struct intel_crtc_state *crtc_state, 231 235 const struct drm_connector_state *conn_state) 232 236 { 233 237 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); 234 - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 235 - struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 236 - const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 238 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 239 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 240 + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 237 241 enum pipe pipe = crtc->pipe; 238 242 u32 temp; 239 243 240 - if (HAS_PCH_SPLIT(dev_priv)) { 241 - assert_fdi_rx_pll_disabled(dev_priv, pipe); 242 - assert_shared_dpll_disabled(dev_priv, 243 - pipe_config->shared_dpll); 244 + if (HAS_PCH_SPLIT(i915)) { 245 + assert_fdi_rx_pll_disabled(i915, pipe); 246 + assert_shared_dpll_disabled(i915, crtc_state->shared_dpll); 244 247 } else { 245 - assert_pll_disabled(dev_priv, pipe); 248 + assert_pll_disabled(i915, pipe); 246 249 } 247 250 248 - intel_lvds_pps_init_hw(dev_priv, 
&lvds_encoder->init_pps); 251 + intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps); 249 252 250 253 temp = lvds_encoder->init_lvds_val; 251 254 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 252 255 253 - if (HAS_PCH_CPT(dev_priv)) { 256 + if (HAS_PCH_CPT(i915)) { 254 257 temp &= ~LVDS_PIPE_SEL_MASK_CPT; 255 258 temp |= LVDS_PIPE_SEL_CPT(pipe); 256 259 } else { ··· 263 260 264 261 /* set the corresponsding LVDS_BORDER bit */ 265 262 temp &= ~LVDS_BORDER_ENABLE; 266 - temp |= pipe_config->gmch_pfit.lvds_border_bits; 263 + temp |= crtc_state->gmch_pfit.lvds_border_bits; 267 264 268 265 /* 269 266 * Set the B0-B3 data pairs corresponding to whether we're going to ··· 286 283 /* 287 284 * Set the dithering flag on LVDS as needed, note that there is no 288 285 * special lvds dither control bit on pch-split platforms, dithering is 289 - * only controlled through the PIPECONF reg. 286 + * only controlled through the TRANSCONF reg. 290 287 */ 291 - if (DISPLAY_VER(dev_priv) == 4) { 288 + if (DISPLAY_VER(i915) == 4) { 292 289 /* 293 290 * Bspec wording suggests that LVDS port dithering only exists 294 291 * for 18bpp panels. 
295 292 */ 296 - if (pipe_config->dither && pipe_config->pipe_bpp == 18) 293 + if (crtc_state->dither && crtc_state->pipe_bpp == 18) 297 294 temp |= LVDS_ENABLE_DITHER; 298 295 else 299 296 temp &= ~LVDS_ENABLE_DITHER; ··· 304 301 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 305 302 temp |= LVDS_VSYNC_POLARITY; 306 303 307 - intel_de_write(dev_priv, lvds_encoder->reg, temp); 304 + intel_de_write(i915, lvds_encoder->reg, temp); 308 305 } 309 306 310 307 /* ··· 312 309 */ 313 310 static void intel_enable_lvds(struct intel_atomic_state *state, 314 311 struct intel_encoder *encoder, 315 - const struct intel_crtc_state *pipe_config, 312 + const struct intel_crtc_state *crtc_state, 316 313 const struct drm_connector_state *conn_state) 317 314 { 318 - struct drm_device *dev = encoder->base.dev; 319 315 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); 320 - struct drm_i915_private *dev_priv = to_i915(dev); 316 + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 321 317 322 - intel_de_write(dev_priv, lvds_encoder->reg, 323 - intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN); 318 + intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN); 324 319 325 - intel_de_write(dev_priv, PP_CONTROL(0), 326 - intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON); 320 + intel_de_rmw(dev_priv, PP_CONTROL(0), 0, PANEL_POWER_ON); 327 321 intel_de_posting_read(dev_priv, lvds_encoder->reg); 328 322 329 323 if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000)) 330 324 drm_err(&dev_priv->drm, 331 325 "timed out waiting for panel to power on\n"); 332 326 333 - intel_backlight_enable(pipe_config, conn_state); 327 + intel_backlight_enable(crtc_state, conn_state); 334 328 } 335 329 336 330 static void intel_disable_lvds(struct intel_atomic_state *state, ··· 338 338 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); 339 339 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 340 340 341 - 
intel_de_write(dev_priv, PP_CONTROL(0), 342 - intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON); 341 + intel_de_rmw(dev_priv, PP_CONTROL(0), PANEL_POWER_ON, 0); 343 342 if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000)) 344 343 drm_err(&dev_priv->drm, 345 344 "timed out waiting for panel to power off\n"); 346 345 347 - intel_de_write(dev_priv, lvds_encoder->reg, 348 - intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN); 346 + intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0); 349 347 intel_de_posting_read(dev_priv, lvds_encoder->reg); 350 348 } 351 349 ··· 384 386 } 385 387 386 388 static enum drm_mode_status 387 - intel_lvds_mode_valid(struct drm_connector *connector, 389 + intel_lvds_mode_valid(struct drm_connector *_connector, 388 390 struct drm_display_mode *mode) 389 391 { 390 - struct intel_connector *intel_connector = to_intel_connector(connector); 392 + struct intel_connector *connector = to_intel_connector(_connector); 391 393 const struct drm_display_mode *fixed_mode = 392 - intel_panel_fixed_mode(intel_connector, mode); 393 - int max_pixclk = to_i915(connector->dev)->max_dotclk_freq; 394 + intel_panel_fixed_mode(connector, mode); 395 + int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq; 394 396 enum drm_mode_status status; 395 397 396 398 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 397 399 return MODE_NO_DBLESCAN; 398 400 399 - status = intel_panel_mode_valid(intel_connector, mode); 401 + status = intel_panel_mode_valid(connector, mode); 400 402 if (status != MODE_OK) 401 403 return status; 402 404 ··· 406 408 return MODE_OK; 407 409 } 408 410 409 - static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, 410 - struct intel_crtc_state *pipe_config, 411 + static int intel_lvds_compute_config(struct intel_encoder *encoder, 412 + struct intel_crtc_state *crtc_state, 411 413 struct drm_connector_state *conn_state) 412 414 { 413 - struct drm_i915_private *dev_priv = 
to_i915(intel_encoder->base.dev); 414 - struct intel_lvds_encoder *lvds_encoder = 415 - to_lvds_encoder(intel_encoder); 416 - struct intel_connector *intel_connector = 417 - lvds_encoder->attached_connector; 418 - struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 419 - struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 415 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 416 + struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); 417 + struct intel_connector *connector = lvds_encoder->attached_connector; 418 + struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 419 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 420 420 unsigned int lvds_bpp; 421 421 int ret; 422 422 423 423 /* Should never happen!! */ 424 - if (DISPLAY_VER(dev_priv) < 4 && crtc->pipe == 0) { 425 - drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n"); 424 + if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) { 425 + drm_err(&i915->drm, "Can't support LVDS on pipe A\n"); 426 426 return -EINVAL; 427 427 } 428 428 ··· 429 433 else 430 434 lvds_bpp = 6*3; 431 435 432 - if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) { 433 - drm_dbg_kms(&dev_priv->drm, 436 + if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) { 437 + drm_dbg_kms(&i915->drm, 434 438 "forcing display bpp (was %d) to LVDS (%d)\n", 435 - pipe_config->pipe_bpp, lvds_bpp); 436 - pipe_config->pipe_bpp = lvds_bpp; 439 + crtc_state->pipe_bpp, lvds_bpp); 440 + crtc_state->pipe_bpp = lvds_bpp; 437 441 } 438 442 439 - pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 443 + crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; 440 444 441 445 /* 442 446 * We have timings from the BIOS for the panel, put them in ··· 444 448 * with the panel scaling set up to source from the H/VDisplay 445 449 * of the original mode. 
446 450 */ 447 - ret = intel_panel_compute_config(intel_connector, adjusted_mode); 451 + ret = intel_panel_compute_config(connector, adjusted_mode); 448 452 if (ret) 449 453 return ret; 450 454 451 455 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 452 456 return -EINVAL; 453 457 454 - if (HAS_PCH_SPLIT(dev_priv)) 455 - pipe_config->has_pch_encoder = true; 458 + if (HAS_PCH_SPLIT(i915)) 459 + crtc_state->has_pch_encoder = true; 456 460 457 - ret = intel_panel_fitting(pipe_config, conn_state); 461 + ret = intel_panel_fitting(crtc_state, conn_state); 458 462 if (ret) 459 463 return ret; 460 464 ··· 470 474 /* 471 475 * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. 472 476 */ 473 - static int intel_lvds_get_modes(struct drm_connector *connector) 477 + static int intel_lvds_get_modes(struct drm_connector *_connector) 474 478 { 475 - struct intel_connector *intel_connector = to_intel_connector(connector); 476 - const struct drm_edid *fixed_edid = intel_connector->panel.fixed_edid; 479 + struct intel_connector *connector = to_intel_connector(_connector); 480 + const struct drm_edid *fixed_edid = connector->panel.fixed_edid; 477 481 478 482 /* Use panel fixed edid if we have one */ 479 483 if (!IS_ERR_OR_NULL(fixed_edid)) { 480 - drm_edid_connector_update(connector, fixed_edid); 484 + drm_edid_connector_update(&connector->base, fixed_edid); 481 485 482 - return drm_edid_connector_add_modes(connector); 486 + return drm_edid_connector_add_modes(&connector->base); 483 487 } 484 488 485 - return intel_panel_get_modes(intel_connector); 489 + return intel_panel_get_modes(connector); 486 490 } 487 491 488 492 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { ··· 581 585 }, 582 586 { 583 587 .callback = intel_no_lvds_dmi_callback, 584 - .ident = "AOpen i45GMx-I", 585 - .matches = { 586 - DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), 587 - DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"), 588 - }, 589 - }, 588 + .ident = "AOpen 
i45GMx-I", 589 + .matches = { 590 + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), 591 + DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"), 592 + }, 593 + }, 590 594 { 591 595 .callback = intel_no_lvds_dmi_callback, 592 596 .ident = "Aopen i945GTt-VFA", ··· 603 607 }, 604 608 }, 605 609 { 606 - .callback = intel_no_lvds_dmi_callback, 607 - .ident = "Clientron E830", 608 - .matches = { 609 - DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), 610 - DMI_MATCH(DMI_PRODUCT_NAME, "E830"), 611 - }, 612 - }, 613 - { 610 + .callback = intel_no_lvds_dmi_callback, 611 + .ident = "Clientron E830", 612 + .matches = { 613 + DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), 614 + DMI_MATCH(DMI_PRODUCT_NAME, "E830"), 615 + }, 616 + }, 617 + { 614 618 .callback = intel_no_lvds_dmi_callback, 615 619 .ident = "Asus EeeBox PC EB1007", 616 620 .matches = { ··· 760 764 { } /* terminating entry */ 761 765 }; 762 766 763 - struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv) 767 + struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915) 764 768 { 765 769 struct intel_encoder *encoder; 766 770 767 - for_each_intel_encoder(&dev_priv->drm, encoder) { 771 + for_each_intel_encoder(&i915->drm, encoder) { 768 772 if (encoder->type == INTEL_OUTPUT_LVDS) 769 773 return encoder; 770 774 } ··· 772 776 return NULL; 773 777 } 774 778 775 - bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv) 779 + bool intel_is_dual_link_lvds(struct drm_i915_private *i915) 776 780 { 777 - struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv); 781 + struct intel_encoder *encoder = intel_get_lvds_encoder(i915); 778 782 779 783 return encoder && to_lvds_encoder(encoder)->is_dual_link; 780 784 } 781 785 782 786 static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) 783 787 { 784 - struct drm_i915_private *dev_priv = to_i915(lvds_encoder->base.base.dev); 788 + struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev); 785 789 struct intel_connector *connector = 
lvds_encoder->attached_connector; 786 790 const struct drm_display_mode *fixed_mode = 787 791 intel_panel_preferred_fixed_mode(connector); 788 792 unsigned int val; 789 793 790 794 /* use the module option value if specified */ 791 - if (dev_priv->params.lvds_channel_mode > 0) 792 - return dev_priv->params.lvds_channel_mode == 2; 795 + if (i915->params.lvds_channel_mode > 0) 796 + return i915->params.lvds_channel_mode == 2; 793 797 794 798 /* single channel LVDS is limited to 112 MHz */ 795 799 if (fixed_mode->clock > 112999) ··· 804 808 * we need to check "the value to be set" in VBT when LVDS 805 809 * register is uninitialized. 806 810 */ 807 - val = intel_de_read(dev_priv, lvds_encoder->reg); 808 - if (HAS_PCH_CPT(dev_priv)) 811 + val = intel_de_read(i915, lvds_encoder->reg); 812 + if (HAS_PCH_CPT(i915)) 809 813 val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT); 810 814 else 811 815 val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK); ··· 822 826 823 827 /** 824 828 * intel_lvds_init - setup LVDS connectors on this device 825 - * @dev_priv: i915 device 829 + * @i915: i915 device 826 830 * 827 831 * Create the connector, register the LVDS DDC bus, and try to figure out what 828 832 * modes we can display on the LVDS panel (if present). 
829 833 */ 830 - void intel_lvds_init(struct drm_i915_private *dev_priv) 834 + void intel_lvds_init(struct drm_i915_private *i915) 831 835 { 832 836 struct intel_lvds_encoder *lvds_encoder; 833 - struct intel_encoder *intel_encoder; 834 - struct intel_connector *intel_connector; 835 - struct drm_connector *connector; 836 - struct drm_encoder *encoder; 837 + struct intel_connector *connector; 837 838 const struct drm_edid *drm_edid; 839 + struct intel_encoder *encoder; 838 840 i915_reg_t lvds_reg; 839 841 u32 lvds; 840 842 u8 pin; 841 843 842 844 /* Skip init on machines we know falsely report LVDS */ 843 845 if (dmi_check_system(intel_no_lvds)) { 844 - drm_WARN(&dev_priv->drm, !dev_priv->display.vbt.int_lvds_support, 846 + drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support, 845 847 "Useless DMI match. Internal LVDS support disabled by VBT\n"); 846 848 return; 847 849 } 848 850 849 - if (!dev_priv->display.vbt.int_lvds_support) { 850 - drm_dbg_kms(&dev_priv->drm, 851 + if (!i915->display.vbt.int_lvds_support) { 852 + drm_dbg_kms(&i915->drm, 851 853 "Internal LVDS support disabled by VBT\n"); 852 854 return; 853 855 } 854 856 855 - if (HAS_PCH_SPLIT(dev_priv)) 857 + if (HAS_PCH_SPLIT(i915)) 856 858 lvds_reg = PCH_LVDS; 857 859 else 858 860 lvds_reg = LVDS; 859 861 860 - lvds = intel_de_read(dev_priv, lvds_reg); 862 + lvds = intel_de_read(i915, lvds_reg); 861 863 862 - if (HAS_PCH_SPLIT(dev_priv)) { 864 + if (HAS_PCH_SPLIT(i915)) { 863 865 if ((lvds & LVDS_DETECTED) == 0) 864 866 return; 865 867 } 866 868 867 869 pin = GMBUS_PIN_PANEL; 868 - if (!intel_bios_is_lvds_present(dev_priv, &pin)) { 870 + if (!intel_bios_is_lvds_present(i915, &pin)) { 869 871 if ((lvds & LVDS_PORT_EN) == 0) { 870 - drm_dbg_kms(&dev_priv->drm, 872 + drm_dbg_kms(&i915->drm, 871 873 "LVDS is not present in VBT\n"); 872 874 return; 873 875 } 874 - drm_dbg_kms(&dev_priv->drm, 876 + drm_dbg_kms(&i915->drm, 875 877 "LVDS is not present in VBT, but enabled anyway\n"); 876 878 } 877 879 ··· 877 
883 if (!lvds_encoder) 878 884 return; 879 885 880 - intel_connector = intel_connector_alloc(); 881 - if (!intel_connector) { 886 + connector = intel_connector_alloc(); 887 + if (!connector) { 882 888 kfree(lvds_encoder); 883 889 return; 884 890 } 885 891 886 - lvds_encoder->attached_connector = intel_connector; 892 + lvds_encoder->attached_connector = connector; 893 + encoder = &lvds_encoder->base; 887 894 888 - intel_encoder = &lvds_encoder->base; 889 - encoder = &intel_encoder->base; 890 - connector = &intel_connector->base; 891 - drm_connector_init(&dev_priv->drm, &intel_connector->base, &intel_lvds_connector_funcs, 895 + drm_connector_init(&i915->drm, &connector->base, &intel_lvds_connector_funcs, 892 896 DRM_MODE_CONNECTOR_LVDS); 893 897 894 - drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_lvds_enc_funcs, 898 + drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs, 895 899 DRM_MODE_ENCODER_LVDS, "LVDS"); 896 900 897 - intel_encoder->enable = intel_enable_lvds; 898 - intel_encoder->pre_enable = intel_pre_enable_lvds; 899 - intel_encoder->compute_config = intel_lvds_compute_config; 900 - if (HAS_PCH_SPLIT(dev_priv)) { 901 - intel_encoder->disable = pch_disable_lvds; 902 - intel_encoder->post_disable = pch_post_disable_lvds; 901 + encoder->enable = intel_enable_lvds; 902 + encoder->pre_enable = intel_pre_enable_lvds; 903 + encoder->compute_config = intel_lvds_compute_config; 904 + if (HAS_PCH_SPLIT(i915)) { 905 + encoder->disable = pch_disable_lvds; 906 + encoder->post_disable = pch_post_disable_lvds; 903 907 } else { 904 - intel_encoder->disable = gmch_disable_lvds; 908 + encoder->disable = gmch_disable_lvds; 905 909 } 906 - intel_encoder->get_hw_state = intel_lvds_get_hw_state; 907 - intel_encoder->get_config = intel_lvds_get_config; 908 - intel_encoder->update_pipe = intel_backlight_update; 909 - intel_encoder->shutdown = intel_lvds_shutdown; 910 - intel_connector->get_hw_state = intel_connector_get_hw_state; 910 + 
encoder->get_hw_state = intel_lvds_get_hw_state; 911 + encoder->get_config = intel_lvds_get_config; 912 + encoder->update_pipe = intel_backlight_update; 913 + encoder->shutdown = intel_lvds_shutdown; 914 + connector->get_hw_state = intel_connector_get_hw_state; 911 915 912 - intel_connector_attach_encoder(intel_connector, intel_encoder); 916 + intel_connector_attach_encoder(connector, encoder); 913 917 914 - intel_encoder->type = INTEL_OUTPUT_LVDS; 915 - intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER; 916 - intel_encoder->port = PORT_NONE; 917 - intel_encoder->cloneable = 0; 918 - if (DISPLAY_VER(dev_priv) < 4) 919 - intel_encoder->pipe_mask = BIT(PIPE_B); 918 + encoder->type = INTEL_OUTPUT_LVDS; 919 + encoder->power_domain = POWER_DOMAIN_PORT_OTHER; 920 + encoder->port = PORT_NONE; 921 + encoder->cloneable = 0; 922 + if (DISPLAY_VER(i915) < 4) 923 + encoder->pipe_mask = BIT(PIPE_B); 920 924 else 921 - intel_encoder->pipe_mask = ~0; 925 + encoder->pipe_mask = ~0; 922 926 923 - drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 924 - connector->display_info.subpixel_order = SubPixelHorizontalRGB; 927 + drm_connector_helper_add(&connector->base, &intel_lvds_connector_helper_funcs); 928 + connector->base.display_info.subpixel_order = SubPixelHorizontalRGB; 925 929 926 930 lvds_encoder->reg = lvds_reg; 927 931 928 - intel_lvds_add_properties(connector); 932 + intel_lvds_add_properties(&connector->base); 929 933 930 - intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps); 934 + intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps); 931 935 lvds_encoder->init_lvds_val = lvds; 932 936 933 937 /* ··· 940 948 * Attempt to get the fixed panel mode from DDC. Assume that the 941 949 * preferred mode is the right one. 
942 950 */ 943 - mutex_lock(&dev_priv->drm.mode_config.mutex); 951 + mutex_lock(&i915->drm.mode_config.mutex); 944 952 if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) { 945 953 const struct edid *edid; 946 954 947 955 /* FIXME: Make drm_get_edid_switcheroo() return drm_edid */ 948 - edid = drm_get_edid_switcheroo(connector, 949 - intel_gmbus_get_adapter(dev_priv, pin)); 956 + edid = drm_get_edid_switcheroo(&connector->base, 957 + intel_gmbus_get_adapter(i915, pin)); 950 958 if (edid) { 951 959 drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH); 952 960 kfree(edid); ··· 954 962 drm_edid = NULL; 955 963 } 956 964 } else { 957 - drm_edid = drm_edid_read_ddc(connector, 958 - intel_gmbus_get_adapter(dev_priv, pin)); 965 + drm_edid = drm_edid_read_ddc(&connector->base, 966 + intel_gmbus_get_adapter(i915, pin)); 959 967 } 960 968 if (drm_edid) { 961 - if (drm_edid_connector_update(connector, drm_edid) || 962 - !drm_edid_connector_add_modes(connector)) { 963 - drm_edid_connector_update(connector, NULL); 969 + if (drm_edid_connector_update(&connector->base, drm_edid) || 970 + !drm_edid_connector_add_modes(&connector->base)) { 971 + drm_edid_connector_update(&connector->base, NULL); 964 972 drm_edid_free(drm_edid); 965 973 drm_edid = ERR_PTR(-EINVAL); 966 974 } 967 975 } else { 968 976 drm_edid = ERR_PTR(-ENOENT); 969 977 } 970 - intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, 978 + intel_bios_init_panel_late(i915, &connector->panel, NULL, 971 979 IS_ERR(drm_edid) ? NULL : drm_edid); 972 980 973 981 /* Try EDID first */ 974 - intel_panel_add_edid_fixed_modes(intel_connector, true); 982 + intel_panel_add_edid_fixed_modes(connector, true); 975 983 976 984 /* Failed to get EDID, what about VBT? 
*/ 977 - if (!intel_panel_preferred_fixed_mode(intel_connector)) 978 - intel_panel_add_vbt_lfp_fixed_mode(intel_connector); 985 + if (!intel_panel_preferred_fixed_mode(connector)) 986 + intel_panel_add_vbt_lfp_fixed_mode(connector); 979 987 980 988 /* 981 989 * If we didn't get a fixed mode from EDID or VBT, try checking 982 990 * if the panel is already turned on. If so, assume that 983 991 * whatever is currently programmed is the correct mode. 984 992 */ 985 - if (!intel_panel_preferred_fixed_mode(intel_connector)) 986 - intel_panel_add_encoder_fixed_mode(intel_connector, intel_encoder); 993 + if (!intel_panel_preferred_fixed_mode(connector)) 994 + intel_panel_add_encoder_fixed_mode(connector, encoder); 987 995 988 - mutex_unlock(&dev_priv->drm.mode_config.mutex); 996 + mutex_unlock(&i915->drm.mode_config.mutex); 989 997 990 998 /* If we still don't have a mode after all that, give up. */ 991 - if (!intel_panel_preferred_fixed_mode(intel_connector)) 999 + if (!intel_panel_preferred_fixed_mode(connector)) 992 1000 goto failed; 993 1001 994 - intel_panel_init(intel_connector, drm_edid); 1002 + intel_panel_init(connector, drm_edid); 995 1003 996 - intel_backlight_setup(intel_connector, INVALID_PIPE); 1004 + intel_backlight_setup(connector, INVALID_PIPE); 997 1005 998 1006 lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); 999 - drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n", 1007 + drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n", 1000 1008 lvds_encoder->is_dual_link ? 
"dual" : "single"); 1001 1009 1002 1010 lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; ··· 1004 1012 return; 1005 1013 1006 1014 failed: 1007 - drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n"); 1008 - drm_connector_cleanup(connector); 1009 - drm_encoder_cleanup(encoder); 1015 + drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n"); 1016 + drm_connector_cleanup(&connector->base); 1017 + drm_encoder_cleanup(&encoder->base); 1010 1018 kfree(lvds_encoder); 1011 - intel_connector_free(intel_connector); 1019 + intel_connector_free(connector); 1012 1020 return; 1013 1021 }
+65
drivers/gpu/drm/i915/display/intel_lvds_regs.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_LVDS_REGS_H__ 7 + #define __INTEL_LVDS_REGS_H__ 8 + 9 + #include "intel_display_reg_defs.h" 10 + 11 + /* LVDS port control */ 12 + #define LVDS _MMIO(0x61180) 13 + /* 14 + * Enables the LVDS port. This bit must be set before DPLLs are enabled, as 15 + * the DPLL semantics change when the LVDS is assigned to that pipe. 16 + */ 17 + #define LVDS_PORT_EN REG_BIT(31) 18 + /* Selects pipe B for LVDS data. Must be set on pre-965. */ 19 + #define LVDS_PIPE_SEL_MASK REG_BIT(30) 20 + #define LVDS_PIPE_SEL(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK, (pipe)) 21 + #define LVDS_PIPE_SEL_MASK_CPT REG_GENMASK(30, 29) 22 + #define LVDS_PIPE_SEL_CPT(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, (pipe)) 23 + /* LVDS dithering flag on 965/g4x platform */ 24 + #define LVDS_ENABLE_DITHER REG_BIT(25) 25 + /* LVDS sync polarity flags. Set to invert (i.e. negative) */ 26 + #define LVDS_VSYNC_POLARITY REG_BIT(21) 27 + #define LVDS_HSYNC_POLARITY REG_BIT(20) 28 + 29 + /* Enable border for unscaled (or aspect-scaled) display */ 30 + #define LVDS_BORDER_ENABLE REG_BIT(15) 31 + /* 32 + * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per 33 + * pixel. 34 + */ 35 + #define LVDS_A0A2_CLKA_POWER_MASK REG_GENMASK(9, 8) 36 + #define LVDS_A0A2_CLKA_POWER_DOWN REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 0) 37 + #define LVDS_A0A2_CLKA_POWER_UP REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 3) 38 + /* 39 + * Controls the A3 data pair, which contains the additional LSBs for 24 bit 40 + * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be 41 + * on. 42 + */ 43 + #define LVDS_A3_POWER_MASK REG_GENMASK(7, 6) 44 + #define LVDS_A3_POWER_DOWN REG_FIELD_PREP(LVDS_A3_POWER_MASK, 0) 45 + #define LVDS_A3_POWER_UP REG_FIELD_PREP(LVDS_A3_POWER_MASK, 3) 46 + /* 47 + * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP 48 + * is set. 
49 + */ 50 + #define LVDS_CLKB_POWER_MASK REG_GENMASK(5, 4) 51 + #define LVDS_CLKB_POWER_DOWN REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 0) 52 + #define LVDS_CLKB_POWER_UP REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 3) 53 + /* 54 + * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 55 + * setting for whether we are in dual-channel mode. The B3 pair will 56 + * additionally only be powered up when LVDS_A3_POWER_UP is set. 57 + */ 58 + #define LVDS_B0B3_POWER_MASK REG_GENMASK(3, 2) 59 + #define LVDS_B0B3_POWER_DOWN REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 0) 60 + #define LVDS_B0B3_POWER_UP REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 3) 61 + 62 + #define PCH_LVDS _MMIO(0xe1180) 63 + #define LVDS_DETECTED REG_BIT(1) 64 + 65 + #endif /* __INTEL_LVDS_REGS_H__ */
+3 -1
drivers/gpu/drm/i915/display/intel_mg_phy_regs.h
··· 142 142 #define FIA1_BASE 0x163000 143 143 #define FIA2_BASE 0x16E000 144 144 #define FIA3_BASE 0x16F000 145 - #define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE) 145 + #define _FIA(fia) _PICK_EVEN_2RANGES((fia), 1, \ 146 + FIA1_BASE, FIA1_BASE,\ 147 + FIA2_BASE, FIA3_BASE) 146 148 #define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off)) 147 149 148 150 /* ICL PHY DFLEX registers */
+17 -41
drivers/gpu/drm/i915/display/intel_modeset_setup.c
··· 11 11 12 12 #include "i915_drv.h" 13 13 #include "i915_reg.h" 14 + #include "i9xx_wm.h" 14 15 #include "intel_atomic.h" 15 16 #include "intel_bw.h" 16 17 #include "intel_color.h" ··· 22 21 #include "intel_display.h" 23 22 #include "intel_display_power.h" 24 23 #include "intel_display_types.h" 24 + #include "intel_dmc.h" 25 + #include "intel_fifo_underrun.h" 25 26 #include "intel_modeset_setup.h" 26 27 #include "intel_pch_display.h" 27 - #include "intel_pm.h" 28 + #include "intel_wm.h" 28 29 #include "skl_watermark.h" 29 30 30 31 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, ··· 237 234 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 238 235 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 239 236 240 - if (!crtc_state->hw.active && !HAS_GMCH(i915)) 241 - return; 242 - 243 237 /* 244 - * We start out with underrun reporting disabled to avoid races. 245 - * For correct bookkeeping mark this on active crtcs. 238 + * We start out with underrun reporting disabled on active 239 + * pipes to avoid races. 246 240 * 247 241 * Also on gmch platforms we dont have any hardware bits to 248 242 * disable the underrun reporting. Which means we need to start ··· 250 250 * No protection against concurrent access is required - at 251 251 * worst a fifo underrun happens which also sets this to false. 252 252 */ 253 - crtc->cpu_fifo_underrun_disabled = true; 254 - 255 - /* 256 - * We track the PCH trancoder underrun reporting state 257 - * within the crtc. With crtc for pipe A housing the underrun 258 - * reporting state for PCH transcoder A, crtc for pipe B housing 259 - * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, 260 - * and marking underrun reporting as disabled for the non-existing 261 - * PCH transcoders B and C would prevent enabling the south 262 - * error interrupt (see cpt_can_enable_serr_int()). 
263 - */ 264 - if (intel_has_pch_trancoder(i915, crtc->pipe)) 265 - crtc->pch_fifo_underrun_disabled = true; 253 + intel_init_fifo_underrun_reporting(i915, crtc, 254 + !crtc_state->hw.active && 255 + !HAS_GMCH(i915)); 266 256 } 267 257 268 258 static void intel_sanitize_crtc(struct intel_crtc *crtc, ··· 637 647 * Also known as Wa_14010480278. 638 648 */ 639 649 if (IS_DISPLAY_VER(i915, 10, 12)) 640 - intel_de_write(i915, GEN9_CLKGATE_DIS_0, 641 - intel_de_read(i915, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS); 650 + intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS); 642 651 643 - if (IS_HASWELL(i915)) { 644 - /* 645 - * WaRsPkgCStateDisplayPMReq:hsw 646 - * System hang if this isn't done before disabling all planes! 647 - */ 648 - intel_de_write(i915, CHICKEN_PAR1_1, 649 - intel_de_read(i915, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); 650 - } 652 + /* 653 + * WaRsPkgCStateDisplayPMReq:hsw 654 + * System hang if this isn't done before disabling all planes! 655 + */ 656 + if (IS_HASWELL(i915)) 657 + intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES); 651 658 652 659 if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) { 653 660 /* Display WA #1142:kbl,cfl,cml */ ··· 710 723 711 724 intel_dpll_sanitize_state(i915); 712 725 713 - if (IS_G4X(i915)) { 714 - g4x_wm_get_hw_state(i915); 715 - g4x_wm_sanitize(i915); 716 - } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { 717 - vlv_wm_get_hw_state(i915); 718 - vlv_wm_sanitize(i915); 719 - } else if (DISPLAY_VER(i915) >= 9) { 720 - skl_wm_get_hw_state(i915); 721 - skl_wm_sanitize(i915); 722 - } else if (HAS_PCH_SPLIT(i915)) { 723 - ilk_wm_get_hw_state(i915); 724 - } 726 + intel_wm_get_hw_state(i915); 725 727 726 728 for_each_intel_crtc(&i915->drm, crtc) { 727 729 struct intel_crtc_state *crtc_state =
+1
drivers/gpu/drm/i915/display/intel_panel.c
··· 39 39 #include "intel_de.h" 40 40 #include "intel_display_types.h" 41 41 #include "intel_drrs.h" 42 + #include "intel_lvds_regs.h" 42 43 #include "intel_panel.h" 43 44 #include "intel_quirks.h" 44 45
+27 -45
drivers/gpu/drm/i915/display/intel_pch_display.c
··· 10 10 #include "intel_display_types.h" 11 11 #include "intel_fdi.h" 12 12 #include "intel_lvds.h" 13 + #include "intel_lvds_regs.h" 13 14 #include "intel_pch_display.h" 14 15 #include "intel_pch_refclk.h" 15 16 #include "intel_pps.h" ··· 220 219 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 221 220 222 221 intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), 223 - intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); 222 + intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder))); 224 223 intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), 225 - intel_de_read(dev_priv, HBLANK(cpu_transcoder))); 224 + intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder))); 226 225 intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), 227 - intel_de_read(dev_priv, HSYNC(cpu_transcoder))); 226 + intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder))); 228 227 229 228 intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), 230 - intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); 229 + intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder))); 231 230 intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), 232 - intel_de_read(dev_priv, VBLANK(cpu_transcoder))); 231 + intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder))); 233 232 intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), 234 - intel_de_read(dev_priv, VSYNC(cpu_transcoder))); 233 + intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder))); 235 234 intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), 236 - intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); 235 + intel_de_read(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder))); 237 236 } 238 237 239 238 static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) ··· 267 266 268 267 reg = PCH_TRANSCONF(pipe); 269 268 val = intel_de_read(dev_priv, reg); 270 - pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); 269 + pipeconf_val = intel_de_read(dev_priv, TRANSCONF(pipe)); 271 270 272 271 if (HAS_PCH_IBX(dev_priv)) { 273 
272 /* Configure frame start delay to match the CPU */ ··· 279 278 * that in pipeconf reg. For HDMI we must use 8bpc 280 279 * here for both 8bpc and 12bpc. 281 280 */ 282 - val &= ~PIPECONF_BPC_MASK; 281 + val &= ~TRANSCONF_BPC_MASK; 283 282 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 284 - val |= PIPECONF_BPC_8; 283 + val |= TRANSCONF_BPC_8; 285 284 else 286 - val |= pipeconf_val & PIPECONF_BPC_MASK; 285 + val |= pipeconf_val & TRANSCONF_BPC_MASK; 287 286 } 288 287 289 288 val &= ~TRANS_INTERLACE_MASK; 290 - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_ILK) == PIPECONF_INTERLACE_IF_ID_ILK) { 289 + if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) { 291 290 if (HAS_PCH_IBX(dev_priv) && 292 291 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 293 292 val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX; ··· 308 307 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 309 308 enum pipe pipe = crtc->pipe; 310 309 i915_reg_t reg; 311 - u32 val; 312 310 313 311 /* FDI relies on the transcoder */ 314 312 assert_fdi_tx_disabled(dev_priv, pipe); ··· 317 317 assert_pch_ports_disabled(dev_priv, pipe); 318 318 319 319 reg = PCH_TRANSCONF(pipe); 320 - val = intel_de_read(dev_priv, reg); 321 - val &= ~TRANS_ENABLE; 322 - intel_de_write(dev_priv, reg, val); 320 + intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0); 323 321 /* wait for PCH transcoder off, transcoder state */ 324 322 if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) 325 323 drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", 326 324 pipe_name(pipe)); 327 325 328 - if (HAS_PCH_CPT(dev_priv)) { 326 + if (HAS_PCH_CPT(dev_priv)) 329 327 /* Workaround: Clear the timing override chicken bit again. 
*/ 330 - reg = TRANS_CHICKEN2(pipe); 331 - val = intel_de_read(dev_priv, reg); 332 - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 333 - intel_de_write(dev_priv, reg, val); 334 - } 328 + intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe), 329 + TRANS_CHICKEN2_TIMING_OVERRIDE, 0); 335 330 } 336 331 337 332 void ilk_pch_pre_enable(struct intel_atomic_state *state, ··· 409 414 intel_crtc_has_dp_encoder(crtc_state)) { 410 415 const struct drm_display_mode *adjusted_mode = 411 416 &crtc_state->hw.adjusted_mode; 412 - u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 417 + u32 bpc = (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) >> 5; 413 418 i915_reg_t reg = TRANS_DP_CTL(pipe); 414 419 enum port port; 415 420 ··· 451 456 ilk_disable_pch_transcoder(crtc); 452 457 453 458 if (HAS_PCH_CPT(dev_priv)) { 454 - i915_reg_t reg; 455 - u32 temp; 456 - 457 459 /* disable TRANS_DP_CTL */ 458 - reg = TRANS_DP_CTL(pipe); 459 - temp = intel_de_read(dev_priv, reg); 460 - temp &= ~(TRANS_DP_OUTPUT_ENABLE | 461 - TRANS_DP_PORT_SEL_MASK); 462 - temp |= TRANS_DP_PORT_SEL_NONE; 463 - intel_de_write(dev_priv, reg, temp); 460 + intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe), 461 + TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK, 462 + TRANS_DP_PORT_SEL_NONE); 464 463 465 464 /* disable DPLL_SEL */ 466 - temp = intel_de_read(dev_priv, PCH_DPLL_SEL); 467 - temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 468 - intel_de_write(dev_priv, PCH_DPLL_SEL, temp); 465 + intel_de_rmw(dev_priv, PCH_DPLL_SEL, 466 + TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0); 469 467 } 470 468 471 469 ilk_fdi_pll_disable(crtc); ··· 553 565 intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); 554 566 555 567 val = TRANS_ENABLE; 556 - pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); 568 + pipeconf_val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); 557 569 558 - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACE_IF_ID_ILK) 570 + if 
((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK) 559 571 val |= TRANS_INTERLACE_INTERLACED; 560 572 else 561 573 val |= TRANS_INTERLACE_PROGRESSIVE; ··· 568 580 569 581 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 570 582 { 571 - u32 val; 572 - 573 - val = intel_de_read(dev_priv, LPT_TRANSCONF); 574 - val &= ~TRANS_ENABLE; 575 - intel_de_write(dev_priv, LPT_TRANSCONF, val); 583 + intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0); 576 584 /* wait for PCH transcoder off, transcoder state */ 577 585 if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, 578 586 TRANS_STATE_ENABLE, 50)) 579 587 drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); 580 588 581 589 /* Workaround: clear timing override bit. */ 582 - val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); 583 - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 584 - intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); 590 + intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0); 585 591 } 586 592 587 593 void lpt_pch_enable(struct intel_atomic_state *state,
+2 -8
drivers/gpu/drm/i915/display/intel_pch_refclk.c
··· 12 12 13 13 static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv) 14 14 { 15 - u32 tmp; 16 - 17 - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); 18 - tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 19 - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); 15 + intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL); 20 16 21 17 if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & 22 18 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 23 19 drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); 24 20 25 - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); 26 - tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 27 - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); 21 + intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0); 28 22 29 23 if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & 30 24 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+6 -9
drivers/gpu/drm/i915/display/intel_pps.c
··· 13 13 #include "intel_dpio_phy.h" 14 14 #include "intel_dpll.h" 15 15 #include "intel_lvds.h" 16 + #include "intel_lvds_regs.h" 16 17 #include "intel_pps.h" 17 18 #include "intel_quirks.h" 18 19 ··· 1535 1534 /* 1536 1535 * Compute the divisor for the pp clock, simply match the Bspec formula. 1537 1536 */ 1538 - if (i915_mmio_reg_valid(regs.pp_div)) { 1537 + if (i915_mmio_reg_valid(regs.pp_div)) 1539 1538 intel_de_write(dev_priv, regs.pp_div, 1540 1539 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); 1541 - } else { 1542 - u32 pp_ctl; 1543 - 1544 - pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl); 1545 - pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; 1546 - pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); 1547 - intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); 1548 - } 1540 + else 1541 + intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK, 1542 + REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, 1543 + DIV_ROUND_UP(seq->t11_t12, 1000))); 1549 1544 1550 1545 drm_dbg_kms(&dev_priv->drm, 1551 1546 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+103 -106
drivers/gpu/drm/i915/display/intel_psr.c
··· 152 152 { 153 153 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 154 154 i915_reg_t imr_reg; 155 - u32 mask, val; 155 + u32 mask; 156 156 157 157 if (DISPLAY_VER(dev_priv) >= 12) 158 158 imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder); ··· 164 164 mask |= psr_irq_post_exit_bit_get(intel_dp) | 165 165 psr_irq_pre_entry_bit_get(intel_dp); 166 166 167 - val = intel_de_read(dev_priv, imr_reg); 168 - val &= ~psr_irq_mask_get(intel_dp); 169 - val |= ~mask; 170 - intel_de_write(dev_priv, imr_reg, val); 167 + intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask); 171 168 } 172 169 173 170 static void psr_event_print(struct drm_i915_private *i915, ··· 242 245 } 243 246 244 247 if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) { 245 - u32 val; 246 - 247 248 drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n", 248 249 transcoder_name(cpu_transcoder)); 249 250 ··· 255 260 * again so we don't care about unmask the interruption 256 261 * or unset irq_aux_error. 257 262 */ 258 - val = intel_de_read(dev_priv, imr_reg); 259 - val |= psr_irq_psr_error_bit_get(intel_dp); 260 - intel_de_write(dev_priv, imr_reg, val); 263 + intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp)); 261 264 262 265 schedule_work(&intel_dp->psr.work); 263 266 } ··· 535 542 val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2)); 536 543 val |= intel_psr2_get_tp_time(intel_dp); 537 544 545 + if (DISPLAY_VER(dev_priv) >= 12) { 546 + if (intel_dp->psr.io_wake_lines < 9 && 547 + intel_dp->psr.fast_wake_lines < 9) 548 + val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; 549 + else 550 + val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3; 551 + } 552 + 538 553 /* Wa_22012278275:adl-p */ 539 554 if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) { 540 555 static const u8 map[] = { ··· 559 558 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see 560 559 * comments bellow for more information 561 560 */ 562 - u32 tmp, lines = 7; 561 + u32 tmp; 563 
562 564 - val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; 565 - 566 - tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; 563 + tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; 567 564 tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT; 568 565 val |= tmp; 569 566 570 - tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; 567 + tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; 571 568 tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT; 572 569 val |= tmp; 573 570 } else if (DISPLAY_VER(dev_priv) >= 12) { 574 - /* 575 - * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default 576 - * values from BSpec. In order to setting an optimal power 577 - * consumption, lower than 4k resolution mode needs to decrease 578 - * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution 579 - * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE. 580 - */ 581 - val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; 582 - val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7); 583 - val |= TGL_EDP_PSR2_FAST_WAKE(7); 571 + val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); 572 + val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); 584 573 } else if (DISPLAY_VER(dev_priv) >= 9) { 585 - val |= EDP_PSR2_IO_BUFFER_WAKE(7); 586 - val |= EDP_PSR2_FAST_WAKE(7); 574 + val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); 575 + val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); 587 576 } 588 577 589 578 if (intel_dp->psr.req_psr2_sdp_prior_scanline) ··· 581 590 582 591 if (intel_dp->psr.psr2_sel_fetch_enabled) { 583 592 u32 tmp; 584 - 585 - /* Wa_1408330847 */ 586 - if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 587 - intel_de_rmw(dev_priv, CHICKEN_PAR1_1, 588 - DIS_RAM_BYPASS_PSR2_MAN_TRACK, 589 - DIS_RAM_BYPASS_PSR2_MAN_TRACK); 590 593 591 594 tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder)); 592 595 drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE)); ··· 622 637 u32 idle_frames) 623 638 { 624 
639 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 625 - u32 val; 626 640 627 641 idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT; 628 - val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder)); 629 - val &= ~EDP_PSR2_IDLE_FRAME_MASK; 630 - val |= idle_frames; 631 - intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val); 642 + intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), 643 + EDP_PSR2_IDLE_FRAME_MASK, idle_frames); 632 644 } 633 645 634 646 static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp) ··· 690 708 { 691 709 const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay; 692 710 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 711 + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 693 712 u32 exit_scanlines; 694 713 695 714 /* ··· 707 724 if (crtc_state->enable_psr2_sel_fetch) 708 725 return; 709 726 710 - if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO)) 727 + if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO)) 711 728 return; 712 729 713 730 if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state)) ··· 745 762 if (crtc_state->uapi.async_flip) { 746 763 drm_dbg_kms(&dev_priv->drm, 747 764 "PSR2 sel fetch not enabled, async flip enabled\n"); 748 - return false; 749 - } 750 - 751 - /* Wa_14010254185 Wa_14010103792 */ 752 - if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) { 753 - drm_dbg_kms(&dev_priv->drm, 754 - "PSR2 sel fetch not enabled, missing the implementation of WAs\n"); 755 765 return false; 756 766 } 757 767 ··· 815 839 return false; 816 840 817 841 crtc_state->req_psr2_sdp_prior_scanline = true; 842 + return true; 843 + } 844 + 845 + static bool _compute_psr2_wake_times(struct intel_dp *intel_dp, 846 + struct intel_crtc_state *crtc_state) 847 + { 848 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 849 + int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time; 850 + u8 max_wake_lines; 851 + 852 + if 
(DISPLAY_VER(i915) >= 12) { 853 + io_wake_time = 42; 854 + /* 855 + * According to Bspec it's 42us, but based on testing 856 + * it is not enough -> use 45 us. 857 + */ 858 + fast_wake_time = 45; 859 + max_wake_lines = 12; 860 + } else { 861 + io_wake_time = 50; 862 + fast_wake_time = 32; 863 + max_wake_lines = 8; 864 + } 865 + 866 + io_wake_lines = intel_usecs_to_scanlines( 867 + &crtc_state->uapi.adjusted_mode, io_wake_time); 868 + fast_wake_lines = intel_usecs_to_scanlines( 869 + &crtc_state->uapi.adjusted_mode, fast_wake_time); 870 + 871 + if (io_wake_lines > max_wake_lines || 872 + fast_wake_lines > max_wake_lines) 873 + return false; 874 + 875 + if (i915->params.psr_safest_params) 876 + io_wake_lines = fast_wake_lines = max_wake_lines; 877 + 878 + /* According to Bspec lower limit should be set as 7 lines. */ 879 + intel_dp->psr.io_wake_lines = max(io_wake_lines, 7); 880 + intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7); 881 + 818 882 return true; 819 883 } 820 884 ··· 952 936 return false; 953 937 } 954 938 939 + if (!_compute_psr2_wake_times(intel_dp, crtc_state)) { 940 + drm_dbg_kms(&dev_priv->drm, 941 + "PSR2 not enabled, Unable to use long enough wake times\n"); 942 + return false; 943 + } 944 + 955 945 if (HAS_PSR2_SEL_FETCH(dev_priv)) { 956 946 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && 957 947 !HAS_PSR_HW_TRACKING(dev_priv)) { ··· 965 943 "PSR2 not enabled, selective fetch not valid and no HW tracking available\n"); 966 944 return false; 967 945 } 968 - } 969 - 970 - /* Wa_2209313811 */ 971 - if (!crtc_state->enable_psr2_sel_fetch && 972 - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) { 973 - drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n"); 974 - goto unsupported; 975 946 } 976 947 977 948 if (!psr2_granularity_check(intel_dp, crtc_state)) { ··· 1086 1071 } 1087 1072 1088 1073 if (DISPLAY_VER(dev_priv) >= 12) { 1089 - val = intel_de_read(dev_priv, 
EXITLINE(intel_dp->psr.transcoder)); 1074 + val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder)); 1090 1075 val &= EXITLINE_MASK; 1091 1076 pipe_config->dc3co_exitline = val; 1092 1077 } ··· 1160 1145 1161 1146 psr_irq_control(intel_dp); 1162 1147 1163 - if (intel_dp->psr.dc3co_exitline) { 1164 - u32 val; 1165 - 1166 - /* 1167 - * TODO: if future platforms supports DC3CO in more than one 1168 - * transcoder, EXITLINE will need to be unset when disabling PSR 1169 - */ 1170 - val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder)); 1171 - val &= ~EXITLINE_MASK; 1172 - val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT; 1173 - val |= EXITLINE_ENABLE; 1174 - intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val); 1175 - } 1148 + /* 1149 + * TODO: if future platforms supports DC3CO in more than one 1150 + * transcoder, EXITLINE will need to be unset when disabling PSR 1151 + */ 1152 + if (intel_dp->psr.dc3co_exitline) 1153 + intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK, 1154 + intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE); 1176 1155 1177 1156 if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv)) 1178 1157 intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, ··· 1179 1170 */ 1180 1171 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || 1181 1172 IS_DISPLAY_VER(dev_priv, 12, 13)) { 1182 - u16 vtotal, vblank; 1183 - 1184 - vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal - 1185 - crtc_state->uapi.adjusted_mode.crtc_vdisplay; 1186 - vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end - 1187 - crtc_state->uapi.adjusted_mode.crtc_vblank_start; 1188 - if (vblank > vtotal) 1173 + if (crtc_state->hw.adjusted_mode.crtc_vblank_start != 1174 + crtc_state->hw.adjusted_mode.crtc_vdisplay) 1189 1175 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, 1190 1176 wa_16013835468_bit_get(intel_dp)); 1191 1177 } ··· 1202 1198 else if (IS_ALDERLAKE_P(dev_priv)) 1203 1199 intel_de_rmw(dev_priv, 
CHICKEN_TRANS(cpu_transcoder), 0, 1204 1200 ADLP_1_BASED_X_GRANULARITY); 1205 - 1206 - /* Wa_16011168373:adl-p */ 1207 - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 1208 - intel_de_rmw(dev_priv, 1209 - TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), 1210 - TRANS_SET_CONTEXT_LATENCY_MASK, 1211 - TRANS_SET_CONTEXT_LATENCY_VALUE(1)); 1212 1201 1213 1202 /* Wa_16012604467:adlp,mtl[a0,b0] */ 1214 1203 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) ··· 1357 1360 intel_psr_exit(intel_dp); 1358 1361 intel_psr_wait_exit_locked(intel_dp); 1359 1362 1360 - /* Wa_1408330847 */ 1361 - if (intel_dp->psr.psr2_sel_fetch_enabled && 1362 - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 1363 - intel_de_rmw(dev_priv, CHICKEN_PAR1_1, 1364 - DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); 1365 - 1366 1363 /* 1367 1364 * Wa_16013835468 1368 1365 * Wa_14015648006 ··· 1367 1376 wa_16013835468_bit_get(intel_dp), 0); 1368 1377 1369 1378 if (intel_dp->psr.psr2_enabled) { 1370 - /* Wa_16011168373:adl-p */ 1371 - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 1372 - intel_de_rmw(dev_priv, 1373 - TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), 1374 - TRANS_SET_CONTEXT_LATENCY_MASK, 0); 1375 - 1376 1379 /* Wa_16012604467:adlp,mtl[a0,b0] */ 1377 1380 if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 1378 1381 intel_de_rmw(dev_priv, ··· 1532 1547 intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); 1533 1548 } 1534 1549 1535 - void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane, 1536 - const struct intel_crtc_state *crtc_state) 1550 + void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane, 1551 + const struct intel_crtc_state *crtc_state) 1537 1552 { 1538 1553 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1539 1554 enum pipe pipe = plane->pipe; ··· 1544 1559 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0); 1545 1560 } 1546 1561 1547 - void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, 
1548 - const struct intel_crtc_state *crtc_state, 1549 - const struct intel_plane_state *plane_state, 1550 - int color_plane) 1562 + void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane, 1563 + const struct intel_crtc_state *crtc_state, 1564 + const struct intel_plane_state *plane_state) 1565 + { 1566 + struct drm_i915_private *i915 = to_i915(plane->base.dev); 1567 + enum pipe pipe = plane->pipe; 1568 + 1569 + if (!crtc_state->enable_psr2_sel_fetch) 1570 + return; 1571 + 1572 + if (plane->id == PLANE_CURSOR) 1573 + intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), 1574 + plane_state->ctl); 1575 + else 1576 + intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), 1577 + PLANE_SEL_FETCH_CTL_ENABLE); 1578 + } 1579 + 1580 + void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane, 1581 + const struct intel_crtc_state *crtc_state, 1582 + const struct intel_plane_state *plane_state, 1583 + int color_plane) 1551 1584 { 1552 1585 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1553 1586 enum pipe pipe = plane->pipe; ··· 1576 1573 if (!crtc_state->enable_psr2_sel_fetch) 1577 1574 return; 1578 1575 1579 - if (plane->id == PLANE_CURSOR) { 1580 - intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 1581 - plane_state->ctl); 1576 + if (plane->id == PLANE_CURSOR) 1582 1577 return; 1583 - } 1584 1578 1585 1579 clip = &plane_state->psr2_sel_fetch_area; 1586 1580 ··· 1605 1605 val = (drm_rect_height(clip) - 1) << 16; 1606 1606 val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; 1607 1607 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); 1608 - 1609 - intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 1610 - PLANE_SEL_FETCH_CTL_ENABLE); 1611 1608 } 1612 1609 1613 1610 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
+10 -6
drivers/gpu/drm/i915/display/intel_psr.h
··· 46 46 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, 47 47 struct intel_crtc *crtc); 48 48 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state); 49 - void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, 50 - const struct intel_crtc_state *crtc_state, 51 - const struct intel_plane_state *plane_state, 52 - int color_plane); 53 - void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane, 54 - const struct intel_crtc_state *crtc_state); 49 + void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane, 50 + const struct intel_crtc_state *crtc_state, 51 + const struct intel_plane_state *plane_state, 52 + int color_plane); 53 + void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane, 54 + const struct intel_crtc_state *crtc_state, 55 + const struct intel_plane_state *plane_state); 56 + 57 + void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane, 58 + const struct intel_crtc_state *crtc_state); 55 59 void intel_psr_pause(struct intel_dp *intel_dp); 56 60 void intel_psr_resume(struct intel_dp *intel_dp); 57 61
+62
drivers/gpu/drm/i915/display/intel_snps_phy.c
··· 1419 1419 REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), 1420 1420 }; 1421 1421 1422 + static const struct intel_mpllb_state dg2_hdmi_267300 = { 1423 + .clock = 267300, 1424 + .ref_control = 1425 + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), 1426 + .mpllb_cp = 1427 + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | 1428 + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | 1429 + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | 1430 + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), 1431 + .mpllb_div = 1432 + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | 1433 + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | 1434 + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | 1435 + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | 1436 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), 1437 + .mpllb_div2 = 1438 + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | 1439 + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) | 1440 + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), 1441 + .mpllb_fracn1 = 1442 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | 1443 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | 1444 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), 1445 + .mpllb_fracn2 = 1446 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) | 1447 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699), 1448 + .mpllb_sscen = 1449 + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), 1450 + }; 1451 + 1422 1452 static const struct intel_mpllb_state dg2_hdmi_268500 = { 1423 1453 .clock = 268500, 1424 1454 .ref_control = ··· 1535 1505 .mpllb_fracn2 = 1536 1506 REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) | 1537 1507 REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 39320), 1508 + .mpllb_sscen = 1509 + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), 1510 + }; 1511 + 1512 + static const struct intel_mpllb_state dg2_hdmi_319890 = { 1513 + .clock = 319890, 1514 + .ref_control = 1515 + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), 1516 + .mpllb_cp = 1517 + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | 1518 + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | 1519 + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | 1520 + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), 1521 + .mpllb_div = 1522 + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | 1523 + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | 1524 + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | 1525 + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | 1526 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), 1527 + .mpllb_div2 = 1528 + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | 1529 + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) | 1530 + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), 1531 + .mpllb_fracn1 = 1532 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | 1533 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | 1534 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), 1535 + .mpllb_fracn2 = 1536 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) | 1537 + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631), 1538 1538 .mpllb_sscen = 1539 1539 REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), 1540 1540 }; ··· 1756 1696 &dg2_hdmi_209800, 1757 1697 &dg2_hdmi_241500, 1758 1698 &dg2_hdmi_262750, 1699 + &dg2_hdmi_267300, 1759 1700 &dg2_hdmi_268500, 1760 1701 &dg2_hdmi_296703, 1702 + &dg2_hdmi_319890, 1761 1703 &dg2_hdmi_497750, 1762 1704 &dg2_hdmi_592000, 1763 1705 &dg2_hdmi_593407,
+2 -1
drivers/gpu/drm/i915/display/intel_sprite.c
··· 1217 1217 } 1218 1218 1219 1219 intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset); 1220 - intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x); 1220 + intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), 1221 + DVS_OFFSET_Y(y) | DVS_OFFSET_X(x)); 1221 1222 1222 1223 /* 1223 1224 * The control register self-arms if the plane was previously
+2 -4
drivers/gpu/drm/i915/display/intel_tv.c
··· 930 930 /* Prevents vblank waits from timing out in intel_tv_detect_type() */ 931 931 intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc)); 932 932 933 - intel_de_write(dev_priv, TV_CTL, 934 - intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE); 933 + intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE); 935 934 } 936 935 937 936 static void ··· 942 943 struct drm_device *dev = encoder->base.dev; 943 944 struct drm_i915_private *dev_priv = to_i915(dev); 944 945 945 - intel_de_write(dev_priv, TV_CTL, 946 - intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE); 946 + intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0); 947 947 } 948 948 949 949 static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
+2 -2
drivers/gpu/drm/i915/display/intel_vblank.c
··· 26 26 * | 27 27 * | frame start: 28 28 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 29 - * | may be shifted forward 1-3 extra lines via PIPECONF 29 + * | may be shifted forward 1-3 extra lines via TRANSCONF 30 30 * | | 31 31 * | | start of vsync: 32 32 * | | generate vsync interrupt ··· 54 54 * Summary: 55 55 * - most events happen at the start of horizontal sync 56 56 * - frame start happens at the start of horizontal blank, 1-4 lines 57 - * (depending on PIPECONF settings) after the start of vblank 57 + * (depending on TRANSCONF settings) after the start of vblank 58 58 * - gen3/4 pixel and frame counter are synchronized with the start 59 59 * of horizontal active on the first line of vertical active 60 60 */
+1
drivers/gpu/drm/i915/display/intel_vdsc.c
··· 17 17 #include "intel_dsi.h" 18 18 #include "intel_qp_tables.h" 19 19 #include "intel_vdsc.h" 20 + #include "intel_vdsc_regs.h" 20 21 21 22 enum ROW_INDEX_BPP { 22 23 ROW_INDEX_6BPP = 0,
+461
drivers/gpu/drm/i915/display/intel_vdsc_regs.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_VDSC_REGS_H__ 7 + #define __INTEL_VDSC_REGS_H__ 8 + 9 + #include "intel_display_reg_defs.h" 10 + 11 + /* Display Stream Splitter Control */ 12 + #define DSS_CTL1 _MMIO(0x67400) 13 + #define SPLITTER_ENABLE (1 << 31) 14 + #define JOINER_ENABLE (1 << 30) 15 + #define DUAL_LINK_MODE_INTERLEAVE (1 << 24) 16 + #define DUAL_LINK_MODE_FRONTBACK (0 << 24) 17 + #define OVERLAP_PIXELS_MASK (0xf << 16) 18 + #define OVERLAP_PIXELS(pixels) ((pixels) << 16) 19 + #define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) 20 + #define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) 21 + #define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0 22 + 23 + #define DSS_CTL2 _MMIO(0x67404) 24 + #define LEFT_BRANCH_VDSC_ENABLE (1 << 31) 25 + #define RIGHT_BRANCH_VDSC_ENABLE (1 << 15) 26 + #define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) 27 + #define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) 28 + 29 + #define _ICL_PIPE_DSS_CTL1_PB 0x78200 30 + #define _ICL_PIPE_DSS_CTL1_PC 0x78400 31 + #define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 32 + _ICL_PIPE_DSS_CTL1_PB, \ 33 + _ICL_PIPE_DSS_CTL1_PC) 34 + #define BIG_JOINER_ENABLE (1 << 29) 35 + #define MASTER_BIG_JOINER_ENABLE (1 << 28) 36 + #define VGA_CENTERING_ENABLE (1 << 27) 37 + #define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25) 38 + #define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0) 39 + #define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1) 40 + #define UNCOMPRESSED_JOINER_MASTER (1 << 21) 41 + #define UNCOMPRESSED_JOINER_SLAVE (1 << 20) 42 + 43 + #define _ICL_PIPE_DSS_CTL2_PB 0x78204 44 + #define _ICL_PIPE_DSS_CTL2_PC 0x78404 45 + #define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 46 + _ICL_PIPE_DSS_CTL2_PB, \ 47 + _ICL_PIPE_DSS_CTL2_PC) 48 + 49 + /* Icelake Display Stream Compression Registers */ 50 + #define 
DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200) 51 + #define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) 52 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 53 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 54 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 55 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570 56 + #define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 57 + _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \ 58 + _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC) 59 + #define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 60 + _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \ 61 + _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC) 62 + #define DSC_ALT_ICH_SEL (1 << 20) 63 + #define DSC_VBR_ENABLE (1 << 19) 64 + #define DSC_422_ENABLE (1 << 18) 65 + #define DSC_COLOR_SPACE_CONVERSION (1 << 17) 66 + #define DSC_BLOCK_PREDICTION (1 << 16) 67 + #define DSC_LINE_BUF_DEPTH_SHIFT 12 68 + #define DSC_BPC_SHIFT 8 69 + #define DSC_VER_MIN_SHIFT 4 70 + #define DSC_VER_MAJ (0x1 << 0) 71 + 72 + #define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204) 73 + #define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04) 74 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274 75 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374 76 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474 77 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574 78 + #define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 79 + _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \ 80 + _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC) 81 + #define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 82 + _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \ 83 + _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC) 84 + #define DSC_BPP(bpp) ((bpp) << 0) 85 + 86 + #define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208) 87 + #define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08) 88 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278 89 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378 90 + #define 
_ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478 91 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578 92 + #define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 93 + _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \ 94 + _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC) 95 + #define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 96 + _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \ 97 + _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC) 98 + #define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16) 99 + #define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0) 100 + 101 + #define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C) 102 + #define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C) 103 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C 104 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C 105 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C 106 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C 107 + #define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 108 + _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \ 109 + _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC) 110 + #define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 111 + _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \ 112 + _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC) 113 + #define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16) 114 + #define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0) 115 + 116 + #define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210) 117 + #define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10) 118 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280 119 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380 120 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480 121 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580 122 + #define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 123 + _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ 124 + _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) 125 + #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) 
- PIPE_B, \ 126 + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ 127 + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) 128 + #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) 129 + #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) 130 + 131 + #define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214) 132 + #define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14) 133 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284 134 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384 135 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484 136 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584 137 + #define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 138 + _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ 139 + _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) 140 + #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 141 + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ 142 + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) 143 + #define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16) 144 + #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) 145 + 146 + #define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218) 147 + #define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18) 148 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288 149 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388 150 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488 151 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588 152 + #define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 153 + _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \ 154 + _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC) 155 + #define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 156 + _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \ 157 + _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC) 158 + #define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24) 159 + #define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16) 160 + #define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8) 161 + #define DSC_INITIAL_SCALE_VALUE(value) 
((value) << 0) 162 + 163 + #define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C) 164 + #define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C) 165 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C 166 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C 167 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C 168 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C 169 + #define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 170 + _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \ 171 + _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC) 172 + #define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 173 + _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \ 174 + _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC) 175 + #define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16) 176 + #define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0) 177 + 178 + #define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220) 179 + #define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20) 180 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290 181 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390 182 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490 183 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590 184 + #define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 185 + _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \ 186 + _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC) 187 + #define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 188 + _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \ 189 + _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC) 190 + #define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16) 191 + #define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0) 192 + 193 + #define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224) 194 + #define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24) 195 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294 196 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394 197 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494 198 
+ #define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594 199 + #define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 200 + _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \ 201 + _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC) 202 + #define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 203 + _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \ 204 + _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC) 205 + #define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16) 206 + #define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0) 207 + 208 + #define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228) 209 + #define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28) 210 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298 211 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398 212 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498 213 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598 214 + #define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 215 + _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \ 216 + _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC) 217 + #define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 218 + _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \ 219 + _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC) 220 + #define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20) 221 + #define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16) 222 + #define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8) 223 + #define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0) 224 + 225 + #define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C) 226 + #define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C) 227 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C 228 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C 229 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C 230 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C 231 + #define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 232 + 
_ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \ 233 + _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC) 234 + #define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 235 + _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \ 236 + _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC) 237 + 238 + #define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260) 239 + #define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60) 240 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0 241 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0 242 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0 243 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0 244 + #define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 245 + _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \ 246 + _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC) 247 + #define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 248 + _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \ 249 + _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC) 250 + 251 + #define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264) 252 + #define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64) 253 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4 254 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4 255 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4 256 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4 257 + #define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 258 + _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \ 259 + _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC) 260 + #define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 261 + _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \ 262 + _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC) 263 + 264 + #define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268) 265 + #define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68) 266 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8 267 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8 268 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8 
269 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8 270 + #define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 271 + _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \ 272 + _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC) 273 + #define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 274 + _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \ 275 + _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC) 276 + 277 + #define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C) 278 + #define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C) 279 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC 280 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC 281 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC 282 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC 283 + #define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 284 + _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \ 285 + _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC) 286 + #define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 287 + _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \ 288 + _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC) 289 + 290 + #define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270) 291 + #define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70) 292 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0 293 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0 294 + #define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0 295 + #define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0 296 + #define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 297 + _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \ 298 + _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC) 299 + #define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 300 + _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ 301 + _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) 302 + #define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20) 303 + #define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) 
<< 16) 304 + #define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) 305 + 306 + /* Icelake Rate Control Buffer Threshold Registers */ 307 + #define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) 308 + #define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4) 309 + #define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30) 310 + #define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4) 311 + #define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254) 312 + #define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4) 313 + #define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354) 314 + #define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4) 315 + #define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454) 316 + #define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4) 317 + #define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554) 318 + #define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4) 319 + #define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 320 + _ICL_DSC0_RC_BUF_THRESH_0_PB, \ 321 + _ICL_DSC0_RC_BUF_THRESH_0_PC) 322 + #define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 323 + _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \ 324 + _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC) 325 + #define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 326 + _ICL_DSC1_RC_BUF_THRESH_0_PB, \ 327 + _ICL_DSC1_RC_BUF_THRESH_0_PC) 328 + #define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 329 + _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \ 330 + _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC) 331 + 332 + #define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238) 333 + #define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4) 334 + #define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38) 335 + #define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4) 336 + #define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C) 337 + #define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4) 338 + #define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C) 339 + #define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4) 340 + #define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C) 341 + #define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC 
(0x7845C + 4) 342 + #define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C) 343 + #define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4) 344 + #define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 345 + _ICL_DSC0_RC_BUF_THRESH_1_PB, \ 346 + _ICL_DSC0_RC_BUF_THRESH_1_PC) 347 + #define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 348 + _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \ 349 + _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC) 350 + #define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 351 + _ICL_DSC1_RC_BUF_THRESH_1_PB, \ 352 + _ICL_DSC1_RC_BUF_THRESH_1_PC) 353 + #define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 354 + _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ 355 + _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) 356 + 357 + /* Icelake DSC Rate Control Range Parameter Registers */ 358 + #define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240) 359 + #define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4) 360 + #define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40) 361 + #define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4) 362 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208) 363 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4) 364 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308) 365 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4) 366 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408) 367 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4) 368 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508) 369 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4) 370 + #define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 371 + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \ 372 + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC) 373 + #define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 374 + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \ 375 + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC) 376 + #define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 377 + 
_ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \ 378 + _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC) 379 + #define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 380 + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \ 381 + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC) 382 + #define RC_BPG_OFFSET_SHIFT 10 383 + #define RC_MAX_QP_SHIFT 5 384 + #define RC_MIN_QP_SHIFT 0 385 + 386 + #define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248) 387 + #define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4) 388 + #define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48) 389 + #define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4) 390 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210) 391 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4) 392 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310) 393 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4) 394 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410) 395 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4) 396 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510) 397 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4) 398 + #define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 399 + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \ 400 + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC) 401 + #define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 402 + _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \ 403 + _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC) 404 + #define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 405 + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \ 406 + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC) 407 + #define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 408 + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \ 409 + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC) 410 + 411 + #define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250) 412 + #define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4) 413 + #define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50) 414 + #define 
DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4) 415 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218) 416 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4) 417 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318) 418 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4) 419 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418) 420 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4) 421 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518) 422 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4) 423 + #define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 424 + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \ 425 + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC) 426 + #define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 427 + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \ 428 + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC) 429 + #define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 430 + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \ 431 + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC) 432 + #define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 433 + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \ 434 + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC) 435 + 436 + #define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258) 437 + #define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4) 438 + #define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58) 439 + #define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4) 440 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220) 441 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4) 442 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320) 443 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4) 444 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420) 445 + #define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4) 446 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520) 447 + #define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4) 448 
+ #define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 449 + _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \ 450 + _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC) 451 + #define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 452 + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \ 453 + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC) 454 + #define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 455 + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \ 456 + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC) 457 + #define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 458 + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \ 459 + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC) 460 + 461 + #endif /* __INTEL_VDSC_REGS_H__ */
+2 -8
drivers/gpu/drm/i915/display/intel_vrr.c
··· 144 144 * is deprecated. 145 145 */ 146 146 if (DISPLAY_VER(i915) >= 13) { 147 - /* 148 - * FIXME: Subtract Window2 delay from below value. 149 - * 150 - * Window2 specifies time required to program DSB (Window2) in 151 - * number of scan lines. Assuming 0 for no DSB. 152 - */ 153 147 crtc_state->vrr.guardband = 154 - crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vdisplay; 148 + crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start; 155 149 } else { 156 150 crtc_state->vrr.pipeline_full = 157 - min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 151 + min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start - 158 152 crtc_state->framestart_delay - 1); 159 153 } 160 154
+408
drivers/gpu/drm/i915/display/intel_wm.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "i915_drv.h" 7 + #include "i9xx_wm.h" 8 + #include "intel_display_types.h" 9 + #include "intel_wm.h" 10 + #include "skl_watermark.h" 11 + 12 + /** 13 + * intel_update_watermarks - update FIFO watermark values based on current modes 14 + * @dev_priv: i915 device 15 + * 16 + * Calculate watermark values for the various WM regs based on current mode 17 + * and plane configuration. 18 + * 19 + * There are several cases to deal with here: 20 + * - normal (i.e. non-self-refresh) 21 + * - self-refresh (SR) mode 22 + * - lines are large relative to FIFO size (buffer can hold up to 2) 23 + * - lines are small relative to FIFO size (buffer can hold more than 2 24 + * lines), so need to account for TLB latency 25 + * 26 + * The normal calculation is: 27 + * watermark = dotclock * bytes per pixel * latency 28 + * where latency is platform & configuration dependent (we assume pessimal 29 + * values here). 30 + * 31 + * The SR calculation is: 32 + * watermark = (trunc(latency/line time)+1) * surface width * 33 + * bytes per pixel 34 + * where 35 + * line time = htotal / dotclock 36 + * surface width = hdisplay for normal plane and 64 for cursor 37 + * and latency is assumed to be high, as above. 38 + * 39 + * The final value programmed to the register should always be rounded up, 40 + * and include an extra 2 entries to account for clock crossings. 41 + * 42 + * We don't use the sprite, so we can ignore that. And on Crestline we have 43 + * to set the non-SR watermarks to 8. 
44 + */ 45 + void intel_update_watermarks(struct drm_i915_private *i915) 46 + { 47 + if (i915->display.funcs.wm->update_wm) 48 + i915->display.funcs.wm->update_wm(i915); 49 + } 50 + 51 + int intel_compute_pipe_wm(struct intel_atomic_state *state, 52 + struct intel_crtc *crtc) 53 + { 54 + struct drm_i915_private *i915 = to_i915(state->base.dev); 55 + 56 + if (i915->display.funcs.wm->compute_pipe_wm) 57 + return i915->display.funcs.wm->compute_pipe_wm(state, crtc); 58 + 59 + return 0; 60 + } 61 + 62 + int intel_compute_intermediate_wm(struct intel_atomic_state *state, 63 + struct intel_crtc *crtc) 64 + { 65 + struct drm_i915_private *i915 = to_i915(state->base.dev); 66 + 67 + if (!i915->display.funcs.wm->compute_intermediate_wm) 68 + return 0; 69 + 70 + if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm)) 71 + return 0; 72 + 73 + return i915->display.funcs.wm->compute_intermediate_wm(state, crtc); 74 + } 75 + 76 + bool intel_initial_watermarks(struct intel_atomic_state *state, 77 + struct intel_crtc *crtc) 78 + { 79 + struct drm_i915_private *i915 = to_i915(state->base.dev); 80 + 81 + if (i915->display.funcs.wm->initial_watermarks) { 82 + i915->display.funcs.wm->initial_watermarks(state, crtc); 83 + return true; 84 + } 85 + 86 + return false; 87 + } 88 + 89 + void intel_atomic_update_watermarks(struct intel_atomic_state *state, 90 + struct intel_crtc *crtc) 91 + { 92 + struct drm_i915_private *i915 = to_i915(state->base.dev); 93 + 94 + if (i915->display.funcs.wm->atomic_update_watermarks) 95 + i915->display.funcs.wm->atomic_update_watermarks(state, crtc); 96 + } 97 + 98 + void intel_optimize_watermarks(struct intel_atomic_state *state, 99 + struct intel_crtc *crtc) 100 + { 101 + struct drm_i915_private *i915 = to_i915(state->base.dev); 102 + 103 + if (i915->display.funcs.wm->optimize_watermarks) 104 + i915->display.funcs.wm->optimize_watermarks(state, crtc); 105 + } 106 + 107 + int intel_compute_global_watermarks(struct intel_atomic_state *state) 
108 + { 109 + struct drm_i915_private *i915 = to_i915(state->base.dev); 110 + 111 + if (i915->display.funcs.wm->compute_global_watermarks) 112 + return i915->display.funcs.wm->compute_global_watermarks(state); 113 + 114 + return 0; 115 + } 116 + 117 + void intel_wm_get_hw_state(struct drm_i915_private *i915) 118 + { 119 + if (i915->display.funcs.wm->get_hw_state) 120 + return i915->display.funcs.wm->get_hw_state(i915); 121 + } 122 + 123 + bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, 124 + const struct intel_plane_state *plane_state) 125 + { 126 + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 127 + 128 + /* FIXME check the 'enable' instead */ 129 + if (!crtc_state->hw.active) 130 + return false; 131 + 132 + /* 133 + * Treat cursor with fb as always visible since cursor updates 134 + * can happen faster than the vrefresh rate, and the current 135 + * watermark code doesn't handle that correctly. Cursor updates 136 + * which set/clear the fb or change the cursor size are going 137 + * to get throttled by intel_legacy_cursor_update() to work 138 + * around this problem with the watermark code. 139 + */ 140 + if (plane->id == PLANE_CURSOR) 141 + return plane_state->hw.fb != NULL; 142 + else 143 + return plane_state->uapi.visible; 144 + } 145 + 146 + void intel_print_wm_latency(struct drm_i915_private *dev_priv, 147 + const char *name, const u16 wm[]) 148 + { 149 + int level; 150 + 151 + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { 152 + unsigned int latency = wm[level]; 153 + 154 + if (latency == 0) { 155 + drm_dbg_kms(&dev_priv->drm, 156 + "%s WM%d latency not provided\n", 157 + name, level); 158 + continue; 159 + } 160 + 161 + /* 162 + * - latencies are in us on gen9. 
163 + * - before then, WM1+ latency values are in 0.5us units 164 + */ 165 + if (DISPLAY_VER(dev_priv) >= 9) 166 + latency *= 10; 167 + else if (level > 0) 168 + latency *= 5; 169 + 170 + drm_dbg_kms(&dev_priv->drm, 171 + "%s WM%d latency %u (%u.%u usec)\n", name, level, 172 + wm[level], latency / 10, latency % 10); 173 + } 174 + } 175 + 176 + void intel_wm_init(struct drm_i915_private *i915) 177 + { 178 + if (DISPLAY_VER(i915) >= 9) 179 + skl_wm_init(i915); 180 + else 181 + i9xx_wm_init(i915); 182 + } 183 + 184 + static void wm_latency_show(struct seq_file *m, const u16 wm[8]) 185 + { 186 + struct drm_i915_private *dev_priv = m->private; 187 + int level; 188 + 189 + drm_modeset_lock_all(&dev_priv->drm); 190 + 191 + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { 192 + unsigned int latency = wm[level]; 193 + 194 + /* 195 + * - WM1+ latency values in 0.5us units 196 + * - latencies are in us on gen9/vlv/chv 197 + */ 198 + if (DISPLAY_VER(dev_priv) >= 9 || 199 + IS_VALLEYVIEW(dev_priv) || 200 + IS_CHERRYVIEW(dev_priv) || 201 + IS_G4X(dev_priv)) 202 + latency *= 10; 203 + else if (level > 0) 204 + latency *= 5; 205 + 206 + seq_printf(m, "WM%d %u (%u.%u usec)\n", 207 + level, wm[level], latency / 10, latency % 10); 208 + } 209 + 210 + drm_modeset_unlock_all(&dev_priv->drm); 211 + } 212 + 213 + static int pri_wm_latency_show(struct seq_file *m, void *data) 214 + { 215 + struct drm_i915_private *dev_priv = m->private; 216 + const u16 *latencies; 217 + 218 + if (DISPLAY_VER(dev_priv) >= 9) 219 + latencies = dev_priv->display.wm.skl_latency; 220 + else 221 + latencies = dev_priv->display.wm.pri_latency; 222 + 223 + wm_latency_show(m, latencies); 224 + 225 + return 0; 226 + } 227 + 228 + static int spr_wm_latency_show(struct seq_file *m, void *data) 229 + { 230 + struct drm_i915_private *dev_priv = m->private; 231 + const u16 *latencies; 232 + 233 + if (DISPLAY_VER(dev_priv) >= 9) 234 + latencies = dev_priv->display.wm.skl_latency; 235 + else 236 + 
latencies = dev_priv->display.wm.spr_latency; 237 + 238 + wm_latency_show(m, latencies); 239 + 240 + return 0; 241 + } 242 + 243 + static int cur_wm_latency_show(struct seq_file *m, void *data) 244 + { 245 + struct drm_i915_private *dev_priv = m->private; 246 + const u16 *latencies; 247 + 248 + if (DISPLAY_VER(dev_priv) >= 9) 249 + latencies = dev_priv->display.wm.skl_latency; 250 + else 251 + latencies = dev_priv->display.wm.cur_latency; 252 + 253 + wm_latency_show(m, latencies); 254 + 255 + return 0; 256 + } 257 + 258 + static int pri_wm_latency_open(struct inode *inode, struct file *file) 259 + { 260 + struct drm_i915_private *dev_priv = inode->i_private; 261 + 262 + if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) 263 + return -ENODEV; 264 + 265 + return single_open(file, pri_wm_latency_show, dev_priv); 266 + } 267 + 268 + static int spr_wm_latency_open(struct inode *inode, struct file *file) 269 + { 270 + struct drm_i915_private *dev_priv = inode->i_private; 271 + 272 + if (HAS_GMCH(dev_priv)) 273 + return -ENODEV; 274 + 275 + return single_open(file, spr_wm_latency_show, dev_priv); 276 + } 277 + 278 + static int cur_wm_latency_open(struct inode *inode, struct file *file) 279 + { 280 + struct drm_i915_private *dev_priv = inode->i_private; 281 + 282 + if (HAS_GMCH(dev_priv)) 283 + return -ENODEV; 284 + 285 + return single_open(file, cur_wm_latency_show, dev_priv); 286 + } 287 + 288 + static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 289 + size_t len, loff_t *offp, u16 wm[8]) 290 + { 291 + struct seq_file *m = file->private_data; 292 + struct drm_i915_private *dev_priv = m->private; 293 + u16 new[8] = { 0 }; 294 + int level; 295 + int ret; 296 + char tmp[32]; 297 + 298 + if (len >= sizeof(tmp)) 299 + return -EINVAL; 300 + 301 + if (copy_from_user(tmp, ubuf, len)) 302 + return -EFAULT; 303 + 304 + tmp[len] = '\0'; 305 + 306 + ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 307 + &new[0], &new[1], &new[2], &new[3], 308 + &new[4], 
&new[5], &new[6], &new[7]); 309 + if (ret != dev_priv->display.wm.num_levels) 310 + return -EINVAL; 311 + 312 + drm_modeset_lock_all(&dev_priv->drm); 313 + 314 + for (level = 0; level < dev_priv->display.wm.num_levels; level++) 315 + wm[level] = new[level]; 316 + 317 + drm_modeset_unlock_all(&dev_priv->drm); 318 + 319 + return len; 320 + } 321 + 322 + static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 323 + size_t len, loff_t *offp) 324 + { 325 + struct seq_file *m = file->private_data; 326 + struct drm_i915_private *dev_priv = m->private; 327 + u16 *latencies; 328 + 329 + if (DISPLAY_VER(dev_priv) >= 9) 330 + latencies = dev_priv->display.wm.skl_latency; 331 + else 332 + latencies = dev_priv->display.wm.pri_latency; 333 + 334 + return wm_latency_write(file, ubuf, len, offp, latencies); 335 + } 336 + 337 + static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 338 + size_t len, loff_t *offp) 339 + { 340 + struct seq_file *m = file->private_data; 341 + struct drm_i915_private *dev_priv = m->private; 342 + u16 *latencies; 343 + 344 + if (DISPLAY_VER(dev_priv) >= 9) 345 + latencies = dev_priv->display.wm.skl_latency; 346 + else 347 + latencies = dev_priv->display.wm.spr_latency; 348 + 349 + return wm_latency_write(file, ubuf, len, offp, latencies); 350 + } 351 + 352 + static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 353 + size_t len, loff_t *offp) 354 + { 355 + struct seq_file *m = file->private_data; 356 + struct drm_i915_private *dev_priv = m->private; 357 + u16 *latencies; 358 + 359 + if (DISPLAY_VER(dev_priv) >= 9) 360 + latencies = dev_priv->display.wm.skl_latency; 361 + else 362 + latencies = dev_priv->display.wm.cur_latency; 363 + 364 + return wm_latency_write(file, ubuf, len, offp, latencies); 365 + } 366 + 367 + static const struct file_operations i915_pri_wm_latency_fops = { 368 + .owner = THIS_MODULE, 369 + .open = pri_wm_latency_open, 370 + .read = seq_read, 371 + .llseek 
= seq_lseek, 372 + .release = single_release, 373 + .write = pri_wm_latency_write 374 + }; 375 + 376 + static const struct file_operations i915_spr_wm_latency_fops = { 377 + .owner = THIS_MODULE, 378 + .open = spr_wm_latency_open, 379 + .read = seq_read, 380 + .llseek = seq_lseek, 381 + .release = single_release, 382 + .write = spr_wm_latency_write 383 + }; 384 + 385 + static const struct file_operations i915_cur_wm_latency_fops = { 386 + .owner = THIS_MODULE, 387 + .open = cur_wm_latency_open, 388 + .read = seq_read, 389 + .llseek = seq_lseek, 390 + .release = single_release, 391 + .write = cur_wm_latency_write 392 + }; 393 + 394 + void intel_wm_debugfs_register(struct drm_i915_private *i915) 395 + { 396 + struct drm_minor *minor = i915->drm.primary; 397 + 398 + debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root, 399 + i915, &i915_pri_wm_latency_fops); 400 + 401 + debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root, 402 + i915, &i915_spr_wm_latency_fops); 403 + 404 + debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root, 405 + i915, &i915_cur_wm_latency_fops); 406 + 407 + skl_watermark_debugfs_register(i915); 408 + }
+37
drivers/gpu/drm/i915/display/intel_wm.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_WM_H__ 7 + #define __INTEL_WM_H__ 8 + 9 + #include <linux/types.h> 10 + 11 + struct drm_i915_private; 12 + struct intel_atomic_state; 13 + struct intel_crtc; 14 + struct intel_crtc_state; 15 + struct intel_plane_state; 16 + 17 + void intel_update_watermarks(struct drm_i915_private *i915); 18 + int intel_compute_pipe_wm(struct intel_atomic_state *state, 19 + struct intel_crtc *crtc); 20 + int intel_compute_intermediate_wm(struct intel_atomic_state *state, 21 + struct intel_crtc *crtc); 22 + bool intel_initial_watermarks(struct intel_atomic_state *state, 23 + struct intel_crtc *crtc); 24 + void intel_atomic_update_watermarks(struct intel_atomic_state *state, 25 + struct intel_crtc *crtc); 26 + void intel_optimize_watermarks(struct intel_atomic_state *state, 27 + struct intel_crtc *crtc); 28 + int intel_compute_global_watermarks(struct intel_atomic_state *state); 29 + void intel_wm_get_hw_state(struct drm_i915_private *i915); 30 + bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, 31 + const struct intel_plane_state *plane_state); 32 + void intel_print_wm_latency(struct drm_i915_private *i915, 33 + const char *name, const u16 wm[]); 34 + void intel_wm_init(struct drm_i915_private *i915); 35 + void intel_wm_debugfs_register(struct drm_i915_private *i915); 36 + 37 + #endif /* __INTEL_WM_H__ */
+5 -3
drivers/gpu/drm/i915/display/skl_universal_plane.c
··· 642 642 643 643 skl_write_plane_wm(plane, crtc_state); 644 644 645 - intel_psr2_disable_plane_sel_fetch(plane, crtc_state); 645 + intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state); 646 646 intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); 647 647 intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); 648 648 } ··· 1260 1260 if (plane_state->force_black) 1261 1261 icl_plane_csc_load_black(plane); 1262 1262 1263 - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); 1263 + intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane); 1264 1264 } 1265 1265 1266 1266 static void ··· 1286 1286 */ 1287 1287 if (plane_state->scaler_id >= 0) 1288 1288 skl_program_plane_scaler(plane, crtc_state, plane_state); 1289 + 1290 + intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state); 1289 1291 1290 1292 /* 1291 1293 * The control register self-arms if the plane was previously ··· 2182 2180 if (DISPLAY_VER(i915) < 12) 2183 2181 return false; 2184 2182 2185 - /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ 2183 + /* Wa_14010477008 */ 2186 2184 if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || 2187 2185 IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0)) 2188 2186 return false;
+96 -57
drivers/gpu/drm/i915/display/skl_watermark.c
··· 5 5 6 6 #include <drm/drm_blend.h> 7 7 8 + #include "i915_drv.h" 9 + #include "i915_fixed.h" 10 + #include "i915_reg.h" 11 + #include "i9xx_wm.h" 8 12 #include "intel_atomic.h" 9 13 #include "intel_atomic_plane.h" 10 14 #include "intel_bw.h" ··· 17 13 #include "intel_display_power.h" 18 14 #include "intel_display_types.h" 19 15 #include "intel_fb.h" 20 - #include "skl_watermark.h" 21 - 22 - #include "i915_drv.h" 23 - #include "i915_fixed.h" 24 - #include "i915_reg.h" 25 16 #include "intel_pcode.h" 26 - #include "intel_pm.h" 17 + #include "intel_wm.h" 18 + #include "skl_watermark.h" 27 19 28 20 static void skl_sagv_disable(struct drm_i915_private *i915); 29 21 ··· 64 64 static bool 65 65 intel_has_sagv(struct drm_i915_private *i915) 66 66 { 67 - return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) && 67 + return HAS_SAGV(i915) && 68 68 i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED; 69 69 } 70 70 ··· 92 92 return val; 93 93 } else if (DISPLAY_VER(i915) == 11) { 94 94 return 10; 95 - } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) { 95 + } else if (HAS_SAGV(i915)) { 96 96 return 30; 97 97 } else { 98 98 return 0; ··· 101 101 102 102 static void intel_sagv_init(struct drm_i915_private *i915) 103 103 { 104 - if (!intel_has_sagv(i915)) 104 + if (!HAS_SAGV(i915)) 105 105 i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; 106 106 107 107 /* ··· 359 359 continue; 360 360 361 361 /* Find the highest enabled wm level for this plane */ 362 - for (level = ilk_wm_max_level(i915); 362 + for (level = i915->display.wm.num_levels - 1; 363 363 !wm->wm[level].enable; --level) 364 364 { } 365 365 ··· 710 710 { 711 711 struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); 712 712 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 713 - int level, max_level = ilk_wm_max_level(i915); 714 713 struct skl_wm_level wm = {}; 715 714 int ret, min_ddb_alloc = 0; 716 715 struct skl_wm_params wp; 716 + int level; 717 717 718 718 ret = 
skl_compute_wm_params(crtc_state, 256, 719 719 drm_format_info(DRM_FORMAT_ARGB8888), ··· 722 722 crtc_state->pixel_rate, &wp, 0); 723 723 drm_WARN_ON(&i915->drm, ret); 724 724 725 - for (level = 0; level <= max_level; level++) { 725 + for (level = 0; level < i915->display.wm.num_levels; level++) { 726 726 unsigned int latency = i915->display.wm.skl_latency[level]; 727 727 728 728 skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); ··· 1407 1407 } 1408 1408 } 1409 1409 1410 - static bool icl_need_wm1_wa(struct drm_i915_private *i915, 1411 - enum plane_id plane_id) 1410 + static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level, 1411 + const struct skl_plane_wm *wm) 1412 1412 { 1413 1413 /* 1414 1414 * Wa_1408961008:icl, ehl 1415 1415 * Wa_14012656716:tgl, adl 1416 - * Underruns with WM1+ disabled 1416 + * Wa_14017887344:icl 1417 + * Wa_14017868169:adl, tgl 1418 + * Due to some power saving optimizations, different subsystems 1419 + * like PSR, might still use even disabled wm level registers, 1420 + * for "reference", so lets keep at least the values sane. 1421 + * Considering amount of WA requiring us to do similar things, was 1422 + * decided to simply do it for all of the platforms, as those wm 1423 + * levels are disabled, this isn't going to do harm anyway. 1417 1424 */ 1418 - return DISPLAY_VER(i915) == 11 || 1419 - (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR); 1425 + return level > 0 && !wm->wm[level].enable; 1420 1426 } 1421 1427 1422 1428 struct skl_plane_ddb_iter { ··· 1498 1492 * Find the highest watermark level for which we can satisfy the block 1499 1493 * requirement of active planes. 1500 1494 */ 1501 - for (level = ilk_wm_max_level(i915); level >= 0; level--) { 1495 + for (level = i915->display.wm.num_levels - 1; level >= 0; level--) { 1502 1496 blocks = 0; 1503 1497 for_each_plane_id_on_crtc(crtc, plane_id) { 1504 1498 const struct skl_plane_wm *wm = ··· 1574 1568 * all levels as "enabled." 
Go back now and disable the ones 1575 1569 * that aren't actually possible. 1576 1570 */ 1577 - for (level++; level <= ilk_wm_max_level(i915); level++) { 1571 + for (level++; level < i915->display.wm.num_levels; level++) { 1578 1572 for_each_plane_id_on_crtc(crtc, plane_id) { 1579 1573 const struct skl_ddb_entry *ddb = 1580 1574 &crtc_state->wm.skl.plane_ddb[plane_id]; ··· 1591 1585 else 1592 1586 skl_check_wm_level(&wm->wm[level], ddb); 1593 1587 1594 - if (icl_need_wm1_wa(i915, plane_id) && 1595 - level == 1 && !wm->wm[level].enable && 1596 - wm->wm[0].enable) { 1597 - wm->wm[level].blocks = wm->wm[0].blocks; 1598 - wm->wm[level].lines = wm->wm[0].lines; 1599 - wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; 1588 + if (skl_need_wm_copy_wa(i915, level, wm)) { 1589 + wm->wm[level].blocks = wm->wm[level - 1].blocks; 1590 + wm->wm[level].lines = wm->wm[level - 1].lines; 1591 + wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines; 1600 1592 } 1601 1593 } 1602 1594 } ··· 1971 1967 struct skl_wm_level *levels) 1972 1968 { 1973 1969 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 1974 - int level, max_level = ilk_wm_max_level(i915); 1975 1970 struct skl_wm_level *result_prev = &levels[0]; 1971 + int level; 1976 1972 1977 - for (level = 0; level <= max_level; level++) { 1973 + for (level = 0; level < i915->display.wm.num_levels; level++) { 1978 1974 struct skl_wm_level *result = &levels[level]; 1979 1975 unsigned int latency = i915->display.wm.skl_latency[level]; 1980 1976 ··· 2252 2248 const struct intel_crtc_state *crtc_state) 2253 2249 { 2254 2250 struct drm_i915_private *i915 = to_i915(plane->base.dev); 2255 - int level, max_level = ilk_wm_max_level(i915); 2256 2251 enum plane_id plane_id = plane->id; 2257 2252 enum pipe pipe = plane->pipe; 2258 2253 const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; ··· 2259 2256 &crtc_state->wm.skl.plane_ddb[plane_id]; 2260 2257 const struct skl_ddb_entry *ddb_y = 2261 2258 
&crtc_state->wm.skl.plane_ddb_y[plane_id]; 2259 + int level; 2262 2260 2263 - for (level = 0; level <= max_level; level++) 2261 + for (level = 0; level < i915->display.wm.num_levels; level++) 2264 2262 skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level), 2265 2263 skl_plane_wm_level(pipe_wm, plane_id, level)); 2266 2264 ··· 2289 2285 const struct intel_crtc_state *crtc_state) 2290 2286 { 2291 2287 struct drm_i915_private *i915 = to_i915(plane->base.dev); 2292 - int level, max_level = ilk_wm_max_level(i915); 2293 2288 enum plane_id plane_id = plane->id; 2294 2289 enum pipe pipe = plane->pipe; 2295 2290 const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; 2296 2291 const struct skl_ddb_entry *ddb = 2297 2292 &crtc_state->wm.skl.plane_ddb[plane_id]; 2293 + int level; 2298 2294 2299 - for (level = 0; level <= max_level; level++) 2295 + for (level = 0; level < i915->display.wm.num_levels; level++) 2300 2296 skl_write_wm_level(i915, CUR_WM(pipe, level), 2301 2297 skl_plane_wm_level(pipe_wm, plane_id, level)); 2302 2298 ··· 2328 2324 const struct skl_plane_wm *wm1, 2329 2325 const struct skl_plane_wm *wm2) 2330 2326 { 2331 - int level, max_level = ilk_wm_max_level(i915); 2327 + int level; 2332 2328 2333 - for (level = 0; level <= max_level; level++) { 2329 + for (level = 0; level < i915->display.wm.num_levels; level++) { 2334 2330 /* 2335 2331 * We don't check uv_wm as the hardware doesn't actually 2336 2332 * use it. 
It only gets used for calculating the required ··· 2402 2398 return PTR_ERR(plane_state); 2403 2399 2404 2400 new_crtc_state->update_planes |= BIT(plane_id); 2401 + new_crtc_state->async_flip_planes = 0; 2402 + new_crtc_state->do_async_flip = false; 2405 2403 } 2406 2404 2407 2405 return 0; ··· 2680 2674 const struct skl_pipe_wm *new_pipe_wm) 2681 2675 { 2682 2676 struct drm_i915_private *i915 = to_i915(plane->base.dev); 2683 - int level, max_level = ilk_wm_max_level(i915); 2677 + int level; 2684 2678 2685 - for (level = 0; level <= max_level; level++) { 2679 + for (level = 0; level < i915->display.wm.num_levels; level++) { 2686 2680 /* 2687 2681 * We don't check uv_wm as the hardware doesn't actually 2688 2682 * use it. It only gets used for calculating the required ··· 2761 2755 return PTR_ERR(plane_state); 2762 2756 2763 2757 new_crtc_state->update_planes |= BIT(plane_id); 2758 + new_crtc_state->async_flip_planes = 0; 2759 + new_crtc_state->do_async_flip = false; 2764 2760 } 2765 2761 2766 2762 return 0; ··· 2818 2810 { 2819 2811 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2820 2812 enum pipe pipe = crtc->pipe; 2821 - int level, max_level; 2822 2813 enum plane_id plane_id; 2814 + int level; 2823 2815 u32 val; 2824 - 2825 - max_level = ilk_wm_max_level(i915); 2826 2816 2827 2817 for_each_plane_id_on_crtc(crtc, plane_id) { 2828 2818 struct skl_plane_wm *wm = &out->planes[plane_id]; 2829 2819 2830 - for (level = 0; level <= max_level; level++) { 2820 + for (level = 0; level < i915->display.wm.num_levels; level++) { 2831 2821 if (plane_id != PLANE_CURSOR) 2832 2822 val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level)); 2833 2823 else ··· 2862 2856 } 2863 2857 } 2864 2858 2865 - void skl_wm_get_hw_state(struct drm_i915_private *i915) 2859 + static void skl_wm_get_hw_state(struct drm_i915_private *i915) 2866 2860 { 2867 2861 struct intel_dbuf_state *dbuf_state = 2868 2862 to_intel_dbuf_state(i915->display.dbuf.obj.state); ··· 2962 2956 return false; 
2963 2957 } 2964 2958 2965 - void skl_wm_sanitize(struct drm_i915_private *i915) 2959 + static void skl_wm_sanitize(struct drm_i915_private *i915) 2966 2960 { 2967 2961 struct intel_crtc *crtc; 2968 2962 ··· 2998 2992 } 2999 2993 } 3000 2994 2995 + static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) 2996 + { 2997 + skl_wm_get_hw_state(i915); 2998 + skl_wm_sanitize(i915); 2999 + } 3000 + 3001 3001 void intel_wm_state_verify(struct intel_crtc *crtc, 3002 3002 struct intel_crtc_state *new_crtc_state) 3003 3003 { ··· 3014 3002 struct skl_pipe_wm wm; 3015 3003 } *hw; 3016 3004 const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal; 3017 - int level, max_level = ilk_wm_max_level(i915); 3018 3005 struct intel_plane *plane; 3019 3006 u8 hw_enabled_slices; 3007 + int level; 3020 3008 3021 3009 if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active) 3022 3010 return; ··· 3043 3031 const struct skl_wm_level *hw_wm_level, *sw_wm_level; 3044 3032 3045 3033 /* Watermarks */ 3046 - for (level = 0; level <= max_level; level++) { 3034 + for (level = 0; level < i915->display.wm.num_levels; level++) { 3047 3035 hw_wm_level = &hw->wm.planes[plane->id].wm[level]; 3048 3036 sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); 3049 3037 ··· 3165 3153 3166 3154 static void 3167 3155 adjust_wm_latency(struct drm_i915_private *i915, 3168 - u16 wm[], int max_level, int read_latency) 3156 + u16 wm[], int num_levels, int read_latency) 3169 3157 { 3170 3158 bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed; 3171 3159 int i, level; ··· 3175 3163 * need to be disabled. We make sure to sanitize the values out 3176 3164 * of the punit to satisfy this requirement. 
3177 3165 */ 3178 - for (level = 1; level <= max_level; level++) { 3166 + for (level = 1; level < num_levels; level++) { 3179 3167 if (wm[level] == 0) { 3180 - for (i = level + 1; i <= max_level; i++) 3168 + for (i = level + 1; i < num_levels; i++) 3181 3169 wm[i] = 0; 3182 3170 3183 - max_level = level - 1; 3171 + num_levels = level; 3184 3172 break; 3185 3173 } 3186 3174 } ··· 3193 3181 * from the punit when level 0 response data is 0us. 3194 3182 */ 3195 3183 if (wm[0] == 0) { 3196 - for (level = 0; level <= max_level; level++) 3184 + for (level = 0; level < num_levels; level++) 3197 3185 wm[level] += read_latency; 3198 3186 } 3199 3187 ··· 3209 3197 3210 3198 static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) 3211 3199 { 3212 - int max_level = ilk_wm_max_level(i915); 3200 + int num_levels = i915->display.wm.num_levels; 3213 3201 u32 val; 3214 3202 3215 3203 val = intel_de_read(i915, MTL_LATENCY_LP0_LP1); ··· 3224 3212 wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); 3225 3213 wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); 3226 3214 3227 - adjust_wm_latency(i915, wm, max_level, 6); 3215 + adjust_wm_latency(i915, wm, num_levels, 6); 3228 3216 } 3229 3217 3230 3218 static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) 3231 3219 { 3232 - int max_level = ilk_wm_max_level(i915); 3220 + int num_levels = i915->display.wm.num_levels; 3233 3221 int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2; 3234 3222 int mult = IS_DG2(i915) ? 
2 : 1; 3235 3223 u32 val; ··· 3261 3249 wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; 3262 3250 wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; 3263 3251 3264 - adjust_wm_latency(i915, wm, max_level, read_latency); 3252 + adjust_wm_latency(i915, wm, num_levels, read_latency); 3265 3253 } 3266 3254 3267 3255 static void skl_setup_wm_latency(struct drm_i915_private *i915) 3268 3256 { 3257 + if (HAS_HW_SAGV_WM(i915)) 3258 + i915->display.wm.num_levels = 6; 3259 + else 3260 + i915->display.wm.num_levels = 8; 3261 + 3269 3262 if (DISPLAY_VER(i915) >= 14) 3270 3263 mtl_read_wm_latency(i915, i915->display.wm.skl_latency); 3271 3264 else ··· 3281 3264 3282 3265 static const struct intel_wm_funcs skl_wm_funcs = { 3283 3266 .compute_global_watermarks = skl_compute_wm, 3267 + .get_hw_state = skl_wm_get_hw_state_and_sanitize, 3284 3268 }; 3285 3269 3286 3270 void skl_wm_init(struct drm_i915_private *i915) ··· 3559 3541 .write = skl_watermark_ipc_status_write 3560 3542 }; 3561 3543 3562 - void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915) 3544 + static int intel_sagv_status_show(struct seq_file *m, void *unused) 3545 + { 3546 + struct drm_i915_private *i915 = m->private; 3547 + static const char * const sagv_status[] = { 3548 + [I915_SAGV_UNKNOWN] = "unknown", 3549 + [I915_SAGV_DISABLED] = "disabled", 3550 + [I915_SAGV_ENABLED] = "enabled", 3551 + [I915_SAGV_NOT_CONTROLLED] = "not controlled", 3552 + }; 3553 + 3554 + seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915))); 3555 + seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]); 3556 + seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us); 3557 + 3558 + return 0; 3559 + } 3560 + 3561 + DEFINE_SHOW_ATTRIBUTE(intel_sagv_status); 3562 + 3563 + void skl_watermark_debugfs_register(struct drm_i915_private *i915) 3563 3564 { 3564 3565 struct drm_minor *minor = i915->drm.primary; 3565 3566 3566 - if 
(!HAS_IPC(i915)) 3567 - return; 3567 + if (HAS_IPC(i915)) 3568 + debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915, 3569 + &skl_watermark_ipc_status_fops); 3568 3570 3569 - debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915, 3570 - &skl_watermark_ipc_status_fops); 3571 + if (HAS_SAGV(i915)) 3572 + debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915, 3573 + &intel_sagv_status_fops); 3571 3574 }
+2 -5
drivers/gpu/drm/i915/display/skl_watermark.h
··· 10 10 11 11 #include "intel_display_limits.h" 12 12 #include "intel_global_state.h" 13 - #include "intel_pm_types.h" 13 + #include "intel_wm_types.h" 14 14 15 15 struct drm_i915_private; 16 16 struct intel_atomic_state; ··· 38 38 const struct skl_ddb_entry *entries, 39 39 int num_entries, int ignore_idx); 40 40 41 - void skl_wm_get_hw_state(struct drm_i915_private *i915); 42 - void skl_wm_sanitize(struct drm_i915_private *i915); 43 - 44 41 void intel_wm_state_verify(struct intel_crtc *crtc, 45 42 struct intel_crtc_state *new_crtc_state); 46 43 47 44 void skl_watermark_ipc_init(struct drm_i915_private *i915); 48 45 void skl_watermark_ipc_update(struct drm_i915_private *i915); 49 46 bool skl_watermark_ipc_enabled(struct drm_i915_private *i915); 50 - void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915); 47 + void skl_watermark_debugfs_register(struct drm_i915_private *i915); 51 48 52 49 void skl_wm_init(struct drm_i915_private *i915); 53 50
+46 -110
drivers/gpu/drm/i915/display/vlv_dsi.c
··· 331 331 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 332 332 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 333 333 enum port port; 334 - u32 tmp; 335 334 bool cold_boot = false; 336 335 337 336 /* Set the MIPI mode 338 337 * If MIPI_Mode is off, then writing to LP_Wake bit is not reflecting. 339 338 * Power ON MIPI IO first and then write into IO reset and LP wake bits 340 339 */ 341 - for_each_dsi_port(port, intel_dsi->ports) { 342 - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); 343 - intel_de_write(dev_priv, MIPI_CTRL(port), 344 - tmp | GLK_MIPIIO_ENABLE); 345 - } 340 + for_each_dsi_port(port, intel_dsi->ports) 341 + intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE); 346 342 347 343 /* Put the IO into reset */ 348 - tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); 349 - tmp &= ~GLK_MIPIIO_RESET_RELEASED; 350 - intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp); 344 + intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); 351 345 352 346 /* Program LP Wake */ 353 347 for_each_dsi_port(port, intel_dsi->ports) { 354 - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); 355 - if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) 356 - tmp &= ~GLK_LP_WAKE; 357 - else 358 - tmp |= GLK_LP_WAKE; 359 - intel_de_write(dev_priv, MIPI_CTRL(port), tmp); 348 + u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); 349 + intel_de_rmw(dev_priv, MIPI_CTRL(port), 350 + GLK_LP_WAKE, (tmp & DEVICE_READY) ? 
GLK_LP_WAKE : 0); 360 351 } 361 352 362 353 /* Wait for Pwr ACK */ ··· 371 380 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 372 381 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 373 382 enum port port; 374 - u32 val; 375 383 376 384 /* Wait for MIPI PHY status bit to set */ 377 385 for_each_dsi_port(port, intel_dsi->ports) { ··· 380 390 } 381 391 382 392 /* Get IO out of reset */ 383 - val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); 384 - intel_de_write(dev_priv, MIPI_CTRL(PORT_A), 385 - val | GLK_MIPIIO_RESET_RELEASED); 393 + intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED); 386 394 387 395 /* Get IO out of Low power state*/ 388 396 for_each_dsi_port(port, intel_dsi->ports) { 389 397 if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) { 390 - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); 391 - val &= ~ULPS_STATE_MASK; 392 - val |= DEVICE_READY; 393 - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); 398 + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), 399 + ULPS_STATE_MASK, DEVICE_READY); 394 400 usleep_range(10, 15); 395 401 } else { 396 402 /* Enter ULPS */ 397 - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); 398 - val &= ~ULPS_STATE_MASK; 399 - val |= (ULPS_STATE_ENTER | DEVICE_READY); 400 - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); 403 + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), 404 + ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY); 401 405 402 406 /* Wait for ULPS active */ 403 407 if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), ··· 399 415 drm_err(&dev_priv->drm, "ULPS not active\n"); 400 416 401 417 /* Exit ULPS */ 402 - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); 403 - val &= ~ULPS_STATE_MASK; 404 - val |= (ULPS_STATE_EXIT | DEVICE_READY); 405 - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); 418 + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), 419 + ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY); 406 420 
407 421 /* Enter Normal Mode */ 408 - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); 409 - val &= ~ULPS_STATE_MASK; 410 - val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY); 411 - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); 422 + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), 423 + ULPS_STATE_MASK, 424 + ULPS_STATE_NORMAL_OPERATION | DEVICE_READY); 412 425 413 - val = intel_de_read(dev_priv, MIPI_CTRL(port)); 414 - val &= ~GLK_LP_WAKE; 415 - intel_de_write(dev_priv, MIPI_CTRL(port), val); 426 + intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0); 416 427 } 417 428 } 418 429 ··· 439 460 440 461 /* Enable MIPI PHY transparent latch */ 441 462 for_each_dsi_port(port, intel_dsi->ports) { 442 - val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); 443 - intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port), 444 - val | LP_OUTPUT_HOLD); 463 + intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD); 445 464 usleep_range(2000, 2500); 446 465 } 447 466 ··· 459 482 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 460 483 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 461 484 enum port port; 462 - u32 val; 463 485 464 486 drm_dbg_kms(&dev_priv->drm, "\n"); 465 487 ··· 481 505 * Common bit for both MIPI Port A & MIPI Port C 482 506 * No similar bit in MIPI Port C reg 483 507 */ 484 - val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A)); 485 - intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A), 486 - val | LP_OUTPUT_HOLD); 508 + intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD); 487 509 usleep_range(1000, 1500); 488 510 489 511 intel_de_write(dev_priv, MIPI_DEVICE_READY(port), ··· 511 537 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 512 538 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 513 539 enum port port; 514 - u32 val; 515 540 516 541 /* Enter ULPS */ 517 - for_each_dsi_port(port, intel_dsi->ports) { 518 - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); 519 - 
val &= ~ULPS_STATE_MASK; 520 - val |= (ULPS_STATE_ENTER | DEVICE_READY); 521 - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); 522 - } 542 + for_each_dsi_port(port, intel_dsi->ports) 543 + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), 544 + ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY); 523 545 524 546 /* Wait for MIPI PHY status bit to unset */ 525 547 for_each_dsi_port(port, intel_dsi->ports) { ··· 538 568 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 539 569 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 540 570 enum port port; 541 - u32 tmp; 542 571 543 572 /* Put the IO into reset */ 544 - tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); 545 - tmp &= ~GLK_MIPIIO_RESET_RELEASED; 546 - intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp); 573 + intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); 547 574 548 575 /* Wait for MIPI PHY status bit to unset */ 549 576 for_each_dsi_port(port, intel_dsi->ports) { ··· 550 583 } 551 584 552 585 /* Clear MIPI mode */ 553 - for_each_dsi_port(port, intel_dsi->ports) { 554 - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); 555 - tmp &= ~GLK_MIPIIO_ENABLE; 556 - intel_de_write(dev_priv, MIPI_CTRL(port), tmp); 557 - } 586 + for_each_dsi_port(port, intel_dsi->ports) 587 + intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0); 558 588 } 559 589 560 590 static void glk_dsi_clear_device_ready(struct intel_encoder *encoder) ··· 571 607 /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ 572 608 i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? 
573 609 BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); 574 - u32 val; 575 610 576 611 intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 577 612 DEVICE_READY | ULPS_STATE_ENTER); ··· 594 631 drm_err(&dev_priv->drm, "DSI LP not going Low\n"); 595 632 596 633 /* Disable MIPI PHY transparent latch */ 597 - val = intel_de_read(dev_priv, port_ctrl); 598 - intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD); 634 + intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0); 599 635 usleep_range(1000, 1500); 600 636 601 637 intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00); ··· 611 649 enum port port; 612 650 613 651 if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { 614 - u32 temp; 652 + u32 temp = intel_dsi->pixel_overlap; 653 + 615 654 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 616 - for_each_dsi_port(port, intel_dsi->ports) { 617 - temp = intel_de_read(dev_priv, 618 - MIPI_CTRL(port)); 619 - temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK | 620 - intel_dsi->pixel_overlap << 621 - BXT_PIXEL_OVERLAP_CNT_SHIFT; 622 - intel_de_write(dev_priv, MIPI_CTRL(port), 623 - temp); 624 - } 655 + for_each_dsi_port(port, intel_dsi->ports) 656 + intel_de_rmw(dev_priv, MIPI_CTRL(port), 657 + BXT_PIXEL_OVERLAP_CNT_MASK, 658 + temp << BXT_PIXEL_OVERLAP_CNT_SHIFT); 625 659 } else { 626 - temp = intel_de_read(dev_priv, VLV_CHICKEN_3); 627 - temp &= ~PIXEL_OVERLAP_CNT_MASK | 628 - intel_dsi->pixel_overlap << 629 - PIXEL_OVERLAP_CNT_SHIFT; 630 - intel_de_write(dev_priv, VLV_CHICKEN_3, temp); 660 + intel_de_rmw(dev_priv, VLV_CHICKEN_3, 661 + PIXEL_OVERLAP_CNT_MASK, 662 + temp << PIXEL_OVERLAP_CNT_SHIFT); 631 663 } 632 664 } 633 665 ··· 665 709 for_each_dsi_port(port, intel_dsi->ports) { 666 710 i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? 
667 711 BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); 668 - u32 temp; 669 712 670 713 /* de-assert ip_tg_enable signal */ 671 - temp = intel_de_read(dev_priv, port_ctrl); 672 - intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE); 714 + intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0); 673 715 intel_de_posting_read(dev_priv, port_ctrl); 674 716 } 675 717 } ··· 741 787 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 742 788 enum pipe pipe = crtc->pipe; 743 789 enum port port; 744 - u32 val; 745 790 bool glk_cold_boot = false; 746 791 747 792 drm_dbg_kms(&dev_priv->drm, "\n"); ··· 763 810 764 811 if (IS_BROXTON(dev_priv)) { 765 812 /* Add MIPI IO reset programming for modeset */ 766 - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); 767 - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, 768 - val | MIPIO_RST_CTRL); 813 + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL); 769 814 770 815 /* Power up DSI regulator */ 771 816 intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); ··· 771 820 } 772 821 773 822 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 774 - u32 val; 775 - 776 823 /* Disable DPOunit clock gating, can stall pipe */ 777 - val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); 778 - val |= DPOUNIT_CLOCK_GATE_DISABLE; 779 - intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); 824 + intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), 825 + 0, DPOUNIT_CLOCK_GATE_DISABLE); 780 826 } 781 827 782 828 if (!IS_GEMINILAKE(dev_priv)) ··· 897 949 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 898 950 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 899 951 enum port port; 900 - u32 val; 901 952 902 953 drm_dbg_kms(&dev_priv->drm, "\n"); 903 954 ··· 934 987 HS_IO_CTRL_SELECT); 935 988 936 989 /* Add MIPI IO reset programming for modeset */ 937 - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); 938 - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, 939 - val & 
~MIPIO_RST_CTRL); 990 + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0); 940 991 } 941 992 942 993 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 943 994 bxt_dsi_pll_disable(encoder); 944 995 } else { 945 - u32 val; 946 - 947 996 vlv_dsi_pll_disable(encoder); 948 997 949 - val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); 950 - val &= ~DPOUNIT_CLOCK_GATE_DISABLE; 951 - intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); 998 + intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), 999 + DPOUNIT_CLOCK_GATE_DISABLE, 0); 952 1000 } 953 1001 954 1002 /* Assert reset */ ··· 1000 1058 */ 1001 1059 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 1002 1060 port == PORT_C) 1003 - enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; 1061 + enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE; 1004 1062 1005 1063 /* Try command mode if video mode not enabled */ 1006 1064 if (!enabled) { ··· 1374 1432 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 1375 1433 enum pipe pipe = crtc->pipe; 1376 1434 1377 - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); 1378 - tmp &= ~BXT_PIPE_SELECT_MASK; 1379 - 1380 - tmp |= BXT_PIPE_SELECT(pipe); 1381 - intel_de_write(dev_priv, MIPI_CTRL(port), tmp); 1435 + intel_de_rmw(dev_priv, MIPI_CTRL(port), 1436 + BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe)); 1382 1437 } 1383 1438 1384 1439 /* XXX: why here, why like this? handling in irq handler?! 
*/ ··· 1544 1605 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1545 1606 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1546 1607 enum port port; 1547 - u32 val; 1548 1608 1549 1609 if (IS_GEMINILAKE(dev_priv)) 1550 1610 return; ··· 1558 1620 vlv_dsi_reset_clocks(encoder, port); 1559 1621 intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); 1560 1622 1561 - val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)); 1562 - val &= ~VID_MODE_FORMAT_MASK; 1563 - intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val); 1623 + intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0); 1564 1624 1565 1625 intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1); 1566 1626 }
+4 -14
drivers/gpu/drm/i915/display/vlv_dsi_pll.c
··· 302 302 void bxt_dsi_pll_disable(struct intel_encoder *encoder) 303 303 { 304 304 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 305 - u32 val; 306 305 307 306 drm_dbg_kms(&dev_priv->drm, "\n"); 308 307 309 - val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE); 310 - val &= ~BXT_DSI_PLL_DO_ENABLE; 311 - intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val); 308 + intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0); 312 309 313 310 /* 314 311 * PLL lock should deassert within 200us. ··· 539 542 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 540 543 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 541 544 enum port port; 542 - u32 val; 543 545 544 546 drm_dbg_kms(&dev_priv->drm, "\n"); 545 547 ··· 555 559 } 556 560 557 561 /* Enable DSI PLL */ 558 - val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE); 559 - val |= BXT_DSI_PLL_DO_ENABLE; 560 - intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val); 562 + intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE); 561 563 562 564 /* Timeout and fail if PLL not locked */ 563 565 if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE, ··· 583 589 tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port)); 584 590 intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp); 585 591 } else { 586 - tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1); 587 - tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK; 588 - intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp); 592 + intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0); 589 593 590 - tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2); 591 - tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK; 592 - intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp); 594 + intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0); 593 595 } 594 596 intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); 595 597 }
-1
drivers/gpu/drm/i915/gt/intel_gt.c
··· 28 28 #include "intel_migrate.h" 29 29 #include "intel_mocs.h" 30 30 #include "intel_pci_config.h" 31 - #include "intel_pm.h" 32 31 #include "intel_rc6.h" 33 32 #include "intel_renderstate.h" 34 33 #include "intel_rps.h"
-1
drivers/gpu/drm/i915/gt/intel_gt_pm.c
··· 17 17 #include "intel_gt_print.h" 18 18 #include "intel_gt_requests.h" 19 19 #include "intel_llc.h" 20 - #include "intel_pm.h" 21 20 #include "intel_rc6.h" 22 21 #include "intel_rps.h" 23 22 #include "intel_wakeref.h"
-2
drivers/gpu/drm/i915/gt/intel_gt_regs.h
··· 9 9 #include "i915_reg_defs.h" 10 10 #include "display/intel_display_reg_defs.h" /* VLV_DISPLAY_BASE */ 11 11 12 - #define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) }) 13 - 14 12 /* 15 13 * The perf control registers are technically multicast registers, but the 16 14 * driver never needs to read/write them directly; we only use them to build
+1 -1
drivers/gpu/drm/i915/gt/intel_region_lmem.c
··· 158 158 static bool get_legacy_lowmem_region(struct intel_uncore *uncore, 159 159 u64 *start, u32 *size) 160 160 { 161 - if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0)) 161 + if (!IS_DG1(uncore->i915)) 162 162 return false; 163 163 164 164 *start = 0;
-29
drivers/gpu/drm/i915/gt/intel_rps.c
··· 1677 1677 static void vlv_rps_init(struct intel_rps *rps) 1678 1678 { 1679 1679 struct drm_i915_private *i915 = rps_to_i915(rps); 1680 - u32 val; 1681 1680 1682 1681 vlv_iosf_sb_get(i915, 1683 1682 BIT(VLV_IOSF_SB_PUNIT) | ··· 1684 1685 BIT(VLV_IOSF_SB_CCK)); 1685 1686 1686 1687 vlv_init_gpll_ref_freq(rps); 1687 - 1688 - val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 1689 - switch ((val >> 6) & 3) { 1690 - case 0: 1691 - case 1: 1692 - i915->mem_freq = 800; 1693 - break; 1694 - case 2: 1695 - i915->mem_freq = 1066; 1696 - break; 1697 - case 3: 1698 - i915->mem_freq = 1333; 1699 - break; 1700 - } 1701 - drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); 1702 1688 1703 1689 rps->max_freq = vlv_rps_max_freq(rps); 1704 1690 rps->rp0_freq = rps->max_freq; ··· 1711 1727 static void chv_rps_init(struct intel_rps *rps) 1712 1728 { 1713 1729 struct drm_i915_private *i915 = rps_to_i915(rps); 1714 - u32 val; 1715 1730 1716 1731 vlv_iosf_sb_get(i915, 1717 1732 BIT(VLV_IOSF_SB_PUNIT) | ··· 1718 1735 BIT(VLV_IOSF_SB_CCK)); 1719 1736 1720 1737 vlv_init_gpll_ref_freq(rps); 1721 - 1722 - val = vlv_cck_read(i915, CCK_FUSE_REG); 1723 - 1724 - switch ((val >> 2) & 0x7) { 1725 - case 3: 1726 - i915->mem_freq = 2000; 1727 - break; 1728 - default: 1729 - i915->mem_freq = 1600; 1730 - break; 1731 - } 1732 - drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); 1733 1738 1734 1739 rps->max_freq = chv_rps_max_freq(rps); 1735 1740 rps->rp0_freq = rps->max_freq;
+4 -82
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 1470 1470 } 1471 1471 1472 1472 static void 1473 - tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1474 - { 1475 - struct drm_i915_private *i915 = gt->i915; 1476 - 1477 - gen12_gt_workarounds_init(gt, wal); 1478 - 1479 - /* Wa_1409420604:tgl */ 1480 - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) 1481 - wa_mcr_write_or(wal, 1482 - SUBSLICE_UNIT_LEVEL_CLKGATE2, 1483 - CPSSUNIT_CLKGATE_DIS); 1484 - 1485 - /* Wa_1607087056:tgl also know as BUG:1409180338 */ 1486 - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) 1487 - wa_write_or(wal, 1488 - GEN11_SLICE_UNIT_LEVEL_CLKGATE, 1489 - L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); 1490 - 1491 - /* Wa_1408615072:tgl[a0] */ 1492 - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) 1493 - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, 1494 - VSUNIT_CLKGATE_DIS_TGL); 1495 - } 1496 - 1497 - static void 1498 1473 dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1499 1474 { 1500 1475 struct drm_i915_private *i915 = gt->i915; 1501 1476 1502 1477 gen12_gt_workarounds_init(gt, wal); 1503 - 1504 - /* Wa_1607087056:dg1 */ 1505 - if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) 1506 - wa_write_or(wal, 1507 - GEN11_SLICE_UNIT_LEVEL_CLKGATE, 1508 - L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); 1509 1478 1510 1479 /* Wa_1409420604:dg1 */ 1511 1480 if (IS_DG1(i915)) ··· 1748 1779 xehpsdv_gt_workarounds_init(gt, wal); 1749 1780 else if (IS_DG1(i915)) 1750 1781 dg1_gt_workarounds_init(gt, wal); 1751 - else if (IS_TIGERLAKE(i915)) 1752 - tgl_gt_workarounds_init(gt, wal); 1753 1782 else if (GRAPHICS_VER(i915) == 12) 1754 1783 gen12_gt_workarounds_init(gt, wal); 1755 1784 else if (GRAPHICS_VER(i915) == 11) ··· 2160 2193 } 2161 2194 } 2162 2195 2163 - static void dg1_whitelist_build(struct intel_engine_cs *engine) 2164 - { 2165 - struct i915_wa_list *w = &engine->whitelist; 2166 - 2167 - tgl_whitelist_build(engine); 2168 - 2169 - /* GEN:BUG:1409280441:dg1 */ 2170 - if 
(IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) && 2171 - (engine->class == RENDER_CLASS || 2172 - engine->class == COPY_ENGINE_CLASS)) 2173 - whitelist_reg_ext(w, RING_ID(engine->mmio_base), 2174 - RING_FORCE_TO_NONPRIV_ACCESS_RD); 2175 - } 2176 - 2177 2196 static void xehpsdv_whitelist_build(struct intel_engine_cs *engine) 2178 2197 { 2179 2198 allow_read_ctx_timestamp(engine); ··· 2239 2286 dg2_whitelist_build(engine); 2240 2287 else if (IS_XEHPSDV(i915)) 2241 2288 xehpsdv_whitelist_build(engine); 2242 - else if (IS_DG1(i915)) 2243 - dg1_whitelist_build(engine); 2244 2289 else if (GRAPHICS_VER(i915) == 12) 2245 2290 tgl_whitelist_build(engine); 2246 2291 else if (GRAPHICS_VER(i915) == 11) ··· 2433 2482 true); 2434 2483 } 2435 2484 2436 - if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || 2437 - IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { 2438 - /* 2439 - * Wa_1607138336:tgl[a0],dg1[a0] 2440 - * Wa_1607063988:tgl[a0],dg1[a0] 2441 - */ 2442 - wa_write_or(wal, 2443 - GEN9_CTX_PREEMPT_REG, 2444 - GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); 2445 - } 2446 - 2447 - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { 2448 - /* 2449 - * Wa_1606679103:tgl 2450 - * (see also Wa_1606682166:icl) 2451 - */ 2452 - wa_write_or(wal, 2453 - GEN7_SARCHKMD, 2454 - GEN7_DISABLE_SAMPLER_PREFETCH); 2455 - } 2456 - 2457 2485 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) || 2458 2486 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { 2459 2487 /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */ ··· 2462 2532 } 2463 2533 2464 2534 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || 2465 - IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || 2466 2535 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { 2467 - /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */ 2536 + /* Wa_1409804808 */ 2468 2537 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, 2469 2538 GEN12_PUSH_CONST_DEREF_HOLD_DIS); 2470 2539 2471 - /* 2472 - * Wa_1409085225:tgl 2473 - * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p 2474 - */ 
2540 + /* Wa_14010229206 */ 2475 2541 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); 2476 2542 } 2477 2543 2478 - if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || 2479 - IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) { 2544 + if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) { 2480 2545 /* 2481 - * Wa_1607030317:tgl 2482 - * Wa_1607186500:tgl 2483 - * Wa_1607297627:tgl,rkl,dg1[a0],adlp 2546 + * Wa_1607297627 2484 2547 * 2485 2548 * On TGL and RKL there are multiple entries for this WA in the 2486 2549 * BSpec; some indicate this is an A0-only WA, others indicate 2487 2550 * it applies to all steppings so we trust the "all steppings." 2488 - * For DG1 this only applies to A0. 2489 2551 */ 2490 2552 wa_masked_en(wal, 2491 2553 RING_PSMI_CTL(RENDER_RING_BASE),
-1
drivers/gpu/drm/i915/gt/selftest_llc.c
··· 3 3 * Copyright © 2019 Intel Corporation 4 4 */ 5 5 6 - #include "intel_pm.h" /* intel_gpu_freq() */ 7 6 #include "selftest_llc.h" 8 7 #include "intel_rps.h" 9 8
+8 -8
drivers/gpu/drm/i915/gvt/display.c
··· 63 63 { 64 64 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; 65 65 66 - if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE)) 66 + if (!(vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_EDP)) & TRANSCONF_ENABLE)) 67 67 return 0; 68 68 69 69 if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE)) ··· 79 79 pipe < PIPE_A || pipe >= I915_MAX_PIPES)) 80 80 return -EINVAL; 81 81 82 - if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE) 82 + if (vgpu_vreg_t(vgpu, TRANSCONF(pipe)) & TRANSCONF_ENABLE) 83 83 return 1; 84 84 85 85 if (edp_pipe_is_enabled(vgpu) && ··· 187 187 GEN8_DE_PORT_HOTPLUG(HPD_PORT_C)); 188 188 189 189 for_each_pipe(dev_priv, pipe) { 190 - vgpu_vreg_t(vgpu, PIPECONF(pipe)) &= 191 - ~(PIPECONF_ENABLE | PIPECONF_STATE_ENABLE); 190 + vgpu_vreg_t(vgpu, TRANSCONF(pipe)) &= 191 + ~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE); 192 192 vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE; 193 193 vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; 194 194 vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; ··· 248 248 * TRANSCODER_A can be enabled. PORT_x depends on the input of 249 249 * setup_virtual_dp_monitor. 
250 250 */ 251 - vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; 252 - vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_STATE_ENABLE; 251 + vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE; 252 + vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE; 253 253 254 254 /* 255 255 * Golden M/N are calculated based on: ··· 506 506 vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; 507 507 } 508 508 509 - vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; 509 + vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE; 510 510 } 511 511 512 512 static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) ··· 584 584 * @turnon: Turn ON/OFF vblank_timer 585 585 * 586 586 * This function is used to turn on/off or update the per-vGPU vblank_timer 587 - * when PIPECONF is enabled or disabled. vblank_timer period is also updated 587 + * when TRANSCONF is enabled or disabled. vblank_timer period is also updated 588 588 * if guest changed the refresh rate. 589 589 * 590 590 */
+9 -9
drivers/gpu/drm/i915/gvt/handlers.c
··· 666 666 link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)); 667 667 668 668 /* Get H/V total from transcoder timing */ 669 - htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT); 670 - vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT); 669 + htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT); 670 + vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT); 671 671 672 672 if (dp_br && link_n && htotal && vtotal) { 673 673 u64 pixel_clk = 0; ··· 697 697 write_vreg(vgpu, offset, p_data, bytes); 698 698 data = vgpu_vreg(vgpu, offset); 699 699 700 - if (data & PIPECONF_ENABLE) { 701 - vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE; 700 + if (data & TRANSCONF_ENABLE) { 701 + vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE; 702 702 vgpu_update_refresh_rate(vgpu); 703 703 vgpu_update_vblank_emulation(vgpu, true); 704 704 } else { 705 - vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE; 705 + vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE; 706 706 vgpu_update_vblank_emulation(vgpu, false); 707 707 } 708 708 return 0; ··· 2262 2262 MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2263 2263 2264 2264 /* display */ 2265 - MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write); 2266 - MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write); 2267 - MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write); 2268 - MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write); 2265 + MMIO_DH(TRANSCONF(TRANSCODER_A), D_ALL, NULL, pipeconf_mmio_write); 2266 + MMIO_DH(TRANSCONF(TRANSCODER_B), D_ALL, NULL, pipeconf_mmio_write); 2267 + MMIO_DH(TRANSCONF(TRANSCODER_C), D_ALL, NULL, pipeconf_mmio_write); 2268 + MMIO_DH(TRANSCONF(TRANSCODER_EDP), D_ALL, NULL, pipeconf_mmio_write); 2269 2269 MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); 2270 2270 MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL, 2271 2271 
reg50080_mmio_write);
-1
drivers/gpu/drm/i915/i915_debugfs.c
··· 52 52 #include "i915_irq.h" 53 53 #include "i915_scheduler.h" 54 54 #include "intel_mchbar_regs.h" 55 - #include "intel_pm.h" 56 55 57 56 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) 58 57 {
+26 -16
drivers/gpu/drm/i915/i915_driver.c
··· 167 167 pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1; 168 168 pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3; 169 169 pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7; 170 + pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1; 171 + pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1; 170 172 171 173 if (pre) { 172 174 drm_err(&dev_priv->drm, "This is a pre-production stepping. " ··· 250 248 /* This must be called before any calls to HAS_PCH_* */ 251 249 intel_detect_pch(dev_priv); 252 250 253 - intel_pm_setup(dev_priv); 254 - ret = intel_power_domains_init(dev_priv); 255 - if (ret < 0) 256 - goto err_gem; 257 251 intel_irq_init(dev_priv); 258 252 intel_init_display_hooks(dev_priv); 259 253 intel_init_clock_gating_hooks(dev_priv); ··· 258 260 259 261 return 0; 260 262 261 - err_gem: 262 - i915_gem_cleanup_early(dev_priv); 263 - intel_gt_driver_late_release_all(dev_priv); 264 - i915_drm_clients_fini(&dev_priv->clients); 265 263 err_rootgt: 266 264 intel_region_ttm_device_fini(dev_priv); 267 265 err_ttm: ··· 930 936 */ 931 937 static void i915_driver_lastclose(struct drm_device *dev) 932 938 { 933 - intel_fbdev_restore_mode(dev); 939 + struct drm_i915_private *i915 = to_i915(dev); 940 + 941 + intel_fbdev_restore_mode(i915); 934 942 935 943 vga_switcheroo_process_delayed_switch(); 936 944 } ··· 998 1002 intel_suspend_encoders(i915); 999 1003 intel_shutdown_encoders(i915); 1000 1004 1001 - intel_dmc_ucode_suspend(i915); 1005 + intel_dmc_suspend(i915); 1002 1006 1003 1007 i915_gem_suspend(i915); 1004 1008 ··· 1026 1030 return true; 1027 1031 #endif 1028 1032 return false; 1033 + } 1034 + 1035 + static void i915_drm_complete(struct drm_device *dev) 1036 + { 1037 + struct drm_i915_private *i915 = to_i915(dev); 1038 + 1039 + intel_pxp_resume_complete(i915->pxp); 1029 1040 } 1030 1041 1031 1042 static int i915_drm_prepare(struct drm_device *dev) ··· 1075 1072 1076 1073 intel_suspend_encoders(dev_priv); 1077 1074 1078 - 
intel_suspend_hw(dev_priv); 1079 - 1080 1075 /* Must be called before GGTT is suspended. */ 1081 1076 intel_dpt_suspend(dev_priv); 1082 1077 i915_ggtt_suspend(to_gt(dev_priv)->ggtt); ··· 1088 1087 1089 1088 dev_priv->suspend_count++; 1090 1089 1091 - intel_dmc_ucode_suspend(dev_priv); 1090 + intel_dmc_suspend(dev_priv); 1092 1091 1093 1092 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1094 1093 ··· 1209 1208 /* Must be called after GGTT is resumed. */ 1210 1209 intel_dpt_resume(dev_priv); 1211 1210 1212 - intel_dmc_ucode_resume(dev_priv); 1211 + intel_dmc_resume(dev_priv); 1213 1212 1214 1213 i915_restore_display(dev_priv); 1215 1214 intel_pps_unlock_regs_wa(dev_priv); ··· 1232 1231 drm_mode_config_reset(dev); 1233 1232 1234 1233 i915_gem_resume(dev_priv); 1235 - 1236 - intel_pxp_resume(dev_priv->pxp); 1237 1234 1238 1235 intel_modeset_init_hw(dev_priv); 1239 1236 intel_init_clock_gating(dev_priv); ··· 1422 1423 return 0; 1423 1424 1424 1425 return i915_drm_resume(&i915->drm); 1426 + } 1427 + 1428 + static void i915_pm_complete(struct device *kdev) 1429 + { 1430 + struct drm_i915_private *i915 = kdev_to_i915(kdev); 1431 + 1432 + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) 1433 + return; 1434 + 1435 + i915_drm_complete(&i915->drm); 1425 1436 } 1426 1437 1427 1438 /* freeze: before creating the hibernation_image */ ··· 1654 1645 .suspend_late = i915_pm_suspend_late, 1655 1646 .resume_early = i915_pm_resume_early, 1656 1647 .resume = i915_pm_resume, 1648 + .complete = i915_pm_complete, 1657 1649 1658 1650 /* 1659 1651 * S4 event handlers
+4 -14
drivers/gpu/drm/i915/i915_drv.h
··· 580 580 IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N) 581 581 #define IS_ADLP_RPLP(dev_priv) \ 582 582 IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL) 583 + #define IS_ADLP_RPLU(dev_priv) \ 584 + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU) 583 585 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ 584 586 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) 585 587 #define IS_BDW_ULT(dev_priv) \ ··· 655 653 (IS_TIGERLAKE(__i915) && \ 656 654 IS_DISPLAY_STEP(__i915, since, until)) 657 655 658 - #define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \ 659 - (IS_TGL_UY(__i915) && \ 660 - IS_GRAPHICS_STEP(__i915, since, until)) 661 - 662 - #define IS_TGL_GRAPHICS_STEP(__i915, since, until) \ 663 - (IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915)) && \ 664 - IS_GRAPHICS_STEP(__i915, since, until)) 665 - 666 656 #define IS_RKL_DISPLAY_STEP(p, since, until) \ 667 657 (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until)) 668 - 669 - #define IS_DG1_GRAPHICS_STEP(p, since, until) \ 670 - (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until)) 671 - #define IS_DG1_DISPLAY_STEP(p, since, until) \ 672 - (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until)) 673 658 674 659 #define IS_ADLS_DISPLAY_STEP(__i915, since, until) \ 675 660 (IS_ALDERLAKE_S(__i915) && \ ··· 865 876 */ 866 877 #define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages) 867 878 868 - #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) 879 + #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) 880 + #define HAS_SAGV(dev_priv) (DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv)) 869 881 870 882 #define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i)) 871 883 #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
+26 -25
drivers/gpu/drm/i915/i915_hwmon.c
··· 99 99 return mul_u64_u32_shr(reg_value, scale_factor, nshift); 100 100 } 101 101 102 - static void 103 - hwm_field_scale_and_write(struct hwm_drvdata *ddat, i915_reg_t rgadr, 104 - int nshift, unsigned int scale_factor, long lval) 105 - { 106 - u32 nval; 107 - 108 - /* Computation in 64-bits to avoid overflow. Round to nearest. */ 109 - nval = DIV_ROUND_CLOSEST_ULL((u64)lval << nshift, scale_factor); 110 - 111 - hwm_locked_with_pm_intel_uncore_rmw(ddat, rgadr, 112 - PKG_PWR_LIM_1, 113 - REG_FIELD_PREP(PKG_PWR_LIM_1, nval)); 114 - } 115 - 116 102 /* 117 103 * hwm_energy - Obtain energy value 118 104 * ··· 218 232 /* val in hw units */ 219 233 val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME); 220 234 /* Convert to 1.x * power(2,y) */ 221 - if (!val) 222 - return -EINVAL; 223 - y = ilog2(val); 224 - /* x = (val - (1 << y)) >> (y - 2); */ 225 - x = (val - (1ul << y)) << x_w >> y; 235 + if (!val) { 236 + /* Avoid ilog2(0) */ 237 + y = 0; 238 + x = 0; 239 + } else { 240 + y = ilog2(val); 241 + /* x = (val - (1 << y)) >> (y - 2); */ 242 + x = (val - (1ul << y)) << x_w >> y; 243 + } 226 244 227 245 rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y); 228 246 ··· 382 392 } 383 393 384 394 static int 395 + hwm_power_max_write(struct hwm_drvdata *ddat, long val) 396 + { 397 + struct i915_hwmon *hwmon = ddat->hwmon; 398 + u32 nval; 399 + 400 + /* Computation in 64-bits to avoid overflow. Round to nearest. 
*/ 401 + nval = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_power, SF_POWER); 402 + nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval); 403 + 404 + hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit, 405 + PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, 406 + nval); 407 + return 0; 408 + } 409 + 410 + static int 385 411 hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val) 386 412 { 387 413 struct i915_hwmon *hwmon = ddat->hwmon; ··· 431 425 static int 432 426 hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val) 433 427 { 434 - struct i915_hwmon *hwmon = ddat->hwmon; 435 428 u32 uval; 436 429 437 430 switch (attr) { 438 431 case hwmon_power_max: 439 - hwm_field_scale_and_write(ddat, 440 - hwmon->rg.pkg_rapl_limit, 441 - hwmon->scl_shift_power, 442 - SF_POWER, val); 443 - return 0; 432 + return hwm_power_max_write(ddat, val); 444 433 case hwmon_power_crit: 445 434 uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER); 446 435 return hwm_pcode_write_i1(ddat->uncore->i915, uval);
+82 -60
drivers/gpu/drm/i915/i915_irq.c
··· 52 52 #include "i915_driver.h" 53 53 #include "i915_drv.h" 54 54 #include "i915_irq.h" 55 - #include "intel_pm.h" 56 55 57 56 /** 58 57 * DOC: interrupt handling ··· 80 81 } 81 82 82 83 typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val); 83 - typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915, 84 - enum hpd_pin pin); 84 + typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder); 85 85 86 86 static const u32 hpd_ilk[HPD_NUM_PINS] = { 87 87 [HPD_PORT_A] = DE_DP_A_HOTPLUG, ··· 197 199 hpd->hpd = hpd_gen11; 198 200 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 199 201 hpd->hpd = hpd_bxt; 202 + else if (DISPLAY_VER(dev_priv) == 9) 203 + hpd->hpd = NULL; /* no north HPD on SKL */ 200 204 else if (DISPLAY_VER(dev_priv) >= 8) 201 205 hpd->hpd = hpd_bdw; 202 206 else if (DISPLAY_VER(dev_priv) >= 7) ··· 884 884 u32 hotplug = 0; 885 885 886 886 for_each_intel_encoder(&i915->drm, encoder) 887 - hotplug |= hotplug_enables(i915, encoder->hpd_pin); 887 + hotplug |= hotplug_enables(encoder); 888 888 889 889 return hotplug; 890 890 } ··· 2835 2835 spin_unlock_irq(&dev_priv->irq_lock); 2836 2836 } 2837 2837 2838 - static u32 ibx_hotplug_enables(struct drm_i915_private *i915, 2839 - enum hpd_pin pin) 2838 + static u32 ibx_hotplug_enables(struct intel_encoder *encoder) 2840 2839 { 2841 - switch (pin) { 2840 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2841 + 2842 + switch (encoder->hpd_pin) { 2842 2843 case HPD_PORT_A: 2843 2844 /* 2844 2845 * When CPU and PCH are on the same package, port A ··· 2891 2890 ibx_hpd_detection_setup(dev_priv); 2892 2891 } 2893 2892 2894 - static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915, 2895 - enum hpd_pin pin) 2893 + static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder) 2896 2894 { 2897 - switch (pin) { 2895 + switch (encoder->hpd_pin) { 2898 2896 case HPD_PORT_A: 2899 2897 case HPD_PORT_B: 2900 2898 case HPD_PORT_C: 2901 2899 case HPD_PORT_D: 2902 - 
return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin); 2900 + return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin); 2903 2901 default: 2904 2902 return 0; 2905 2903 } 2906 2904 } 2907 2905 2908 - static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915, 2909 - enum hpd_pin pin) 2906 + static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder) 2910 2907 { 2911 - switch (pin) { 2908 + switch (encoder->hpd_pin) { 2912 2909 case HPD_PORT_TC1: 2913 2910 case HPD_PORT_TC2: 2914 2911 case HPD_PORT_TC3: 2915 2912 case HPD_PORT_TC4: 2916 2913 case HPD_PORT_TC5: 2917 2914 case HPD_PORT_TC6: 2918 - return ICP_TC_HPD_ENABLE(pin); 2915 + return ICP_TC_HPD_ENABLE(encoder->hpd_pin); 2919 2916 default: 2920 2917 return 0; 2921 2918 } ··· 2957 2958 icp_tc_hpd_detection_setup(dev_priv); 2958 2959 } 2959 2960 2960 - static u32 gen11_hotplug_enables(struct drm_i915_private *i915, 2961 - enum hpd_pin pin) 2961 + static u32 gen11_hotplug_enables(struct intel_encoder *encoder) 2962 2962 { 2963 - switch (pin) { 2963 + switch (encoder->hpd_pin) { 2964 2964 case HPD_PORT_TC1: 2965 2965 case HPD_PORT_TC2: 2966 2966 case HPD_PORT_TC3: 2967 2967 case HPD_PORT_TC4: 2968 2968 case HPD_PORT_TC5: 2969 2969 case HPD_PORT_TC6: 2970 - return GEN11_HOTPLUG_CTL_ENABLE(pin); 2970 + return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin); 2971 2971 default: 2972 2972 return 0; 2973 2973 } ··· 3029 3031 icp_hpd_irq_setup(dev_priv); 3030 3032 } 3031 3033 3032 - static u32 spt_hotplug_enables(struct drm_i915_private *i915, 3033 - enum hpd_pin pin) 3034 + static u32 spt_hotplug_enables(struct intel_encoder *encoder) 3034 3035 { 3035 - switch (pin) { 3036 + switch (encoder->hpd_pin) { 3036 3037 case HPD_PORT_A: 3037 3038 return PORTA_HOTPLUG_ENABLE; 3038 3039 case HPD_PORT_B: ··· 3045 3048 } 3046 3049 } 3047 3050 3048 - static u32 spt_hotplug2_enables(struct drm_i915_private *i915, 3049 - enum hpd_pin pin) 3051 + static u32 spt_hotplug2_enables(struct intel_encoder *encoder) 3050 3052 { 3051 - switch (pin) { 3053 + 
switch (encoder->hpd_pin) { 3052 3054 case HPD_PORT_E: 3053 3055 return PORTE_HOTPLUG_ENABLE; 3054 3056 default: ··· 3090 3094 spt_hpd_detection_setup(dev_priv); 3091 3095 } 3092 3096 3093 - static u32 ilk_hotplug_enables(struct drm_i915_private *i915, 3094 - enum hpd_pin pin) 3097 + static u32 ilk_hotplug_enables(struct intel_encoder *encoder) 3095 3098 { 3096 - switch (pin) { 3099 + switch (encoder->hpd_pin) { 3097 3100 case HPD_PORT_A: 3098 3101 return DIGITAL_PORTA_HOTPLUG_ENABLE | 3099 3102 DIGITAL_PORTA_PULSE_DURATION_2ms; ··· 3130 3135 ibx_hpd_irq_setup(dev_priv); 3131 3136 } 3132 3137 3133 - static u32 bxt_hotplug_enables(struct drm_i915_private *i915, 3134 - enum hpd_pin pin) 3138 + static u32 bxt_hotplug_enables(struct intel_encoder *encoder) 3135 3139 { 3136 3140 u32 hotplug; 3137 3141 3138 - switch (pin) { 3142 + switch (encoder->hpd_pin) { 3139 3143 case HPD_PORT_A: 3140 3144 hotplug = PORTA_HOTPLUG_ENABLE; 3141 - if (intel_bios_is_port_hpd_inverted(i915, PORT_A)) 3145 + if (intel_bios_encoder_hpd_invert(encoder->devdata)) 3142 3146 hotplug |= BXT_DDIA_HPD_INVERT; 3143 3147 return hotplug; 3144 3148 case HPD_PORT_B: 3145 3149 hotplug = PORTB_HOTPLUG_ENABLE; 3146 - if (intel_bios_is_port_hpd_inverted(i915, PORT_B)) 3150 + if (intel_bios_encoder_hpd_invert(encoder->devdata)) 3147 3151 hotplug |= BXT_DDIB_HPD_INVERT; 3148 3152 return hotplug; 3149 3153 case HPD_PORT_C: 3150 3154 hotplug = PORTC_HOTPLUG_ENABLE; 3151 - if (intel_bios_is_port_hpd_inverted(i915, PORT_C)) 3155 + if (intel_bios_encoder_hpd_invert(encoder->devdata)) 3152 3156 hotplug |= BXT_DDIC_HPD_INVERT; 3153 3157 return hotplug; 3154 3158 default: ··· 3465 3471 dev_priv->irq_mask = ~0u; 3466 3472 } 3467 3473 3474 + static u32 i9xx_error_mask(struct drm_i915_private *i915) 3475 + { 3476 + /* 3477 + * On gen2/3 FBC generates (seemingly spurious) 3478 + * display INVALID_GTT/INVALID_GTT_PTE table errors. 
3479 + * 3480 + * Also gen3 bspec has this to say: 3481 + * "DISPA_INVALID_GTT_PTE 3482 + " [DevNapa] : Reserved. This bit does not reflect the page 3483 + " table error for the display plane A." 3484 + * 3485 + * Unfortunately we can't mask off individual PGTBL_ER bits, 3486 + * so we just have to mask off all page table errors via EMR. 3487 + */ 3488 + if (HAS_FBC(i915)) 3489 + return ~I915_ERROR_MEMORY_REFRESH; 3490 + else 3491 + return ~(I915_ERROR_PAGE_TABLE | 3492 + I915_ERROR_MEMORY_REFRESH); 3493 + } 3494 + 3468 3495 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) 3469 3496 { 3470 3497 struct intel_uncore *uncore = &dev_priv->uncore; 3471 3498 u16 enable_mask; 3472 3499 3473 - intel_uncore_write16(uncore, 3474 - EMR, 3475 - ~(I915_ERROR_PAGE_TABLE | 3476 - I915_ERROR_MEMORY_REFRESH)); 3500 + intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv)); 3477 3501 3478 3502 /* Unmask the interrupts that we always want on. */ 3479 3503 dev_priv->irq_mask = ··· 3522 3510 u16 emr; 3523 3511 3524 3512 *eir = intel_uncore_read16(uncore, EIR); 3525 - 3526 - if (*eir) 3527 - intel_uncore_write16(uncore, EIR, *eir); 3513 + intel_uncore_write16(uncore, EIR, *eir); 3528 3514 3529 3515 *eir_stuck = intel_uncore_read16(uncore, EIR); 3530 3516 if (*eir_stuck == 0) ··· 3551 3541 if (eir_stuck) 3552 3542 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n", 3553 3543 eir_stuck); 3544 + 3545 + drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n", 3546 + intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); 3554 3547 } 3555 3548 3556 3549 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, ··· 3561 3548 { 3562 3549 u32 emr; 3563 3550 3564 - *eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0); 3551 + *eir = intel_uncore_read(&dev_priv->uncore, EIR); 3552 + intel_uncore_write(&dev_priv->uncore, EIR, *eir); 3565 3553 3566 3554 *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR); 3567 3555 if (*eir_stuck == 0) ··· 3578 3564 * (or by a GPU reset) so we 
mask any bit that 3579 3565 * remains set. 3580 3566 */ 3581 - emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff); 3567 + emr = intel_uncore_read(&dev_priv->uncore, EMR); 3568 + intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff); 3582 3569 intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck); 3583 3570 } 3584 3571 ··· 3591 3576 if (eir_stuck) 3592 3577 drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n", 3593 3578 eir_stuck); 3579 + 3580 + drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n", 3581 + intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); 3594 3582 } 3595 3583 3596 3584 static irqreturn_t i8xx_irq_handler(int irq, void *arg) ··· 3663 3645 struct intel_uncore *uncore = &dev_priv->uncore; 3664 3646 u32 enable_mask; 3665 3647 3666 - intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE | 3667 - I915_ERROR_MEMORY_REFRESH)); 3648 + intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv)); 3668 3649 3669 3650 /* Unmask the interrupts that we always want on. */ 3670 3651 dev_priv->irq_mask = ··· 3766 3749 dev_priv->irq_mask = ~0u; 3767 3750 } 3768 3751 3752 + static u32 i965_error_mask(struct drm_i915_private *i915) 3753 + { 3754 + /* 3755 + * Enable some error detection, note the instruction error mask 3756 + * bit is reserved, so we leave it masked. 3757 + * 3758 + * i965 FBC no longer generates spurious GTT errors, 3759 + * so we can always enable the page table errors. 
3760 + */ 3761 + if (IS_G4X(i915)) 3762 + return ~(GM45_ERROR_PAGE_TABLE | 3763 + GM45_ERROR_MEM_PRIV | 3764 + GM45_ERROR_CP_PRIV | 3765 + I915_ERROR_MEMORY_REFRESH); 3766 + else 3767 + return ~(I915_ERROR_PAGE_TABLE | 3768 + I915_ERROR_MEMORY_REFRESH); 3769 + } 3770 + 3769 3771 static void i965_irq_postinstall(struct drm_i915_private *dev_priv) 3770 3772 { 3771 3773 struct intel_uncore *uncore = &dev_priv->uncore; 3772 3774 u32 enable_mask; 3773 - u32 error_mask; 3774 3775 3775 - /* 3776 - * Enable some error detection, note the instruction error mask 3777 - * bit is reserved, so we leave it masked. 3778 - */ 3779 - if (IS_G4X(dev_priv)) { 3780 - error_mask = ~(GM45_ERROR_PAGE_TABLE | 3781 - GM45_ERROR_MEM_PRIV | 3782 - GM45_ERROR_CP_PRIV | 3783 - I915_ERROR_MEMORY_REFRESH); 3784 - } else { 3785 - error_mask = ~(I915_ERROR_PAGE_TABLE | 3786 - I915_ERROR_MEMORY_REFRESH); 3787 - } 3788 - intel_uncore_write(uncore, EMR, error_mask); 3776 + intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv)); 3789 3777 3790 3778 /* Unmask the interrupts that we always want on. */ 3791 3779 dev_priv->irq_mask =
-1
drivers/gpu/drm/i915/i915_pmu.c
··· 17 17 18 18 #include "i915_drv.h" 19 19 #include "i915_pmu.h" 20 - #include "intel_pm.h" 21 20 22 21 /* Frequency for the sampling timer for events which need it. */ 23 22 #define FREQUENCY 200
+229 -668
drivers/gpu/drm/i915/i915_reg.h
··· 116 116 * #define GEN8_BAR _MMIO(0xb888) 117 117 */ 118 118 119 + #define GU_CNTL_PROTECTED _MMIO(0x10100C) 120 + #define DEPRESENT REG_BIT(9) 121 + 119 122 #define GU_CNTL _MMIO(0x101010) 120 123 #define LMEM_INIT REG_BIT(7) 121 124 #define DRIVERFLR REG_BIT(31) ··· 544 541 #define _BXT_PHY0_BASE 0x6C000 545 542 #define _BXT_PHY1_BASE 0x162000 546 543 #define _BXT_PHY2_BASE 0x163000 547 - #define BXT_PHY_BASE(phy) _PHY3((phy), _BXT_PHY0_BASE, \ 548 - _BXT_PHY1_BASE, \ 549 - _BXT_PHY2_BASE) 544 + #define BXT_PHY_BASE(phy) \ 545 + _PICK_EVEN_2RANGES(phy, 1, \ 546 + _BXT_PHY0_BASE, _BXT_PHY0_BASE, \ 547 + _BXT_PHY1_BASE, _BXT_PHY2_BASE) 550 548 551 549 #define _BXT_PHY(phy, reg) \ 552 550 _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg)) ··· 570 566 #define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \ 571 567 _BXT_PHY_CTL_DDI_B) 572 568 573 - #define _PHY_CTL_FAMILY_EDP 0x64C80 574 569 #define _PHY_CTL_FAMILY_DDI 0x64C90 570 + #define _PHY_CTL_FAMILY_EDP 0x64C80 575 571 #define _PHY_CTL_FAMILY_DDI_C 0x64CA0 576 572 #define COMMON_RESET_DIS (1 << 31) 577 - #define BXT_PHY_CTL_FAMILY(phy) _MMIO_PHY3((phy), _PHY_CTL_FAMILY_DDI, \ 578 - _PHY_CTL_FAMILY_EDP, \ 579 - _PHY_CTL_FAMILY_DDI_C) 573 + #define BXT_PHY_CTL_FAMILY(phy) \ 574 + _MMIO(_PICK_EVEN_2RANGES(phy, 1, \ 575 + _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \ 576 + _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C)) 580 577 581 578 /* BXT PHY PLL registers */ 582 579 #define _PORT_PLL_A 0x46074 ··· 1043 1038 #define _MBUS_ABOX0_CTL 0x45038 1044 1039 #define _MBUS_ABOX1_CTL 0x45048 1045 1040 #define _MBUS_ABOX2_CTL 0x4504C 1046 - #define MBUS_ABOX_CTL(x) _MMIO(_PICK(x, _MBUS_ABOX0_CTL, \ 1047 - _MBUS_ABOX1_CTL, \ 1048 - _MBUS_ABOX2_CTL)) 1041 + #define MBUS_ABOX_CTL(x) \ 1042 + _MMIO(_PICK_EVEN_2RANGES(x, 2, \ 1043 + _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \ 1044 + _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL)) 1045 + 1049 1046 #define MBUS_ABOX_BW_CREDIT_MASK (3 << 20) 1050 1047 #define MBUS_ABOX_BW_CREDIT(x) ((x) << 20) 
1051 1048 #define MBUS_ABOX_B_CREDIT_MASK (0xF << 16) ··· 1737 1730 #define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6) 1738 1731 #define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2) 1739 1732 #define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0) 1740 - #define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \ 1741 - _PICK((pipe), _PALETTE_A, \ 1742 - _PALETTE_B, _CHV_PALETTE_C) + \ 1743 - (i) * 4) 1733 + #define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \ 1734 + _PICK_EVEN_2RANGES(pipe, 2, \ 1735 + _PALETTE_A, _PALETTE_B, \ 1736 + _CHV_PALETTE_C, _CHV_PALETTE_C) + \ 1737 + (i) * 4) 1744 1738 1745 1739 #define PEG_BAND_GAP_DATA _MMIO(0x14d68) 1746 1740 ··· 1914 1906 #define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915) 1915 1907 #define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X) 1916 1908 1917 - /* Pipe A timing regs */ 1918 - #define _HTOTAL_A 0x60000 1919 - #define _HBLANK_A 0x60004 1920 - #define _HSYNC_A 0x60008 1921 - #define _VTOTAL_A 0x6000c 1922 - #define _VBLANK_A 0x60010 1923 - #define _VSYNC_A 0x60014 1924 - #define _EXITLINE_A 0x60018 1925 - #define _PIPEASRC 0x6001c 1909 + /* Pipe/transcoder A timing regs */ 1910 + #define _TRANS_HTOTAL_A 0x60000 1911 + #define HTOTAL_MASK REG_GENMASK(31, 16) 1912 + #define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal)) 1913 + #define HACTIVE_MASK REG_GENMASK(15, 0) 1914 + #define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay)) 1915 + #define _TRANS_HBLANK_A 0x60004 1916 + #define HBLANK_END_MASK REG_GENMASK(31, 16) 1917 + #define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end)) 1918 + #define HBLANK_START_MASK REG_GENMASK(15, 0) 1919 + #define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start)) 1920 + #define _TRANS_HSYNC_A 0x60008 1921 + #define HSYNC_END_MASK REG_GENMASK(31, 16) 1922 + #define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end)) 1923 + #define 
HSYNC_START_MASK REG_GENMASK(15, 0) 1924 + #define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start)) 1925 + #define _TRANS_VTOTAL_A 0x6000c 1926 + #define VTOTAL_MASK REG_GENMASK(31, 16) 1927 + #define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal)) 1928 + #define VACTIVE_MASK REG_GENMASK(15, 0) 1929 + #define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay)) 1930 + #define _TRANS_VBLANK_A 0x60010 1931 + #define VBLANK_END_MASK REG_GENMASK(31, 16) 1932 + #define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end)) 1933 + #define VBLANK_START_MASK REG_GENMASK(15, 0) 1934 + #define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start)) 1935 + #define _TRANS_VSYNC_A 0x60014 1936 + #define VSYNC_END_MASK REG_GENMASK(31, 16) 1937 + #define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end)) 1938 + #define VSYNC_START_MASK REG_GENMASK(15, 0) 1939 + #define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start)) 1940 + #define _TRANS_EXITLINE_A 0x60018 1941 + #define _PIPEASRC 0x6001c 1926 1942 #define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16) 1927 1943 #define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w)) 1928 1944 #define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0) 1929 1945 #define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h)) 1930 - #define _BCLRPAT_A 0x60020 1931 - #define _VSYNCSHIFT_A 0x60028 1932 - #define _PIPE_MULT_A 0x6002c 1946 + #define _BCLRPAT_A 0x60020 1947 + #define _TRANS_VSYNCSHIFT_A 0x60028 1948 + #define _TRANS_MULT_A 0x6002c 1933 1949 1934 - /* Pipe B timing regs */ 1935 - #define _HTOTAL_B 0x61000 1936 - #define _HBLANK_B 0x61004 1937 - #define _HSYNC_B 0x61008 1938 - #define _VTOTAL_B 0x6100c 1939 - #define _VBLANK_B 0x61010 1940 - #define _VSYNC_B 0x61014 1941 - #define _PIPEBSRC 0x6101c 1942 - #define _BCLRPAT_B 0x61020 1943 - #define _VSYNCSHIFT_B 0x61028 1944 - #define _PIPE_MULT_B 0x6102c 1950 + /* Pipe/transcoder B timing 
regs */ 1951 + #define _TRANS_HTOTAL_B 0x61000 1952 + #define _TRANS_HBLANK_B 0x61004 1953 + #define _TRANS_HSYNC_B 0x61008 1954 + #define _TRANS_VTOTAL_B 0x6100c 1955 + #define _TRANS_VBLANK_B 0x61010 1956 + #define _TRANS_VSYNC_B 0x61014 1957 + #define _PIPEBSRC 0x6101c 1958 + #define _BCLRPAT_B 0x61020 1959 + #define _TRANS_VSYNCSHIFT_B 0x61028 1960 + #define _TRANS_MULT_B 0x6102c 1945 1961 1946 1962 /* DSI 0 timing regs */ 1947 - #define _HTOTAL_DSI0 0x6b000 1948 - #define _HSYNC_DSI0 0x6b008 1949 - #define _VTOTAL_DSI0 0x6b00c 1950 - #define _VSYNC_DSI0 0x6b014 1951 - #define _VSYNCSHIFT_DSI0 0x6b028 1963 + #define _TRANS_HTOTAL_DSI0 0x6b000 1964 + #define _TRANS_HSYNC_DSI0 0x6b008 1965 + #define _TRANS_VTOTAL_DSI0 0x6b00c 1966 + #define _TRANS_VSYNC_DSI0 0x6b014 1967 + #define _TRANS_VSYNCSHIFT_DSI0 0x6b028 1952 1968 1953 1969 /* DSI 1 timing regs */ 1954 - #define _HTOTAL_DSI1 0x6b800 1955 - #define _HSYNC_DSI1 0x6b808 1956 - #define _VTOTAL_DSI1 0x6b80c 1957 - #define _VSYNC_DSI1 0x6b814 1958 - #define _VSYNCSHIFT_DSI1 0x6b828 1970 + #define _TRANS_HTOTAL_DSI1 0x6b800 1971 + #define _TRANS_HSYNC_DSI1 0x6b808 1972 + #define _TRANS_VTOTAL_DSI1 0x6b80c 1973 + #define _TRANS_VSYNC_DSI1 0x6b814 1974 + #define _TRANS_VSYNCSHIFT_DSI1 0x6b828 1959 1975 1960 1976 #define TRANSCODER_A_OFFSET 0x60000 1961 1977 #define TRANSCODER_B_OFFSET 0x61000 ··· 1990 1958 #define TRANSCODER_DSI0_OFFSET 0x6b000 1991 1959 #define TRANSCODER_DSI1_OFFSET 0x6b800 1992 1960 1993 - #define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A) 1994 - #define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A) 1995 - #define HSYNC(trans) _MMIO_TRANS2(trans, _HSYNC_A) 1996 - #define VTOTAL(trans) _MMIO_TRANS2(trans, _VTOTAL_A) 1997 - #define VBLANK(trans) _MMIO_TRANS2(trans, _VBLANK_A) 1998 - #define VSYNC(trans) _MMIO_TRANS2(trans, _VSYNC_A) 1999 - #define BCLRPAT(trans) _MMIO_TRANS2(trans, _BCLRPAT_A) 2000 - #define VSYNCSHIFT(trans) _MMIO_TRANS2(trans, _VSYNCSHIFT_A) 2001 - #define PIPESRC(trans) 
_MMIO_TRANS2(trans, _PIPEASRC) 2002 - #define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A) 1961 + #define TRANS_HTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_HTOTAL_A) 1962 + #define TRANS_HBLANK(trans) _MMIO_TRANS2((trans), _TRANS_HBLANK_A) 1963 + #define TRANS_HSYNC(trans) _MMIO_TRANS2((trans), _TRANS_HSYNC_A) 1964 + #define TRANS_VTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_VTOTAL_A) 1965 + #define TRANS_VBLANK(trans) _MMIO_TRANS2((trans), _TRANS_VBLANK_A) 1966 + #define TRANS_VSYNC(trans) _MMIO_TRANS2((trans), _TRANS_VSYNC_A) 1967 + #define BCLRPAT(trans) _MMIO_TRANS2((trans), _BCLRPAT_A) 1968 + #define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2((trans), _TRANS_VSYNCSHIFT_A) 1969 + #define PIPESRC(pipe) _MMIO_TRANS2((pipe), _PIPEASRC) 1970 + #define TRANS_MULT(trans) _MMIO_TRANS2((trans), _TRANS_MULT_A) 2003 1971 2004 - #define EXITLINE(trans) _MMIO_TRANS2(trans, _EXITLINE_A) 1972 + #define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A) 2005 1973 #define EXITLINE_ENABLE REG_BIT(31) 2006 1974 #define EXITLINE_MASK REG_GENMASK(12, 0) 2007 1975 #define EXITLINE_SHIFT 0 ··· 2298 2266 #define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14) 2299 2267 #define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13) 2300 2268 2301 - /* Icelake DSC Rate Control Range Parameter Registers */ 2302 - #define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240) 2303 - #define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4) 2304 - #define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40) 2305 - #define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4) 2306 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208) 2307 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4) 2308 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308) 2309 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4) 2310 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408) 2311 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4) 2312 - #define 
_ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508) 2313 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4) 2314 - #define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2315 - _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \ 2316 - _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC) 2317 - #define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2318 - _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \ 2319 - _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC) 2320 - #define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2321 - _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \ 2322 - _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC) 2323 - #define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2324 - _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \ 2325 - _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC) 2326 - #define RC_BPG_OFFSET_SHIFT 10 2327 - #define RC_MAX_QP_SHIFT 5 2328 - #define RC_MIN_QP_SHIFT 0 2329 - 2330 - #define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248) 2331 - #define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4) 2332 - #define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48) 2333 - #define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4) 2334 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210) 2335 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4) 2336 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310) 2337 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4) 2338 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410) 2339 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4) 2340 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510) 2341 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4) 2342 - #define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2343 - _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \ 2344 - _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC) 2345 - #define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2346 - _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \ 2347 - 
_ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC) 2348 - #define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2349 - _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \ 2350 - _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC) 2351 - #define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2352 - _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \ 2353 - _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC) 2354 - 2355 - #define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250) 2356 - #define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4) 2357 - #define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50) 2358 - #define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4) 2359 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218) 2360 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4) 2361 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318) 2362 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4) 2363 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418) 2364 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4) 2365 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518) 2366 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4) 2367 - #define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2368 - _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \ 2369 - _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC) 2370 - #define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2371 - _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \ 2372 - _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC) 2373 - #define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2374 - _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \ 2375 - _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC) 2376 - #define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2377 - _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \ 2378 - _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC) 2379 - 2380 - #define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258) 2381 - #define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4) 2382 - 
#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58) 2383 - #define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4) 2384 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220) 2385 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4) 2386 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320) 2387 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4) 2388 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420) 2389 - #define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4) 2390 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520) 2391 - #define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4) 2392 - #define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2393 - _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \ 2394 - _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC) 2395 - #define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2396 - _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \ 2397 - _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC) 2398 - #define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2399 - _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \ 2400 - _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC) 2401 - #define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 2402 - _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \ 2403 - _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC) 2404 - 2405 2269 /* VGA port control */ 2406 2270 #define ADPA _MMIO(0x61100) 2407 2271 #define PCH_ADPA _MMIO(0xe1100) ··· 2379 2451 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) 2380 2452 2381 2453 #define PORT_HOTPLUG_STAT _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114) 2382 - /* 2383 - * HDMI/DP bits are g4x+ 2384 - * 2385 - * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. 2386 - * Please check the detailed lore in the commit message for for experimental 2387 - * evidence. 
2388 - */ 2389 - /* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */ 2390 - #define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29) 2391 - #define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28) 2392 - #define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27) 2393 - /* G4X/VLV/CHV DP/HDMI bits again match Bspec */ 2454 + /* HDMI/DP bits are g4x+ */ 2394 2455 #define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27) 2395 2456 #define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) 2396 2457 #define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29) ··· 2508 2591 #define SDVO_PIPE_SEL_SHIFT_CHV 24 2509 2592 #define SDVO_PIPE_SEL_MASK_CHV (3 << 24) 2510 2593 #define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24) 2511 - 2512 - /* LVDS port control */ 2513 - #define LVDS _MMIO(0x61180) 2514 - /* 2515 - * Enables the LVDS port. This bit must be set before DPLLs are enabled, as 2516 - * the DPLL semantics change when the LVDS is assigned to that pipe. 2517 - */ 2518 - #define LVDS_PORT_EN (1 << 31) 2519 - /* Selects pipe B for LVDS data. Must be set on pre-965. */ 2520 - #define LVDS_PIPE_SEL_SHIFT 30 2521 - #define LVDS_PIPE_SEL_MASK (1 << 30) 2522 - #define LVDS_PIPE_SEL(pipe) ((pipe) << 30) 2523 - #define LVDS_PIPE_SEL_SHIFT_CPT 29 2524 - #define LVDS_PIPE_SEL_MASK_CPT (3 << 29) 2525 - #define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29) 2526 - /* LVDS dithering flag on 965/g4x platform */ 2527 - #define LVDS_ENABLE_DITHER (1 << 25) 2528 - /* LVDS sync polarity flags. Set to invert (i.e. negative) */ 2529 - #define LVDS_VSYNC_POLARITY (1 << 21) 2530 - #define LVDS_HSYNC_POLARITY (1 << 20) 2531 - 2532 - /* Enable border for unscaled (or aspect-scaled) display */ 2533 - #define LVDS_BORDER_ENABLE (1 << 15) 2534 - /* 2535 - * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per 2536 - * pixel. 
2537 - */ 2538 - #define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) 2539 - #define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) 2540 - #define LVDS_A0A2_CLKA_POWER_UP (3 << 8) 2541 - /* 2542 - * Controls the A3 data pair, which contains the additional LSBs for 24 bit 2543 - * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be 2544 - * on. 2545 - */ 2546 - #define LVDS_A3_POWER_MASK (3 << 6) 2547 - #define LVDS_A3_POWER_DOWN (0 << 6) 2548 - #define LVDS_A3_POWER_UP (3 << 6) 2549 - /* 2550 - * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP 2551 - * is set. 2552 - */ 2553 - #define LVDS_CLKB_POWER_MASK (3 << 4) 2554 - #define LVDS_CLKB_POWER_DOWN (0 << 4) 2555 - #define LVDS_CLKB_POWER_UP (3 << 4) 2556 - /* 2557 - * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 2558 - * setting for whether we are in dual-channel mode. The B3 pair will 2559 - * additionally only be powered up when LVDS_A3_POWER_UP is set. 2560 - */ 2561 - #define LVDS_B0B3_POWER_MASK (3 << 2) 2562 - #define LVDS_B0B3_POWER_DOWN (0 << 2) 2563 - #define LVDS_B0B3_POWER_UP (3 << 2) 2564 2594 2565 2595 /* Video Data Island Packet control */ 2566 2596 #define VIDEO_DIP_DATA _MMIO(0x61178) ··· 3356 3492 #define _PIPEADSL 0x70000 3357 3493 #define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */ 3358 3494 #define PIPEDSL_LINE_MASK REG_GENMASK(19, 0) 3359 - #define _PIPEACONF 0x70008 3360 - #define PIPECONF_ENABLE REG_BIT(31) 3361 - #define PIPECONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */ 3362 - #define PIPECONF_STATE_ENABLE REG_BIT(30) /* i965+ */ 3363 - #define PIPECONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */ 3364 - #define PIPECONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */ 3365 - #define PIPECONF_FRAME_START_DELAY(x) REG_FIELD_PREP(PIPECONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */ 3366 - #define PIPECONF_PIPE_LOCKED REG_BIT(25) 3367 - #define PIPECONF_FORCE_BORDER REG_BIT(25) 3368 - #define PIPECONF_GAMMA_MODE_MASK_I9XX 
REG_BIT(24) /* gmch */ 3369 - #define PIPECONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */ 3370 - #define PIPECONF_GAMMA_MODE_8BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 0) 3371 - #define PIPECONF_GAMMA_MODE_10BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 1) 3372 - #define PIPECONF_GAMMA_MODE_12BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */ 3373 - #define PIPECONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */ 3374 - #define PIPECONF_GAMMA_MODE(x) REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */ 3375 - #define PIPECONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */ 3376 - #define PIPECONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 0) 3377 - #define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 4) /* gen4 only */ 3378 - #define PIPECONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 5) /* gen4 only */ 3379 - #define PIPECONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 6) 3380 - #define PIPECONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 7) /* gen3 only */ 3495 + #define _TRANSACONF 0x70008 3496 + #define TRANSCONF_ENABLE REG_BIT(31) 3497 + #define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */ 3498 + #define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */ 3499 + #define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */ 3500 + #define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */ 3501 + #define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */ 3502 + #define TRANSCONF_PIPE_LOCKED REG_BIT(25) 3503 + #define TRANSCONF_FORCE_BORDER REG_BIT(25) 3504 + #define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */ 3505 + #define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */ 3506 + #define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 0) 3507 + 
#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 1) 3508 + #define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */ 3509 + #define TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */ 3510 + #define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */ 3511 + #define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */ 3512 + #define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0) 3513 + #define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */ 3514 + #define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */ 3515 + #define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6) 3516 + #define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */ 3381 3517 /* 3382 3518 * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display, 3383 3519 * DBL=power saving pixel doubling, PF-ID* requires panel fitter 3384 3520 */ 3385 - #define PIPECONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */ 3386 - #define PIPECONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */ 3387 - #define PIPECONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 0) 3388 - #define PIPECONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 1) 3389 - #define PIPECONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 3) 3390 - #define PIPECONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */ 3391 - #define PIPECONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */ 3392 - #define PIPECONF_REFRESH_RATE_ALT_ILK REG_BIT(20) 3393 - #define PIPECONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */ 3394 - #define 
PIPECONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(PIPECONF_MSA_TIMING_DELAY_MASK, (x)) 3395 - #define PIPECONF_CXSR_DOWNCLOCK REG_BIT(16) 3396 - #define PIPECONF_REFRESH_RATE_ALT_VLV REG_BIT(14) 3397 - #define PIPECONF_COLOR_RANGE_SELECT REG_BIT(13) 3398 - #define PIPECONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */ 3399 - #define PIPECONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */ 3400 - #define PIPECONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */ 3401 - #define PIPECONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */ 3402 - #define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */ 3403 - #define PIPECONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */ 3404 - #define PIPECONF_BPC_8 REG_FIELD_PREP(PIPECONF_BPC_MASK, 0) 3405 - #define PIPECONF_BPC_10 REG_FIELD_PREP(PIPECONF_BPC_MASK, 1) 3406 - #define PIPECONF_BPC_6 REG_FIELD_PREP(PIPECONF_BPC_MASK, 2) 3407 - #define PIPECONF_BPC_12 REG_FIELD_PREP(PIPECONF_BPC_MASK, 3) 3408 - #define PIPECONF_DITHER_EN REG_BIT(4) 3409 - #define PIPECONF_DITHER_TYPE_MASK REG_GENMASK(3, 2) 3410 - #define PIPECONF_DITHER_TYPE_SP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 0) 3411 - #define PIPECONF_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 1) 3412 - #define PIPECONF_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 2) 3413 - #define PIPECONF_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 3) 3521 + #define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */ 3522 + #define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */ 3523 + #define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0) 3524 + #define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1) 3525 + #define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3) 3526 + #define TRANSCONF_INTERLACE_IF_ID_DBL_ILK 
REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */ 3527 + #define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */ 3528 + #define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20) 3529 + #define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */ 3530 + #define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x)) 3531 + #define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16) 3532 + #define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14) 3533 + #define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13) 3534 + #define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */ 3535 + #define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */ 3536 + #define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */ 3537 + #define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */ 3538 + #define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */ 3539 + #define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */ 3540 + #define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0) 3541 + #define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1) 3542 + #define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2) 3543 + #define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3) 3544 + #define TRANSCONF_DITHER_EN REG_BIT(4) 3545 + #define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2) 3546 + #define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0) 3547 + #define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1) 3548 + #define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2) 3549 + #define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3) 3414 3550 #define _PIPEASTAT 0x70024 3415 3551 #define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31) 3416 3552 
#define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30) ··· 3479 3615 #define PIPE_DSI0_OFFSET 0x7b000 3480 3616 #define PIPE_DSI1_OFFSET 0x7b800 3481 3617 3482 - #define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF) 3618 + #define TRANSCONF(trans) _MMIO_PIPE2((trans), _TRANSACONF) 3483 3619 #define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL) 3484 3620 #define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH) 3485 3621 #define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL) ··· 4119 4255 4120 4256 /* Pipe B */ 4121 4257 #define _PIPEBDSL (DISPLAY_MMIO_BASE(dev_priv) + 0x71000) 4122 - #define _PIPEBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008) 4258 + #define _TRANSBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008) 4123 4259 #define _PIPEBSTAT (DISPLAY_MMIO_BASE(dev_priv) + 0x71024) 4124 4260 #define _PIPEBFRAMEHIGH 0x71040 4125 4261 #define _PIPEBFRAMEPIXEL 0x71044 ··· 5296 5432 #define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28) 5297 5433 #define XELPD_PIPE_SOFT_UNDERRUN (1 << 22) 5298 5434 #define XELPD_PIPE_HARD_UNDERRUN (1 << 21) 5435 + #define GEN12_PIPE_VBLANK_UNMOD (1 << 19) 5299 5436 #define GEN8_PIPE_CURSOR_FAULT (1 << 10) 5300 5437 #define GEN8_PIPE_SPRITE_FAULT (1 << 9) 5301 5438 #define GEN8_PIPE_PRIMARY_FAULT (1 << 8) ··· 6257 6392 #define FDI_PLL_CTL_1 _MMIO(0xfe000) 6258 6393 #define FDI_PLL_CTL_2 _MMIO(0xfe004) 6259 6394 6260 - #define PCH_LVDS _MMIO(0xe1180) 6261 - #define LVDS_DETECTED (1 << 1) 6262 - 6263 6395 #define _PCH_DP_B 0xe4100 6264 6396 #define PCH_DP_B _MMIO(_PCH_DP_B) 6265 6397 #define _PCH_DPB_AUX_CH_CTL 0xe4110 ··· 7086 7224 ADLS_DPCLKA_DDIK_SEL_MASK) 7087 7225 7088 7226 /* ICL PLL */ 7089 - #define DPLL0_ENABLE 0x46010 7090 - #define DPLL1_ENABLE 0x46014 7227 + #define _DPLL0_ENABLE 0x46010 7228 + #define _DPLL1_ENABLE 0x46014 7091 7229 #define _ADLS_DPLL2_ENABLE 0x46018 7092 7230 #define _ADLS_DPLL3_ENABLE 0x46030 7093 - #define PLL_ENABLE (1 << 31) 7094 - #define PLL_LOCK (1 << 30) 7095 - #define PLL_POWER_ENABLE (1 << 27) 7096 - #define 
PLL_POWER_STATE (1 << 26) 7097 - #define ICL_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \ 7098 - _ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE) 7231 + #define PLL_ENABLE REG_BIT(31) 7232 + #define PLL_LOCK REG_BIT(30) 7233 + #define PLL_POWER_ENABLE REG_BIT(27) 7234 + #define PLL_POWER_STATE REG_BIT(26) 7235 + #define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \ 7236 + _DPLL0_ENABLE, _DPLL1_ENABLE, \ 7237 + _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE)) 7099 7238 7100 7239 #define _DG2_PLL3_ENABLE 0x4601C 7101 7240 7102 - #define DG2_PLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \ 7103 - _ADLS_DPLL2_ENABLE, _DG2_PLL3_ENABLE) 7241 + #define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \ 7242 + _DPLL0_ENABLE, _DPLL1_ENABLE, \ 7243 + _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE)) 7104 7244 7105 7245 #define TBT_PLL_ENABLE _MMIO(0x46020) 7106 7246 ··· 7110 7246 #define _MG_PLL2_ENABLE 0x46034 7111 7247 #define _MG_PLL3_ENABLE 0x46038 7112 7248 #define _MG_PLL4_ENABLE 0x4603C 7113 - /* Bits are the same as DPLL0_ENABLE */ 7249 + /* Bits are the same as _DPLL0_ENABLE */ 7114 7250 #define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \ 7115 7251 _MG_PLL2_ENABLE) 7116 7252 7117 7253 /* DG1 PLL */ 7118 - #define DG1_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \ 7119 - _MG_PLL1_ENABLE, _MG_PLL2_ENABLE) 7254 + #define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ 7255 + _DPLL0_ENABLE, _DPLL1_ENABLE, \ 7256 + _MG_PLL1_ENABLE, _MG_PLL2_ENABLE)) 7120 7257 7121 7258 /* ADL-P Type C PLL */ 7122 7259 #define PORTTC1_PLL_ENABLE 0x46038 ··· 7177 7312 #define _TGL_DPLL0_CFGCR0 0x164284 7178 7313 #define _TGL_DPLL1_CFGCR0 0x16428C 7179 7314 #define _TGL_TBTPLL_CFGCR0 0x16429C 7180 - #define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ 7181 - _TGL_DPLL1_CFGCR0, \ 7182 - _TGL_TBTPLL_CFGCR0) 7315 + #define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ 7316 + _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, 
\ 7317 + _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0)) 7183 7318 #define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \ 7184 7319 _TGL_DPLL1_CFGCR0) 7185 7320 ··· 7192 7327 #define _TGL_DPLL0_CFGCR1 0x164288 7193 7328 #define _TGL_DPLL1_CFGCR1 0x164290 7194 7329 #define _TGL_TBTPLL_CFGCR1 0x1642A0 7195 - #define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ 7196 - _TGL_DPLL1_CFGCR1, \ 7197 - _TGL_TBTPLL_CFGCR1) 7330 + #define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ 7331 + _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \ 7332 + _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1)) 7198 7333 #define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \ 7199 7334 _TGL_DPLL1_CFGCR1) 7200 7335 7201 7336 #define _DG1_DPLL2_CFGCR0 0x16C284 7202 7337 #define _DG1_DPLL3_CFGCR0 0x16C28C 7203 - #define DG1_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ 7204 - _TGL_DPLL1_CFGCR0, \ 7205 - _DG1_DPLL2_CFGCR0, \ 7206 - _DG1_DPLL3_CFGCR0) 7338 + #define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ 7339 + _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \ 7340 + _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0)) 7207 7341 7208 7342 #define _DG1_DPLL2_CFGCR1 0x16C288 7209 7343 #define _DG1_DPLL3_CFGCR1 0x16C290 7210 - #define DG1_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ 7211 - _TGL_DPLL1_CFGCR1, \ 7212 - _DG1_DPLL2_CFGCR1, \ 7213 - _DG1_DPLL3_CFGCR1) 7344 + #define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ 7345 + _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \ 7346 + _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1)) 7214 7347 7215 7348 /* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */ 7216 - #define _ADLS_DPLL3_CFGCR0 0x1642C0 7217 7349 #define _ADLS_DPLL4_CFGCR0 0x164294 7218 - #define ADLS_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ 7219 - _TGL_DPLL1_CFGCR0, \ 7220 - _ADLS_DPLL4_CFGCR0, \ 7221 - _ADLS_DPLL3_CFGCR0) 7350 + #define _ADLS_DPLL3_CFGCR0 0x1642C0 7351 + #define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ 7352 + 
_TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \ 7353 + _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0)) 7222 7354 7223 - #define _ADLS_DPLL3_CFGCR1 0x1642C4 7224 7355 #define _ADLS_DPLL4_CFGCR1 0x164298 7225 - #define ADLS_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ 7226 - _TGL_DPLL1_CFGCR1, \ 7227 - _ADLS_DPLL4_CFGCR1, \ 7228 - _ADLS_DPLL3_CFGCR1) 7356 + #define _ADLS_DPLL3_CFGCR1 0x1642C4 7357 + #define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ 7358 + _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \ 7359 + _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1)) 7229 7360 7230 7361 /* BXT display engine PLL */ 7231 7362 #define BXT_DE_PLL_CTL _MMIO(0x6d000) ··· 7554 7693 #define PIPE_FRMTMSTMP(pipe) \ 7555 7694 _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A) 7556 7695 7557 - /* Display Stream Splitter Control */ 7558 - #define DSS_CTL1 _MMIO(0x67400) 7559 - #define SPLITTER_ENABLE (1 << 31) 7560 - #define JOINER_ENABLE (1 << 30) 7561 - #define DUAL_LINK_MODE_INTERLEAVE (1 << 24) 7562 - #define DUAL_LINK_MODE_FRONTBACK (0 << 24) 7563 - #define OVERLAP_PIXELS_MASK (0xf << 16) 7564 - #define OVERLAP_PIXELS(pixels) ((pixels) << 16) 7565 - #define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) 7566 - #define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) 7567 - #define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0 7568 - 7569 - #define DSS_CTL2 _MMIO(0x67404) 7570 - #define LEFT_BRANCH_VDSC_ENABLE (1 << 31) 7571 - #define RIGHT_BRANCH_VDSC_ENABLE (1 << 15) 7572 - #define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) 7573 - #define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) 7574 - 7575 - #define _ICL_PIPE_DSS_CTL1_PB 0x78200 7576 - #define _ICL_PIPE_DSS_CTL1_PC 0x78400 7577 - #define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7578 - _ICL_PIPE_DSS_CTL1_PB, \ 7579 - _ICL_PIPE_DSS_CTL1_PC) 7580 - #define BIG_JOINER_ENABLE (1 << 29) 7581 - #define MASTER_BIG_JOINER_ENABLE (1 << 28) 7582 - #define VGA_CENTERING_ENABLE (1 << 27) 7583 - #define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25) 7584 - 
#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0) 7585 - #define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1) 7586 - #define UNCOMPRESSED_JOINER_MASTER (1 << 21) 7587 - #define UNCOMPRESSED_JOINER_SLAVE (1 << 20) 7588 - 7589 - #define _ICL_PIPE_DSS_CTL2_PB 0x78204 7590 - #define _ICL_PIPE_DSS_CTL2_PC 0x78404 7591 - #define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7592 - _ICL_PIPE_DSS_CTL2_PB, \ 7593 - _ICL_PIPE_DSS_CTL2_PC) 7594 - 7595 7696 #define GGC _MMIO(0x108040) 7596 7697 #define GMS_MASK REG_GENMASK(15, 8) 7597 7698 #define GGMS_MASK REG_GENMASK(7, 6) ··· 7576 7753 #define ICL_PHY_MISC_MUX_DDID (1 << 28) 7577 7754 #define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23) 7578 7755 #define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20) 7579 - 7580 - /* Icelake Display Stream Compression Registers */ 7581 - #define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200) 7582 - #define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) 7583 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 7584 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 7585 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 7586 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570 7587 - #define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7588 - _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \ 7589 - _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC) 7590 - #define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7591 - _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \ 7592 - _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC) 7593 - #define DSC_ALT_ICH_SEL (1 << 20) 7594 - #define DSC_VBR_ENABLE (1 << 19) 7595 - #define DSC_422_ENABLE (1 << 18) 7596 - #define DSC_COLOR_SPACE_CONVERSION (1 << 17) 7597 - #define DSC_BLOCK_PREDICTION (1 << 16) 7598 - #define DSC_LINE_BUF_DEPTH_SHIFT 12 7599 - #define DSC_BPC_SHIFT 8 7600 - #define DSC_VER_MIN_SHIFT 4 7601 - #define DSC_VER_MAJ (0x1 << 0) 7602 - 7603 - #define 
DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204) 7604 - #define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04) 7605 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274 7606 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374 7607 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474 7608 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574 7609 - #define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7610 - _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \ 7611 - _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC) 7612 - #define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7613 - _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \ 7614 - _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC) 7615 - #define DSC_BPP(bpp) ((bpp) << 0) 7616 - 7617 - #define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208) 7618 - #define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08) 7619 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278 7620 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378 7621 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478 7622 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578 7623 - #define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7624 - _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \ 7625 - _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC) 7626 - #define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7627 - _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \ 7628 - _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC) 7629 - #define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16) 7630 - #define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0) 7631 - 7632 - #define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C) 7633 - #define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C) 7634 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C 7635 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C 7636 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C 7637 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C 7638 - #define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) 
_MMIO_PIPE((pipe) - PIPE_B, \ 7639 - _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \ 7640 - _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC) 7641 - #define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7642 - _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \ 7643 - _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC) 7644 - #define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16) 7645 - #define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0) 7646 - 7647 - #define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210) 7648 - #define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10) 7649 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280 7650 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380 7651 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480 7652 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580 7653 - #define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7654 - _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ 7655 - _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) 7656 - #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7657 - _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ 7658 - _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) 7659 - #define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) 7660 - #define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) 7661 - 7662 - #define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214) 7663 - #define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14) 7664 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284 7665 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384 7666 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484 7667 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584 7668 - #define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7669 - _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ 7670 - _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) 7671 - #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7672 - _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ 7673 - 
_ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) 7674 - #define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16) 7675 - #define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) 7676 - 7677 - #define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218) 7678 - #define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18) 7679 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288 7680 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388 7681 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488 7682 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588 7683 - #define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7684 - _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \ 7685 - _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC) 7686 - #define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7687 - _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \ 7688 - _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC) 7689 - #define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24) 7690 - #define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16) 7691 - #define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8) 7692 - #define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0) 7693 - 7694 - #define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C) 7695 - #define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C) 7696 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C 7697 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C 7698 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C 7699 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C 7700 - #define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7701 - _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \ 7702 - _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC) 7703 - #define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7704 - _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \ 7705 - _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC) 7706 - #define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16) 7707 - #define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0) 7708 - 7709 - 
#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220) 7710 - #define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20) 7711 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290 7712 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390 7713 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490 7714 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590 7715 - #define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7716 - _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \ 7717 - _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC) 7718 - #define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7719 - _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \ 7720 - _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC) 7721 - #define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16) 7722 - #define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0) 7723 - 7724 - #define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224) 7725 - #define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24) 7726 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294 7727 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394 7728 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494 7729 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594 7730 - #define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7731 - _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \ 7732 - _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC) 7733 - #define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7734 - _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \ 7735 - _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC) 7736 - #define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16) 7737 - #define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0) 7738 - 7739 - #define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228) 7740 - #define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28) 7741 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298 7742 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398 7743 - #define 
_ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498 7744 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598 7745 - #define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7746 - _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \ 7747 - _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC) 7748 - #define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7749 - _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \ 7750 - _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC) 7751 - #define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20) 7752 - #define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16) 7753 - #define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8) 7754 - #define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0) 7755 - 7756 - #define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C) 7757 - #define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C) 7758 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C 7759 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C 7760 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C 7761 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C 7762 - #define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7763 - _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \ 7764 - _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC) 7765 - #define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7766 - _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \ 7767 - _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC) 7768 - 7769 - #define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260) 7770 - #define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60) 7771 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0 7772 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0 7773 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0 7774 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0 7775 - #define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7776 - _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \ 7777 - 
_ICL_DSC0_PICTURE_PARAMETER_SET_12_PC) 7778 - #define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7779 - _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \ 7780 - _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC) 7781 - 7782 - #define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264) 7783 - #define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64) 7784 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4 7785 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4 7786 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4 7787 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4 7788 - #define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7789 - _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \ 7790 - _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC) 7791 - #define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7792 - _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \ 7793 - _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC) 7794 - 7795 - #define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268) 7796 - #define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68) 7797 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8 7798 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8 7799 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8 7800 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8 7801 - #define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7802 - _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \ 7803 - _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC) 7804 - #define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7805 - _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \ 7806 - _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC) 7807 - 7808 - #define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C) 7809 - #define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C) 7810 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC 7811 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC 7812 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC 7813 - 
#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC 7814 - #define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7815 - _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \ 7816 - _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC) 7817 - #define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7818 - _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \ 7819 - _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC) 7820 - 7821 - #define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270) 7822 - #define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70) 7823 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0 7824 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0 7825 - #define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0 7826 - #define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0 7827 - #define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7828 - _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \ 7829 - _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC) 7830 - #define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7831 - _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ 7832 - _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) 7833 - #define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20) 7834 - #define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) 7835 - #define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) 7836 - 7837 - /* Icelake Rate Control Buffer Threshold Registers */ 7838 - #define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) 7839 - #define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4) 7840 - #define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30) 7841 - #define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4) 7842 - #define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254) 7843 - #define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4) 7844 - #define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354) 7845 - #define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4) 7846 - #define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454) 7847 - #define 
_ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4) 7848 - #define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554) 7849 - #define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4) 7850 - #define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7851 - _ICL_DSC0_RC_BUF_THRESH_0_PB, \ 7852 - _ICL_DSC0_RC_BUF_THRESH_0_PC) 7853 - #define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7854 - _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \ 7855 - _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC) 7856 - #define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7857 - _ICL_DSC1_RC_BUF_THRESH_0_PB, \ 7858 - _ICL_DSC1_RC_BUF_THRESH_0_PC) 7859 - #define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7860 - _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \ 7861 - _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC) 7862 - 7863 - #define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238) 7864 - #define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4) 7865 - #define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38) 7866 - #define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4) 7867 - #define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C) 7868 - #define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4) 7869 - #define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C) 7870 - #define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4) 7871 - #define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C) 7872 - #define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4) 7873 - #define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C) 7874 - #define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4) 7875 - #define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7876 - _ICL_DSC0_RC_BUF_THRESH_1_PB, \ 7877 - _ICL_DSC0_RC_BUF_THRESH_1_PC) 7878 - #define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7879 - _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \ 7880 - _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC) 7881 - #define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7882 - _ICL_DSC1_RC_BUF_THRESH_1_PB, \ 7883 - _ICL_DSC1_RC_BUF_THRESH_1_PC) 7884 - #define 
ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 7885 - _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ 7886 - _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) 7887 7756 7888 7757 #define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0) 7889 7758 #define MODULAR_FIA_MASK (1 << 4) ··· 7620 8105 #define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0) 7621 8106 #define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4) 7622 8107 #define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8) 7623 - #define DSB_ENABLE (1 << 31) 7624 - #define DSB_STATUS_BUSY (1 << 0) 8108 + #define DSB_ENABLE REG_BIT(31) 8109 + #define DSB_BUF_REITERATE REG_BIT(29) 8110 + #define DSB_WAIT_FOR_VBLANK REG_BIT(28) 8111 + #define DSB_WAIT_FOR_LINE_IN REG_BIT(27) 8112 + #define DSB_HALT REG_BIT(16) 8113 + #define DSB_NON_POSTED REG_BIT(8) 8114 + #define DSB_STATUS_BUSY REG_BIT(0) 8115 + #define DSB_MMIOCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xc) 8116 + #define DSB_MMIO_DEAD_CLOCKS_ENABLE REG_BIT(31) 8117 + #define DSB_MMIO_DEAD_CLOCKS_COUNT_MASK REG_GENMASK(15, 8) 8118 + #define DSB_MMIO_DEAD_CLOCKS_COUNT(x) REG_FIELD_PREP(DSB_MMIO_DEAD_CLOCK_COUNT_MASK, (x)) 8119 + #define DSB_MMIO_CYCLES_MASK REG_GENMASK(7, 0) 8120 + #define DSB_MMIO_CYCLES(x) REG_FIELD_PREP(DSB_MMIO_CYCLES_MASK, (x)) 8121 + #define DSB_POLLFUNC(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x10) 8122 + #define DSB_POLL_ENABLE REG_BIT(31) 8123 + #define DSB_POLL_WAIT_MASK REG_GENMASK(30, 23) 8124 + #define DSB_POLL_WAIT(x) REG_FIELD_PREP(DSB_POLL_WAIT_MASK, (x)) /* usec */ 8125 + #define DSB_POLL_COUNT_MASK REG_GENMASK(22, 15) 8126 + #define DSB_POLL_COUNT(x) REG_FIELD_PREP(DSB_POLL_COUNT_MASK, (x)) 8127 + #define DSB_DEBUG(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x14) 8128 + #define DSB_POLLMASK(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x1c) 8129 + #define DSB_STATUS(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x24) 8130 + #define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28) 8131 + 
#define DSB_ATS_FAULT_INT_EN REG_BIT(20) 8132 + #define DSB_GTT_FAULT_INT_EN REG_BIT(19) 8133 + #define DSB_RSPTIMEOUT_INT_EN REG_BIT(18) 8134 + #define DSB_POLL_ERR_INT_EN REG_BIT(17) 8135 + #define DSB_PROG_INT_EN REG_BIT(16) 8136 + #define DSB_ATS_FAULT_INT_STATUS REG_BIT(4) 8137 + #define DSB_GTT_FAULT_INT_STATUS REG_BIT(3) 8138 + #define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2) 8139 + #define DSB_POLL_ERR_INT_STATUS REG_BIT(1) 8140 + #define DSB_PROG_INT_STATUS REG_BIT(0) 8141 + #define DSB_CURRENT_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x2c) 8142 + #define DSB_RM_TIMEOUT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x30) 8143 + #define DSB_RM_CLAIM_TIMEOUT REG_BIT(31) 8144 + #define DSB_RM_READY_TIMEOUT REG_BIT(30) 8145 + #define DSB_RM_CLAIM_TIMEOUT_COUNT_MASK REG_GENMASK(23, 16) 8146 + #define DSB_RM_CLAIM_TIMEOUT_COUNT(x) REG_FIELD_PREP(DSB_RM_CLAIM_TIMEOUT_COUNT_MASK, (x)) /* clocks */ 8147 + #define DSB_RM_READY_TIMEOUT_VALUE_MASK REG_GENMASK(15, 0) 8148 + #define DSB_RM_READY_TIMEOUT_VALUE(x) REG_FIELD_PREP(DSB_RM_READY_TIMEOUT_VALUE, (x)) /* usec */ 8149 + #define DSB_RMTIMEOUTREG_CAPTURE(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x34) 8150 + #define DSB_PMCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x38) 8151 + #define DSB_PMCTRL_2(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x3c) 8152 + #define DSB_PF_LN_LOWER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x40) 8153 + #define DSB_PF_LN_UPPER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x44) 8154 + #define DSB_BUFRPT_CNT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x48) 8155 + #define DSB_CHICKEN(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xf0) 7625 8156 7626 8157 #define CLKREQ_POLICY _MMIO(0x101038) 7627 8158 #define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
+31
drivers/gpu/drm/i915/i915_reg_defs.h
··· 120 120 #define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a))) 121 121 122 122 /* 123 + * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets. 124 + * @__c_index corresponds to the index in which the second range starts to be 125 + * used. Using math interval notation, the first range is used for indexes [ 0, 126 + * @__c_index), while the second range is used for [ @__c_index, ... ). Example: 127 + * 128 + * #define _FOO_A 0xf000 129 + * #define _FOO_B 0xf004 130 + * #define _FOO_C 0xf008 131 + * #define _SUPER_FOO_A 0xa000 132 + * #define _SUPER_FOO_B 0xa100 133 + * #define FOO(x) _MMIO(_PICK_EVEN_2RANGES(x, 3, \ 134 + * _FOO_A, _FOO_B, \ 135 + * _SUPER_FOO_A, _SUPER_FOO_B)) 136 + * 137 + * This expands to: 138 + * 0: 0xf000, 139 + * 1: 0xf004, 140 + * 2: 0xf008, 141 + * 3: 0xa000, 142 + * 4: 0xa100, 143 + * 5: 0xa200, 144 + * ... 145 + */ 146 + #define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d) \ 147 + (BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) + \ 148 + ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) : \ 149 + _PICK_EVEN((__index) - (__c_index), __c, __d))) 150 + 151 + /* 123 152 * Given the arbitrary numbers in varargs, pick the 0-based __index'th number. 124 153 * 125 154 * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced. ··· 164 135 typedef struct { 165 136 u32 reg; 166 137 } i915_mcr_reg_t; 138 + 139 + #define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) }) 167 140 168 141 #define INVALID_MMIO_REG _MMIO(0) 169 142
-1
drivers/gpu/drm/i915/i915_request.c
··· 48 48 #include "i915_driver.h" 49 49 #include "i915_drv.h" 50 50 #include "i915_trace.h" 51 - #include "intel_pm.h" 52 51 53 52 struct execute_cb { 54 53 struct irq_work work;
-1
drivers/gpu/drm/i915/i915_sysfs.c
··· 37 37 38 38 #include "i915_drv.h" 39 39 #include "i915_sysfs.h" 40 - #include "intel_pm.h" 41 40 42 41 struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) 43 42 {
+23 -5
drivers/gpu/drm/i915/intel_device_info.c
··· 119 119 drm_printf(p, "display version: %u\n", 120 120 runtime->display.ip.ver); 121 121 122 + drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step)); 123 + drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step)); 124 + drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step)); 125 + drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step)); 126 + 122 127 drm_printf(p, "gt: %d\n", info->gt); 123 - drm_printf(p, "memory-regions: %x\n", runtime->memory_regions); 124 - drm_printf(p, "page-sizes: %x\n", runtime->page_sizes); 128 + drm_printf(p, "memory-regions: 0x%x\n", runtime->memory_regions); 129 + drm_printf(p, "page-sizes: 0x%x\n", runtime->page_sizes); 125 130 drm_printf(p, "platform: %s\n", intel_platform_name(info->platform)); 126 131 drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size); 127 132 drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type); ··· 207 202 INTEL_RPLP_IDS(0), 208 203 }; 209 204 205 + static const u16 subplatform_rplu_ids[] = { 206 + INTEL_RPLU_IDS(0), 207 + }; 208 + 210 209 static const u16 subplatform_g10_ids[] = { 211 210 INTEL_DG2_G10_IDS(0), 212 211 INTEL_ATS_M150_IDS(0), ··· 278 269 } else if (find_devid(devid, subplatform_rpl_ids, 279 270 ARRAY_SIZE(subplatform_rpl_ids))) { 280 271 mask = BIT(INTEL_SUBPLATFORM_RPL); 272 + if (find_devid(devid, subplatform_rplu_ids, 273 + ARRAY_SIZE(subplatform_rplu_ids))) 274 + mask |= BIT(INTEL_SUBPLATFORM_RPLU); 281 275 } else if (find_devid(devid, subplatform_g10_ids, 282 276 ARRAY_SIZE(subplatform_g10_ids))) { 283 277 mask = BIT(INTEL_SUBPLATFORM_G10); ··· 448 436 runtime->num_sprites[pipe] = 1; 449 437 } 450 438 439 + if (HAS_DISPLAY(dev_priv) && 440 + (IS_DGFX(dev_priv) || DISPLAY_VER(dev_priv) >= 14) && 441 + !(intel_de_read(dev_priv, GU_CNTL_PROTECTED) & DEPRESENT)) { 442 + drm_info(&dev_priv->drm, "Display not present, disabling\n"); 443 + 444 + runtime->pipe_mask = 0; 445 + 
} 446 + 451 447 if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) && 452 448 HAS_PCH_SPLIT(dev_priv)) { 453 449 u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); ··· 477 457 drm_info(&dev_priv->drm, 478 458 "Display fused off, disabling\n"); 479 459 runtime->pipe_mask = 0; 480 - runtime->cpu_transcoder_mask = 0; 481 - runtime->fbc_mask = 0; 482 460 } else if (fuse_strap & IVB_PIPE_C_DISABLE) { 483 461 drm_info(&dev_priv->drm, "PipeC fused off\n"); 484 462 runtime->pipe_mask &= ~BIT(PIPE_C); ··· 553 535 { 554 536 drm_printf(p, "Has logical contexts? %s\n", 555 537 str_yes_no(caps->has_logical_contexts)); 556 - drm_printf(p, "scheduler: %x\n", caps->scheduler); 538 + drm_printf(p, "scheduler: 0x%x\n", caps->scheduler); 557 539 }
+1
drivers/gpu/drm/i915/intel_device_info.h
··· 127 127 * bit set 128 128 */ 129 129 #define INTEL_SUBPLATFORM_N 1 130 + #define INTEL_SUBPLATFORM_RPLU 2 130 131 131 132 /* MTL */ 132 133 #define INTEL_SUBPLATFORM_M 0
+36 -35
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
··· 8 8 #include "display/intel_display_types.h" 9 9 #include "display/intel_dmc_regs.h" 10 10 #include "display/intel_dpio_phy.h" 11 + #include "display/intel_lvds_regs.h" 11 12 #include "display/vlv_dsi_pll_regs.h" 12 13 #include "gt/intel_gt_regs.h" 13 14 #include "gvt/gvt.h" ··· 118 117 MMIO_D(PIPEDSL(PIPE_B)); 119 118 MMIO_D(PIPEDSL(PIPE_C)); 120 119 MMIO_D(PIPEDSL(_PIPE_EDP)); 121 - MMIO_D(PIPECONF(PIPE_A)); 122 - MMIO_D(PIPECONF(PIPE_B)); 123 - MMIO_D(PIPECONF(PIPE_C)); 124 - MMIO_D(PIPECONF(_PIPE_EDP)); 120 + MMIO_D(TRANSCONF(TRANSCODER_A)); 121 + MMIO_D(TRANSCONF(TRANSCODER_B)); 122 + MMIO_D(TRANSCONF(TRANSCODER_C)); 123 + MMIO_D(TRANSCONF(TRANSCODER_EDP)); 125 124 MMIO_D(PIPESTAT(PIPE_A)); 126 125 MMIO_D(PIPESTAT(PIPE_B)); 127 126 MMIO_D(PIPESTAT(PIPE_C)); ··· 219 218 MMIO_D(SPRSCALE(PIPE_C)); 220 219 MMIO_D(SPRSURFLIVE(PIPE_C)); 221 220 MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0)); 222 - MMIO_D(HTOTAL(TRANSCODER_A)); 223 - MMIO_D(HBLANK(TRANSCODER_A)); 224 - MMIO_D(HSYNC(TRANSCODER_A)); 225 - MMIO_D(VTOTAL(TRANSCODER_A)); 226 - MMIO_D(VBLANK(TRANSCODER_A)); 227 - MMIO_D(VSYNC(TRANSCODER_A)); 221 + MMIO_D(TRANS_HTOTAL(TRANSCODER_A)); 222 + MMIO_D(TRANS_HBLANK(TRANSCODER_A)); 223 + MMIO_D(TRANS_HSYNC(TRANSCODER_A)); 224 + MMIO_D(TRANS_VTOTAL(TRANSCODER_A)); 225 + MMIO_D(TRANS_VBLANK(TRANSCODER_A)); 226 + MMIO_D(TRANS_VSYNC(TRANSCODER_A)); 228 227 MMIO_D(BCLRPAT(TRANSCODER_A)); 229 - MMIO_D(VSYNCSHIFT(TRANSCODER_A)); 228 + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_A)); 230 229 MMIO_D(PIPESRC(TRANSCODER_A)); 231 - MMIO_D(HTOTAL(TRANSCODER_B)); 232 - MMIO_D(HBLANK(TRANSCODER_B)); 233 - MMIO_D(HSYNC(TRANSCODER_B)); 234 - MMIO_D(VTOTAL(TRANSCODER_B)); 235 - MMIO_D(VBLANK(TRANSCODER_B)); 236 - MMIO_D(VSYNC(TRANSCODER_B)); 230 + MMIO_D(TRANS_HTOTAL(TRANSCODER_B)); 231 + MMIO_D(TRANS_HBLANK(TRANSCODER_B)); 232 + MMIO_D(TRANS_HSYNC(TRANSCODER_B)); 233 + MMIO_D(TRANS_VTOTAL(TRANSCODER_B)); 234 + MMIO_D(TRANS_VBLANK(TRANSCODER_B)); 235 + MMIO_D(TRANS_VSYNC(TRANSCODER_B)); 237 
236 MMIO_D(BCLRPAT(TRANSCODER_B)); 238 - MMIO_D(VSYNCSHIFT(TRANSCODER_B)); 237 + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_B)); 239 238 MMIO_D(PIPESRC(TRANSCODER_B)); 240 - MMIO_D(HTOTAL(TRANSCODER_C)); 241 - MMIO_D(HBLANK(TRANSCODER_C)); 242 - MMIO_D(HSYNC(TRANSCODER_C)); 243 - MMIO_D(VTOTAL(TRANSCODER_C)); 244 - MMIO_D(VBLANK(TRANSCODER_C)); 245 - MMIO_D(VSYNC(TRANSCODER_C)); 239 + MMIO_D(TRANS_HTOTAL(TRANSCODER_C)); 240 + MMIO_D(TRANS_HBLANK(TRANSCODER_C)); 241 + MMIO_D(TRANS_HSYNC(TRANSCODER_C)); 242 + MMIO_D(TRANS_VTOTAL(TRANSCODER_C)); 243 + MMIO_D(TRANS_VBLANK(TRANSCODER_C)); 244 + MMIO_D(TRANS_VSYNC(TRANSCODER_C)); 246 245 MMIO_D(BCLRPAT(TRANSCODER_C)); 247 - MMIO_D(VSYNCSHIFT(TRANSCODER_C)); 246 + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_C)); 248 247 MMIO_D(PIPESRC(TRANSCODER_C)); 249 - MMIO_D(HTOTAL(TRANSCODER_EDP)); 250 - MMIO_D(HBLANK(TRANSCODER_EDP)); 251 - MMIO_D(HSYNC(TRANSCODER_EDP)); 252 - MMIO_D(VTOTAL(TRANSCODER_EDP)); 253 - MMIO_D(VBLANK(TRANSCODER_EDP)); 254 - MMIO_D(VSYNC(TRANSCODER_EDP)); 248 + MMIO_D(TRANS_HTOTAL(TRANSCODER_EDP)); 249 + MMIO_D(TRANS_HBLANK(TRANSCODER_EDP)); 250 + MMIO_D(TRANS_HSYNC(TRANSCODER_EDP)); 251 + MMIO_D(TRANS_VTOTAL(TRANSCODER_EDP)); 252 + MMIO_D(TRANS_VBLANK(TRANSCODER_EDP)); 253 + MMIO_D(TRANS_VSYNC(TRANSCODER_EDP)); 255 254 MMIO_D(BCLRPAT(TRANSCODER_EDP)); 256 - MMIO_D(VSYNCSHIFT(TRANSCODER_EDP)); 255 + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_EDP)); 257 256 MMIO_D(PIPE_DATA_M1(TRANSCODER_A)); 258 257 MMIO_D(PIPE_DATA_N1(TRANSCODER_A)); 259 258 MMIO_D(PIPE_DATA_M2(TRANSCODER_A)); ··· 494 493 MMIO_D(GAMMA_MODE(PIPE_A)); 495 494 MMIO_D(GAMMA_MODE(PIPE_B)); 496 495 MMIO_D(GAMMA_MODE(PIPE_C)); 497 - MMIO_D(PIPE_MULT(PIPE_A)); 498 - MMIO_D(PIPE_MULT(PIPE_B)); 499 - MMIO_D(PIPE_MULT(PIPE_C)); 496 + MMIO_D(TRANS_MULT(TRANSCODER_A)); 497 + MMIO_D(TRANS_MULT(TRANSCODER_B)); 498 + MMIO_D(TRANS_MULT(TRANSCODER_C)); 500 499 MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A)); 501 500 MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B)); 502 501 
MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C));
+3 -4109
drivers/gpu/drm/i915/intel_pm.c
··· 44 44 void (*init_clock_gating)(struct drm_i915_private *i915); 45 45 }; 46 46 47 - /* used in computing the new watermarks state */ 48 - struct intel_wm_config { 49 - unsigned int num_pipes_active; 50 - bool sprites_enabled; 51 - bool sprites_scaled; 52 - }; 53 - 54 47 static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) 55 48 { 56 49 if (HAS_LLC(dev_priv)) { ··· 122 129 */ 123 130 intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) | 124 131 PWM1_GATING_DIS | PWM2_GATING_DIS); 125 - } 126 - 127 - static void pnv_get_mem_freq(struct drm_i915_private *dev_priv) 128 - { 129 - u32 tmp; 130 - 131 - tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG); 132 - 133 - switch (tmp & CLKCFG_FSB_MASK) { 134 - case CLKCFG_FSB_533: 135 - dev_priv->fsb_freq = 533; /* 133*4 */ 136 - break; 137 - case CLKCFG_FSB_800: 138 - dev_priv->fsb_freq = 800; /* 200*4 */ 139 - break; 140 - case CLKCFG_FSB_667: 141 - dev_priv->fsb_freq = 667; /* 167*4 */ 142 - break; 143 - case CLKCFG_FSB_400: 144 - dev_priv->fsb_freq = 400; /* 100*4 */ 145 - break; 146 - } 147 - 148 - switch (tmp & CLKCFG_MEM_MASK) { 149 - case CLKCFG_MEM_533: 150 - dev_priv->mem_freq = 533; 151 - break; 152 - case CLKCFG_MEM_667: 153 - dev_priv->mem_freq = 667; 154 - break; 155 - case CLKCFG_MEM_800: 156 - dev_priv->mem_freq = 800; 157 - break; 158 - } 159 - 160 - /* detect pineview DDR3 setting */ 161 - tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL); 162 - dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 
1 : 0; 163 - } 164 - 165 - static void ilk_get_mem_freq(struct drm_i915_private *dev_priv) 166 - { 167 - u16 ddrpll, csipll; 168 - 169 - ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1); 170 - csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0); 171 - 172 - switch (ddrpll & 0xff) { 173 - case 0xc: 174 - dev_priv->mem_freq = 800; 175 - break; 176 - case 0x10: 177 - dev_priv->mem_freq = 1066; 178 - break; 179 - case 0x14: 180 - dev_priv->mem_freq = 1333; 181 - break; 182 - case 0x18: 183 - dev_priv->mem_freq = 1600; 184 - break; 185 - default: 186 - drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n", 187 - ddrpll & 0xff); 188 - dev_priv->mem_freq = 0; 189 - break; 190 - } 191 - 192 - switch (csipll & 0x3ff) { 193 - case 0x00c: 194 - dev_priv->fsb_freq = 3200; 195 - break; 196 - case 0x00e: 197 - dev_priv->fsb_freq = 3733; 198 - break; 199 - case 0x010: 200 - dev_priv->fsb_freq = 4266; 201 - break; 202 - case 0x012: 203 - dev_priv->fsb_freq = 4800; 204 - break; 205 - case 0x014: 206 - dev_priv->fsb_freq = 5333; 207 - break; 208 - case 0x016: 209 - dev_priv->fsb_freq = 5866; 210 - break; 211 - case 0x018: 212 - dev_priv->fsb_freq = 6400; 213 - break; 214 - default: 215 - drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n", 216 - csipll & 0x3ff); 217 - dev_priv->fsb_freq = 0; 218 - break; 219 - } 220 - } 221 - 222 - static const struct cxsr_latency cxsr_latency_table[] = { 223 - {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ 224 - {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ 225 - {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ 226 - {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ 227 - {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ 228 - 229 - {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ 230 - {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ 231 - {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ 232 - {1, 1, 667, 667, 6438, 
36438, 6911, 36911}, /* DDR3-667 SC */ 233 - {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ 234 - 235 - {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ 236 - {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ 237 - {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ 238 - {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ 239 - {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ 240 - 241 - {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ 242 - {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ 243 - {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ 244 - {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ 245 - {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ 246 - 247 - {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ 248 - {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ 249 - {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ 250 - {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ 251 - {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ 252 - 253 - {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ 254 - {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ 255 - {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ 256 - {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ 257 - {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ 258 - }; 259 - 260 - static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop, 261 - bool is_ddr3, 262 - int fsb, 263 - int mem) 264 - { 265 - const struct cxsr_latency *latency; 266 - int i; 267 - 268 - if (fsb == 0 || mem == 0) 269 - return NULL; 270 - 271 - for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { 272 - latency = &cxsr_latency_table[i]; 273 - if (is_desktop == latency->is_desktop && 274 - is_ddr3 == latency->is_ddr3 && 275 - fsb == latency->fsb_freq && mem == 
latency->mem_freq) 276 - return latency; 277 - } 278 - 279 - DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 280 - 281 - return NULL; 282 - } 283 - 284 - static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) 285 - { 286 - u32 val; 287 - 288 - vlv_punit_get(dev_priv); 289 - 290 - val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 291 - if (enable) 292 - val &= ~FORCE_DDR_HIGH_FREQ; 293 - else 294 - val |= FORCE_DDR_HIGH_FREQ; 295 - val &= ~FORCE_DDR_LOW_FREQ; 296 - val |= FORCE_DDR_FREQ_REQ_ACK; 297 - vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); 298 - 299 - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & 300 - FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) 301 - drm_err(&dev_priv->drm, 302 - "timed out waiting for Punit DDR DVFS request\n"); 303 - 304 - vlv_punit_put(dev_priv); 305 - } 306 - 307 - static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) 308 - { 309 - u32 val; 310 - 311 - vlv_punit_get(dev_priv); 312 - 313 - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 314 - if (enable) 315 - val |= DSP_MAXFIFO_PM5_ENABLE; 316 - else 317 - val &= ~DSP_MAXFIFO_PM5_ENABLE; 318 - vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); 319 - 320 - vlv_punit_put(dev_priv); 321 - } 322 - 323 - #define FW_WM(value, plane) \ 324 - (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) 325 - 326 - static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) 327 - { 328 - bool was_enabled; 329 - u32 val; 330 - 331 - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 332 - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 333 - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? 
FW_CSPWRDWNEN : 0); 334 - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV); 335 - } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { 336 - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; 337 - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); 338 - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); 339 - } else if (IS_PINEVIEW(dev_priv)) { 340 - val = intel_uncore_read(&dev_priv->uncore, DSPFW3); 341 - was_enabled = val & PINEVIEW_SELF_REFRESH_EN; 342 - if (enable) 343 - val |= PINEVIEW_SELF_REFRESH_EN; 344 - else 345 - val &= ~PINEVIEW_SELF_REFRESH_EN; 346 - intel_uncore_write(&dev_priv->uncore, DSPFW3, val); 347 - intel_uncore_posting_read(&dev_priv->uncore, DSPFW3); 348 - } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { 349 - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; 350 - val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : 351 - _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); 352 - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val); 353 - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); 354 - } else if (IS_I915GM(dev_priv)) { 355 - /* 356 - * FIXME can't find a bit like this for 915G, and 357 - * and yet it does have the related watermark in 358 - * FW_BLC_SELF. What's going on? 359 - */ 360 - was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN; 361 - val = enable ? 
_MASKED_BIT_ENABLE(INSTPM_SELF_EN) : 362 - _MASKED_BIT_DISABLE(INSTPM_SELF_EN); 363 - intel_uncore_write(&dev_priv->uncore, INSTPM, val); 364 - intel_uncore_posting_read(&dev_priv->uncore, INSTPM); 365 - } else { 366 - return false; 367 - } 368 - 369 - trace_intel_memory_cxsr(dev_priv, was_enabled, enable); 370 - 371 - drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", 372 - str_enabled_disabled(enable), 373 - str_enabled_disabled(was_enabled)); 374 - 375 - return was_enabled; 376 - } 377 - 378 - /** 379 - * intel_set_memory_cxsr - Configure CxSR state 380 - * @dev_priv: i915 device 381 - * @enable: Allow vs. disallow CxSR 382 - * 383 - * Allow or disallow the system to enter a special CxSR 384 - * (C-state self refresh) state. What typically happens in CxSR mode 385 - * is that several display FIFOs may get combined into a single larger 386 - * FIFO for a particular plane (so called max FIFO mode) to allow the 387 - * system to defer memory fetches longer, and the memory will enter 388 - * self refresh. 389 - * 390 - * Note that enabling CxSR does not guarantee that the system enter 391 - * this special mode, nor does it guarantee that the system stays 392 - * in that mode once entered. So this just allows/disallows the system 393 - * to autonomously utilize the CxSR mode. Other factors such as core 394 - * C-states will affect when/if the system actually enters/exits the 395 - * CxSR mode. 396 - * 397 - * Note that on VLV/CHV this actually only controls the max FIFO mode, 398 - * and the system is free to enter/exit memory self refresh at any time 399 - * even when the use of CxSR has been disallowed. 400 - * 401 - * While the system is actually in the CxSR/max FIFO mode, some plane 402 - * control registers will not get latched on vblank. Thus in order to 403 - * guarantee the system will respond to changes in the plane registers 404 - * we must always disallow CxSR prior to making changes to those registers. 
405 - * Unfortunately the system will re-evaluate the CxSR conditions at 406 - * frame start which happens after vblank start (which is when the plane 407 - * registers would get latched), so we can't proceed with the plane update 408 - * during the same frame where we disallowed CxSR. 409 - * 410 - * Certain platforms also have a deeper HPLL SR mode. Fortunately the 411 - * HPLL SR mode depends on CxSR itself, so we don't have to hand hold 412 - * the hardware w.r.t. HPLL SR when writing to plane registers. 413 - * Disallowing just CxSR is sufficient. 414 - */ 415 - bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) 416 - { 417 - bool ret; 418 - 419 - mutex_lock(&dev_priv->display.wm.wm_mutex); 420 - ret = _intel_set_memory_cxsr(dev_priv, enable); 421 - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 422 - dev_priv->display.wm.vlv.cxsr = enable; 423 - else if (IS_G4X(dev_priv)) 424 - dev_priv->display.wm.g4x.cxsr = enable; 425 - mutex_unlock(&dev_priv->display.wm.wm_mutex); 426 - 427 - return ret; 428 - } 429 - 430 - /* 431 - * Latency for FIFO fetches is dependent on several factors: 432 - * - memory configuration (speed, channels) 433 - * - chipset 434 - * - current MCH state 435 - * It can be fairly high in some situations, so here we assume a fairly 436 - * pessimal value. It's a tradeoff between extra memory fetches (if we 437 - * set this value too high, the FIFO will fetch frequently to stay full) 438 - * and power consumption (set it too low to save power and we might see 439 - * FIFO underruns and display "flicker"). 440 - * 441 - * A value of 5us seems to be a good balance; safe for very low end 442 - * platforms but not overly aggressive on lower latency configs. 
443 - */ 444 - static const int pessimal_latency_ns = 5000; 445 - 446 - #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ 447 - ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) 448 - 449 - static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) 450 - { 451 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 452 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 453 - struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; 454 - enum pipe pipe = crtc->pipe; 455 - int sprite0_start, sprite1_start; 456 - u32 dsparb, dsparb2, dsparb3; 457 - 458 - switch (pipe) { 459 - case PIPE_A: 460 - dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 461 - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); 462 - sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); 463 - sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); 464 - break; 465 - case PIPE_B: 466 - dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 467 - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); 468 - sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); 469 - sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); 470 - break; 471 - case PIPE_C: 472 - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); 473 - dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3); 474 - sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); 475 - sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); 476 - break; 477 - default: 478 - MISSING_CASE(pipe); 479 - return; 480 - } 481 - 482 - fifo_state->plane[PLANE_PRIMARY] = sprite0_start; 483 - fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start; 484 - fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start; 485 - fifo_state->plane[PLANE_CURSOR] = 63; 486 - } 487 - 488 - static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, 489 - enum i9xx_plane_id i9xx_plane) 490 - { 491 - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 
492 - int size; 493 - 494 - size = dsparb & 0x7f; 495 - if (i9xx_plane == PLANE_B) 496 - size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; 497 - 498 - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", 499 - dsparb, plane_name(i9xx_plane), size); 500 - 501 - return size; 502 - } 503 - 504 - static int i830_get_fifo_size(struct drm_i915_private *dev_priv, 505 - enum i9xx_plane_id i9xx_plane) 506 - { 507 - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 508 - int size; 509 - 510 - size = dsparb & 0x1ff; 511 - if (i9xx_plane == PLANE_B) 512 - size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; 513 - size >>= 1; /* Convert to cachelines */ 514 - 515 - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", 516 - dsparb, plane_name(i9xx_plane), size); 517 - 518 - return size; 519 - } 520 - 521 - static int i845_get_fifo_size(struct drm_i915_private *dev_priv, 522 - enum i9xx_plane_id i9xx_plane) 523 - { 524 - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 525 - int size; 526 - 527 - size = dsparb & 0x7f; 528 - size >>= 2; /* Convert to cachelines */ 529 - 530 - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", 531 - dsparb, plane_name(i9xx_plane), size); 532 - 533 - return size; 534 - } 535 - 536 - /* Pineview has different values for various configs */ 537 - static const struct intel_watermark_params pnv_display_wm = { 538 - .fifo_size = PINEVIEW_DISPLAY_FIFO, 539 - .max_wm = PINEVIEW_MAX_WM, 540 - .default_wm = PINEVIEW_DFT_WM, 541 - .guard_size = PINEVIEW_GUARD_WM, 542 - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 543 - }; 544 - 545 - static const struct intel_watermark_params pnv_display_hplloff_wm = { 546 - .fifo_size = PINEVIEW_DISPLAY_FIFO, 547 - .max_wm = PINEVIEW_MAX_WM, 548 - .default_wm = PINEVIEW_DFT_HPLLOFF_WM, 549 - .guard_size = PINEVIEW_GUARD_WM, 550 - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 551 - }; 552 - 553 - static const struct intel_watermark_params pnv_cursor_wm = { 554 - .fifo_size = 
PINEVIEW_CURSOR_FIFO, 555 - .max_wm = PINEVIEW_CURSOR_MAX_WM, 556 - .default_wm = PINEVIEW_CURSOR_DFT_WM, 557 - .guard_size = PINEVIEW_CURSOR_GUARD_WM, 558 - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 559 - }; 560 - 561 - static const struct intel_watermark_params pnv_cursor_hplloff_wm = { 562 - .fifo_size = PINEVIEW_CURSOR_FIFO, 563 - .max_wm = PINEVIEW_CURSOR_MAX_WM, 564 - .default_wm = PINEVIEW_CURSOR_DFT_WM, 565 - .guard_size = PINEVIEW_CURSOR_GUARD_WM, 566 - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 567 - }; 568 - 569 - static const struct intel_watermark_params i965_cursor_wm_info = { 570 - .fifo_size = I965_CURSOR_FIFO, 571 - .max_wm = I965_CURSOR_MAX_WM, 572 - .default_wm = I965_CURSOR_DFT_WM, 573 - .guard_size = 2, 574 - .cacheline_size = I915_FIFO_LINE_SIZE, 575 - }; 576 - 577 - static const struct intel_watermark_params i945_wm_info = { 578 - .fifo_size = I945_FIFO_SIZE, 579 - .max_wm = I915_MAX_WM, 580 - .default_wm = 1, 581 - .guard_size = 2, 582 - .cacheline_size = I915_FIFO_LINE_SIZE, 583 - }; 584 - 585 - static const struct intel_watermark_params i915_wm_info = { 586 - .fifo_size = I915_FIFO_SIZE, 587 - .max_wm = I915_MAX_WM, 588 - .default_wm = 1, 589 - .guard_size = 2, 590 - .cacheline_size = I915_FIFO_LINE_SIZE, 591 - }; 592 - 593 - static const struct intel_watermark_params i830_a_wm_info = { 594 - .fifo_size = I855GM_FIFO_SIZE, 595 - .max_wm = I915_MAX_WM, 596 - .default_wm = 1, 597 - .guard_size = 2, 598 - .cacheline_size = I830_FIFO_LINE_SIZE, 599 - }; 600 - 601 - static const struct intel_watermark_params i830_bc_wm_info = { 602 - .fifo_size = I855GM_FIFO_SIZE, 603 - .max_wm = I915_MAX_WM/2, 604 - .default_wm = 1, 605 - .guard_size = 2, 606 - .cacheline_size = I830_FIFO_LINE_SIZE, 607 - }; 608 - 609 - static const struct intel_watermark_params i845_wm_info = { 610 - .fifo_size = I830_FIFO_SIZE, 611 - .max_wm = I915_MAX_WM, 612 - .default_wm = 1, 613 - .guard_size = 2, 614 - .cacheline_size = I830_FIFO_LINE_SIZE, 615 - }; 616 - 617 - /** 618 
- * intel_wm_method1 - Method 1 / "small buffer" watermark formula 619 - * @pixel_rate: Pipe pixel rate in kHz 620 - * @cpp: Plane bytes per pixel 621 - * @latency: Memory wakeup latency in 0.1us units 622 - * 623 - * Compute the watermark using the method 1 or "small buffer" 624 - * formula. The caller may additonally add extra cachelines 625 - * to account for TLB misses and clock crossings. 626 - * 627 - * This method is concerned with the short term drain rate 628 - * of the FIFO, ie. it does not account for blanking periods 629 - * which would effectively reduce the average drain rate across 630 - * a longer period. The name "small" refers to the fact the 631 - * FIFO is relatively small compared to the amount of data 632 - * fetched. 633 - * 634 - * The FIFO level vs. time graph might look something like: 635 - * 636 - * |\ |\ 637 - * | \ | \ 638 - * __---__---__ (- plane active, _ blanking) 639 - * -> time 640 - * 641 - * or perhaps like this: 642 - * 643 - * |\|\ |\|\ 644 - * __----__----__ (- plane active, _ blanking) 645 - * -> time 646 - * 647 - * Returns: 648 - * The watermark in bytes 649 - */ 650 - static unsigned int intel_wm_method1(unsigned int pixel_rate, 651 - unsigned int cpp, 652 - unsigned int latency) 653 - { 654 - u64 ret; 655 - 656 - ret = mul_u32_u32(pixel_rate, cpp * latency); 657 - ret = DIV_ROUND_UP_ULL(ret, 10000); 658 - 659 - return ret; 660 - } 661 - 662 - /** 663 - * intel_wm_method2 - Method 2 / "large buffer" watermark formula 664 - * @pixel_rate: Pipe pixel rate in kHz 665 - * @htotal: Pipe horizontal total 666 - * @width: Plane width in pixels 667 - * @cpp: Plane bytes per pixel 668 - * @latency: Memory wakeup latency in 0.1us units 669 - * 670 - * Compute the watermark using the method 2 or "large buffer" 671 - * formula. The caller may additonally add extra cachelines 672 - * to account for TLB misses and clock crossings. 673 - * 674 - * This method is concerned with the long term drain rate 675 - * of the FIFO, ie. 
it does account for blanking periods 676 - * which effectively reduce the average drain rate across 677 - * a longer period. The name "large" refers to the fact the 678 - * FIFO is relatively large compared to the amount of data 679 - * fetched. 680 - * 681 - * The FIFO level vs. time graph might look something like: 682 - * 683 - * |\___ |\___ 684 - * | \___ | \___ 685 - * | \ | \ 686 - * __ --__--__--__--__--__--__ (- plane active, _ blanking) 687 - * -> time 688 - * 689 - * Returns: 690 - * The watermark in bytes 691 - */ 692 - static unsigned int intel_wm_method2(unsigned int pixel_rate, 693 - unsigned int htotal, 694 - unsigned int width, 695 - unsigned int cpp, 696 - unsigned int latency) 697 - { 698 - unsigned int ret; 699 - 700 - /* 701 - * FIXME remove once all users are computing 702 - * watermarks in the correct place. 703 - */ 704 - if (WARN_ON_ONCE(htotal == 0)) 705 - htotal = 1; 706 - 707 - ret = (latency * pixel_rate) / (htotal * 10000); 708 - ret = (ret + 1) * width * cpp; 709 - 710 - return ret; 711 - } 712 - 713 - /** 714 - * intel_calculate_wm - calculate watermark level 715 - * @pixel_rate: pixel clock 716 - * @wm: chip FIFO params 717 - * @fifo_size: size of the FIFO buffer 718 - * @cpp: bytes per pixel 719 - * @latency_ns: memory latency for the platform 720 - * 721 - * Calculate the watermark level (the level at which the display plane will 722 - * start fetching from memory again). Each chip has a different display 723 - * FIFO size and allocation, so the caller needs to figure that out and pass 724 - * in the correct intel_watermark_params structure. 725 - * 726 - * As the pixel clock runs, the FIFO will be drained at a rate that depends 727 - * on the pixel size. When it reaches the watermark level, it'll start 728 - * fetching FIFO line sized based chunks from memory until the FIFO fills 729 - * past the watermark point. If the FIFO drains completely, a FIFO underrun 730 - * will occur, and a display engine hang could result. 
731 - */ 732 - static unsigned int intel_calculate_wm(int pixel_rate, 733 - const struct intel_watermark_params *wm, 734 - int fifo_size, int cpp, 735 - unsigned int latency_ns) 736 - { 737 - int entries, wm_size; 738 - 739 - /* 740 - * Note: we need to make sure we don't overflow for various clock & 741 - * latency values. 742 - * clocks go from a few thousand to several hundred thousand. 743 - * latency is usually a few thousand 744 - */ 745 - entries = intel_wm_method1(pixel_rate, cpp, 746 - latency_ns / 100); 747 - entries = DIV_ROUND_UP(entries, wm->cacheline_size) + 748 - wm->guard_size; 749 - DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries); 750 - 751 - wm_size = fifo_size - entries; 752 - DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); 753 - 754 - /* Don't promote wm_size to unsigned... */ 755 - if (wm_size > wm->max_wm) 756 - wm_size = wm->max_wm; 757 - if (wm_size <= 0) 758 - wm_size = wm->default_wm; 759 - 760 - /* 761 - * Bspec seems to indicate that the value shouldn't be lower than 762 - * 'burst size + 1'. Certainly 830 is quite unhappy with low values. 763 - * Lets go for 8 which is the burst size since certain platforms 764 - * already use a hardcoded 8 (which is what the spec says should be 765 - * done). 
766 - */ 767 - if (wm_size <= 8) 768 - wm_size = 8; 769 - 770 - return wm_size; 771 - } 772 - 773 - static bool is_disabling(int old, int new, int threshold) 774 - { 775 - return old >= threshold && new < threshold; 776 - } 777 - 778 - static bool is_enabling(int old, int new, int threshold) 779 - { 780 - return old < threshold && new >= threshold; 781 - } 782 - 783 - static int intel_wm_num_levels(struct drm_i915_private *dev_priv) 784 - { 785 - return dev_priv->display.wm.max_level + 1; 786 - } 787 - 788 - bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, 789 - const struct intel_plane_state *plane_state) 790 - { 791 - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 792 - 793 - /* FIXME check the 'enable' instead */ 794 - if (!crtc_state->hw.active) 795 - return false; 796 - 797 - /* 798 - * Treat cursor with fb as always visible since cursor updates 799 - * can happen faster than the vrefresh rate, and the current 800 - * watermark code doesn't handle that correctly. Cursor updates 801 - * which set/clear the fb or change the cursor size are going 802 - * to get throttled by intel_legacy_cursor_update() to work 803 - * around this problem with the watermark code. 804 - */ 805 - if (plane->id == PLANE_CURSOR) 806 - return plane_state->hw.fb != NULL; 807 - else 808 - return plane_state->uapi.visible; 809 - } 810 - 811 - static bool intel_crtc_active(struct intel_crtc *crtc) 812 - { 813 - /* Be paranoid as we can arrive here with only partial 814 - * state retrieved from the hardware during setup. 815 - * 816 - * We can ditch the adjusted_mode.crtc_clock check as soon 817 - * as Haswell has gained clock readout/fastboot support. 818 - * 819 - * We can ditch the crtc->primary->state->fb check as soon as we can 820 - * properly reconstruct framebuffers. 821 - * 822 - * FIXME: The intel_crtc->active here should be switched to 823 - * crtc->state->active once we have proper CRTC states wired up 824 - * for atomic. 
825 - */ 826 - return crtc && crtc->active && crtc->base.primary->state->fb && 827 - crtc->config->hw.adjusted_mode.crtc_clock; 828 - } 829 - 830 - static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) 831 - { 832 - struct intel_crtc *crtc, *enabled = NULL; 833 - 834 - for_each_intel_crtc(&dev_priv->drm, crtc) { 835 - if (intel_crtc_active(crtc)) { 836 - if (enabled) 837 - return NULL; 838 - enabled = crtc; 839 - } 840 - } 841 - 842 - return enabled; 843 - } 844 - 845 - static void pnv_update_wm(struct drm_i915_private *dev_priv) 846 - { 847 - struct intel_crtc *crtc; 848 - const struct cxsr_latency *latency; 849 - u32 reg; 850 - unsigned int wm; 851 - 852 - latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv), 853 - dev_priv->is_ddr3, 854 - dev_priv->fsb_freq, 855 - dev_priv->mem_freq); 856 - if (!latency) { 857 - drm_dbg_kms(&dev_priv->drm, 858 - "Unknown FSB/MEM found, disable CxSR\n"); 859 - intel_set_memory_cxsr(dev_priv, false); 860 - return; 861 - } 862 - 863 - crtc = single_enabled_crtc(dev_priv); 864 - if (crtc) { 865 - const struct drm_framebuffer *fb = 866 - crtc->base.primary->state->fb; 867 - int pixel_rate = crtc->config->pixel_rate; 868 - int cpp = fb->format->cpp[0]; 869 - 870 - /* Display SR */ 871 - wm = intel_calculate_wm(pixel_rate, &pnv_display_wm, 872 - pnv_display_wm.fifo_size, 873 - cpp, latency->display_sr); 874 - reg = intel_uncore_read(&dev_priv->uncore, DSPFW1); 875 - reg &= ~DSPFW_SR_MASK; 876 - reg |= FW_WM(wm, SR); 877 - intel_uncore_write(&dev_priv->uncore, DSPFW1, reg); 878 - drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); 879 - 880 - /* cursor SR */ 881 - wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm, 882 - pnv_display_wm.fifo_size, 883 - 4, latency->cursor_sr); 884 - intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK, 885 - FW_WM(wm, CURSOR_SR)); 886 - 887 - /* Display HPLL off SR */ 888 - wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm, 889 - 
pnv_display_hplloff_wm.fifo_size, 890 - cpp, latency->display_hpll_disable); 891 - intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR)); 892 - 893 - /* cursor HPLL off SR */ 894 - wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm, 895 - pnv_display_hplloff_wm.fifo_size, 896 - 4, latency->cursor_hpll_disable); 897 - reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); 898 - reg &= ~DSPFW_HPLL_CURSOR_MASK; 899 - reg |= FW_WM(wm, HPLL_CURSOR); 900 - intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); 901 - drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg); 902 - 903 - intel_set_memory_cxsr(dev_priv, true); 904 - } else { 905 - intel_set_memory_cxsr(dev_priv, false); 906 - } 907 - } 908 - 909 - /* 910 - * Documentation says: 911 - * "If the line size is small, the TLB fetches can get in the way of the 912 - * data fetches, causing some lag in the pixel data return which is not 913 - * accounted for in the above formulas. The following adjustment only 914 - * needs to be applied if eight whole lines fit in the buffer at once. 915 - * The WM is adjusted upwards by the difference between the FIFO size 916 - * and the size of 8 whole lines. This adjustment is always performed 917 - * in the actual pixel depth regardless of whether FBC is enabled or not." 
918 - */ 919 - static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp) 920 - { 921 - int tlb_miss = fifo_size * 64 - width * cpp * 8; 922 - 923 - return max(0, tlb_miss); 924 - } 925 - 926 - static void g4x_write_wm_values(struct drm_i915_private *dev_priv, 927 - const struct g4x_wm_values *wm) 928 - { 929 - enum pipe pipe; 930 - 931 - for_each_pipe(dev_priv, pipe) 932 - trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); 933 - 934 - intel_uncore_write(&dev_priv->uncore, DSPFW1, 935 - FW_WM(wm->sr.plane, SR) | 936 - FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | 937 - FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | 938 - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); 939 - intel_uncore_write(&dev_priv->uncore, DSPFW2, 940 - (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) | 941 - FW_WM(wm->sr.fbc, FBC_SR) | 942 - FW_WM(wm->hpll.fbc, FBC_HPLL_SR) | 943 - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) | 944 - FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | 945 - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); 946 - intel_uncore_write(&dev_priv->uncore, DSPFW3, 947 - (wm->hpll_en ? 
DSPFW_HPLL_SR_EN : 0) | 948 - FW_WM(wm->sr.cursor, CURSOR_SR) | 949 - FW_WM(wm->hpll.cursor, HPLL_CURSOR) | 950 - FW_WM(wm->hpll.plane, HPLL_SR)); 951 - 952 - intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); 953 - } 954 - 955 - #define FW_WM_VLV(value, plane) \ 956 - (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) 957 - 958 - static void vlv_write_wm_values(struct drm_i915_private *dev_priv, 959 - const struct vlv_wm_values *wm) 960 - { 961 - enum pipe pipe; 962 - 963 - for_each_pipe(dev_priv, pipe) { 964 - trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); 965 - 966 - intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), 967 - (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | 968 - (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | 969 - (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | 970 - (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); 971 - } 972 - 973 - /* 974 - * Zero the (unused) WM1 watermarks, and also clear all the 975 - * high order bits so that there are no out of bounds values 976 - * present in the registers during the reprogramming. 
977 - */ 978 - intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0); 979 - intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0); 980 - intel_uncore_write(&dev_priv->uncore, DSPFW4, 0); 981 - intel_uncore_write(&dev_priv->uncore, DSPFW5, 0); 982 - intel_uncore_write(&dev_priv->uncore, DSPFW6, 0); 983 - 984 - intel_uncore_write(&dev_priv->uncore, DSPFW1, 985 - FW_WM(wm->sr.plane, SR) | 986 - FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | 987 - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | 988 - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); 989 - intel_uncore_write(&dev_priv->uncore, DSPFW2, 990 - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | 991 - FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | 992 - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); 993 - intel_uncore_write(&dev_priv->uncore, DSPFW3, 994 - FW_WM(wm->sr.cursor, CURSOR_SR)); 995 - 996 - if (IS_CHERRYVIEW(dev_priv)) { 997 - intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV, 998 - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | 999 - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); 1000 - intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV, 1001 - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | 1002 - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); 1003 - intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV, 1004 - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | 1005 - FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); 1006 - intel_uncore_write(&dev_priv->uncore, DSPHOWM, 1007 - FW_WM(wm->sr.plane >> 9, SR_HI) | 1008 - FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | 1009 - FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | 1010 - FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) | 1011 - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | 1012 - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | 1013 - 
FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | 1014 - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | 1015 - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | 1016 - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); 1017 - } else { 1018 - intel_uncore_write(&dev_priv->uncore, DSPFW7, 1019 - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | 1020 - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); 1021 - intel_uncore_write(&dev_priv->uncore, DSPHOWM, 1022 - FW_WM(wm->sr.plane >> 9, SR_HI) | 1023 - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | 1024 - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | 1025 - FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | 1026 - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | 1027 - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | 1028 - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); 1029 - } 1030 - 1031 - intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); 1032 - } 1033 - 1034 - #undef FW_WM_VLV 1035 - 1036 - static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv) 1037 - { 1038 - /* all latencies in usec */ 1039 - dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; 1040 - dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12; 1041 - dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; 1042 - 1043 - dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL; 1044 - } 1045 - 1046 - static int g4x_plane_fifo_size(enum plane_id plane_id, int level) 1047 - { 1048 - /* 1049 - * DSPCNTR[13] supposedly controls whether the 1050 - * primary plane can use the FIFO space otherwise 1051 - * reserved for the sprite plane. It's not 100% clear 1052 - * what the actual FIFO size is, but it looks like we 1053 - * can happily set both primary and sprite watermarks 1054 - * up to 127 cachelines. 
So that would seem to mean 1055 - * that either DSPCNTR[13] doesn't do anything, or that 1056 - * the total FIFO is >= 256 cachelines in size. Either 1057 - * way, we don't seem to have to worry about this 1058 - * repartitioning as the maximum watermark value the 1059 - * register can hold for each plane is lower than the 1060 - * minimum FIFO size. 1061 - */ 1062 - switch (plane_id) { 1063 - case PLANE_CURSOR: 1064 - return 63; 1065 - case PLANE_PRIMARY: 1066 - return level == G4X_WM_LEVEL_NORMAL ? 127 : 511; 1067 - case PLANE_SPRITE0: 1068 - return level == G4X_WM_LEVEL_NORMAL ? 127 : 0; 1069 - default: 1070 - MISSING_CASE(plane_id); 1071 - return 0; 1072 - } 1073 - } 1074 - 1075 - static int g4x_fbc_fifo_size(int level) 1076 - { 1077 - switch (level) { 1078 - case G4X_WM_LEVEL_SR: 1079 - return 7; 1080 - case G4X_WM_LEVEL_HPLL: 1081 - return 15; 1082 - default: 1083 - MISSING_CASE(level); 1084 - return 0; 1085 - } 1086 - } 1087 - 1088 - static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, 1089 - const struct intel_plane_state *plane_state, 1090 - int level) 1091 - { 1092 - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 1093 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1094 - const struct drm_display_mode *pipe_mode = 1095 - &crtc_state->hw.pipe_mode; 1096 - unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10; 1097 - unsigned int pixel_rate, htotal, cpp, width, wm; 1098 - 1099 - if (latency == 0) 1100 - return USHRT_MAX; 1101 - 1102 - if (!intel_wm_plane_visible(crtc_state, plane_state)) 1103 - return 0; 1104 - 1105 - cpp = plane_state->hw.fb->format->cpp[0]; 1106 - 1107 - /* 1108 - * WaUse32BppForSRWM:ctg,elk 1109 - * 1110 - * The spec fails to list this restriction for the 1111 - * HPLL watermark, which seems a little strange. 1112 - * Let's use 32bpp for the HPLL watermark as well. 
1113 - */ 1114 - if (plane->id == PLANE_PRIMARY && 1115 - level != G4X_WM_LEVEL_NORMAL) 1116 - cpp = max(cpp, 4u); 1117 - 1118 - pixel_rate = crtc_state->pixel_rate; 1119 - htotal = pipe_mode->crtc_htotal; 1120 - width = drm_rect_width(&plane_state->uapi.src) >> 16; 1121 - 1122 - if (plane->id == PLANE_CURSOR) { 1123 - wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); 1124 - } else if (plane->id == PLANE_PRIMARY && 1125 - level == G4X_WM_LEVEL_NORMAL) { 1126 - wm = intel_wm_method1(pixel_rate, cpp, latency); 1127 - } else { 1128 - unsigned int small, large; 1129 - 1130 - small = intel_wm_method1(pixel_rate, cpp, latency); 1131 - large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); 1132 - 1133 - wm = min(small, large); 1134 - } 1135 - 1136 - wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level), 1137 - width, cpp); 1138 - 1139 - wm = DIV_ROUND_UP(wm, 64) + 2; 1140 - 1141 - return min_t(unsigned int, wm, USHRT_MAX); 1142 - } 1143 - 1144 - static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, 1145 - int level, enum plane_id plane_id, u16 value) 1146 - { 1147 - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1148 - bool dirty = false; 1149 - 1150 - for (; level < intel_wm_num_levels(dev_priv); level++) { 1151 - struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; 1152 - 1153 - dirty |= raw->plane[plane_id] != value; 1154 - raw->plane[plane_id] = value; 1155 - } 1156 - 1157 - return dirty; 1158 - } 1159 - 1160 - static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, 1161 - int level, u16 value) 1162 - { 1163 - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1164 - bool dirty = false; 1165 - 1166 - /* NORMAL level doesn't have an FBC watermark */ 1167 - level = max(level, G4X_WM_LEVEL_SR); 1168 - 1169 - for (; level < intel_wm_num_levels(dev_priv); level++) { 1170 - struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; 1171 - 1172 - dirty |= raw->fbc 
!= value; 1173 - raw->fbc = value; 1174 - } 1175 - 1176 - return dirty; 1177 - } 1178 - 1179 - static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, 1180 - const struct intel_plane_state *plane_state, 1181 - u32 pri_val); 1182 - 1183 - static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, 1184 - const struct intel_plane_state *plane_state) 1185 - { 1186 - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 1187 - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1188 - int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); 1189 - enum plane_id plane_id = plane->id; 1190 - bool dirty = false; 1191 - int level; 1192 - 1193 - if (!intel_wm_plane_visible(crtc_state, plane_state)) { 1194 - dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0); 1195 - if (plane_id == PLANE_PRIMARY) 1196 - dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0); 1197 - goto out; 1198 - } 1199 - 1200 - for (level = 0; level < num_levels; level++) { 1201 - struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; 1202 - int wm, max_wm; 1203 - 1204 - wm = g4x_compute_wm(crtc_state, plane_state, level); 1205 - max_wm = g4x_plane_fifo_size(plane_id, level); 1206 - 1207 - if (wm > max_wm) 1208 - break; 1209 - 1210 - dirty |= raw->plane[plane_id] != wm; 1211 - raw->plane[plane_id] = wm; 1212 - 1213 - if (plane_id != PLANE_PRIMARY || 1214 - level == G4X_WM_LEVEL_NORMAL) 1215 - continue; 1216 - 1217 - wm = ilk_compute_fbc_wm(crtc_state, plane_state, 1218 - raw->plane[plane_id]); 1219 - max_wm = g4x_fbc_fifo_size(level); 1220 - 1221 - /* 1222 - * FBC wm is not mandatory as we 1223 - * can always just disable its use. 
1224 - */ 1225 - if (wm > max_wm) 1226 - wm = USHRT_MAX; 1227 - 1228 - dirty |= raw->fbc != wm; 1229 - raw->fbc = wm; 1230 - } 1231 - 1232 - /* mark watermarks as invalid */ 1233 - dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); 1234 - 1235 - if (plane_id == PLANE_PRIMARY) 1236 - dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); 1237 - 1238 - out: 1239 - if (dirty) { 1240 - drm_dbg_kms(&dev_priv->drm, 1241 - "%s watermarks: normal=%d, SR=%d, HPLL=%d\n", 1242 - plane->base.name, 1243 - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], 1244 - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id], 1245 - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); 1246 - 1247 - if (plane_id == PLANE_PRIMARY) 1248 - drm_dbg_kms(&dev_priv->drm, 1249 - "FBC watermarks: SR=%d, HPLL=%d\n", 1250 - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, 1251 - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); 1252 - } 1253 - 1254 - return dirty; 1255 - } 1256 - 1257 - static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, 1258 - enum plane_id plane_id, int level) 1259 - { 1260 - const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; 1261 - 1262 - return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level); 1263 - } 1264 - 1265 - static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, 1266 - int level) 1267 - { 1268 - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1269 - 1270 - if (level > dev_priv->display.wm.max_level) 1271 - return false; 1272 - 1273 - return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && 1274 - g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && 1275 - g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); 1276 - } 1277 - 1278 - /* mark all levels starting from 'level' as invalid */ 1279 - static void g4x_invalidate_wms(struct intel_crtc *crtc, 1280 - struct g4x_wm_state *wm_state, int level) 1281 - { 
1282 - if (level <= G4X_WM_LEVEL_NORMAL) { 1283 - enum plane_id plane_id; 1284 - 1285 - for_each_plane_id_on_crtc(crtc, plane_id) 1286 - wm_state->wm.plane[plane_id] = USHRT_MAX; 1287 - } 1288 - 1289 - if (level <= G4X_WM_LEVEL_SR) { 1290 - wm_state->cxsr = false; 1291 - wm_state->sr.cursor = USHRT_MAX; 1292 - wm_state->sr.plane = USHRT_MAX; 1293 - wm_state->sr.fbc = USHRT_MAX; 1294 - } 1295 - 1296 - if (level <= G4X_WM_LEVEL_HPLL) { 1297 - wm_state->hpll_en = false; 1298 - wm_state->hpll.cursor = USHRT_MAX; 1299 - wm_state->hpll.plane = USHRT_MAX; 1300 - wm_state->hpll.fbc = USHRT_MAX; 1301 - } 1302 - } 1303 - 1304 - static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state, 1305 - int level) 1306 - { 1307 - if (level < G4X_WM_LEVEL_SR) 1308 - return false; 1309 - 1310 - if (level >= G4X_WM_LEVEL_SR && 1311 - wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR)) 1312 - return false; 1313 - 1314 - if (level >= G4X_WM_LEVEL_HPLL && 1315 - wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL)) 1316 - return false; 1317 - 1318 - return true; 1319 - } 1320 - 1321 - static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) 1322 - { 1323 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1324 - struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; 1325 - u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); 1326 - const struct g4x_pipe_wm *raw; 1327 - enum plane_id plane_id; 1328 - int level; 1329 - 1330 - level = G4X_WM_LEVEL_NORMAL; 1331 - if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) 1332 - goto out; 1333 - 1334 - raw = &crtc_state->wm.g4x.raw[level]; 1335 - for_each_plane_id_on_crtc(crtc, plane_id) 1336 - wm_state->wm.plane[plane_id] = raw->plane[plane_id]; 1337 - 1338 - level = G4X_WM_LEVEL_SR; 1339 - if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) 1340 - goto out; 1341 - 1342 - raw = &crtc_state->wm.g4x.raw[level]; 1343 - wm_state->sr.plane = raw->plane[PLANE_PRIMARY]; 1344 - wm_state->sr.cursor = 
raw->plane[PLANE_CURSOR]; 1345 - wm_state->sr.fbc = raw->fbc; 1346 - 1347 - wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY); 1348 - 1349 - level = G4X_WM_LEVEL_HPLL; 1350 - if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) 1351 - goto out; 1352 - 1353 - raw = &crtc_state->wm.g4x.raw[level]; 1354 - wm_state->hpll.plane = raw->plane[PLANE_PRIMARY]; 1355 - wm_state->hpll.cursor = raw->plane[PLANE_CURSOR]; 1356 - wm_state->hpll.fbc = raw->fbc; 1357 - 1358 - wm_state->hpll_en = wm_state->cxsr; 1359 - 1360 - level++; 1361 - 1362 - out: 1363 - if (level == G4X_WM_LEVEL_NORMAL) 1364 - return -EINVAL; 1365 - 1366 - /* invalidate the higher levels */ 1367 - g4x_invalidate_wms(crtc, wm_state, level); 1368 - 1369 - /* 1370 - * Determine if the FBC watermark(s) can be used. IF 1371 - * this isn't the case we prefer to disable the FBC 1372 - * watermark(s) rather than disable the SR/HPLL 1373 - * level(s) entirely. 'level-1' is the highest valid 1374 - * level here. 1375 - */ 1376 - wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1); 1377 - 1378 - return 0; 1379 - } 1380 - 1381 - static int g4x_compute_pipe_wm(struct intel_atomic_state *state, 1382 - struct intel_crtc *crtc) 1383 - { 1384 - struct intel_crtc_state *crtc_state = 1385 - intel_atomic_get_new_crtc_state(state, crtc); 1386 - const struct intel_plane_state *old_plane_state; 1387 - const struct intel_plane_state *new_plane_state; 1388 - struct intel_plane *plane; 1389 - unsigned int dirty = 0; 1390 - int i; 1391 - 1392 - for_each_oldnew_intel_plane_in_state(state, plane, 1393 - old_plane_state, 1394 - new_plane_state, i) { 1395 - if (new_plane_state->hw.crtc != &crtc->base && 1396 - old_plane_state->hw.crtc != &crtc->base) 1397 - continue; 1398 - 1399 - if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state)) 1400 - dirty |= BIT(plane->id); 1401 - } 1402 - 1403 - if (!dirty) 1404 - return 0; 1405 - 1406 - return _g4x_compute_pipe_wm(crtc_state); 1407 - } 1408 - 1409 - static int 
g4x_compute_intermediate_wm(struct intel_atomic_state *state, 1410 - struct intel_crtc *crtc) 1411 - { 1412 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1413 - struct intel_crtc_state *new_crtc_state = 1414 - intel_atomic_get_new_crtc_state(state, crtc); 1415 - const struct intel_crtc_state *old_crtc_state = 1416 - intel_atomic_get_old_crtc_state(state, crtc); 1417 - struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; 1418 - const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; 1419 - const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal; 1420 - enum plane_id plane_id; 1421 - 1422 - if (!new_crtc_state->hw.active || 1423 - intel_crtc_needs_modeset(new_crtc_state)) { 1424 - *intermediate = *optimal; 1425 - 1426 - intermediate->cxsr = false; 1427 - intermediate->hpll_en = false; 1428 - goto out; 1429 - } 1430 - 1431 - intermediate->cxsr = optimal->cxsr && active->cxsr && 1432 - !new_crtc_state->disable_cxsr; 1433 - intermediate->hpll_en = optimal->hpll_en && active->hpll_en && 1434 - !new_crtc_state->disable_cxsr; 1435 - intermediate->fbc_en = optimal->fbc_en && active->fbc_en; 1436 - 1437 - for_each_plane_id_on_crtc(crtc, plane_id) { 1438 - intermediate->wm.plane[plane_id] = 1439 - max(optimal->wm.plane[plane_id], 1440 - active->wm.plane[plane_id]); 1441 - 1442 - drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] > 1443 - g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL)); 1444 - } 1445 - 1446 - intermediate->sr.plane = max(optimal->sr.plane, 1447 - active->sr.plane); 1448 - intermediate->sr.cursor = max(optimal->sr.cursor, 1449 - active->sr.cursor); 1450 - intermediate->sr.fbc = max(optimal->sr.fbc, 1451 - active->sr.fbc); 1452 - 1453 - intermediate->hpll.plane = max(optimal->hpll.plane, 1454 - active->hpll.plane); 1455 - intermediate->hpll.cursor = max(optimal->hpll.cursor, 1456 - active->hpll.cursor); 1457 - intermediate->hpll.fbc = max(optimal->hpll.fbc, 1458 - active->hpll.fbc); 
1459 - 1460 - drm_WARN_ON(&dev_priv->drm, 1461 - (intermediate->sr.plane > 1462 - g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || 1463 - intermediate->sr.cursor > 1464 - g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && 1465 - intermediate->cxsr); 1466 - drm_WARN_ON(&dev_priv->drm, 1467 - (intermediate->sr.plane > 1468 - g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || 1469 - intermediate->sr.cursor > 1470 - g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && 1471 - intermediate->hpll_en); 1472 - 1473 - drm_WARN_ON(&dev_priv->drm, 1474 - intermediate->sr.fbc > g4x_fbc_fifo_size(1) && 1475 - intermediate->fbc_en && intermediate->cxsr); 1476 - drm_WARN_ON(&dev_priv->drm, 1477 - intermediate->hpll.fbc > g4x_fbc_fifo_size(2) && 1478 - intermediate->fbc_en && intermediate->hpll_en); 1479 - 1480 - out: 1481 - /* 1482 - * If our intermediate WM are identical to the final WM, then we can 1483 - * omit the post-vblank programming; only update if it's different. 1484 - */ 1485 - if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) 1486 - new_crtc_state->wm.need_postvbl_update = true; 1487 - 1488 - return 0; 1489 - } 1490 - 1491 - static void g4x_merge_wm(struct drm_i915_private *dev_priv, 1492 - struct g4x_wm_values *wm) 1493 - { 1494 - struct intel_crtc *crtc; 1495 - int num_active_pipes = 0; 1496 - 1497 - wm->cxsr = true; 1498 - wm->hpll_en = true; 1499 - wm->fbc_en = true; 1500 - 1501 - for_each_intel_crtc(&dev_priv->drm, crtc) { 1502 - const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; 1503 - 1504 - if (!crtc->active) 1505 - continue; 1506 - 1507 - if (!wm_state->cxsr) 1508 - wm->cxsr = false; 1509 - if (!wm_state->hpll_en) 1510 - wm->hpll_en = false; 1511 - if (!wm_state->fbc_en) 1512 - wm->fbc_en = false; 1513 - 1514 - num_active_pipes++; 1515 - } 1516 - 1517 - if (num_active_pipes != 1) { 1518 - wm->cxsr = false; 1519 - wm->hpll_en = false; 1520 - wm->fbc_en = false; 1521 - } 1522 - 1523 - for_each_intel_crtc(&dev_priv->drm, 
crtc) { 1524 - const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; 1525 - enum pipe pipe = crtc->pipe; 1526 - 1527 - wm->pipe[pipe] = wm_state->wm; 1528 - if (crtc->active && wm->cxsr) 1529 - wm->sr = wm_state->sr; 1530 - if (crtc->active && wm->hpll_en) 1531 - wm->hpll = wm_state->hpll; 1532 - } 1533 - } 1534 - 1535 - static void g4x_program_watermarks(struct drm_i915_private *dev_priv) 1536 - { 1537 - struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x; 1538 - struct g4x_wm_values new_wm = {}; 1539 - 1540 - g4x_merge_wm(dev_priv, &new_wm); 1541 - 1542 - if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) 1543 - return; 1544 - 1545 - if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) 1546 - _intel_set_memory_cxsr(dev_priv, false); 1547 - 1548 - g4x_write_wm_values(dev_priv, &new_wm); 1549 - 1550 - if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) 1551 - _intel_set_memory_cxsr(dev_priv, true); 1552 - 1553 - *old_wm = new_wm; 1554 - } 1555 - 1556 - static void g4x_initial_watermarks(struct intel_atomic_state *state, 1557 - struct intel_crtc *crtc) 1558 - { 1559 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1560 - const struct intel_crtc_state *crtc_state = 1561 - intel_atomic_get_new_crtc_state(state, crtc); 1562 - 1563 - mutex_lock(&dev_priv->display.wm.wm_mutex); 1564 - crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; 1565 - g4x_program_watermarks(dev_priv); 1566 - mutex_unlock(&dev_priv->display.wm.wm_mutex); 1567 - } 1568 - 1569 - static void g4x_optimize_watermarks(struct intel_atomic_state *state, 1570 - struct intel_crtc *crtc) 1571 - { 1572 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1573 - const struct intel_crtc_state *crtc_state = 1574 - intel_atomic_get_new_crtc_state(state, crtc); 1575 - 1576 - if (!crtc_state->wm.need_postvbl_update) 1577 - return; 1578 - 1579 - mutex_lock(&dev_priv->display.wm.wm_mutex); 1580 - crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; 1581 - 
g4x_program_watermarks(dev_priv); 1582 - mutex_unlock(&dev_priv->display.wm.wm_mutex); 1583 - } 1584 - 1585 - /* latency must be in 0.1us units. */ 1586 - static unsigned int vlv_wm_method2(unsigned int pixel_rate, 1587 - unsigned int htotal, 1588 - unsigned int width, 1589 - unsigned int cpp, 1590 - unsigned int latency) 1591 - { 1592 - unsigned int ret; 1593 - 1594 - ret = intel_wm_method2(pixel_rate, htotal, 1595 - width, cpp, latency); 1596 - ret = DIV_ROUND_UP(ret, 64); 1597 - 1598 - return ret; 1599 - } 1600 - 1601 - static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) 1602 - { 1603 - /* all latencies in usec */ 1604 - dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; 1605 - 1606 - dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2; 1607 - 1608 - if (IS_CHERRYVIEW(dev_priv)) { 1609 - dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; 1610 - dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; 1611 - 1612 - dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS; 1613 - } 1614 - } 1615 - 1616 - static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, 1617 - const struct intel_plane_state *plane_state, 1618 - int level) 1619 - { 1620 - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 1621 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1622 - const struct drm_display_mode *pipe_mode = 1623 - &crtc_state->hw.pipe_mode; 1624 - unsigned int pixel_rate, htotal, cpp, width, wm; 1625 - 1626 - if (dev_priv->display.wm.pri_latency[level] == 0) 1627 - return USHRT_MAX; 1628 - 1629 - if (!intel_wm_plane_visible(crtc_state, plane_state)) 1630 - return 0; 1631 - 1632 - cpp = plane_state->hw.fb->format->cpp[0]; 1633 - pixel_rate = crtc_state->pixel_rate; 1634 - htotal = pipe_mode->crtc_htotal; 1635 - width = drm_rect_width(&plane_state->uapi.src) >> 16; 1636 - 1637 - if (plane->id == PLANE_CURSOR) { 1638 - /* 1639 - * FIXME the formula gives values that are 1640 - * too big for the 
cursor FIFO, and hence we 1641 - * would never be able to use cursors. For 1642 - * now just hardcode the watermark. 1643 - */ 1644 - wm = 63; 1645 - } else { 1646 - wm = vlv_wm_method2(pixel_rate, htotal, width, cpp, 1647 - dev_priv->display.wm.pri_latency[level] * 10); 1648 - } 1649 - 1650 - return min_t(unsigned int, wm, USHRT_MAX); 1651 - } 1652 - 1653 - static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes) 1654 - { 1655 - return (active_planes & (BIT(PLANE_SPRITE0) | 1656 - BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1); 1657 - } 1658 - 1659 - static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) 1660 - { 1661 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1662 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1663 - const struct g4x_pipe_wm *raw = 1664 - &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; 1665 - struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; 1666 - u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); 1667 - int num_active_planes = hweight8(active_planes); 1668 - const int fifo_size = 511; 1669 - int fifo_extra, fifo_left = fifo_size; 1670 - int sprite0_fifo_extra = 0; 1671 - unsigned int total_rate; 1672 - enum plane_id plane_id; 1673 - 1674 - /* 1675 - * When enabling sprite0 after sprite1 has already been enabled 1676 - * we tend to get an underrun unless sprite0 already has some 1677 - * FIFO space allcoated. Hence we always allocate at least one 1678 - * cacheline for sprite0 whenever sprite1 is enabled. 1679 - * 1680 - * All other plane enable sequences appear immune to this problem. 
1681 - */ 1682 - if (vlv_need_sprite0_fifo_workaround(active_planes)) 1683 - sprite0_fifo_extra = 1; 1684 - 1685 - total_rate = raw->plane[PLANE_PRIMARY] + 1686 - raw->plane[PLANE_SPRITE0] + 1687 - raw->plane[PLANE_SPRITE1] + 1688 - sprite0_fifo_extra; 1689 - 1690 - if (total_rate > fifo_size) 1691 - return -EINVAL; 1692 - 1693 - if (total_rate == 0) 1694 - total_rate = 1; 1695 - 1696 - for_each_plane_id_on_crtc(crtc, plane_id) { 1697 - unsigned int rate; 1698 - 1699 - if ((active_planes & BIT(plane_id)) == 0) { 1700 - fifo_state->plane[plane_id] = 0; 1701 - continue; 1702 - } 1703 - 1704 - rate = raw->plane[plane_id]; 1705 - fifo_state->plane[plane_id] = fifo_size * rate / total_rate; 1706 - fifo_left -= fifo_state->plane[plane_id]; 1707 - } 1708 - 1709 - fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra; 1710 - fifo_left -= sprite0_fifo_extra; 1711 - 1712 - fifo_state->plane[PLANE_CURSOR] = 63; 1713 - 1714 - fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1); 1715 - 1716 - /* spread the remainder evenly */ 1717 - for_each_plane_id_on_crtc(crtc, plane_id) { 1718 - int plane_extra; 1719 - 1720 - if (fifo_left == 0) 1721 - break; 1722 - 1723 - if ((active_planes & BIT(plane_id)) == 0) 1724 - continue; 1725 - 1726 - plane_extra = min(fifo_extra, fifo_left); 1727 - fifo_state->plane[plane_id] += plane_extra; 1728 - fifo_left -= plane_extra; 1729 - } 1730 - 1731 - drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0); 1732 - 1733 - /* give it all to the first plane if none are active */ 1734 - if (active_planes == 0) { 1735 - drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size); 1736 - fifo_state->plane[PLANE_PRIMARY] = fifo_left; 1737 - } 1738 - 1739 - return 0; 1740 - } 1741 - 1742 - /* mark all levels starting from 'level' as invalid */ 1743 - static void vlv_invalidate_wms(struct intel_crtc *crtc, 1744 - struct vlv_wm_state *wm_state, int level) 1745 - { 1746 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1747 - 1748 - for 
(; level < intel_wm_num_levels(dev_priv); level++) { 1749 - enum plane_id plane_id; 1750 - 1751 - for_each_plane_id_on_crtc(crtc, plane_id) 1752 - wm_state->wm[level].plane[plane_id] = USHRT_MAX; 1753 - 1754 - wm_state->sr[level].cursor = USHRT_MAX; 1755 - wm_state->sr[level].plane = USHRT_MAX; 1756 - } 1757 - } 1758 - 1759 - static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) 1760 - { 1761 - if (wm > fifo_size) 1762 - return USHRT_MAX; 1763 - else 1764 - return fifo_size - wm; 1765 - } 1766 - 1767 - /* 1768 - * Starting from 'level' set all higher 1769 - * levels to 'value' in the "raw" watermarks. 1770 - */ 1771 - static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, 1772 - int level, enum plane_id plane_id, u16 value) 1773 - { 1774 - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1775 - int num_levels = intel_wm_num_levels(dev_priv); 1776 - bool dirty = false; 1777 - 1778 - for (; level < num_levels; level++) { 1779 - struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; 1780 - 1781 - dirty |= raw->plane[plane_id] != value; 1782 - raw->plane[plane_id] = value; 1783 - } 1784 - 1785 - return dirty; 1786 - } 1787 - 1788 - static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, 1789 - const struct intel_plane_state *plane_state) 1790 - { 1791 - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 1792 - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 1793 - enum plane_id plane_id = plane->id; 1794 - int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); 1795 - int level; 1796 - bool dirty = false; 1797 - 1798 - if (!intel_wm_plane_visible(crtc_state, plane_state)) { 1799 - dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0); 1800 - goto out; 1801 - } 1802 - 1803 - for (level = 0; level < num_levels; level++) { 1804 - struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; 1805 - int wm = vlv_compute_wm_level(crtc_state, plane_state, level); 
1806 - int max_wm = plane_id == PLANE_CURSOR ? 63 : 511; 1807 - 1808 - if (wm > max_wm) 1809 - break; 1810 - 1811 - dirty |= raw->plane[plane_id] != wm; 1812 - raw->plane[plane_id] = wm; 1813 - } 1814 - 1815 - /* mark all higher levels as invalid */ 1816 - dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); 1817 - 1818 - out: 1819 - if (dirty) 1820 - drm_dbg_kms(&dev_priv->drm, 1821 - "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", 1822 - plane->base.name, 1823 - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], 1824 - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], 1825 - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); 1826 - 1827 - return dirty; 1828 - } 1829 - 1830 - static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, 1831 - enum plane_id plane_id, int level) 1832 - { 1833 - const struct g4x_pipe_wm *raw = 1834 - &crtc_state->wm.vlv.raw[level]; 1835 - const struct vlv_fifo_state *fifo_state = 1836 - &crtc_state->wm.vlv.fifo_state; 1837 - 1838 - return raw->plane[plane_id] <= fifo_state->plane[plane_id]; 1839 - } 1840 - 1841 - static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level) 1842 - { 1843 - return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && 1844 - vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && 1845 - vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) && 1846 - vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); 1847 - } 1848 - 1849 - static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) 1850 - { 1851 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1852 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1853 - struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; 1854 - const struct vlv_fifo_state *fifo_state = 1855 - &crtc_state->wm.vlv.fifo_state; 1856 - u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); 1857 - int 
num_active_planes = hweight8(active_planes); 1858 - enum plane_id plane_id; 1859 - int level; 1860 - 1861 - /* initially allow all levels */ 1862 - wm_state->num_levels = intel_wm_num_levels(dev_priv); 1863 - /* 1864 - * Note that enabling cxsr with no primary/sprite planes 1865 - * enabled can wedge the pipe. Hence we only allow cxsr 1866 - * with exactly one enabled primary/sprite plane. 1867 - */ 1868 - wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1; 1869 - 1870 - for (level = 0; level < wm_state->num_levels; level++) { 1871 - const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; 1872 - const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1; 1873 - 1874 - if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) 1875 - break; 1876 - 1877 - for_each_plane_id_on_crtc(crtc, plane_id) { 1878 - wm_state->wm[level].plane[plane_id] = 1879 - vlv_invert_wm_value(raw->plane[plane_id], 1880 - fifo_state->plane[plane_id]); 1881 - } 1882 - 1883 - wm_state->sr[level].plane = 1884 - vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY], 1885 - raw->plane[PLANE_SPRITE0], 1886 - raw->plane[PLANE_SPRITE1]), 1887 - sr_fifo_size); 1888 - 1889 - wm_state->sr[level].cursor = 1890 - vlv_invert_wm_value(raw->plane[PLANE_CURSOR], 1891 - 63); 1892 - } 1893 - 1894 - if (level == 0) 1895 - return -EINVAL; 1896 - 1897 - /* limit to only levels we can actually handle */ 1898 - wm_state->num_levels = level; 1899 - 1900 - /* invalidate the higher levels */ 1901 - vlv_invalidate_wms(crtc, wm_state, level); 1902 - 1903 - return 0; 1904 - } 1905 - 1906 - static int vlv_compute_pipe_wm(struct intel_atomic_state *state, 1907 - struct intel_crtc *crtc) 1908 - { 1909 - struct intel_crtc_state *crtc_state = 1910 - intel_atomic_get_new_crtc_state(state, crtc); 1911 - const struct intel_plane_state *old_plane_state; 1912 - const struct intel_plane_state *new_plane_state; 1913 - struct intel_plane *plane; 1914 - unsigned int dirty = 0; 1915 - int i; 1916 - 1917 - 
for_each_oldnew_intel_plane_in_state(state, plane, 1918 - old_plane_state, 1919 - new_plane_state, i) { 1920 - if (new_plane_state->hw.crtc != &crtc->base && 1921 - old_plane_state->hw.crtc != &crtc->base) 1922 - continue; 1923 - 1924 - if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state)) 1925 - dirty |= BIT(plane->id); 1926 - } 1927 - 1928 - /* 1929 - * DSPARB registers may have been reset due to the 1930 - * power well being turned off. Make sure we restore 1931 - * them to a consistent state even if no primary/sprite 1932 - * planes are initially active. We also force a FIFO 1933 - * recomputation so that we are sure to sanitize the 1934 - * FIFO setting we took over from the BIOS even if there 1935 - * are no active planes on the crtc. 1936 - */ 1937 - if (intel_crtc_needs_modeset(crtc_state)) 1938 - dirty = ~0; 1939 - 1940 - if (!dirty) 1941 - return 0; 1942 - 1943 - /* cursor changes don't warrant a FIFO recompute */ 1944 - if (dirty & ~BIT(PLANE_CURSOR)) { 1945 - const struct intel_crtc_state *old_crtc_state = 1946 - intel_atomic_get_old_crtc_state(state, crtc); 1947 - const struct vlv_fifo_state *old_fifo_state = 1948 - &old_crtc_state->wm.vlv.fifo_state; 1949 - const struct vlv_fifo_state *new_fifo_state = 1950 - &crtc_state->wm.vlv.fifo_state; 1951 - int ret; 1952 - 1953 - ret = vlv_compute_fifo(crtc_state); 1954 - if (ret) 1955 - return ret; 1956 - 1957 - if (intel_crtc_needs_modeset(crtc_state) || 1958 - memcmp(old_fifo_state, new_fifo_state, 1959 - sizeof(*new_fifo_state)) != 0) 1960 - crtc_state->fifo_changed = true; 1961 - } 1962 - 1963 - return _vlv_compute_pipe_wm(crtc_state); 1964 - } 1965 - 1966 - #define VLV_FIFO(plane, value) \ 1967 - (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) 1968 - 1969 - static void vlv_atomic_update_fifo(struct intel_atomic_state *state, 1970 - struct intel_crtc *crtc) 1971 - { 1972 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1973 - struct intel_uncore *uncore = 
&dev_priv->uncore; 1974 - const struct intel_crtc_state *crtc_state = 1975 - intel_atomic_get_new_crtc_state(state, crtc); 1976 - const struct vlv_fifo_state *fifo_state = 1977 - &crtc_state->wm.vlv.fifo_state; 1978 - int sprite0_start, sprite1_start, fifo_size; 1979 - u32 dsparb, dsparb2, dsparb3; 1980 - 1981 - if (!crtc_state->fifo_changed) 1982 - return; 1983 - 1984 - sprite0_start = fifo_state->plane[PLANE_PRIMARY]; 1985 - sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start; 1986 - fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start; 1987 - 1988 - drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63); 1989 - drm_WARN_ON(&dev_priv->drm, fifo_size != 511); 1990 - 1991 - trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size); 1992 - 1993 - /* 1994 - * uncore.lock serves a double purpose here. It allows us to 1995 - * use the less expensive I915_{READ,WRITE}_FW() functions, and 1996 - * it protects the DSPARB registers from getting clobbered by 1997 - * parallel updates from multiple pipes. 1998 - * 1999 - * intel_pipe_update_start() has already disabled interrupts 2000 - * for us, so a plain spin_lock() is sufficient here. 
2001 - */ 2002 - spin_lock(&uncore->lock); 2003 - 2004 - switch (crtc->pipe) { 2005 - case PIPE_A: 2006 - dsparb = intel_uncore_read_fw(uncore, DSPARB); 2007 - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); 2008 - 2009 - dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | 2010 - VLV_FIFO(SPRITEB, 0xff)); 2011 - dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | 2012 - VLV_FIFO(SPRITEB, sprite1_start)); 2013 - 2014 - dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | 2015 - VLV_FIFO(SPRITEB_HI, 0x1)); 2016 - dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | 2017 - VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); 2018 - 2019 - intel_uncore_write_fw(uncore, DSPARB, dsparb); 2020 - intel_uncore_write_fw(uncore, DSPARB2, dsparb2); 2021 - break; 2022 - case PIPE_B: 2023 - dsparb = intel_uncore_read_fw(uncore, DSPARB); 2024 - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); 2025 - 2026 - dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | 2027 - VLV_FIFO(SPRITED, 0xff)); 2028 - dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | 2029 - VLV_FIFO(SPRITED, sprite1_start)); 2030 - 2031 - dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | 2032 - VLV_FIFO(SPRITED_HI, 0xff)); 2033 - dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | 2034 - VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); 2035 - 2036 - intel_uncore_write_fw(uncore, DSPARB, dsparb); 2037 - intel_uncore_write_fw(uncore, DSPARB2, dsparb2); 2038 - break; 2039 - case PIPE_C: 2040 - dsparb3 = intel_uncore_read_fw(uncore, DSPARB3); 2041 - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); 2042 - 2043 - dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | 2044 - VLV_FIFO(SPRITEF, 0xff)); 2045 - dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | 2046 - VLV_FIFO(SPRITEF, sprite1_start)); 2047 - 2048 - dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | 2049 - VLV_FIFO(SPRITEF_HI, 0xff)); 2050 - dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | 2051 - VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); 2052 - 2053 - intel_uncore_write_fw(uncore, DSPARB3, dsparb3); 2054 - intel_uncore_write_fw(uncore, DSPARB2, 
dsparb2); 2055 - break; 2056 - default: 2057 - break; 2058 - } 2059 - 2060 - intel_uncore_posting_read_fw(uncore, DSPARB); 2061 - 2062 - spin_unlock(&uncore->lock); 2063 - } 2064 - 2065 - #undef VLV_FIFO 2066 - 2067 - static int vlv_compute_intermediate_wm(struct intel_atomic_state *state, 2068 - struct intel_crtc *crtc) 2069 - { 2070 - struct intel_crtc_state *new_crtc_state = 2071 - intel_atomic_get_new_crtc_state(state, crtc); 2072 - const struct intel_crtc_state *old_crtc_state = 2073 - intel_atomic_get_old_crtc_state(state, crtc); 2074 - struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; 2075 - const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; 2076 - const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal; 2077 - int level; 2078 - 2079 - if (!new_crtc_state->hw.active || 2080 - intel_crtc_needs_modeset(new_crtc_state)) { 2081 - *intermediate = *optimal; 2082 - 2083 - intermediate->cxsr = false; 2084 - goto out; 2085 - } 2086 - 2087 - intermediate->num_levels = min(optimal->num_levels, active->num_levels); 2088 - intermediate->cxsr = optimal->cxsr && active->cxsr && 2089 - !new_crtc_state->disable_cxsr; 2090 - 2091 - for (level = 0; level < intermediate->num_levels; level++) { 2092 - enum plane_id plane_id; 2093 - 2094 - for_each_plane_id_on_crtc(crtc, plane_id) { 2095 - intermediate->wm[level].plane[plane_id] = 2096 - min(optimal->wm[level].plane[plane_id], 2097 - active->wm[level].plane[plane_id]); 2098 - } 2099 - 2100 - intermediate->sr[level].plane = min(optimal->sr[level].plane, 2101 - active->sr[level].plane); 2102 - intermediate->sr[level].cursor = min(optimal->sr[level].cursor, 2103 - active->sr[level].cursor); 2104 - } 2105 - 2106 - vlv_invalidate_wms(crtc, intermediate, level); 2107 - 2108 - out: 2109 - /* 2110 - * If our intermediate WM are identical to the final WM, then we can 2111 - * omit the post-vblank programming; only update if it's different. 
2112 - */ 2113 - if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) 2114 - new_crtc_state->wm.need_postvbl_update = true; 2115 - 2116 - return 0; 2117 - } 2118 - 2119 - static void vlv_merge_wm(struct drm_i915_private *dev_priv, 2120 - struct vlv_wm_values *wm) 2121 - { 2122 - struct intel_crtc *crtc; 2123 - int num_active_pipes = 0; 2124 - 2125 - wm->level = dev_priv->display.wm.max_level; 2126 - wm->cxsr = true; 2127 - 2128 - for_each_intel_crtc(&dev_priv->drm, crtc) { 2129 - const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; 2130 - 2131 - if (!crtc->active) 2132 - continue; 2133 - 2134 - if (!wm_state->cxsr) 2135 - wm->cxsr = false; 2136 - 2137 - num_active_pipes++; 2138 - wm->level = min_t(int, wm->level, wm_state->num_levels - 1); 2139 - } 2140 - 2141 - if (num_active_pipes != 1) 2142 - wm->cxsr = false; 2143 - 2144 - if (num_active_pipes > 1) 2145 - wm->level = VLV_WM_LEVEL_PM2; 2146 - 2147 - for_each_intel_crtc(&dev_priv->drm, crtc) { 2148 - const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; 2149 - enum pipe pipe = crtc->pipe; 2150 - 2151 - wm->pipe[pipe] = wm_state->wm[wm->level]; 2152 - if (crtc->active && wm->cxsr) 2153 - wm->sr = wm_state->sr[wm->level]; 2154 - 2155 - wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2; 2156 - wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2; 2157 - wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2; 2158 - wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2; 2159 - } 2160 - } 2161 - 2162 - static void vlv_program_watermarks(struct drm_i915_private *dev_priv) 2163 - { 2164 - struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv; 2165 - struct vlv_wm_values new_wm = {}; 2166 - 2167 - vlv_merge_wm(dev_priv, &new_wm); 2168 - 2169 - if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) 2170 - return; 2171 - 2172 - if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) 2173 - chv_set_memory_dvfs(dev_priv, false); 2174 - 2175 - if 
(is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) 2176 - chv_set_memory_pm5(dev_priv, false); 2177 - 2178 - if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) 2179 - _intel_set_memory_cxsr(dev_priv, false); 2180 - 2181 - vlv_write_wm_values(dev_priv, &new_wm); 2182 - 2183 - if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) 2184 - _intel_set_memory_cxsr(dev_priv, true); 2185 - 2186 - if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) 2187 - chv_set_memory_pm5(dev_priv, true); 2188 - 2189 - if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) 2190 - chv_set_memory_dvfs(dev_priv, true); 2191 - 2192 - *old_wm = new_wm; 2193 - } 2194 - 2195 - static void vlv_initial_watermarks(struct intel_atomic_state *state, 2196 - struct intel_crtc *crtc) 2197 - { 2198 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2199 - const struct intel_crtc_state *crtc_state = 2200 - intel_atomic_get_new_crtc_state(state, crtc); 2201 - 2202 - mutex_lock(&dev_priv->display.wm.wm_mutex); 2203 - crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; 2204 - vlv_program_watermarks(dev_priv); 2205 - mutex_unlock(&dev_priv->display.wm.wm_mutex); 2206 - } 2207 - 2208 - static void vlv_optimize_watermarks(struct intel_atomic_state *state, 2209 - struct intel_crtc *crtc) 2210 - { 2211 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2212 - const struct intel_crtc_state *crtc_state = 2213 - intel_atomic_get_new_crtc_state(state, crtc); 2214 - 2215 - if (!crtc_state->wm.need_postvbl_update) 2216 - return; 2217 - 2218 - mutex_lock(&dev_priv->display.wm.wm_mutex); 2219 - crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; 2220 - vlv_program_watermarks(dev_priv); 2221 - mutex_unlock(&dev_priv->display.wm.wm_mutex); 2222 - } 2223 - 2224 - static void i965_update_wm(struct drm_i915_private *dev_priv) 2225 - { 2226 - struct intel_crtc *crtc; 2227 - int srwm = 1; 2228 - int cursor_sr = 16; 2229 - bool cxsr_enabled; 2230 - 2231 - /* Calc sr 
entries for one plane configs */ 2232 - crtc = single_enabled_crtc(dev_priv); 2233 - if (crtc) { 2234 - /* self-refresh has much higher latency */ 2235 - static const int sr_latency_ns = 12000; 2236 - const struct drm_display_mode *pipe_mode = 2237 - &crtc->config->hw.pipe_mode; 2238 - const struct drm_framebuffer *fb = 2239 - crtc->base.primary->state->fb; 2240 - int pixel_rate = crtc->config->pixel_rate; 2241 - int htotal = pipe_mode->crtc_htotal; 2242 - int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; 2243 - int cpp = fb->format->cpp[0]; 2244 - int entries; 2245 - 2246 - entries = intel_wm_method2(pixel_rate, htotal, 2247 - width, cpp, sr_latency_ns / 100); 2248 - entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); 2249 - srwm = I965_FIFO_SIZE - entries; 2250 - if (srwm < 0) 2251 - srwm = 1; 2252 - srwm &= 0x1ff; 2253 - drm_dbg_kms(&dev_priv->drm, 2254 - "self-refresh entries: %d, wm: %d\n", 2255 - entries, srwm); 2256 - 2257 - entries = intel_wm_method2(pixel_rate, htotal, 2258 - crtc->base.cursor->state->crtc_w, 4, 2259 - sr_latency_ns / 100); 2260 - entries = DIV_ROUND_UP(entries, 2261 - i965_cursor_wm_info.cacheline_size) + 2262 - i965_cursor_wm_info.guard_size; 2263 - 2264 - cursor_sr = i965_cursor_wm_info.fifo_size - entries; 2265 - if (cursor_sr > i965_cursor_wm_info.max_wm) 2266 - cursor_sr = i965_cursor_wm_info.max_wm; 2267 - 2268 - drm_dbg_kms(&dev_priv->drm, 2269 - "self-refresh watermark: display plane %d " 2270 - "cursor %d\n", srwm, cursor_sr); 2271 - 2272 - cxsr_enabled = true; 2273 - } else { 2274 - cxsr_enabled = false; 2275 - /* Turn off self refresh if both pipes are enabled */ 2276 - intel_set_memory_cxsr(dev_priv, false); 2277 - } 2278 - 2279 - drm_dbg_kms(&dev_priv->drm, 2280 - "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 2281 - srwm); 2282 - 2283 - /* 965 has limitations... 
*/ 2284 - intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) | 2285 - FW_WM(8, CURSORB) | 2286 - FW_WM(8, PLANEB) | 2287 - FW_WM(8, PLANEA)); 2288 - intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) | 2289 - FW_WM(8, PLANEC_OLD)); 2290 - /* update cursor SR watermark */ 2291 - intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); 2292 - 2293 - if (cxsr_enabled) 2294 - intel_set_memory_cxsr(dev_priv, true); 2295 - } 2296 - 2297 - #undef FW_WM 2298 - 2299 - static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, 2300 - enum i9xx_plane_id i9xx_plane) 2301 - { 2302 - struct intel_plane *plane; 2303 - 2304 - for_each_intel_plane(&i915->drm, plane) { 2305 - if (plane->id == PLANE_PRIMARY && 2306 - plane->i9xx_plane == i9xx_plane) 2307 - return intel_crtc_for_pipe(i915, plane->pipe); 2308 - } 2309 - 2310 - return NULL; 2311 - } 2312 - 2313 - static void i9xx_update_wm(struct drm_i915_private *dev_priv) 2314 - { 2315 - const struct intel_watermark_params *wm_info; 2316 - u32 fwater_lo; 2317 - u32 fwater_hi; 2318 - int cwm, srwm = 1; 2319 - int fifo_size; 2320 - int planea_wm, planeb_wm; 2321 - struct intel_crtc *crtc; 2322 - 2323 - if (IS_I945GM(dev_priv)) 2324 - wm_info = &i945_wm_info; 2325 - else if (DISPLAY_VER(dev_priv) != 2) 2326 - wm_info = &i915_wm_info; 2327 - else 2328 - wm_info = &i830_a_wm_info; 2329 - 2330 - if (DISPLAY_VER(dev_priv) == 2) 2331 - fifo_size = i830_get_fifo_size(dev_priv, PLANE_A); 2332 - else 2333 - fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A); 2334 - crtc = intel_crtc_for_plane(dev_priv, PLANE_A); 2335 - if (intel_crtc_active(crtc)) { 2336 - const struct drm_framebuffer *fb = 2337 - crtc->base.primary->state->fb; 2338 - int cpp; 2339 - 2340 - if (DISPLAY_VER(dev_priv) == 2) 2341 - cpp = 4; 2342 - else 2343 - cpp = fb->format->cpp[0]; 2344 - 2345 - planea_wm = intel_calculate_wm(crtc->config->pixel_rate, 2346 - wm_info, fifo_size, cpp, 2347 - pessimal_latency_ns); 2348 - 
} else { 2349 - planea_wm = fifo_size - wm_info->guard_size; 2350 - if (planea_wm > (long)wm_info->max_wm) 2351 - planea_wm = wm_info->max_wm; 2352 - } 2353 - 2354 - if (DISPLAY_VER(dev_priv) == 2) 2355 - wm_info = &i830_bc_wm_info; 2356 - 2357 - if (DISPLAY_VER(dev_priv) == 2) 2358 - fifo_size = i830_get_fifo_size(dev_priv, PLANE_B); 2359 - else 2360 - fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); 2361 - crtc = intel_crtc_for_plane(dev_priv, PLANE_B); 2362 - if (intel_crtc_active(crtc)) { 2363 - const struct drm_framebuffer *fb = 2364 - crtc->base.primary->state->fb; 2365 - int cpp; 2366 - 2367 - if (DISPLAY_VER(dev_priv) == 2) 2368 - cpp = 4; 2369 - else 2370 - cpp = fb->format->cpp[0]; 2371 - 2372 - planeb_wm = intel_calculate_wm(crtc->config->pixel_rate, 2373 - wm_info, fifo_size, cpp, 2374 - pessimal_latency_ns); 2375 - } else { 2376 - planeb_wm = fifo_size - wm_info->guard_size; 2377 - if (planeb_wm > (long)wm_info->max_wm) 2378 - planeb_wm = wm_info->max_wm; 2379 - } 2380 - 2381 - drm_dbg_kms(&dev_priv->drm, 2382 - "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 2383 - 2384 - crtc = single_enabled_crtc(dev_priv); 2385 - if (IS_I915GM(dev_priv) && crtc) { 2386 - struct drm_i915_gem_object *obj; 2387 - 2388 - obj = intel_fb_obj(crtc->base.primary->state->fb); 2389 - 2390 - /* self-refresh seems busted with untiled */ 2391 - if (!i915_gem_object_is_tiled(obj)) 2392 - crtc = NULL; 2393 - } 2394 - 2395 - /* 2396 - * Overlay gets an aggressive default since video jitter is bad. 2397 - */ 2398 - cwm = 2; 2399 - 2400 - /* Play safe and disable self-refresh before adjusting watermarks. 
*/ 2401 - intel_set_memory_cxsr(dev_priv, false); 2402 - 2403 - /* Calc sr entries for one plane configs */ 2404 - if (HAS_FW_BLC(dev_priv) && crtc) { 2405 - /* self-refresh has much higher latency */ 2406 - static const int sr_latency_ns = 6000; 2407 - const struct drm_display_mode *pipe_mode = 2408 - &crtc->config->hw.pipe_mode; 2409 - const struct drm_framebuffer *fb = 2410 - crtc->base.primary->state->fb; 2411 - int pixel_rate = crtc->config->pixel_rate; 2412 - int htotal = pipe_mode->crtc_htotal; 2413 - int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; 2414 - int cpp; 2415 - int entries; 2416 - 2417 - if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) 2418 - cpp = 4; 2419 - else 2420 - cpp = fb->format->cpp[0]; 2421 - 2422 - entries = intel_wm_method2(pixel_rate, htotal, width, cpp, 2423 - sr_latency_ns / 100); 2424 - entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); 2425 - drm_dbg_kms(&dev_priv->drm, 2426 - "self-refresh entries: %d\n", entries); 2427 - srwm = wm_info->fifo_size - entries; 2428 - if (srwm < 0) 2429 - srwm = 1; 2430 - 2431 - if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) 2432 - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, 2433 - FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); 2434 - else 2435 - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f); 2436 - } 2437 - 2438 - drm_dbg_kms(&dev_priv->drm, 2439 - "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2440 - planea_wm, planeb_wm, cwm, srwm); 2441 - 2442 - fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); 2443 - fwater_hi = (cwm & 0x1f); 2444 - 2445 - /* Set request length to 8 cachelines per fetch */ 2446 - fwater_lo = fwater_lo | (1 << 24) | (1 << 8); 2447 - fwater_hi = fwater_hi | (1 << 8); 2448 - 2449 - intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); 2450 - intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi); 2451 - 2452 - if (crtc) 2453 - intel_set_memory_cxsr(dev_priv, true); 2454 - } 2455 - 2456 - static void 
i845_update_wm(struct drm_i915_private *dev_priv) 2457 - { 2458 - struct intel_crtc *crtc; 2459 - u32 fwater_lo; 2460 - int planea_wm; 2461 - 2462 - crtc = single_enabled_crtc(dev_priv); 2463 - if (crtc == NULL) 2464 - return; 2465 - 2466 - planea_wm = intel_calculate_wm(crtc->config->pixel_rate, 2467 - &i845_wm_info, 2468 - i845_get_fifo_size(dev_priv, PLANE_A), 2469 - 4, pessimal_latency_ns); 2470 - fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff; 2471 - fwater_lo |= (3<<8) | planea_wm; 2472 - 2473 - drm_dbg_kms(&dev_priv->drm, 2474 - "Setting FIFO watermarks - A: %d\n", planea_wm); 2475 - 2476 - intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); 2477 - } 2478 - 2479 - /* latency must be in 0.1us units. */ 2480 - static unsigned int ilk_wm_method1(unsigned int pixel_rate, 2481 - unsigned int cpp, 2482 - unsigned int latency) 2483 - { 2484 - unsigned int ret; 2485 - 2486 - ret = intel_wm_method1(pixel_rate, cpp, latency); 2487 - ret = DIV_ROUND_UP(ret, 64) + 2; 2488 - 2489 - return ret; 2490 - } 2491 - 2492 - /* latency must be in 0.1us units. */ 2493 - static unsigned int ilk_wm_method2(unsigned int pixel_rate, 2494 - unsigned int htotal, 2495 - unsigned int width, 2496 - unsigned int cpp, 2497 - unsigned int latency) 2498 - { 2499 - unsigned int ret; 2500 - 2501 - ret = intel_wm_method2(pixel_rate, htotal, 2502 - width, cpp, latency); 2503 - ret = DIV_ROUND_UP(ret, 64) + 2; 2504 - 2505 - return ret; 2506 - } 2507 - 2508 - static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp) 2509 - { 2510 - /* 2511 - * Neither of these should be possible since this function shouldn't be 2512 - * called if the CRTC is off or the plane is invisible. But let's be 2513 - * extra paranoid to avoid a potential divide-by-zero if we screw up 2514 - * elsewhere in the driver. 
2515 - */ 2516 - if (WARN_ON(!cpp)) 2517 - return 0; 2518 - if (WARN_ON(!horiz_pixels)) 2519 - return 0; 2520 - 2521 - return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; 2522 - } 2523 - 2524 - struct ilk_wm_maximums { 2525 - u16 pri; 2526 - u16 spr; 2527 - u16 cur; 2528 - u16 fbc; 2529 - }; 2530 - 2531 - /* 2532 - * For both WM_PIPE and WM_LP. 2533 - * mem_value must be in 0.1us units. 2534 - */ 2535 - static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, 2536 - const struct intel_plane_state *plane_state, 2537 - u32 mem_value, bool is_lp) 2538 - { 2539 - u32 method1, method2; 2540 - int cpp; 2541 - 2542 - if (mem_value == 0) 2543 - return U32_MAX; 2544 - 2545 - if (!intel_wm_plane_visible(crtc_state, plane_state)) 2546 - return 0; 2547 - 2548 - cpp = plane_state->hw.fb->format->cpp[0]; 2549 - 2550 - method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); 2551 - 2552 - if (!is_lp) 2553 - return method1; 2554 - 2555 - method2 = ilk_wm_method2(crtc_state->pixel_rate, 2556 - crtc_state->hw.pipe_mode.crtc_htotal, 2557 - drm_rect_width(&plane_state->uapi.src) >> 16, 2558 - cpp, mem_value); 2559 - 2560 - return min(method1, method2); 2561 - } 2562 - 2563 - /* 2564 - * For both WM_PIPE and WM_LP. 2565 - * mem_value must be in 0.1us units. 
2566 - */ 2567 - static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, 2568 - const struct intel_plane_state *plane_state, 2569 - u32 mem_value) 2570 - { 2571 - u32 method1, method2; 2572 - int cpp; 2573 - 2574 - if (mem_value == 0) 2575 - return U32_MAX; 2576 - 2577 - if (!intel_wm_plane_visible(crtc_state, plane_state)) 2578 - return 0; 2579 - 2580 - cpp = plane_state->hw.fb->format->cpp[0]; 2581 - 2582 - method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); 2583 - method2 = ilk_wm_method2(crtc_state->pixel_rate, 2584 - crtc_state->hw.pipe_mode.crtc_htotal, 2585 - drm_rect_width(&plane_state->uapi.src) >> 16, 2586 - cpp, mem_value); 2587 - return min(method1, method2); 2588 - } 2589 - 2590 - /* 2591 - * For both WM_PIPE and WM_LP. 2592 - * mem_value must be in 0.1us units. 2593 - */ 2594 - static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, 2595 - const struct intel_plane_state *plane_state, 2596 - u32 mem_value) 2597 - { 2598 - int cpp; 2599 - 2600 - if (mem_value == 0) 2601 - return U32_MAX; 2602 - 2603 - if (!intel_wm_plane_visible(crtc_state, plane_state)) 2604 - return 0; 2605 - 2606 - cpp = plane_state->hw.fb->format->cpp[0]; 2607 - 2608 - return ilk_wm_method2(crtc_state->pixel_rate, 2609 - crtc_state->hw.pipe_mode.crtc_htotal, 2610 - drm_rect_width(&plane_state->uapi.src) >> 16, 2611 - cpp, mem_value); 2612 - } 2613 - 2614 - /* Only for WM_LP. 
*/ 2615 - static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, 2616 - const struct intel_plane_state *plane_state, 2617 - u32 pri_val) 2618 - { 2619 - int cpp; 2620 - 2621 - if (!intel_wm_plane_visible(crtc_state, plane_state)) 2622 - return 0; 2623 - 2624 - cpp = plane_state->hw.fb->format->cpp[0]; 2625 - 2626 - return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16, 2627 - cpp); 2628 - } 2629 - 2630 - static unsigned int 2631 - ilk_display_fifo_size(const struct drm_i915_private *dev_priv) 2632 - { 2633 - if (DISPLAY_VER(dev_priv) >= 8) 2634 - return 3072; 2635 - else if (DISPLAY_VER(dev_priv) >= 7) 2636 - return 768; 2637 - else 2638 - return 512; 2639 - } 2640 - 2641 - static unsigned int 2642 - ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, 2643 - int level, bool is_sprite) 2644 - { 2645 - if (DISPLAY_VER(dev_priv) >= 8) 2646 - /* BDW primary/sprite plane watermarks */ 2647 - return level == 0 ? 255 : 2047; 2648 - else if (DISPLAY_VER(dev_priv) >= 7) 2649 - /* IVB/HSW primary/sprite plane watermarks */ 2650 - return level == 0 ? 127 : 1023; 2651 - else if (!is_sprite) 2652 - /* ILK/SNB primary plane watermarks */ 2653 - return level == 0 ? 127 : 511; 2654 - else 2655 - /* ILK/SNB sprite plane watermarks */ 2656 - return level == 0 ? 63 : 255; 2657 - } 2658 - 2659 - static unsigned int 2660 - ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) 2661 - { 2662 - if (DISPLAY_VER(dev_priv) >= 7) 2663 - return level == 0 ? 63 : 255; 2664 - else 2665 - return level == 0 ? 
31 : 63; 2666 - } 2667 - 2668 - static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) 2669 - { 2670 - if (DISPLAY_VER(dev_priv) >= 8) 2671 - return 31; 2672 - else 2673 - return 15; 2674 - } 2675 - 2676 - /* Calculate the maximum primary/sprite plane watermark */ 2677 - static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, 2678 - int level, 2679 - const struct intel_wm_config *config, 2680 - enum intel_ddb_partitioning ddb_partitioning, 2681 - bool is_sprite) 2682 - { 2683 - unsigned int fifo_size = ilk_display_fifo_size(dev_priv); 2684 - 2685 - /* if sprites aren't enabled, sprites get nothing */ 2686 - if (is_sprite && !config->sprites_enabled) 2687 - return 0; 2688 - 2689 - /* HSW allows LP1+ watermarks even with multiple pipes */ 2690 - if (level == 0 || config->num_pipes_active > 1) { 2691 - fifo_size /= INTEL_NUM_PIPES(dev_priv); 2692 - 2693 - /* 2694 - * For some reason the non self refresh 2695 - * FIFO size is only half of the self 2696 - * refresh FIFO size on ILK/SNB. 
2697 - */ 2698 - if (DISPLAY_VER(dev_priv) <= 6) 2699 - fifo_size /= 2; 2700 - } 2701 - 2702 - if (config->sprites_enabled) { 2703 - /* level 0 is always calculated with 1:1 split */ 2704 - if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { 2705 - if (is_sprite) 2706 - fifo_size *= 5; 2707 - fifo_size /= 6; 2708 - } else { 2709 - fifo_size /= 2; 2710 - } 2711 - } 2712 - 2713 - /* clamp to max that the registers can hold */ 2714 - return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); 2715 - } 2716 - 2717 - /* Calculate the maximum cursor plane watermark */ 2718 - static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, 2719 - int level, 2720 - const struct intel_wm_config *config) 2721 - { 2722 - /* HSW LP1+ watermarks w/ multiple pipes */ 2723 - if (level > 0 && config->num_pipes_active > 1) 2724 - return 64; 2725 - 2726 - /* otherwise just report max that registers can hold */ 2727 - return ilk_cursor_wm_reg_max(dev_priv, level); 2728 - } 2729 - 2730 - static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv, 2731 - int level, 2732 - const struct intel_wm_config *config, 2733 - enum intel_ddb_partitioning ddb_partitioning, 2734 - struct ilk_wm_maximums *max) 2735 - { 2736 - max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false); 2737 - max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true); 2738 - max->cur = ilk_cursor_wm_max(dev_priv, level, config); 2739 - max->fbc = ilk_fbc_wm_reg_max(dev_priv); 2740 - } 2741 - 2742 - static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, 2743 - int level, 2744 - struct ilk_wm_maximums *max) 2745 - { 2746 - max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); 2747 - max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); 2748 - max->cur = ilk_cursor_wm_reg_max(dev_priv, level); 2749 - max->fbc = ilk_fbc_wm_reg_max(dev_priv); 2750 - } 2751 - 2752 - static bool ilk_validate_wm_level(int 
level, 2753 - const struct ilk_wm_maximums *max, 2754 - struct intel_wm_level *result) 2755 - { 2756 - bool ret; 2757 - 2758 - /* already determined to be invalid? */ 2759 - if (!result->enable) 2760 - return false; 2761 - 2762 - result->enable = result->pri_val <= max->pri && 2763 - result->spr_val <= max->spr && 2764 - result->cur_val <= max->cur; 2765 - 2766 - ret = result->enable; 2767 - 2768 - /* 2769 - * HACK until we can pre-compute everything, 2770 - * and thus fail gracefully if LP0 watermarks 2771 - * are exceeded... 2772 - */ 2773 - if (level == 0 && !result->enable) { 2774 - if (result->pri_val > max->pri) 2775 - DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", 2776 - level, result->pri_val, max->pri); 2777 - if (result->spr_val > max->spr) 2778 - DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", 2779 - level, result->spr_val, max->spr); 2780 - if (result->cur_val > max->cur) 2781 - DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", 2782 - level, result->cur_val, max->cur); 2783 - 2784 - result->pri_val = min_t(u32, result->pri_val, max->pri); 2785 - result->spr_val = min_t(u32, result->spr_val, max->spr); 2786 - result->cur_val = min_t(u32, result->cur_val, max->cur); 2787 - result->enable = true; 2788 - } 2789 - 2790 - return ret; 2791 - } 2792 - 2793 - static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, 2794 - const struct intel_crtc *crtc, 2795 - int level, 2796 - struct intel_crtc_state *crtc_state, 2797 - const struct intel_plane_state *pristate, 2798 - const struct intel_plane_state *sprstate, 2799 - const struct intel_plane_state *curstate, 2800 - struct intel_wm_level *result) 2801 - { 2802 - u16 pri_latency = dev_priv->display.wm.pri_latency[level]; 2803 - u16 spr_latency = dev_priv->display.wm.spr_latency[level]; 2804 - u16 cur_latency = dev_priv->display.wm.cur_latency[level]; 2805 - 2806 - /* WM1+ latency values stored in 0.5us units */ 2807 - if (level > 0) { 2808 - pri_latency *= 5; 2809 - spr_latency *= 5; 
2810 - cur_latency *= 5; 2811 - } 2812 - 2813 - if (pristate) { 2814 - result->pri_val = ilk_compute_pri_wm(crtc_state, pristate, 2815 - pri_latency, level); 2816 - result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val); 2817 - } 2818 - 2819 - if (sprstate) 2820 - result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency); 2821 - 2822 - if (curstate) 2823 - result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency); 2824 - 2825 - result->enable = true; 2826 - } 2827 - 2828 - static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) 2829 - { 2830 - u64 sskpd; 2831 - 2832 - sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD); 2833 - 2834 - wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); 2835 - if (wm[0] == 0) 2836 - wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); 2837 - wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); 2838 - wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); 2839 - wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); 2840 - wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); 2841 - } 2842 - 2843 - static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) 2844 - { 2845 - u32 sskpd; 2846 - 2847 - sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD); 2848 - 2849 - wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); 2850 - wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); 2851 - wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); 2852 - wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); 2853 - } 2854 - 2855 - static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) 2856 - { 2857 - u32 mltr; 2858 - 2859 - mltr = intel_uncore_read(&i915->uncore, MLTR_ILK); 2860 - 2861 - /* ILK primary LP0 latency is 700 ns */ 2862 - wm[0] = 7; 2863 - wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); 2864 - wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); 2865 - } 2866 - 2867 - static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, 2868 - u16 wm[5]) 2869 - { 
2870 - /* ILK sprite LP0 latency is 1300 ns */ 2871 - if (DISPLAY_VER(dev_priv) == 5) 2872 - wm[0] = 13; 2873 - } 2874 - 2875 - static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, 2876 - u16 wm[5]) 2877 - { 2878 - /* ILK cursor LP0 latency is 1300 ns */ 2879 - if (DISPLAY_VER(dev_priv) == 5) 2880 - wm[0] = 13; 2881 - } 2882 - 2883 - int ilk_wm_max_level(const struct drm_i915_private *dev_priv) 2884 - { 2885 - /* how many WM levels are we expecting */ 2886 - if (HAS_HW_SAGV_WM(dev_priv)) 2887 - return 5; 2888 - else if (DISPLAY_VER(dev_priv) >= 9) 2889 - return 7; 2890 - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 2891 - return 4; 2892 - else if (DISPLAY_VER(dev_priv) >= 6) 2893 - return 3; 2894 - else 2895 - return 2; 2896 - } 2897 - 2898 - void intel_print_wm_latency(struct drm_i915_private *dev_priv, 2899 - const char *name, const u16 wm[]) 2900 - { 2901 - int level, max_level = ilk_wm_max_level(dev_priv); 2902 - 2903 - for (level = 0; level <= max_level; level++) { 2904 - unsigned int latency = wm[level]; 2905 - 2906 - if (latency == 0) { 2907 - drm_dbg_kms(&dev_priv->drm, 2908 - "%s WM%d latency not provided\n", 2909 - name, level); 2910 - continue; 2911 - } 2912 - 2913 - /* 2914 - * - latencies are in us on gen9. 
2915 - * - before then, WM1+ latency values are in 0.5us units 2916 - */ 2917 - if (DISPLAY_VER(dev_priv) >= 9) 2918 - latency *= 10; 2919 - else if (level > 0) 2920 - latency *= 5; 2921 - 2922 - drm_dbg_kms(&dev_priv->drm, 2923 - "%s WM%d latency %u (%u.%u usec)\n", name, level, 2924 - wm[level], latency / 10, latency % 10); 2925 - } 2926 - } 2927 - 2928 - static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, 2929 - u16 wm[5], u16 min) 2930 - { 2931 - int level, max_level = ilk_wm_max_level(dev_priv); 2932 - 2933 - if (wm[0] >= min) 2934 - return false; 2935 - 2936 - wm[0] = max(wm[0], min); 2937 - for (level = 1; level <= max_level; level++) 2938 - wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5)); 2939 - 2940 - return true; 2941 - } 2942 - 2943 - static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) 2944 - { 2945 - bool changed; 2946 - 2947 - /* 2948 - * The BIOS provided WM memory latency values are often 2949 - * inadequate for high resolution displays. Adjust them. 2950 - */ 2951 - changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12); 2952 - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12); 2953 - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12); 2954 - 2955 - if (!changed) 2956 - return; 2957 - 2958 - drm_dbg_kms(&dev_priv->drm, 2959 - "WM latency values increased to avoid potential underruns\n"); 2960 - intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); 2961 - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); 2962 - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); 2963 - } 2964 - 2965 - static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) 2966 - { 2967 - /* 2968 - * On some SNB machines (Thinkpad X220 Tablet at least) 2969 - * LP3 usage can cause vblank interrupts to be lost. 
2970 - * The DEIIR bit will go high but it looks like the CPU 2971 - * never gets interrupted. 2972 - * 2973 - * It's not clear whether other interrupt source could 2974 - * be affected or if this is somehow limited to vblank 2975 - * interrupts only. To play it safe we disable LP3 2976 - * watermarks entirely. 2977 - */ 2978 - if (dev_priv->display.wm.pri_latency[3] == 0 && 2979 - dev_priv->display.wm.spr_latency[3] == 0 && 2980 - dev_priv->display.wm.cur_latency[3] == 0) 2981 - return; 2982 - 2983 - dev_priv->display.wm.pri_latency[3] = 0; 2984 - dev_priv->display.wm.spr_latency[3] = 0; 2985 - dev_priv->display.wm.cur_latency[3] = 0; 2986 - 2987 - drm_dbg_kms(&dev_priv->drm, 2988 - "LP3 watermarks disabled due to potential for lost interrupts\n"); 2989 - intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); 2990 - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); 2991 - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); 2992 - } 2993 - 2994 - static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) 2995 - { 2996 - if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 2997 - hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); 2998 - else if (DISPLAY_VER(dev_priv) >= 6) 2999 - snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); 3000 - else 3001 - ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); 3002 - 3003 - memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency, 3004 - sizeof(dev_priv->display.wm.pri_latency)); 3005 - memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency, 3006 - sizeof(dev_priv->display.wm.pri_latency)); 3007 - 3008 - intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency); 3009 - intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency); 3010 - 3011 - intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); 3012 - 
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); 3013 - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); 3014 - 3015 - if (DISPLAY_VER(dev_priv) == 6) { 3016 - snb_wm_latency_quirk(dev_priv); 3017 - snb_wm_lp3_irq_quirk(dev_priv); 3018 - } 3019 - } 3020 - 3021 - static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, 3022 - struct intel_pipe_wm *pipe_wm) 3023 - { 3024 - /* LP0 watermark maximums depend on this pipe alone */ 3025 - const struct intel_wm_config config = { 3026 - .num_pipes_active = 1, 3027 - .sprites_enabled = pipe_wm->sprites_enabled, 3028 - .sprites_scaled = pipe_wm->sprites_scaled, 3029 - }; 3030 - struct ilk_wm_maximums max; 3031 - 3032 - /* LP0 watermarks always use 1/2 DDB partitioning */ 3033 - ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max); 3034 - 3035 - /* At least LP0 must be valid */ 3036 - if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { 3037 - drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n"); 3038 - return false; 3039 - } 3040 - 3041 - return true; 3042 - } 3043 - 3044 - /* Compute new watermarks for the pipe */ 3045 - static int ilk_compute_pipe_wm(struct intel_atomic_state *state, 3046 - struct intel_crtc *crtc) 3047 - { 3048 - struct drm_i915_private *dev_priv = to_i915(state->base.dev); 3049 - struct intel_crtc_state *crtc_state = 3050 - intel_atomic_get_new_crtc_state(state, crtc); 3051 - struct intel_pipe_wm *pipe_wm; 3052 - struct intel_plane *plane; 3053 - const struct intel_plane_state *plane_state; 3054 - const struct intel_plane_state *pristate = NULL; 3055 - const struct intel_plane_state *sprstate = NULL; 3056 - const struct intel_plane_state *curstate = NULL; 3057 - int level, max_level = ilk_wm_max_level(dev_priv), usable_level; 3058 - struct ilk_wm_maximums max; 3059 - 3060 - pipe_wm = &crtc_state->wm.ilk.optimal; 3061 - 3062 - intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { 3063 
- if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) 3064 - pristate = plane_state; 3065 - else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY) 3066 - sprstate = plane_state; 3067 - else if (plane->base.type == DRM_PLANE_TYPE_CURSOR) 3068 - curstate = plane_state; 3069 - } 3070 - 3071 - pipe_wm->pipe_enabled = crtc_state->hw.active; 3072 - pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0); 3073 - pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0); 3074 - 3075 - usable_level = max_level; 3076 - 3077 - /* ILK/SNB: LP2+ watermarks only w/o sprites */ 3078 - if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled) 3079 - usable_level = 1; 3080 - 3081 - /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ 3082 - if (pipe_wm->sprites_scaled) 3083 - usable_level = 0; 3084 - 3085 - memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); 3086 - ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state, 3087 - pristate, sprstate, curstate, &pipe_wm->wm[0]); 3088 - 3089 - if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) 3090 - return -EINVAL; 3091 - 3092 - ilk_compute_wm_reg_maximums(dev_priv, 1, &max); 3093 - 3094 - for (level = 1; level <= usable_level; level++) { 3095 - struct intel_wm_level *wm = &pipe_wm->wm[level]; 3096 - 3097 - ilk_compute_wm_level(dev_priv, crtc, level, crtc_state, 3098 - pristate, sprstate, curstate, wm); 3099 - 3100 - /* 3101 - * Disable any watermark level that exceeds the 3102 - * register maximums since such watermarks are 3103 - * always invalid. 3104 - */ 3105 - if (!ilk_validate_wm_level(level, &max, wm)) { 3106 - memset(wm, 0, sizeof(*wm)); 3107 - break; 3108 - } 3109 - } 3110 - 3111 - return 0; 3112 - } 3113 - 3114 - /* 3115 - * Build a set of 'intermediate' watermark values that satisfy both the old 3116 - * state and the new state. These can be programmed to the hardware 3117 - * immediately. 
3118 - */ 3119 - static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, 3120 - struct intel_crtc *crtc) 3121 - { 3122 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3123 - struct intel_crtc_state *new_crtc_state = 3124 - intel_atomic_get_new_crtc_state(state, crtc); 3125 - const struct intel_crtc_state *old_crtc_state = 3126 - intel_atomic_get_old_crtc_state(state, crtc); 3127 - struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate; 3128 - const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal; 3129 - int level, max_level = ilk_wm_max_level(dev_priv); 3130 - 3131 - /* 3132 - * Start with the final, target watermarks, then combine with the 3133 - * currently active watermarks to get values that are safe both before 3134 - * and after the vblank. 3135 - */ 3136 - *a = new_crtc_state->wm.ilk.optimal; 3137 - if (!new_crtc_state->hw.active || 3138 - intel_crtc_needs_modeset(new_crtc_state) || 3139 - state->skip_intermediate_wm) 3140 - return 0; 3141 - 3142 - a->pipe_enabled |= b->pipe_enabled; 3143 - a->sprites_enabled |= b->sprites_enabled; 3144 - a->sprites_scaled |= b->sprites_scaled; 3145 - 3146 - for (level = 0; level <= max_level; level++) { 3147 - struct intel_wm_level *a_wm = &a->wm[level]; 3148 - const struct intel_wm_level *b_wm = &b->wm[level]; 3149 - 3150 - a_wm->enable &= b_wm->enable; 3151 - a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); 3152 - a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); 3153 - a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); 3154 - a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); 3155 - } 3156 - 3157 - /* 3158 - * We need to make sure that these merged watermark values are 3159 - * actually a valid configuration themselves. If they're not, 3160 - * there's no safe way to transition from the old state to 3161 - * the new state, so we need to fail the atomic transaction. 
3162 - */ 3163 - if (!ilk_validate_pipe_wm(dev_priv, a)) 3164 - return -EINVAL; 3165 - 3166 - /* 3167 - * If our intermediate WM are identical to the final WM, then we can 3168 - * omit the post-vblank programming; only update if it's different. 3169 - */ 3170 - if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0) 3171 - new_crtc_state->wm.need_postvbl_update = true; 3172 - 3173 - return 0; 3174 - } 3175 - 3176 - /* 3177 - * Merge the watermarks from all active pipes for a specific level. 3178 - */ 3179 - static void ilk_merge_wm_level(struct drm_i915_private *dev_priv, 3180 - int level, 3181 - struct intel_wm_level *ret_wm) 3182 - { 3183 - const struct intel_crtc *crtc; 3184 - 3185 - ret_wm->enable = true; 3186 - 3187 - for_each_intel_crtc(&dev_priv->drm, crtc) { 3188 - const struct intel_pipe_wm *active = &crtc->wm.active.ilk; 3189 - const struct intel_wm_level *wm = &active->wm[level]; 3190 - 3191 - if (!active->pipe_enabled) 3192 - continue; 3193 - 3194 - /* 3195 - * The watermark values may have been used in the past, 3196 - * so we must maintain them in the registers for some 3197 - * time even if the level is now disabled. 3198 - */ 3199 - if (!wm->enable) 3200 - ret_wm->enable = false; 3201 - 3202 - ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); 3203 - ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); 3204 - ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); 3205 - ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); 3206 - } 3207 - } 3208 - 3209 - /* 3210 - * Merge all low power watermarks for all active pipes. 
3211 - */ 3212 - static void ilk_wm_merge(struct drm_i915_private *dev_priv, 3213 - const struct intel_wm_config *config, 3214 - const struct ilk_wm_maximums *max, 3215 - struct intel_pipe_wm *merged) 3216 - { 3217 - int level, max_level = ilk_wm_max_level(dev_priv); 3218 - int last_enabled_level = max_level; 3219 - 3220 - /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ 3221 - if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && 3222 - config->num_pipes_active > 1) 3223 - last_enabled_level = 0; 3224 - 3225 - /* ILK: FBC WM must be disabled always */ 3226 - merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6; 3227 - 3228 - /* merge each WM1+ level */ 3229 - for (level = 1; level <= max_level; level++) { 3230 - struct intel_wm_level *wm = &merged->wm[level]; 3231 - 3232 - ilk_merge_wm_level(dev_priv, level, wm); 3233 - 3234 - if (level > last_enabled_level) 3235 - wm->enable = false; 3236 - else if (!ilk_validate_wm_level(level, max, wm)) 3237 - /* make sure all following levels get disabled */ 3238 - last_enabled_level = level - 1; 3239 - 3240 - /* 3241 - * The spec says it is preferred to disable 3242 - * FBC WMs instead of disabling a WM level. 
3243 - */ 3244 - if (wm->fbc_val > max->fbc) { 3245 - if (wm->enable) 3246 - merged->fbc_wm_enabled = false; 3247 - wm->fbc_val = 0; 3248 - } 3249 - } 3250 - 3251 - /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ 3252 - if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) && 3253 - dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) { 3254 - for (level = 2; level <= max_level; level++) { 3255 - struct intel_wm_level *wm = &merged->wm[level]; 3256 - 3257 - wm->enable = false; 3258 - } 3259 - } 3260 - } 3261 - 3262 - static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) 3263 - { 3264 - /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ 3265 - return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); 3266 - } 3267 - 3268 - /* The value we need to program into the WM_LPx latency field */ 3269 - static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv, 3270 - int level) 3271 - { 3272 - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3273 - return 2 * level; 3274 - else 3275 - return dev_priv->display.wm.pri_latency[level]; 3276 - } 3277 - 3278 - static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, 3279 - const struct intel_pipe_wm *merged, 3280 - enum intel_ddb_partitioning partitioning, 3281 - struct ilk_wm_values *results) 3282 - { 3283 - struct intel_crtc *crtc; 3284 - int level, wm_lp; 3285 - 3286 - results->enable_fbc_wm = merged->fbc_wm_enabled; 3287 - results->partitioning = partitioning; 3288 - 3289 - /* LP1+ register values */ 3290 - for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 3291 - const struct intel_wm_level *r; 3292 - 3293 - level = ilk_wm_lp_to_level(wm_lp, merged); 3294 - 3295 - r = &merged->wm[level]; 3296 - 3297 - /* 3298 - * Maintain the watermark values even if the level is 3299 - * disabled. Doing otherwise could cause underruns. 
3300 - */ 3301 - results->wm_lp[wm_lp - 1] = 3302 - WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) | 3303 - WM_LP_PRIMARY(r->pri_val) | 3304 - WM_LP_CURSOR(r->cur_val); 3305 - 3306 - if (r->enable) 3307 - results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE; 3308 - 3309 - if (DISPLAY_VER(dev_priv) >= 8) 3310 - results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val); 3311 - else 3312 - results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val); 3313 - 3314 - results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val); 3315 - 3316 - /* 3317 - * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the 3318 - * level is disabled. Doing otherwise could cause underruns. 3319 - */ 3320 - if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) { 3321 - drm_WARN_ON(&dev_priv->drm, wm_lp != 1); 3322 - results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE; 3323 - } 3324 - } 3325 - 3326 - /* LP0 register values */ 3327 - for_each_intel_crtc(&dev_priv->drm, crtc) { 3328 - enum pipe pipe = crtc->pipe; 3329 - const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk; 3330 - const struct intel_wm_level *r = &pipe_wm->wm[0]; 3331 - 3332 - if (drm_WARN_ON(&dev_priv->drm, !r->enable)) 3333 - continue; 3334 - 3335 - results->wm_pipe[pipe] = 3336 - WM0_PIPE_PRIMARY(r->pri_val) | 3337 - WM0_PIPE_SPRITE(r->spr_val) | 3338 - WM0_PIPE_CURSOR(r->cur_val); 3339 - } 3340 - } 3341 - 3342 - /* Find the result with the highest level enabled. Check for enable_fbc_wm in 3343 - * case both are at the same level. Prefer r1 in case they're the same. 
*/ 3344 - static struct intel_pipe_wm * 3345 - ilk_find_best_result(struct drm_i915_private *dev_priv, 3346 - struct intel_pipe_wm *r1, 3347 - struct intel_pipe_wm *r2) 3348 - { 3349 - int level, max_level = ilk_wm_max_level(dev_priv); 3350 - int level1 = 0, level2 = 0; 3351 - 3352 - for (level = 1; level <= max_level; level++) { 3353 - if (r1->wm[level].enable) 3354 - level1 = level; 3355 - if (r2->wm[level].enable) 3356 - level2 = level; 3357 - } 3358 - 3359 - if (level1 == level2) { 3360 - if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) 3361 - return r2; 3362 - else 3363 - return r1; 3364 - } else if (level1 > level2) { 3365 - return r1; 3366 - } else { 3367 - return r2; 3368 - } 3369 - } 3370 - 3371 - /* dirty bits used to track which watermarks need changes */ 3372 - #define WM_DIRTY_PIPE(pipe) (1 << (pipe)) 3373 - #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) 3374 - #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) 3375 - #define WM_DIRTY_FBC (1 << 24) 3376 - #define WM_DIRTY_DDB (1 << 25) 3377 - 3378 - static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, 3379 - const struct ilk_wm_values *old, 3380 - const struct ilk_wm_values *new) 3381 - { 3382 - unsigned int dirty = 0; 3383 - enum pipe pipe; 3384 - int wm_lp; 3385 - 3386 - for_each_pipe(dev_priv, pipe) { 3387 - if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { 3388 - dirty |= WM_DIRTY_PIPE(pipe); 3389 - /* Must disable LP1+ watermarks too */ 3390 - dirty |= WM_DIRTY_LP_ALL; 3391 - } 3392 - } 3393 - 3394 - if (old->enable_fbc_wm != new->enable_fbc_wm) { 3395 - dirty |= WM_DIRTY_FBC; 3396 - /* Must disable LP1+ watermarks too */ 3397 - dirty |= WM_DIRTY_LP_ALL; 3398 - } 3399 - 3400 - if (old->partitioning != new->partitioning) { 3401 - dirty |= WM_DIRTY_DDB; 3402 - /* Must disable LP1+ watermarks too */ 3403 - dirty |= WM_DIRTY_LP_ALL; 3404 - } 3405 - 3406 - /* LP1+ watermarks already deemed dirty, no need to continue */ 3407 - if (dirty & WM_DIRTY_LP_ALL) 
3408 - return dirty; 3409 - 3410 - /* Find the lowest numbered LP1+ watermark in need of an update... */ 3411 - for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 3412 - if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || 3413 - old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) 3414 - break; 3415 - } 3416 - 3417 - /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ 3418 - for (; wm_lp <= 3; wm_lp++) 3419 - dirty |= WM_DIRTY_LP(wm_lp); 3420 - 3421 - return dirty; 3422 - } 3423 - 3424 - static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, 3425 - unsigned int dirty) 3426 - { 3427 - struct ilk_wm_values *previous = &dev_priv->display.wm.hw; 3428 - bool changed = false; 3429 - 3430 - if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) { 3431 - previous->wm_lp[2] &= ~WM_LP_ENABLE; 3432 - intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]); 3433 - changed = true; 3434 - } 3435 - if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) { 3436 - previous->wm_lp[1] &= ~WM_LP_ENABLE; 3437 - intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]); 3438 - changed = true; 3439 - } 3440 - if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) { 3441 - previous->wm_lp[0] &= ~WM_LP_ENABLE; 3442 - intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]); 3443 - changed = true; 3444 - } 3445 - 3446 - /* 3447 - * Don't touch WM_LP_SPRITE_ENABLE here. 3448 - * Doing so could cause underruns. 3449 - */ 3450 - 3451 - return changed; 3452 - } 3453 - 3454 - /* 3455 - * The spec says we shouldn't write when we don't need, because every write 3456 - * causes WMs to be re-evaluated, expending some power. 
3457 - */ 3458 - static void ilk_write_wm_values(struct drm_i915_private *dev_priv, 3459 - struct ilk_wm_values *results) 3460 - { 3461 - struct ilk_wm_values *previous = &dev_priv->display.wm.hw; 3462 - unsigned int dirty; 3463 - 3464 - dirty = ilk_compute_wm_dirty(dev_priv, previous, results); 3465 - if (!dirty) 3466 - return; 3467 - 3468 - _ilk_disable_lp_wm(dev_priv, dirty); 3469 - 3470 - if (dirty & WM_DIRTY_PIPE(PIPE_A)) 3471 - intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]); 3472 - if (dirty & WM_DIRTY_PIPE(PIPE_B)) 3473 - intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]); 3474 - if (dirty & WM_DIRTY_PIPE(PIPE_C)) 3475 - intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]); 3476 - 3477 - if (dirty & WM_DIRTY_DDB) { 3478 - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3479 - intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6, 3480 - results->partitioning == INTEL_DDB_PART_1_2 ? 0 : 3481 - WM_MISC_DATA_PARTITION_5_6); 3482 - else 3483 - intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6, 3484 - results->partitioning == INTEL_DDB_PART_1_2 ? 0 : 3485 - DISP_DATA_PARTITION_5_6); 3486 - } 3487 - 3488 - if (dirty & WM_DIRTY_FBC) 3489 - intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS, 3490 - results->enable_fbc_wm ? 
0 : DISP_FBC_WM_DIS); 3491 - 3492 - if (dirty & WM_DIRTY_LP(1) && 3493 - previous->wm_lp_spr[0] != results->wm_lp_spr[0]) 3494 - intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]); 3495 - 3496 - if (DISPLAY_VER(dev_priv) >= 7) { 3497 - if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) 3498 - intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]); 3499 - if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) 3500 - intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]); 3501 - } 3502 - 3503 - if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) 3504 - intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]); 3505 - if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) 3506 - intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]); 3507 - if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) 3508 - intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]); 3509 - 3510 - dev_priv->display.wm.hw = *results; 3511 - } 3512 - 3513 - bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) 3514 - { 3515 - return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); 3516 - } 3517 - 3518 - static void ilk_compute_wm_config(struct drm_i915_private *dev_priv, 3519 - struct intel_wm_config *config) 3520 - { 3521 - struct intel_crtc *crtc; 3522 - 3523 - /* Compute the currently _active_ config */ 3524 - for_each_intel_crtc(&dev_priv->drm, crtc) { 3525 - const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; 3526 - 3527 - if (!wm->pipe_enabled) 3528 - continue; 3529 - 3530 - config->sprites_enabled |= wm->sprites_enabled; 3531 - config->sprites_scaled |= wm->sprites_scaled; 3532 - config->num_pipes_active++; 3533 - } 3534 - } 3535 - 3536 - static void ilk_program_watermarks(struct drm_i915_private *dev_priv) 3537 - { 3538 - struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, 
*best_lp_wm; 3539 - struct ilk_wm_maximums max; 3540 - struct intel_wm_config config = {}; 3541 - struct ilk_wm_values results = {}; 3542 - enum intel_ddb_partitioning partitioning; 3543 - 3544 - ilk_compute_wm_config(dev_priv, &config); 3545 - 3546 - ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max); 3547 - ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2); 3548 - 3549 - /* 5/6 split only in single pipe config on IVB+ */ 3550 - if (DISPLAY_VER(dev_priv) >= 7 && 3551 - config.num_pipes_active == 1 && config.sprites_enabled) { 3552 - ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max); 3553 - ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6); 3554 - 3555 - best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6); 3556 - } else { 3557 - best_lp_wm = &lp_wm_1_2; 3558 - } 3559 - 3560 - partitioning = (best_lp_wm == &lp_wm_1_2) ? 3561 - INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 3562 - 3563 - ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results); 3564 - 3565 - ilk_write_wm_values(dev_priv, &results); 3566 - } 3567 - 3568 - static void ilk_initial_watermarks(struct intel_atomic_state *state, 3569 - struct intel_crtc *crtc) 3570 - { 3571 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3572 - const struct intel_crtc_state *crtc_state = 3573 - intel_atomic_get_new_crtc_state(state, crtc); 3574 - 3575 - mutex_lock(&dev_priv->display.wm.wm_mutex); 3576 - crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; 3577 - ilk_program_watermarks(dev_priv); 3578 - mutex_unlock(&dev_priv->display.wm.wm_mutex); 3579 - } 3580 - 3581 - static void ilk_optimize_watermarks(struct intel_atomic_state *state, 3582 - struct intel_crtc *crtc) 3583 - { 3584 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3585 - const struct intel_crtc_state *crtc_state = 3586 - intel_atomic_get_new_crtc_state(state, crtc); 3587 - 3588 - if (!crtc_state->wm.need_postvbl_update) 3589 - return; 3590 - 3591 - 
mutex_lock(&dev_priv->display.wm.wm_mutex); 3592 - crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; 3593 - ilk_program_watermarks(dev_priv); 3594 - mutex_unlock(&dev_priv->display.wm.wm_mutex); 3595 - } 3596 - 3597 - static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) 3598 - { 3599 - struct drm_device *dev = crtc->base.dev; 3600 - struct drm_i915_private *dev_priv = to_i915(dev); 3601 - struct ilk_wm_values *hw = &dev_priv->display.wm.hw; 3602 - struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); 3603 - struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; 3604 - enum pipe pipe = crtc->pipe; 3605 - 3606 - hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe)); 3607 - 3608 - memset(active, 0, sizeof(*active)); 3609 - 3610 - active->pipe_enabled = crtc->active; 3611 - 3612 - if (active->pipe_enabled) { 3613 - u32 tmp = hw->wm_pipe[pipe]; 3614 - 3615 - /* 3616 - * For active pipes LP0 watermark is marked as 3617 - * enabled, and LP1+ watermaks as disabled since 3618 - * we can't really reverse compute them in case 3619 - * multiple pipes are active. 3620 - */ 3621 - active->wm[0].enable = true; 3622 - active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp); 3623 - active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp); 3624 - active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp); 3625 - } else { 3626 - int level, max_level = ilk_wm_max_level(dev_priv); 3627 - 3628 - /* 3629 - * For inactive pipes, all watermark levels 3630 - * should be marked as enabled but zeroed, 3631 - * which is what we'd compute them to. 
3632 - */ 3633 - for (level = 0; level <= max_level; level++) 3634 - active->wm[level].enable = true; 3635 - } 3636 - 3637 - crtc->wm.active.ilk = *active; 3638 - } 3639 - 3640 - #define _FW_WM(value, plane) \ 3641 - (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) 3642 - #define _FW_WM_VLV(value, plane) \ 3643 - (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) 3644 - 3645 - static void g4x_read_wm_values(struct drm_i915_private *dev_priv, 3646 - struct g4x_wm_values *wm) 3647 - { 3648 - u32 tmp; 3649 - 3650 - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); 3651 - wm->sr.plane = _FW_WM(tmp, SR); 3652 - wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); 3653 - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB); 3654 - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA); 3655 - 3656 - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); 3657 - wm->fbc_en = tmp & DSPFW_FBC_SR_EN; 3658 - wm->sr.fbc = _FW_WM(tmp, FBC_SR); 3659 - wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR); 3660 - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB); 3661 - wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); 3662 - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA); 3663 - 3664 - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); 3665 - wm->hpll_en = tmp & DSPFW_HPLL_SR_EN; 3666 - wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); 3667 - wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR); 3668 - wm->hpll.plane = _FW_WM(tmp, HPLL_SR); 3669 - } 3670 - 3671 - static void vlv_read_wm_values(struct drm_i915_private *dev_priv, 3672 - struct vlv_wm_values *wm) 3673 - { 3674 - enum pipe pipe; 3675 - u32 tmp; 3676 - 3677 - for_each_pipe(dev_priv, pipe) { 3678 - tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe)); 3679 - 3680 - wm->ddl[pipe].plane[PLANE_PRIMARY] = 3681 - (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3682 - wm->ddl[pipe].plane[PLANE_CURSOR] = 3683 - (tmp >> DDL_CURSOR_SHIFT) & 
(DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3684 - wm->ddl[pipe].plane[PLANE_SPRITE0] = 3685 - (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3686 - wm->ddl[pipe].plane[PLANE_SPRITE1] = 3687 - (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 3688 - } 3689 - 3690 - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); 3691 - wm->sr.plane = _FW_WM(tmp, SR); 3692 - wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); 3693 - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); 3694 - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); 3695 - 3696 - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); 3697 - wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); 3698 - wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); 3699 - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); 3700 - 3701 - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); 3702 - wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); 3703 - 3704 - if (IS_CHERRYVIEW(dev_priv)) { 3705 - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV); 3706 - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); 3707 - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); 3708 - 3709 - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV); 3710 - wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); 3711 - wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); 3712 - 3713 - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV); 3714 - wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); 3715 - wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); 3716 - 3717 - tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); 3718 - wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 3719 - wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; 3720 - wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; 3721 - 
wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8; 3722 - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; 3723 - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 3724 - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; 3725 - wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 3726 - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 3727 - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; 3728 - } else { 3729 - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7); 3730 - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); 3731 - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); 3732 - 3733 - tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); 3734 - wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 3735 - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; 3736 - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; 3737 - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; 3738 - wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; 3739 - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 3740 - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; 3741 - } 3742 - } 3743 - 3744 - #undef _FW_WM 3745 - #undef _FW_WM_VLV 3746 - 3747 - void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) 3748 - { 3749 - struct g4x_wm_values *wm = &dev_priv->display.wm.g4x; 3750 - struct intel_crtc *crtc; 3751 - 3752 - g4x_read_wm_values(dev_priv, wm); 3753 - 3754 - wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; 3755 - 3756 - for_each_intel_crtc(&dev_priv->drm, crtc) { 3757 - struct intel_crtc_state *crtc_state = 3758 - to_intel_crtc_state(crtc->base.state); 3759 - struct g4x_wm_state *active = &crtc->wm.active.g4x; 3760 - struct g4x_pipe_wm *raw; 3761 - enum pipe pipe = 
crtc->pipe; 3762 - enum plane_id plane_id; 3763 - int level, max_level; 3764 - 3765 - active->cxsr = wm->cxsr; 3766 - active->hpll_en = wm->hpll_en; 3767 - active->fbc_en = wm->fbc_en; 3768 - 3769 - active->sr = wm->sr; 3770 - active->hpll = wm->hpll; 3771 - 3772 - for_each_plane_id_on_crtc(crtc, plane_id) { 3773 - active->wm.plane[plane_id] = 3774 - wm->pipe[pipe].plane[plane_id]; 3775 - } 3776 - 3777 - if (wm->cxsr && wm->hpll_en) 3778 - max_level = G4X_WM_LEVEL_HPLL; 3779 - else if (wm->cxsr) 3780 - max_level = G4X_WM_LEVEL_SR; 3781 - else 3782 - max_level = G4X_WM_LEVEL_NORMAL; 3783 - 3784 - level = G4X_WM_LEVEL_NORMAL; 3785 - raw = &crtc_state->wm.g4x.raw[level]; 3786 - for_each_plane_id_on_crtc(crtc, plane_id) 3787 - raw->plane[plane_id] = active->wm.plane[plane_id]; 3788 - 3789 - level = G4X_WM_LEVEL_SR; 3790 - if (level > max_level) 3791 - goto out; 3792 - 3793 - raw = &crtc_state->wm.g4x.raw[level]; 3794 - raw->plane[PLANE_PRIMARY] = active->sr.plane; 3795 - raw->plane[PLANE_CURSOR] = active->sr.cursor; 3796 - raw->plane[PLANE_SPRITE0] = 0; 3797 - raw->fbc = active->sr.fbc; 3798 - 3799 - level = G4X_WM_LEVEL_HPLL; 3800 - if (level > max_level) 3801 - goto out; 3802 - 3803 - raw = &crtc_state->wm.g4x.raw[level]; 3804 - raw->plane[PLANE_PRIMARY] = active->hpll.plane; 3805 - raw->plane[PLANE_CURSOR] = active->hpll.cursor; 3806 - raw->plane[PLANE_SPRITE0] = 0; 3807 - raw->fbc = active->hpll.fbc; 3808 - 3809 - level++; 3810 - out: 3811 - for_each_plane_id_on_crtc(crtc, plane_id) 3812 - g4x_raw_plane_wm_set(crtc_state, level, 3813 - plane_id, USHRT_MAX); 3814 - g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); 3815 - 3816 - g4x_invalidate_wms(crtc, active, level); 3817 - 3818 - crtc_state->wm.g4x.optimal = *active; 3819 - crtc_state->wm.g4x.intermediate = *active; 3820 - 3821 - drm_dbg_kms(&dev_priv->drm, 3822 - "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", 3823 - pipe_name(pipe), 3824 - wm->pipe[pipe].plane[PLANE_PRIMARY], 3825 - 
wm->pipe[pipe].plane[PLANE_CURSOR], 3826 - wm->pipe[pipe].plane[PLANE_SPRITE0]); 3827 - } 3828 - 3829 - drm_dbg_kms(&dev_priv->drm, 3830 - "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n", 3831 - wm->sr.plane, wm->sr.cursor, wm->sr.fbc); 3832 - drm_dbg_kms(&dev_priv->drm, 3833 - "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", 3834 - wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); 3835 - drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n", 3836 - str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en), 3837 - str_yes_no(wm->fbc_en)); 3838 - } 3839 - 3840 - void g4x_wm_sanitize(struct drm_i915_private *dev_priv) 3841 - { 3842 - struct intel_plane *plane; 3843 - struct intel_crtc *crtc; 3844 - 3845 - mutex_lock(&dev_priv->display.wm.wm_mutex); 3846 - 3847 - for_each_intel_plane(&dev_priv->drm, plane) { 3848 - struct intel_crtc *crtc = 3849 - intel_crtc_for_pipe(dev_priv, plane->pipe); 3850 - struct intel_crtc_state *crtc_state = 3851 - to_intel_crtc_state(crtc->base.state); 3852 - struct intel_plane_state *plane_state = 3853 - to_intel_plane_state(plane->base.state); 3854 - enum plane_id plane_id = plane->id; 3855 - int level, num_levels = intel_wm_num_levels(dev_priv); 3856 - 3857 - if (plane_state->uapi.visible) 3858 - continue; 3859 - 3860 - for (level = 0; level < num_levels; level++) { 3861 - struct g4x_pipe_wm *raw = 3862 - &crtc_state->wm.g4x.raw[level]; 3863 - 3864 - raw->plane[plane_id] = 0; 3865 - 3866 - if (plane_id == PLANE_PRIMARY) 3867 - raw->fbc = 0; 3868 - } 3869 - } 3870 - 3871 - for_each_intel_crtc(&dev_priv->drm, crtc) { 3872 - struct intel_crtc_state *crtc_state = 3873 - to_intel_crtc_state(crtc->base.state); 3874 - int ret; 3875 - 3876 - ret = _g4x_compute_pipe_wm(crtc_state); 3877 - drm_WARN_ON(&dev_priv->drm, ret); 3878 - 3879 - crtc_state->wm.g4x.intermediate = 3880 - crtc_state->wm.g4x.optimal; 3881 - crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; 3882 - } 3883 - 3884 - g4x_program_watermarks(dev_priv); 3885 - 3886 - 
mutex_unlock(&dev_priv->display.wm.wm_mutex); 3887 - } 3888 - 3889 - void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) 3890 - { 3891 - struct vlv_wm_values *wm = &dev_priv->display.wm.vlv; 3892 - struct intel_crtc *crtc; 3893 - u32 val; 3894 - 3895 - vlv_read_wm_values(dev_priv, wm); 3896 - 3897 - wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 3898 - wm->level = VLV_WM_LEVEL_PM2; 3899 - 3900 - if (IS_CHERRYVIEW(dev_priv)) { 3901 - vlv_punit_get(dev_priv); 3902 - 3903 - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 3904 - if (val & DSP_MAXFIFO_PM5_ENABLE) 3905 - wm->level = VLV_WM_LEVEL_PM5; 3906 - 3907 - /* 3908 - * If DDR DVFS is disabled in the BIOS, Punit 3909 - * will never ack the request. So if that happens 3910 - * assume we don't have to enable/disable DDR DVFS 3911 - * dynamically. To test that just set the REQ_ACK 3912 - * bit to poke the Punit, but don't change the 3913 - * HIGH/LOW bits so that we don't actually change 3914 - * the current state. 
3915 - */ 3916 - val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 3917 - val |= FORCE_DDR_FREQ_REQ_ACK; 3918 - vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); 3919 - 3920 - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & 3921 - FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { 3922 - drm_dbg_kms(&dev_priv->drm, 3923 - "Punit not acking DDR DVFS request, " 3924 - "assuming DDR DVFS is disabled\n"); 3925 - dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5; 3926 - } else { 3927 - val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 3928 - if ((val & FORCE_DDR_HIGH_FREQ) == 0) 3929 - wm->level = VLV_WM_LEVEL_DDR_DVFS; 3930 - } 3931 - 3932 - vlv_punit_put(dev_priv); 3933 - } 3934 - 3935 - for_each_intel_crtc(&dev_priv->drm, crtc) { 3936 - struct intel_crtc_state *crtc_state = 3937 - to_intel_crtc_state(crtc->base.state); 3938 - struct vlv_wm_state *active = &crtc->wm.active.vlv; 3939 - const struct vlv_fifo_state *fifo_state = 3940 - &crtc_state->wm.vlv.fifo_state; 3941 - enum pipe pipe = crtc->pipe; 3942 - enum plane_id plane_id; 3943 - int level; 3944 - 3945 - vlv_get_fifo_size(crtc_state); 3946 - 3947 - active->num_levels = wm->level + 1; 3948 - active->cxsr = wm->cxsr; 3949 - 3950 - for (level = 0; level < active->num_levels; level++) { 3951 - struct g4x_pipe_wm *raw = 3952 - &crtc_state->wm.vlv.raw[level]; 3953 - 3954 - active->sr[level].plane = wm->sr.plane; 3955 - active->sr[level].cursor = wm->sr.cursor; 3956 - 3957 - for_each_plane_id_on_crtc(crtc, plane_id) { 3958 - active->wm[level].plane[plane_id] = 3959 - wm->pipe[pipe].plane[plane_id]; 3960 - 3961 - raw->plane[plane_id] = 3962 - vlv_invert_wm_value(active->wm[level].plane[plane_id], 3963 - fifo_state->plane[plane_id]); 3964 - } 3965 - } 3966 - 3967 - for_each_plane_id_on_crtc(crtc, plane_id) 3968 - vlv_raw_plane_wm_set(crtc_state, level, 3969 - plane_id, USHRT_MAX); 3970 - vlv_invalidate_wms(crtc, active, level); 3971 - 3972 - crtc_state->wm.vlv.optimal = *active; 3973 - 
crtc_state->wm.vlv.intermediate = *active; 3974 - 3975 - drm_dbg_kms(&dev_priv->drm, 3976 - "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", 3977 - pipe_name(pipe), 3978 - wm->pipe[pipe].plane[PLANE_PRIMARY], 3979 - wm->pipe[pipe].plane[PLANE_CURSOR], 3980 - wm->pipe[pipe].plane[PLANE_SPRITE0], 3981 - wm->pipe[pipe].plane[PLANE_SPRITE1]); 3982 - } 3983 - 3984 - drm_dbg_kms(&dev_priv->drm, 3985 - "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", 3986 - wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); 3987 - } 3988 - 3989 - void vlv_wm_sanitize(struct drm_i915_private *dev_priv) 3990 - { 3991 - struct intel_plane *plane; 3992 - struct intel_crtc *crtc; 3993 - 3994 - mutex_lock(&dev_priv->display.wm.wm_mutex); 3995 - 3996 - for_each_intel_plane(&dev_priv->drm, plane) { 3997 - struct intel_crtc *crtc = 3998 - intel_crtc_for_pipe(dev_priv, plane->pipe); 3999 - struct intel_crtc_state *crtc_state = 4000 - to_intel_crtc_state(crtc->base.state); 4001 - struct intel_plane_state *plane_state = 4002 - to_intel_plane_state(plane->base.state); 4003 - enum plane_id plane_id = plane->id; 4004 - int level, num_levels = intel_wm_num_levels(dev_priv); 4005 - 4006 - if (plane_state->uapi.visible) 4007 - continue; 4008 - 4009 - for (level = 0; level < num_levels; level++) { 4010 - struct g4x_pipe_wm *raw = 4011 - &crtc_state->wm.vlv.raw[level]; 4012 - 4013 - raw->plane[plane_id] = 0; 4014 - } 4015 - } 4016 - 4017 - for_each_intel_crtc(&dev_priv->drm, crtc) { 4018 - struct intel_crtc_state *crtc_state = 4019 - to_intel_crtc_state(crtc->base.state); 4020 - int ret; 4021 - 4022 - ret = _vlv_compute_pipe_wm(crtc_state); 4023 - drm_WARN_ON(&dev_priv->drm, ret); 4024 - 4025 - crtc_state->wm.vlv.intermediate = 4026 - crtc_state->wm.vlv.optimal; 4027 - crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; 4028 - } 4029 - 4030 - vlv_program_watermarks(dev_priv); 4031 - 4032 - mutex_unlock(&dev_priv->display.wm.wm_mutex); 4033 - } 4034 - 4035 - /* 
4036 - * FIXME should probably kill this and improve 4037 - * the real watermark readout/sanitation instead 4038 - */ 4039 - static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) 4040 - { 4041 - intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0); 4042 - intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0); 4043 - intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0); 4044 - 4045 - /* 4046 - * Don't touch WM_LP_SPRITE_ENABLE here. 4047 - * Doing so could cause underruns. 4048 - */ 4049 - } 4050 - 4051 - void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) 4052 - { 4053 - struct ilk_wm_values *hw = &dev_priv->display.wm.hw; 4054 - struct intel_crtc *crtc; 4055 - 4056 - ilk_init_lp_watermarks(dev_priv); 4057 - 4058 - for_each_intel_crtc(&dev_priv->drm, crtc) 4059 - ilk_pipe_wm_get_hw_state(crtc); 4060 - 4061 - hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK); 4062 - hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK); 4063 - hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK); 4064 - 4065 - hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK); 4066 - if (DISPLAY_VER(dev_priv) >= 7) { 4067 - hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB); 4068 - hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB); 4069 - } 4070 - 4071 - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 4072 - hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 4073 - INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 4074 - else if (IS_IVYBRIDGE(dev_priv)) 4075 - hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? 
4076 - INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 4077 - 4078 - hw->enable_fbc_wm = 4079 - !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); 4080 132 } 4081 133 4082 134 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) ··· 320 4282 0, TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 321 4283 } 322 4284 323 - static void lpt_suspend_hw(struct drm_i915_private *dev_priv) 324 - { 325 - if (HAS_PCH_LPT_LP(dev_priv)) { 326 - u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D); 327 - 328 - val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 329 - intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val); 330 - } 331 - } 332 - 333 4285 static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, 334 4286 int general_prio_credits, 335 4287 int high_prio_credits) ··· 364 4336 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 365 4337 DPFC_CHICKEN_COMP_DUMMY_PIXEL); 366 4338 367 - /* Wa_1409825376:tgl (pre-prod)*/ 368 - if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) 369 - intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, TGL_VRH_GATING_DIS); 370 - 371 4339 /* Wa_14013723622:tgl,rkl,dg1,adl-s */ 372 4340 if (DISPLAY_VER(dev_priv) == 12) 373 4341 intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY, ··· 379 4355 380 4356 /* Bspec/49189 Initialize Sequence */ 381 4357 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0); 382 - } 383 - 384 - static void dg1_init_clock_gating(struct drm_i915_private *dev_priv) 385 - { 386 - gen12lp_init_clock_gating(dev_priv); 387 - 388 - /* Wa_1409836686:dg1[a0] */ 389 - if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0)) 390 - intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, DPT_GATING_DIS); 391 4358 } 392 4359 393 4360 static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv) ··· 779 4764 dev_priv->clock_gating_funcs->init_clock_gating(dev_priv); 780 4765 } 781 4766 782 - void intel_suspend_hw(struct drm_i915_private 
*dev_priv) 783 - { 784 - if (HAS_PCH_LPT(dev_priv)) 785 - lpt_suspend_hw(dev_priv); 786 - } 787 - 788 4767 static void nop_init_clock_gating(struct drm_i915_private *dev_priv) 789 4768 { 790 4769 drm_dbg_kms(&dev_priv->drm, ··· 794 4785 CG_FUNCS(dg2); 795 4786 CG_FUNCS(xehpsdv); 796 4787 CG_FUNCS(adlp); 797 - CG_FUNCS(dg1); 798 4788 CG_FUNCS(gen12lp); 799 4789 CG_FUNCS(icl); 800 4790 CG_FUNCS(cfl); ··· 828 4820 */ 829 4821 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) 830 4822 { 831 - if (IS_PONTEVECCHIO(dev_priv)) 4823 + if (IS_METEORLAKE(dev_priv)) 4824 + dev_priv->clock_gating_funcs = &nop_clock_gating_funcs; 4825 + else if (IS_PONTEVECCHIO(dev_priv)) 832 4826 dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs; 833 4827 else if (IS_DG2(dev_priv)) 834 4828 dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs; ··· 838 4828 dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs; 839 4829 else if (IS_ALDERLAKE_P(dev_priv)) 840 4830 dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs; 841 - else if (IS_DG1(dev_priv)) 842 - dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs; 843 4831 else if (GRAPHICS_VER(dev_priv) == 12) 844 4832 dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs; 845 4833 else if (GRAPHICS_VER(dev_priv) == 11) ··· 882 4874 MISSING_CASE(INTEL_DEVID(dev_priv)); 883 4875 dev_priv->clock_gating_funcs = &nop_clock_gating_funcs; 884 4876 } 885 - } 886 - 887 - static const struct intel_wm_funcs ilk_wm_funcs = { 888 - .compute_pipe_wm = ilk_compute_pipe_wm, 889 - .compute_intermediate_wm = ilk_compute_intermediate_wm, 890 - .initial_watermarks = ilk_initial_watermarks, 891 - .optimize_watermarks = ilk_optimize_watermarks, 892 - }; 893 - 894 - static const struct intel_wm_funcs vlv_wm_funcs = { 895 - .compute_pipe_wm = vlv_compute_pipe_wm, 896 - .compute_intermediate_wm = vlv_compute_intermediate_wm, 897 - .initial_watermarks = vlv_initial_watermarks, 898 - .optimize_watermarks = vlv_optimize_watermarks, 899 
- .atomic_update_watermarks = vlv_atomic_update_fifo, 900 - }; 901 - 902 - static const struct intel_wm_funcs g4x_wm_funcs = { 903 - .compute_pipe_wm = g4x_compute_pipe_wm, 904 - .compute_intermediate_wm = g4x_compute_intermediate_wm, 905 - .initial_watermarks = g4x_initial_watermarks, 906 - .optimize_watermarks = g4x_optimize_watermarks, 907 - }; 908 - 909 - static const struct intel_wm_funcs pnv_wm_funcs = { 910 - .update_wm = pnv_update_wm, 911 - }; 912 - 913 - static const struct intel_wm_funcs i965_wm_funcs = { 914 - .update_wm = i965_update_wm, 915 - }; 916 - 917 - static const struct intel_wm_funcs i9xx_wm_funcs = { 918 - .update_wm = i9xx_update_wm, 919 - }; 920 - 921 - static const struct intel_wm_funcs i845_wm_funcs = { 922 - .update_wm = i845_update_wm, 923 - }; 924 - 925 - static const struct intel_wm_funcs nop_funcs = { 926 - }; 927 - 928 - /* Set up chip specific power management-related functions */ 929 - void intel_init_pm(struct drm_i915_private *dev_priv) 930 - { 931 - if (DISPLAY_VER(dev_priv) >= 9) { 932 - skl_wm_init(dev_priv); 933 - return; 934 - } 935 - 936 - /* For cxsr */ 937 - if (IS_PINEVIEW(dev_priv)) 938 - pnv_get_mem_freq(dev_priv); 939 - else if (GRAPHICS_VER(dev_priv) == 5) 940 - ilk_get_mem_freq(dev_priv); 941 - 942 - /* For FIFO watermark updates */ 943 - if (HAS_PCH_SPLIT(dev_priv)) { 944 - ilk_setup_wm_latency(dev_priv); 945 - 946 - if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] && 947 - dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) || 948 - (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] && 949 - dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) { 950 - dev_priv->display.funcs.wm = &ilk_wm_funcs; 951 - } else { 952 - drm_dbg_kms(&dev_priv->drm, 953 - "Failed to read display plane latency. 
" 954 - "Disable CxSR\n"); 955 - dev_priv->display.funcs.wm = &nop_funcs; 956 - } 957 - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 958 - vlv_setup_wm_latency(dev_priv); 959 - dev_priv->display.funcs.wm = &vlv_wm_funcs; 960 - } else if (IS_G4X(dev_priv)) { 961 - g4x_setup_wm_latency(dev_priv); 962 - dev_priv->display.funcs.wm = &g4x_wm_funcs; 963 - } else if (IS_PINEVIEW(dev_priv)) { 964 - if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), 965 - dev_priv->is_ddr3, 966 - dev_priv->fsb_freq, 967 - dev_priv->mem_freq)) { 968 - drm_info(&dev_priv->drm, 969 - "failed to find known CxSR latency " 970 - "(found ddr%s fsb freq %d, mem freq %d), " 971 - "disabling CxSR\n", 972 - (dev_priv->is_ddr3 == 1) ? "3" : "2", 973 - dev_priv->fsb_freq, dev_priv->mem_freq); 974 - /* Disable CxSR and never update its watermark again */ 975 - intel_set_memory_cxsr(dev_priv, false); 976 - dev_priv->display.funcs.wm = &nop_funcs; 977 - } else 978 - dev_priv->display.funcs.wm = &pnv_wm_funcs; 979 - } else if (DISPLAY_VER(dev_priv) == 4) { 980 - dev_priv->display.funcs.wm = &i965_wm_funcs; 981 - } else if (DISPLAY_VER(dev_priv) == 3) { 982 - dev_priv->display.funcs.wm = &i9xx_wm_funcs; 983 - } else if (DISPLAY_VER(dev_priv) == 2) { 984 - if (INTEL_NUM_PIPES(dev_priv) == 1) 985 - dev_priv->display.funcs.wm = &i845_wm_funcs; 986 - else 987 - dev_priv->display.funcs.wm = &i9xx_wm_funcs; 988 - } else { 989 - drm_err(&dev_priv->drm, 990 - "unexpected fall-through in %s\n", __func__); 991 - dev_priv->display.funcs.wm = &nop_funcs; 992 - } 993 - } 994 - 995 - void intel_pm_setup(struct drm_i915_private *dev_priv) 996 - { 997 - dev_priv->runtime_pm.suspended = false; 998 - atomic_set(&dev_priv->runtime_pm.wakeref_count, 0); 999 4877 }
-16
drivers/gpu/drm/i915/intel_pm.h
··· 13 13 struct intel_plane_state; 14 14 15 15 void intel_init_clock_gating(struct drm_i915_private *dev_priv); 16 - void intel_suspend_hw(struct drm_i915_private *dev_priv); 17 - int ilk_wm_max_level(const struct drm_i915_private *dev_priv); 18 - void intel_init_pm(struct drm_i915_private *dev_priv); 19 16 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); 20 - void intel_pm_setup(struct drm_i915_private *dev_priv); 21 - void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv); 22 - void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv); 23 - void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv); 24 - void g4x_wm_sanitize(struct drm_i915_private *dev_priv); 25 - void vlv_wm_sanitize(struct drm_i915_private *dev_priv); 26 - bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv); 27 - bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, 28 - const struct intel_plane_state *plane_state); 29 - void intel_print_wm_latency(struct drm_i915_private *dev_priv, 30 - const char *name, const u16 wm[]); 31 - 32 - bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); 33 17 34 18 #endif /* __INTEL_PM_H__ */
+4 -4
drivers/gpu/drm/i915/intel_pm_types.h drivers/gpu/drm/i915/display/intel_wm_types.h
··· 3 3 * Copyright © 2021 Intel Corporation 4 4 */ 5 5 6 - #ifndef __INTEL_PM_TYPES_H__ 7 - #define __INTEL_PM_TYPES_H__ 6 + #ifndef __INTEL_WM_TYPES_H__ 7 + #define __INTEL_WM_TYPES_H__ 8 8 9 9 #include <linux/types.h> 10 10 11 - #include "display/intel_display_limits.h" 11 + #include "intel_display_limits.h" 12 12 13 13 enum intel_ddb_partitioning { 14 14 INTEL_DDB_PART_1_2, ··· 73 73 return false; 74 74 } 75 75 76 - #endif /* __INTEL_PM_TYPES_H__ */ 76 + #endif /* __INTEL_WM_TYPES_H__ */
+2
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 652 652 653 653 rpm->kdev = kdev; 654 654 rpm->available = HAS_RUNTIME_PM(i915); 655 + rpm->suspended = false; 656 + atomic_set(&rpm->wakeref_count, 0); 655 657 656 658 init_intel_runtime_pm_wakeref(rpm); 657 659 INIT_LIST_HEAD(&rpm->lmem_userfault_list);
+3 -3
drivers/gpu/drm/i915/intel_uncore.c
··· 32 32 #include "i915_reg.h" 33 33 #include "i915_trace.h" 34 34 #include "i915_vgpu.h" 35 - #include "intel_pm.h" 36 35 37 36 #define FORCEWAKE_ACK_TIMEOUT_MS 50 38 37 #define GT_FIFO_TIMEOUT_MS 10 ··· 2459 2460 2460 2461 static void uncore_unmap_mmio(struct drm_device *drm, void *regs) 2461 2462 { 2462 - iounmap(regs); 2463 + iounmap((void __iomem *)regs); 2463 2464 } 2464 2465 2465 2466 int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr) ··· 2490 2491 return -EIO; 2491 2492 } 2492 2493 2493 - return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs); 2494 + return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, 2495 + (void __force *)uncore->regs); 2494 2496 } 2495 2497 2496 2498 void intel_uncore_init_early(struct intel_uncore *uncore,
+56 -9
drivers/gpu/drm/i915/pxp/intel_pxp.c
··· 270 270 return bound; 271 271 } 272 272 273 + static int __pxp_global_teardown_final(struct intel_pxp *pxp) 274 + { 275 + if (!pxp->arb_is_valid) 276 + return 0; 277 + /* 278 + * To ensure synchronous and coherent session teardown completion 279 + * in response to suspend or shutdown triggers, don't use a worker. 280 + */ 281 + intel_pxp_mark_termination_in_progress(pxp); 282 + intel_pxp_terminate(pxp, false); 283 + 284 + if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250))) 285 + return -ETIMEDOUT; 286 + 287 + return 0; 288 + } 289 + 290 + static int __pxp_global_teardown_restart(struct intel_pxp *pxp) 291 + { 292 + if (pxp->arb_is_valid) 293 + return 0; 294 + /* 295 + * The arb-session is currently inactive and we are doing a reset and restart 296 + * due to a runtime event. Use the worker that was designed for this. 297 + */ 298 + pxp_queue_termination(pxp); 299 + 300 + if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250))) 301 + return -ETIMEDOUT; 302 + 303 + return 0; 304 + } 305 + 306 + void intel_pxp_end(struct intel_pxp *pxp) 307 + { 308 + struct drm_i915_private *i915 = pxp->ctrl_gt->i915; 309 + intel_wakeref_t wakeref; 310 + 311 + if (!intel_pxp_is_enabled(pxp)) 312 + return; 313 + 314 + wakeref = intel_runtime_pm_get(&i915->runtime_pm); 315 + 316 + mutex_lock(&pxp->arb_mutex); 317 + 318 + if (__pxp_global_teardown_final(pxp)) 319 + drm_dbg(&i915->drm, "PXP end timed out\n"); 320 + 321 + mutex_unlock(&pxp->arb_mutex); 322 + 323 + intel_pxp_fini_hw(pxp); 324 + intel_runtime_pm_put(&i915->runtime_pm, wakeref); 325 + } 326 + 273 327 /* 274 328 * the arb session is restarted from the irq work when we receive the 275 329 * termination completion interrupt ··· 340 286 341 287 mutex_lock(&pxp->arb_mutex); 342 288 343 - if (pxp->arb_is_valid) 289 + ret = __pxp_global_teardown_restart(pxp); 290 + if (ret) 344 291 goto unlock; 345 - 346 - pxp_queue_termination(pxp); 347 - 348 - if 
(!wait_for_completion_timeout(&pxp->termination, 349 - msecs_to_jiffies(250))) { 350 - ret = -ETIMEDOUT; 351 - goto unlock; 352 - } 353 292 354 293 /* make sure the compiler doesn't optimize the double access */ 355 294 barrier();
+2
drivers/gpu/drm/i915/pxp/intel_pxp.h
··· 24 24 void intel_pxp_fini_hw(struct intel_pxp *pxp); 25 25 26 26 void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp); 27 + void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id); 27 28 28 29 int intel_pxp_start(struct intel_pxp *pxp); 30 + void intel_pxp_end(struct intel_pxp *pxp); 29 31 30 32 int intel_pxp_key_check(struct intel_pxp *pxp, 31 33 struct drm_i915_gem_object *obj,
+15
drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h
··· 12 12 /* PXP-Opcode for Init Session */ 13 13 #define PXP42_CMDID_INIT_SESSION 0x1e 14 14 15 + /* PXP-Opcode for Invalidate Stream Key */ 16 + #define PXP42_CMDID_INVALIDATE_STREAM_KEY 0x00000007 17 + 15 18 /* PXP-Input-Packet: Init Session (Arb-Session) */ 16 19 struct pxp42_create_arb_in { 17 20 struct pxp_cmd_header header; ··· 26 23 /* PXP-Output-Packet: Init Session */ 27 24 struct pxp42_create_arb_out { 28 25 struct pxp_cmd_header header; 26 + } __packed; 27 + 28 + /* PXP-Input-Packet: Invalidate Stream Key */ 29 + struct pxp42_inv_stream_key_in { 30 + struct pxp_cmd_header header; 31 + u32 rsvd[3]; 32 + } __packed; 33 + 34 + /* PXP-Output-Packet: Invalidate Stream Key */ 35 + struct pxp42_inv_stream_key_out { 36 + struct pxp_cmd_header header; 37 + u32 rsvd; 29 38 } __packed; 30 39 31 40 #endif /* __INTEL_PXP_FW_INTERFACE_42_H__ */
+3
drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
··· 28 28 union { 29 29 u32 status; /* out */ 30 30 u32 stream_id; /* in */ 31 + #define PXP_CMDHDR_EXTDATA_SESSION_VALID GENMASK(0, 0) 32 + #define PXP_CMDHDR_EXTDATA_APP_TYPE GENMASK(1, 1) 33 + #define PXP_CMDHDR_EXTDATA_SESSION_ID GENMASK(17, 2) 31 34 }; 32 35 /* Length of the message (excluding the header) */ 33 36 u32 buffer_len;
+2 -2
drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
··· 16 16 if (!intel_pxp_is_enabled(pxp)) 17 17 return; 18 18 19 - pxp->arb_is_valid = false; 19 + intel_pxp_end(pxp); 20 20 21 21 intel_pxp_invalidate(pxp); 22 22 } ··· 34 34 } 35 35 } 36 36 37 - void intel_pxp_resume(struct intel_pxp *pxp) 37 + void intel_pxp_resume_complete(struct intel_pxp *pxp) 38 38 { 39 39 if (!intel_pxp_is_enabled(pxp)) 40 40 return;
+3 -3
drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
··· 11 11 #ifdef CONFIG_DRM_I915_PXP 12 12 void intel_pxp_suspend_prepare(struct intel_pxp *pxp); 13 13 void intel_pxp_suspend(struct intel_pxp *pxp); 14 - void intel_pxp_resume(struct intel_pxp *pxp); 14 + void intel_pxp_resume_complete(struct intel_pxp *pxp); 15 15 void intel_pxp_runtime_suspend(struct intel_pxp *pxp); 16 16 #else 17 17 static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp) ··· 22 22 { 23 23 } 24 24 25 - static inline void intel_pxp_resume(struct intel_pxp *pxp) 25 + static inline void intel_pxp_resume_complete(struct intel_pxp *pxp) 26 26 { 27 27 } 28 28 ··· 32 32 #endif 33 33 static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp) 34 34 { 35 - intel_pxp_resume(pxp); 35 + intel_pxp_resume_complete(pxp); 36 36 } 37 37 #endif /* __INTEL_PXP_PM_H__ */
+5 -3
drivers/gpu/drm/i915/pxp/intel_pxp_session.c
··· 110 110 111 111 intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1); 112 112 113 + intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION); 114 + 113 115 return ret; 114 116 } 115 117 116 - static void pxp_terminate(struct intel_pxp *pxp) 118 + void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart) 117 119 { 118 120 int ret; 119 121 120 - pxp->hw_state_invalidated = true; 122 + pxp->hw_state_invalidated = post_invalidation_needs_restart; 121 123 122 124 /* 123 125 * if we fail to submit the termination there is no point in waiting for ··· 167 165 168 166 if (events & PXP_TERMINATION_REQUEST) { 169 167 events &= ~PXP_TERMINATION_COMPLETE; 170 - pxp_terminate(pxp); 168 + intel_pxp_terminate(pxp, true); 171 169 } 172 170 173 171 if (events & PXP_TERMINATION_COMPLETE)
+5
drivers/gpu/drm/i915/pxp/intel_pxp_session.h
··· 12 12 13 13 #ifdef CONFIG_DRM_I915_PXP 14 14 void intel_pxp_session_management_init(struct intel_pxp *pxp); 15 + void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart); 15 16 #else 16 17 static inline void intel_pxp_session_management_init(struct intel_pxp *pxp) 18 + { 19 + } 20 + 21 + static inline void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart) 17 22 { 18 23 } 19 24 #endif
+46
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
··· 127 127 intel_wakeref_t wakeref; 128 128 int ret = 0; 129 129 130 + if (!HAS_HECI_PXP(i915)) { 131 + pxp->dev_link = device_link_add(i915_kdev, tee_kdev, DL_FLAG_STATELESS); 132 + if (drm_WARN_ON(&i915->drm, !pxp->dev_link)) 133 + return -ENODEV; 134 + } 135 + 130 136 mutex_lock(&pxp->tee_mutex); 131 137 pxp->pxp_component = data; 132 138 pxp->pxp_component->tee_dev = tee_kdev; ··· 175 169 mutex_lock(&pxp->tee_mutex); 176 170 pxp->pxp_component = NULL; 177 171 mutex_unlock(&pxp->tee_mutex); 172 + 173 + if (pxp->dev_link) { 174 + device_link_del(pxp->dev_link); 175 + pxp->dev_link = NULL; 176 + } 178 177 } 179 178 180 179 static const struct component_ops i915_pxp_tee_component_ops = { ··· 318 307 msg_out.header.status); 319 308 320 309 return ret; 310 + } 311 + 312 + void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id) 313 + { 314 + struct drm_i915_private *i915 = pxp->ctrl_gt->i915; 315 + struct pxp42_inv_stream_key_in msg_in = {0}; 316 + struct pxp42_inv_stream_key_out msg_out = {0}; 317 + int ret, trials = 0; 318 + 319 + try_again: 320 + memset(&msg_in, 0, sizeof(msg_in)); 321 + memset(&msg_out, 0, sizeof(msg_out)); 322 + msg_in.header.api_version = PXP_APIVER(4, 2); 323 + msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY; 324 + msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header); 325 + 326 + msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1); 327 + msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0); 328 + msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id); 329 + 330 + ret = intel_pxp_tee_io_message(pxp, 331 + &msg_in, sizeof(msg_in), 332 + &msg_out, sizeof(msg_out), 333 + NULL); 334 + 335 + /* Cleanup coherency between GT and Firmware is critical, so try again if it fails */ 336 + if ((ret || msg_out.header.status != 0x0) && ++trials < 3) 337 + goto try_again; 338 + 339 + if (ret) 340 + drm_err(&i915->drm, "Failed to send tee msg for 
inv-stream-key-%d, ret=[%d]\n", 341 + session_id, ret); 342 + else if (msg_out.header.status != 0x0) 343 + drm_warn(&i915->drm, "PXP firmware failed inv-stream-key-%d with status 0x%08x\n", 344 + session_id, msg_out.header.status); 321 345 }
+3
drivers/gpu/drm/i915/pxp/intel_pxp_types.h
··· 32 32 * which are protected by &tee_mutex. 33 33 */ 34 34 struct i915_pxp_component *pxp_component; 35 + 36 + /* @dev_link: Enforce module relationship for power management ordering. */ 37 + struct device_link *dev_link; 35 38 /** 36 39 * @pxp_component_added: track if the pxp component has been added. 37 40 * Set and cleared in tee init and fini functions respectively.
+152
drivers/gpu/drm/i915/soc/intel_dram.c
··· 10 10 #include "intel_dram.h" 11 11 #include "intel_mchbar_regs.h" 12 12 #include "intel_pcode.h" 13 + #include "vlv_sideband.h" 13 14 14 15 struct dram_dimm_info { 15 16 u16 size; ··· 42 41 } 43 42 44 43 #undef DRAM_TYPE_STR 44 + 45 + static void pnv_detect_mem_freq(struct drm_i915_private *dev_priv) 46 + { 47 + u32 tmp; 48 + 49 + tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG); 50 + 51 + switch (tmp & CLKCFG_FSB_MASK) { 52 + case CLKCFG_FSB_533: 53 + dev_priv->fsb_freq = 533; /* 133*4 */ 54 + break; 55 + case CLKCFG_FSB_800: 56 + dev_priv->fsb_freq = 800; /* 200*4 */ 57 + break; 58 + case CLKCFG_FSB_667: 59 + dev_priv->fsb_freq = 667; /* 167*4 */ 60 + break; 61 + case CLKCFG_FSB_400: 62 + dev_priv->fsb_freq = 400; /* 100*4 */ 63 + break; 64 + } 65 + 66 + switch (tmp & CLKCFG_MEM_MASK) { 67 + case CLKCFG_MEM_533: 68 + dev_priv->mem_freq = 533; 69 + break; 70 + case CLKCFG_MEM_667: 71 + dev_priv->mem_freq = 667; 72 + break; 73 + case CLKCFG_MEM_800: 74 + dev_priv->mem_freq = 800; 75 + break; 76 + } 77 + 78 + /* detect pineview DDR3 setting */ 79 + tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL); 80 + dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 
1 : 0; 81 + } 82 + 83 + static void ilk_detect_mem_freq(struct drm_i915_private *dev_priv) 84 + { 85 + u16 ddrpll, csipll; 86 + 87 + ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1); 88 + switch (ddrpll & 0xff) { 89 + case 0xc: 90 + dev_priv->mem_freq = 800; 91 + break; 92 + case 0x10: 93 + dev_priv->mem_freq = 1066; 94 + break; 95 + case 0x14: 96 + dev_priv->mem_freq = 1333; 97 + break; 98 + case 0x18: 99 + dev_priv->mem_freq = 1600; 100 + break; 101 + default: 102 + drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n", 103 + ddrpll & 0xff); 104 + dev_priv->mem_freq = 0; 105 + break; 106 + } 107 + 108 + csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0); 109 + switch (csipll & 0x3ff) { 110 + case 0x00c: 111 + dev_priv->fsb_freq = 3200; 112 + break; 113 + case 0x00e: 114 + dev_priv->fsb_freq = 3733; 115 + break; 116 + case 0x010: 117 + dev_priv->fsb_freq = 4266; 118 + break; 119 + case 0x012: 120 + dev_priv->fsb_freq = 4800; 121 + break; 122 + case 0x014: 123 + dev_priv->fsb_freq = 5333; 124 + break; 125 + case 0x016: 126 + dev_priv->fsb_freq = 5866; 127 + break; 128 + case 0x018: 129 + dev_priv->fsb_freq = 6400; 130 + break; 131 + default: 132 + drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n", 133 + csipll & 0x3ff); 134 + dev_priv->fsb_freq = 0; 135 + break; 136 + } 137 + } 138 + 139 + static void chv_detect_mem_freq(struct drm_i915_private *i915) 140 + { 141 + u32 val; 142 + 143 + vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK)); 144 + val = vlv_cck_read(i915, CCK_FUSE_REG); 145 + vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK)); 146 + 147 + switch ((val >> 2) & 0x7) { 148 + case 3: 149 + i915->mem_freq = 2000; 150 + break; 151 + default: 152 + i915->mem_freq = 1600; 153 + break; 154 + } 155 + } 156 + 157 + static void vlv_detect_mem_freq(struct drm_i915_private *i915) 158 + { 159 + u32 val; 160 + 161 + vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT)); 162 + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 163 + vlv_iosf_sb_put(i915, 
BIT(VLV_IOSF_SB_PUNIT)); 164 + 165 + switch ((val >> 6) & 3) { 166 + case 0: 167 + case 1: 168 + i915->mem_freq = 800; 169 + break; 170 + case 2: 171 + i915->mem_freq = 1066; 172 + break; 173 + case 3: 174 + i915->mem_freq = 1333; 175 + break; 176 + } 177 + } 178 + 179 + static void detect_mem_freq(struct drm_i915_private *i915) 180 + { 181 + if (IS_PINEVIEW(i915)) 182 + pnv_detect_mem_freq(i915); 183 + else if (GRAPHICS_VER(i915) == 5) 184 + ilk_detect_mem_freq(i915); 185 + else if (IS_CHERRYVIEW(i915)) 186 + chv_detect_mem_freq(i915); 187 + else if (IS_VALLEYVIEW(i915)) 188 + vlv_detect_mem_freq(i915); 189 + 190 + if (i915->mem_freq) 191 + drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); 192 + } 45 193 46 194 static int intel_dimm_num_devices(const struct dram_dimm_info *dimm) 47 195 { ··· 656 506 { 657 507 struct dram_info *dram_info = &i915->dram_info; 658 508 int ret; 509 + 510 + detect_mem_freq(i915); 659 511 660 512 if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915)) 661 513 return;
+3 -1
drivers/misc/mei/client.c
··· 1343 1343 struct mei_cl_vtag *vtag_l; 1344 1344 1345 1345 list_for_each_entry(vtag_l, &cl->vtag_map, list) { 1346 - if (vtag_l->vtag == vtag) { 1346 + /* The client on bus has one fixed vtag map */ 1347 + if ((cl->cldev && mei_cldev_enabled(cl->cldev)) || 1348 + vtag_l->vtag == vtag) { 1347 1349 vtag_l->pending_read = false; 1348 1350 break; 1349 1351 }
+19 -1
drivers/misc/mei/pci-me.c
··· 342 342 } 343 343 344 344 #ifdef CONFIG_PM_SLEEP 345 + static int mei_me_pci_prepare(struct device *device) 346 + { 347 + pm_runtime_resume(device); 348 + return 0; 349 + } 350 + 345 351 static int mei_me_pci_suspend(struct device *device) 346 352 { 347 353 struct pci_dev *pdev = to_pci_dev(device); ··· 404 398 405 399 return 0; 406 400 } 407 - #endif /* CONFIG_PM_SLEEP */ 401 + 402 + static void mei_me_pci_complete(struct device *device) 403 + { 404 + pm_runtime_suspend(device); 405 + } 406 + #else /* CONFIG_PM_SLEEP */ 407 + 408 + #define mei_me_pci_prepare NULL 409 + #define mei_me_pci_complete NULL 410 + 411 + #endif /* !CONFIG_PM_SLEEP */ 408 412 409 413 #ifdef CONFIG_PM 410 414 static int mei_me_pm_runtime_idle(struct device *device) ··· 517 501 } 518 502 519 503 static const struct dev_pm_ops mei_me_pm_ops = { 504 + .prepare = mei_me_pci_prepare, 505 + .complete = mei_me_pci_complete, 520 506 SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend, 521 507 mei_me_pci_resume) 522 508 SET_RUNTIME_PM_OPS(
+9 -5
include/drm/i915_pciids.h
··· 588 588 INTEL_VGA_DEVICE(0x4551, info), \ 589 589 INTEL_VGA_DEVICE(0x4555, info), \ 590 590 INTEL_VGA_DEVICE(0x4557, info), \ 591 + INTEL_VGA_DEVICE(0x4570, info), \ 591 592 INTEL_VGA_DEVICE(0x4571, info) 592 593 593 594 /* JSL */ ··· 685 684 INTEL_VGA_DEVICE(0xA78A, info), \ 686 685 INTEL_VGA_DEVICE(0xA78B, info) 687 686 687 + /* RPL-U */ 688 + #define INTEL_RPLU_IDS(info) \ 689 + INTEL_VGA_DEVICE(0xA721, info), \ 690 + INTEL_VGA_DEVICE(0xA7A1, info), \ 691 + INTEL_VGA_DEVICE(0xA7A9, info) 692 + 688 693 /* RPL-P */ 689 694 #define INTEL_RPLP_IDS(info) \ 695 + INTEL_RPLU_IDS(info), \ 690 696 INTEL_VGA_DEVICE(0xA720, info), \ 691 - INTEL_VGA_DEVICE(0xA721, info), \ 692 697 INTEL_VGA_DEVICE(0xA7A0, info), \ 693 - INTEL_VGA_DEVICE(0xA7A1, info), \ 694 - INTEL_VGA_DEVICE(0xA7A8, info), \ 695 - INTEL_VGA_DEVICE(0xA7A9, info) 698 + INTEL_VGA_DEVICE(0xA7A8, info) 696 699 697 700 /* DG2 */ 698 701 #define INTEL_DG2_G10_IDS(info) \ ··· 711 706 INTEL_VGA_DEVICE(0x5693, info), \ 712 707 INTEL_VGA_DEVICE(0x5694, info), \ 713 708 INTEL_VGA_DEVICE(0x5695, info), \ 714 - INTEL_VGA_DEVICE(0x5698, info), \ 715 709 INTEL_VGA_DEVICE(0x56A5, info), \ 716 710 INTEL_VGA_DEVICE(0x56A6, info), \ 717 711 INTEL_VGA_DEVICE(0x56B0, info), \