/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>

static bool is_mmio_work(struct intel_flip_work *work)
{
	return work->mmio_work.func;
}

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}
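/*
 * Illustrative CCK readout (register values assumed purely for the example,
 * not taken from any real SKU): if the HPLL fuse field reads 2, the VCO is
 * 2000 MHz, i.e. vlv_get_hpll_vco() returns 2000000 kHz. A CCK divider
 * field of 9 then yields
 *
 *	DIV_ROUND_CLOSEST(2000000 << 1, 9 + 1) = 400000 kHz (400 MHz)
 *
 * which is the kind of value intel_update_czclk() below caches in
 * dev_priv->czclk_freq.
 */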
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else if (IS_GEN5(dev_priv))
		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
	else
		return 270000;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
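/*
 * Worked example of the encoding described above (values taken from
 * intel_limits_ironlake_dac): .n = { .min = 1, .max = 5 } stands for
 * actual N dividers of 3..7, since the hardware register stores
 * (actual_value - 2). i9xx_calc_dpll_params() below adds the 2 back,
 * computing vco = refclk * m / (n + 2), and likewise folds the +2 for
 * m1/m2 into i9xx_dpll_compute_m().
 */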
/* LVDS 100MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
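/*
 * Worked i9xx example (divisors chosen only to illustrate the math, while
 * still satisfying intel_limits_i9xx_sdvo): with refclk = 96000 kHz and
 * m1 = 14, m2 = 7, n = 2, p1 = 2, p2 = 5:
 *
 *	m   = 5 * (14 + 2) + (7 + 2) = 89
 *	vco = 96000 * 89 / (2 + 2)   = 2136000 kHz
 *	dot = 2136000 / (2 * 5)      = 213600 kHz
 *
 * i.e. roughly a 213.6 MHz pixel clock. On VLV/CHV the returned rate is
 * additionally divided by 5, since clock.dot there is the fast (5x) clock.
 */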
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
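/*
 * Example of the p2 selection above, using intel_limits_i9xx_sdvo: a
 * 100000 kHz target is below .p2.dot_limit (200000), so p2_slow = 10 is
 * used, while a 250000 kHz target gets p2_fast = 5; LVDS instead keys off
 * the current single/dual-channel wiring. The match_clock parameter
 * exists for LVDS downclocking: a hypothetical caller would first find
 * best_clock for the nominal mode, then search again for a lower dot
 * clock while passing the first result as match_clock, so both ends of
 * the downclock share the same total P divider.
 */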
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1, m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
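/*
 * Worked VLV example (divisors illustrative only, not necessarily what the
 * search above settles on): for a 162000 kHz port clock the target fast
 * clock is 810000 kHz. With refclk = 100000 kHz, n = 1, p1 = 3, p2 = 2
 * (so p = 6) and m1 = 2, the inner loop solves
 *
 *	m2 = DIV_ROUND_CLOSEST(810000 * 6 * 1, 100000 * 2) = 24
 *
 * giving vco = 100000 * 2 * 24 / 1 = 4800000 kHz and a fast dot clock of
 * 800000 kHz, i.e. 160000 kHz after the /5. chv_find_best_dpll() below
 * works the same way, except m2 is kept in fixed point with 22 fractional
 * bits (note the << 22 shifts).
 */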
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	uint64_t m2;
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware docs, n is always set to 1 and m1 is always
	 * set to 2. If we ever need to support a 200MHz refclk we must
	 * revisit this, because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5; /* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
						    clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  target_clock, refclk, NULL, best_clock);
}

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev_priv))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}
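/*
 * Rationale for the double read above: while the pipe is running the
 * display line counter advances every scanline, e.g. roughly every 15 us
 * for a 1125-line mode at 60 Hz, so two reads taken 5 ms apart move by a
 * few hundred lines. Barring pathological aliasing of the sleep against
 * the frame period, identical samples therefore mean scanout has stopped.
 */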
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 4) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN5(dev_priv))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL(0);
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
			"cursor on pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"plane %c assertion failure (expected %s, current %s)\n",
			plane_name(plane), onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_GEN(dev_priv) >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
				"plane %c assertion failure, should be disabled but not\n",
				plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
				"plane %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int sprite;

	if (INTEL_GEN(dev_priv) >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
					"plane %d assertion failure, should be off on pipe %c but is still active\n",
					sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
			I915_STATE_WARN(val & SP_ENABLE,
					"sprite %c assertion failure, should be off on pipe %c but is still active\n",
					sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_GEN(dev_priv) >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
				"sprite %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
				"sprite %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(pipe), pipe_name(pipe));
	}
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
			"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
			i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
			&& (val & DP_PIPEB_SELECT),
			"IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
			"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
			i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
			&& (val & SDVO_PIPE_B_SELECT),
			"IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Re-enable the 10bit clock to the display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		count += crtc->base.state->active &&
			intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
	}

	return count;
}

static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose PLL is to be disabled
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
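/*
 * As the switch above encodes, the PHY "port ready" status bits for ports
 * B and C both live in DPLL(0), with port C's field sitting 4 bits above
 * port B's (hence the expected_mask <<= 4), while port D reports through
 * DPIO_PHY_STATUS instead.
 */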
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
*/ 1830 reg = TRANS_CHICKEN2(pipe); 1831 val = I915_READ(reg); 1832 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1833 I915_WRITE(reg, val); 1834 } 1835} 1836 1837void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 1838{ 1839 u32 val; 1840 1841 val = I915_READ(LPT_TRANSCONF); 1842 val &= ~TRANS_ENABLE; 1843 I915_WRITE(LPT_TRANSCONF, val); 1844 /* wait for PCH transcoder off, transcoder state */ 1845 if (intel_wait_for_register(dev_priv, 1846 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0, 1847 50)) 1848 DRM_ERROR("Failed to disable PCH transcoder\n"); 1849 1850 /* Workaround: clear timing override bit. */ 1851 val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 1852 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1853 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 1854} 1855 1856enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc) 1857{ 1858 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1859 1860 WARN_ON(!crtc->config->has_pch_encoder); 1861 1862 if (HAS_PCH_LPT(dev_priv)) 1863 return TRANSCODER_A; 1864 else 1865 return (enum transcoder) crtc->pipe; 1866} 1867 1868/** 1869 * intel_enable_pipe - enable a pipe, asserting requirements 1870 * @crtc: crtc responsible for the pipe 1871 * 1872 * Enable @crtc's pipe, making sure that various hardware specific requirements 1873 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 1874 */ 1875static void intel_enable_pipe(struct intel_crtc *crtc) 1876{ 1877 struct drm_device *dev = crtc->base.dev; 1878 struct drm_i915_private *dev_priv = to_i915(dev); 1879 enum pipe pipe = crtc->pipe; 1880 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1881 i915_reg_t reg; 1882 u32 val; 1883 1884 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); 1885 1886 assert_planes_disabled(dev_priv, pipe); 1887 assert_cursor_disabled(dev_priv, pipe); 1888 assert_sprites_disabled(dev_priv, pipe); 1889 1890 /* 1891 * A pipe without a PLL won't actually be able to drive bits from 1892 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1893 * need the check. 1894 */ 1895 if (HAS_GMCH_DISPLAY(dev_priv)) { 1896 if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI)) 1897 assert_dsi_pll_enabled(dev_priv); 1898 else 1899 assert_pll_enabled(dev_priv, pipe); 1900 } else { 1901 if (crtc->config->has_pch_encoder) { 1902 /* if driving the PCH, we need FDI enabled */ 1903 assert_fdi_rx_pll_enabled(dev_priv, 1904 (enum pipe) intel_crtc_pch_transcoder(crtc)); 1905 assert_fdi_tx_pll_enabled(dev_priv, 1906 (enum pipe) cpu_transcoder); 1907 } 1908 /* FIXME: assert CPU port conditions for SNB+ */ 1909 } 1910 1911 reg = PIPECONF(cpu_transcoder); 1912 val = I915_READ(reg); 1913 if (val & PIPECONF_ENABLE) { 1914 /* we keep both pipes enabled on 830 */ 1915 WARN_ON(!IS_I830(dev_priv)); 1916 return; 1917 } 1918 1919 I915_WRITE(reg, val | PIPECONF_ENABLE); 1920 POSTING_READ(reg); 1921 1922 /* 1923 * Until the pipe starts DSL will read as 0, which would cause 1924 * an apparent vblank timestamp jump, which messes up also the 1925 * frame count when it's derived from the timestamps. 
So let's 1926 * wait for the pipe to start properly before we call 1927 * drm_crtc_vblank_on() 1928 */ 1929 if (dev->max_vblank_count == 0 && 1930 wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50)) 1931 DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe)); 1932} 1933 1934/** 1935 * intel_disable_pipe - disable a pipe, asserting requirements 1936 * @crtc: crtc whose pipe is to be disabled 1937 * 1938 * Disable the pipe of @crtc, making sure that various hardware 1939 * specific requirements are met, if applicable, e.g. plane 1940 * disabled, panel fitter off, etc. 1941 * 1942 * Will wait until the pipe has shut down before returning. 1943 */ 1944static void intel_disable_pipe(struct intel_crtc *crtc) 1945{ 1946 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1947 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1948 enum pipe pipe = crtc->pipe; 1949 i915_reg_t reg; 1950 u32 val; 1951 1952 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 1953 1954 /* 1955 * Make sure planes won't keep trying to pump pixels to us, 1956 * or we might hang the display. 1957 */ 1958 assert_planes_disabled(dev_priv, pipe); 1959 assert_cursor_disabled(dev_priv, pipe); 1960 assert_sprites_disabled(dev_priv, pipe); 1961 1962 reg = PIPECONF(cpu_transcoder); 1963 val = I915_READ(reg); 1964 if ((val & PIPECONF_ENABLE) == 0) 1965 return; 1966 1967 /* 1968 * Double wide has implications for planes 1969 * so best keep it disabled when not needed. 1970 */ 1971 if (crtc->config->double_wide) 1972 val &= ~PIPECONF_DOUBLE_WIDE; 1973 1974 /* Don't disable pipe or pipe PLLs if needed */ 1975 if (!IS_I830(dev_priv)) 1976 val &= ~PIPECONF_ENABLE; 1977 1978 I915_WRITE(reg, val); 1979 if ((val & PIPECONF_ENABLE) == 0) 1980 intel_wait_for_pipe_off(crtc); 1981} 1982 1983static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 1984{ 1985 return IS_GEN2(dev_priv) ?
2048 : 4096; 1986} 1987 1988static unsigned int 1989intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane) 1990{ 1991 struct drm_i915_private *dev_priv = to_i915(fb->dev); 1992 unsigned int cpp = fb->format->cpp[plane]; 1993 1994 switch (fb->modifier) { 1995 case DRM_FORMAT_MOD_LINEAR: 1996 return cpp; 1997 case I915_FORMAT_MOD_X_TILED: 1998 if (IS_GEN2(dev_priv)) 1999 return 128; 2000 else 2001 return 512; 2002 case I915_FORMAT_MOD_Y_TILED: 2003 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv)) 2004 return 128; 2005 else 2006 return 512; 2007 case I915_FORMAT_MOD_Yf_TILED: 2008 switch (cpp) { 2009 case 1: 2010 return 64; 2011 case 2: 2012 case 4: 2013 return 128; 2014 case 8: 2015 case 16: 2016 return 256; 2017 default: 2018 MISSING_CASE(cpp); 2019 return cpp; 2020 } 2021 break; 2022 default: 2023 MISSING_CASE(fb->modifier); 2024 return cpp; 2025 } 2026} 2027 2028static unsigned int 2029intel_tile_height(const struct drm_framebuffer *fb, int plane) 2030{ 2031 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) 2032 return 1; 2033 else 2034 return intel_tile_size(to_i915(fb->dev)) / 2035 intel_tile_width_bytes(fb, plane); 2036} 2037 2038/* Return the tile dimensions in pixel units */ 2039static void intel_tile_dims(const struct drm_framebuffer *fb, int plane, 2040 unsigned int *tile_width, 2041 unsigned int *tile_height) 2042{ 2043 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane); 2044 unsigned int cpp = fb->format->cpp[plane]; 2045 2046 *tile_width = tile_width_bytes / cpp; 2047 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes; 2048} 2049 2050unsigned int 2051intel_fb_align_height(const struct drm_framebuffer *fb, 2052 int plane, unsigned int height) 2053{ 2054 unsigned int tile_height = intel_tile_height(fb, plane); 2055 2056 return ALIGN(height, tile_height); 2057} 2058 2059unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 2060{ 2061 unsigned int size = 0; 2062 int i; 2063 2064 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 2065 size += rot_info->plane[i].width * rot_info->plane[i].height; 2066 2067 return size; 2068} 2069 2070static void 2071intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, 2072 const struct drm_framebuffer *fb, 2073 unsigned int rotation) 2074{ 2075 view->type = I915_GGTT_VIEW_NORMAL; 2076 if (drm_rotation_90_or_270(rotation)) { 2077 view->type = I915_GGTT_VIEW_ROTATED; 2078 view->rotated = to_intel_framebuffer(fb)->rot_info; 2079 } 2080} 2081 2082static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv) 2083{ 2084 if (IS_I830(dev_priv)) 2085 return 16 * 1024; 2086 else if (IS_I85X(dev_priv)) 2087 return 256; 2088 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 2089 return 32; 2090 else 2091 return 4 * 1024; 2092} 2093 2094static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2095{ 2096 if (INTEL_INFO(dev_priv)->gen >= 9) 2097 return 256 * 1024; 2098 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || 2099 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2100 return 128 * 1024; 2101 else if (INTEL_INFO(dev_priv)->gen >= 4) 2102 return 4 * 1024; 2103 else 2104 return 0; 2105} 2106 2107static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, 2108 int plane) 2109{ 2110 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2111 2112 /* AUX_DIST needs only 4K alignment */ 2113 if (fb->format->format == DRM_FORMAT_NV12 && plane == 1) 2114 return 4096; 2115 2116 switch (fb->modifier) { 2117 case 
DRM_FORMAT_MOD_LINEAR: 2118 return intel_linear_alignment(dev_priv); 2119 case I915_FORMAT_MOD_X_TILED: 2120 if (INTEL_GEN(dev_priv) >= 9) 2121 return 256 * 1024; 2122 return 0; 2123 case I915_FORMAT_MOD_Y_TILED: 2124 case I915_FORMAT_MOD_Yf_TILED: 2125 return 1 * 1024 * 1024; 2126 default: 2127 MISSING_CASE(fb->modifier); 2128 return 0; 2129 } 2130} 2131 2132struct i915_vma * 2133intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) 2134{ 2135 struct drm_device *dev = fb->dev; 2136 struct drm_i915_private *dev_priv = to_i915(dev); 2137 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2138 struct i915_ggtt_view view; 2139 struct i915_vma *vma; 2140 u32 alignment; 2141 2142 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2143 2144 alignment = intel_surf_alignment(fb, 0); 2145 2146 intel_fill_fb_ggtt_view(&view, fb, rotation); 2147 2148 /* Note that the w/a also requires 64 PTE of padding following the 2149 * bo. We currently fill all unused PTE with the shadow page and so 2150 * we should always have valid PTE following the scanout preventing 2151 * the VT-d warning. 2152 */ 2153 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) 2154 alignment = 256 * 1024; 2155 2156 /* 2157 * Global gtt pte registers are special registers which actually forward 2158 * writes to a chunk of system memory. Which means that there is no risk 2159 * that the register values disappear as soon as we call 2160 * intel_runtime_pm_put(), so it is correct to wrap only the 2161 * pin/unpin/fence and not more. 2162 */ 2163 intel_runtime_pm_get(dev_priv); 2164 2165 vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view); 2166 if (IS_ERR(vma)) 2167 goto err; 2168 2169 if (i915_vma_is_map_and_fenceable(vma)) { 2170 /* Install a fence for tiled scan-out. Pre-i965 always needs a 2171 * fence, whereas 965+ only requires a fence if using 2172 * framebuffer compression. For simplicity, we always, when 2173 * possible, install a fence as the cost is not that onerous. 2174 * 2175 * If we fail to fence the tiled scanout, then either the 2176 * modeset will reject the change (which is highly unlikely as 2177 * the affected systems, all but one, do not have unmappable 2178 * space) or we will not be able to enable full powersaving 2179 * techniques (also likely not to apply due to various limits 2180 * FBC and the like impose on the size of the buffer, which 2181 * presumably we violated anyway with this unmappable buffer). 2182 * Anyway, it is presumably better to stumble onwards with 2183 * something and try to run the system in a "less than optimal" 2184 * mode that matches the user configuration. 2185 */ 2186 if (i915_vma_get_fence(vma) == 0) 2187 i915_vma_pin_fence(vma); 2188 } 2189 2190 i915_vma_get(vma); 2191err: 2192 intel_runtime_pm_put(dev_priv); 2193 return vma; 2194} 2195 2196void intel_unpin_fb_vma(struct i915_vma *vma) 2197{ 2198 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 2199 2200 i915_vma_unpin_fence(vma); 2201 i915_gem_object_unpin_from_display_plane(vma); 2202 i915_vma_put(vma); 2203} 2204 2205static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, 2206 unsigned int rotation) 2207{ 2208 if (drm_rotation_90_or_270(rotation)) 2209 return to_intel_framebuffer(fb)->rotated[plane].pitch; 2210 else 2211 return fb->pitches[plane]; 2212} 2213 2214/* 2215 * Convert the x/y offsets into a linear offset. 
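 * E.g. with an XRGB8888 fb (4 bytes per pixel) and an 8192 byte pitch,
 * (x, y) = (10, 2) maps to 2 * 8192 + 10 * 4 = 16424 bytes.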
2216 * Only valid with 0/180 degree rotation, which is fine since linear 2217 * offset is only used with linear buffers on pre-hsw and tiled buffers 2218 * with gen2/3, and 90/270 degree rotations aren't supported on any of them. 2219 */ 2220u32 intel_fb_xy_to_linear(int x, int y, 2221 const struct intel_plane_state *state, 2222 int plane) 2223{ 2224 const struct drm_framebuffer *fb = state->base.fb; 2225 unsigned int cpp = fb->format->cpp[plane]; 2226 unsigned int pitch = fb->pitches[plane]; 2227 2228 return y * pitch + x * cpp; 2229} 2230 2231/* 2232 * Add the x/y offsets derived from fb->offsets[] to the user 2233 * specified plane src x/y offsets. The resulting x/y offsets 2234 * specify the start of scanout from the beginning of the gtt mapping. 2235 */ 2236void intel_add_fb_offsets(int *x, int *y, 2237 const struct intel_plane_state *state, 2238 int plane) 2239 2240{ 2241 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb); 2242 unsigned int rotation = state->base.rotation; 2243 2244 if (drm_rotation_90_or_270(rotation)) { 2245 *x += intel_fb->rotated[plane].x; 2246 *y += intel_fb->rotated[plane].y; 2247 } else { 2248 *x += intel_fb->normal[plane].x; 2249 *y += intel_fb->normal[plane].y; 2250 } 2251} 2252 2253/* 2254 * Input tile dimensions and pitch must already be 2255 * rotated to match x and y, and in pixel units. 2256 */ 2257static u32 _intel_adjust_tile_offset(int *x, int *y, 2258 unsigned int tile_width, 2259 unsigned int tile_height, 2260 unsigned int tile_size, 2261 unsigned int pitch_tiles, 2262 u32 old_offset, 2263 u32 new_offset) 2264{ 2265 unsigned int pitch_pixels = pitch_tiles * tile_width; 2266 unsigned int tiles; 2267 2268 WARN_ON(old_offset & (tile_size - 1)); 2269 WARN_ON(new_offset & (tile_size - 1)); 2270 WARN_ON(new_offset > old_offset); 2271 2272 tiles = (old_offset - new_offset) / tile_size; 2273 2274 *y += tiles / pitch_tiles * tile_height; 2275 *x += tiles % pitch_tiles * tile_width; 2276 2277 /* minimize x in case it got needlessly big */ 2278 *y += *x / pitch_pixels * tile_height; 2279 *x %= pitch_pixels; 2280 2281 return new_offset; 2282} 2283 2284/* 2285 * Adjust the tile offset by moving the difference into 2286 * the x/y offsets.
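 * For tiled fbs the (old_offset - new_offset) byte delta is turned into
 * whole tiles and folded into x/y by _intel_adjust_tile_offset(); for
 * linear fbs it is folded in directly using the byte pitch.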
2287 */ 2288static u32 intel_adjust_tile_offset(int *x, int *y, 2289 const struct intel_plane_state *state, int plane, 2290 u32 old_offset, u32 new_offset) 2291{ 2292 const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); 2293 const struct drm_framebuffer *fb = state->base.fb; 2294 unsigned int cpp = fb->format->cpp[plane]; 2295 unsigned int rotation = state->base.rotation; 2296 unsigned int pitch = intel_fb_pitch(fb, plane, rotation); 2297 2298 WARN_ON(new_offset > old_offset); 2299 2300 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2301 unsigned int tile_size, tile_width, tile_height; 2302 unsigned int pitch_tiles; 2303 2304 tile_size = intel_tile_size(dev_priv); 2305 intel_tile_dims(fb, plane, &tile_width, &tile_height); 2306 2307 if (drm_rotation_90_or_270(rotation)) { 2308 pitch_tiles = pitch / tile_height; 2309 swap(tile_width, tile_height); 2310 } else { 2311 pitch_tiles = pitch / (tile_width * cpp); 2312 } 2313 2314 _intel_adjust_tile_offset(x, y, tile_width, tile_height, 2315 tile_size, pitch_tiles, 2316 old_offset, new_offset); 2317 } else { 2318 old_offset += *y * pitch + *x * cpp; 2319 2320 *y = (old_offset - new_offset) / pitch; 2321 *x = ((old_offset - new_offset) - *y * pitch) / cpp; 2322 } 2323 2324 return new_offset; 2325} 2326 2327/* 2328 * Computes the linear offset to the base tile and adjusts 2329 * x, y. bytes per pixel is assumed to be a power-of-two. 2330 * 2331 * In the 90/270 rotated case, x and y are assumed 2332 * to be already rotated to match the rotated GTT view, and 2333 * pitch is the tile_height aligned framebuffer height. 2334 * 2335 * This function is used when computing the derived information 2336 * under intel_framebuffer, so using any of that information 2337 * here is not allowed. Anything under drm_framebuffer can be 2338 * used. This is why the user has to pass in the pitch since it 2339 * is specified in the rotated orientation. 
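 * Worked example, assuming a 4 bytes-per-pixel X-tiled fb with an 8192
 * byte pitch and no extra alignment: tiles are 512 bytes x 8 rows
 * (128x8 pixels), so pitch_tiles = 16; (x, y) = (1000, 10) sits in tile
 * column 7, row 1, the function returns (1 * 16 + 7) * 4096 = 94208,
 * and the residual offsets become (x, y) = (104, 2).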
2340 */ 2341static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, 2342 int *x, int *y, 2343 const struct drm_framebuffer *fb, int plane, 2344 unsigned int pitch, 2345 unsigned int rotation, 2346 u32 alignment) 2347{ 2348 uint64_t fb_modifier = fb->modifier; 2349 unsigned int cpp = fb->format->cpp[plane]; 2350 u32 offset, offset_aligned; 2351 2352 if (alignment) 2353 alignment--; 2354 2355 if (fb_modifier != DRM_FORMAT_MOD_LINEAR) { 2356 unsigned int tile_size, tile_width, tile_height; 2357 unsigned int tile_rows, tiles, pitch_tiles; 2358 2359 tile_size = intel_tile_size(dev_priv); 2360 intel_tile_dims(fb, plane, &tile_width, &tile_height); 2361 2362 if (drm_rotation_90_or_270(rotation)) { 2363 pitch_tiles = pitch / tile_height; 2364 swap(tile_width, tile_height); 2365 } else { 2366 pitch_tiles = pitch / (tile_width * cpp); 2367 } 2368 2369 tile_rows = *y / tile_height; 2370 *y %= tile_height; 2371 2372 tiles = *x / tile_width; 2373 *x %= tile_width; 2374 2375 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2376 offset_aligned = offset & ~alignment; 2377 2378 _intel_adjust_tile_offset(x, y, tile_width, tile_height, 2379 tile_size, pitch_tiles, 2380 offset, offset_aligned); 2381 } else { 2382 offset = *y * pitch + *x * cpp; 2383 offset_aligned = offset & ~alignment; 2384 2385 *y = (offset & alignment) / pitch; 2386 *x = ((offset & alignment) - *y * pitch) / cpp; 2387 } 2388 2389 return offset_aligned; 2390} 2391 2392u32 intel_compute_tile_offset(int *x, int *y, 2393 const struct intel_plane_state *state, 2394 int plane) 2395{ 2396 struct intel_plane *intel_plane = to_intel_plane(state->base.plane); 2397 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 2398 const struct drm_framebuffer *fb = state->base.fb; 2399 unsigned int rotation = state->base.rotation; 2400 int pitch = intel_fb_pitch(fb, plane, rotation); 2401 u32 alignment; 2402 2403 if (intel_plane->id == PLANE_CURSOR) 2404 alignment = intel_cursor_alignment(dev_priv); 2405 else 2406 alignment = intel_surf_alignment(fb, plane); 2407 2408 return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch, 2409 rotation, alignment); 2410} 2411 2412/* Convert the fb->offset[] linear offset into x/y offsets */ 2413static void intel_fb_offset_to_xy(int *x, int *y, 2414 const struct drm_framebuffer *fb, int plane) 2415{ 2416 unsigned int cpp = fb->format->cpp[plane]; 2417 unsigned int pitch = fb->pitches[plane]; 2418 u32 linear_offset = fb->offsets[plane]; 2419 2420 *y = linear_offset / pitch; 2421 *x = linear_offset % pitch / cpp; 2422} 2423 2424static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier) 2425{ 2426 switch (fb_modifier) { 2427 case I915_FORMAT_MOD_X_TILED: 2428 return I915_TILING_X; 2429 case I915_FORMAT_MOD_Y_TILED: 2430 return I915_TILING_Y; 2431 default: 2432 return I915_TILING_NONE; 2433 } 2434} 2435 2436static int 2437intel_fill_fb_info(struct drm_i915_private *dev_priv, 2438 struct drm_framebuffer *fb) 2439{ 2440 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2441 struct intel_rotation_info *rot_info = &intel_fb->rot_info; 2442 u32 gtt_offset_rotated = 0; 2443 unsigned int max_size = 0; 2444 int i, num_planes = fb->format->num_planes; 2445 unsigned int tile_size = intel_tile_size(dev_priv); 2446 2447 for (i = 0; i < num_planes; i++) { 2448 unsigned int width, height; 2449 unsigned int cpp, size; 2450 u32 offset; 2451 int x, y; 2452 2453 cpp = fb->format->cpp[i]; 2454 width = drm_framebuffer_plane_width(fb->width, fb, i); 2455 height = 
drm_framebuffer_plane_height(fb->height, fb, i); 2456 2457 intel_fb_offset_to_xy(&x, &y, fb, i); 2458 2459 /* 2460 * The fence (if used) is aligned to the start of the object 2461 * so having the framebuffer wrap around across the edge of the 2462 * fenced region doesn't really work. We have no API to configure 2463 * the fence start offset within the object (nor could we probably 2464 * on gen2/3). So it's just easier if we just require that the 2465 * fb layout agrees with the fence layout. We already check that the 2466 * fb stride matches the fence stride elsewhere. 2467 */ 2468 if (i915_gem_object_is_tiled(intel_fb->obj) && 2469 (x + width) * cpp > fb->pitches[i]) { 2470 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2471 i, fb->offsets[i]); 2472 return -EINVAL; 2473 } 2474 2475 /* 2476 * First pixel of the framebuffer from 2477 * the start of the normal gtt mapping. 2478 */ 2479 intel_fb->normal[i].x = x; 2480 intel_fb->normal[i].y = y; 2481 2482 offset = _intel_compute_tile_offset(dev_priv, &x, &y, 2483 fb, i, fb->pitches[i], 2484 DRM_MODE_ROTATE_0, tile_size); 2485 offset /= tile_size; 2486 2487 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2488 unsigned int tile_width, tile_height; 2489 unsigned int pitch_tiles; 2490 struct drm_rect r; 2491 2492 intel_tile_dims(fb, i, &tile_width, &tile_height); 2493 2494 rot_info->plane[i].offset = offset; 2495 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); 2496 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 2497 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 2498 2499 intel_fb->rotated[i].pitch = 2500 rot_info->plane[i].height * tile_height; 2501 2502 /* how many tiles does this plane need */ 2503 size = rot_info->plane[i].stride * rot_info->plane[i].height; 2504 /* 2505 * If the plane isn't horizontally tile aligned, 2506 * we need one more tile. 2507 */ 2508 if (x != 0) 2509 size++; 2510 2511 /* rotate the x/y offsets to match the GTT view */ 2512 r.x1 = x; 2513 r.y1 = y; 2514 r.x2 = x + width; 2515 r.y2 = y + height; 2516 drm_rect_rotate(&r, 2517 rot_info->plane[i].width * tile_width, 2518 rot_info->plane[i].height * tile_height, 2519 DRM_MODE_ROTATE_270); 2520 x = r.x1; 2521 y = r.y1; 2522 2523 /* rotate the tile dimensions to match the GTT view */ 2524 pitch_tiles = intel_fb->rotated[i].pitch / tile_height; 2525 swap(tile_width, tile_height); 2526 2527 /* 2528 * We only keep the x/y offsets, so push all of the 2529 * gtt offset into the x/y offsets. 2530 */ 2531 _intel_adjust_tile_offset(&x, &y, 2532 tile_width, tile_height, 2533 tile_size, pitch_tiles, 2534 gtt_offset_rotated * tile_size, 0); 2535 2536 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; 2537 2538 /* 2539 * First pixel of the framebuffer from 2540 * the start of the rotated gtt mapping. 
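 * (these x/y are in the rotated view, i.e. after the
 * DRM_MODE_ROTATE_270 transform applied above).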
2541 */ 2542 intel_fb->rotated[i].x = x; 2543 intel_fb->rotated[i].y = y; 2544 } else { 2545 size = DIV_ROUND_UP((y + height) * fb->pitches[i] + 2546 x * cpp, tile_size); 2547 } 2548 2549 /* how many tiles in total needed in the bo */ 2550 max_size = max(max_size, offset + size); 2551 } 2552 2553 if (max_size * tile_size > intel_fb->obj->base.size) { 2554 DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n", 2555 max_size * tile_size, intel_fb->obj->base.size); 2556 return -EINVAL; 2557 } 2558 2559 return 0; 2560} 2561 2562static int i9xx_format_to_fourcc(int format) 2563{ 2564 switch (format) { 2565 case DISPPLANE_8BPP: 2566 return DRM_FORMAT_C8; 2567 case DISPPLANE_BGRX555: 2568 return DRM_FORMAT_XRGB1555; 2569 case DISPPLANE_BGRX565: 2570 return DRM_FORMAT_RGB565; 2571 default: 2572 case DISPPLANE_BGRX888: 2573 return DRM_FORMAT_XRGB8888; 2574 case DISPPLANE_RGBX888: 2575 return DRM_FORMAT_XBGR8888; 2576 case DISPPLANE_BGRX101010: 2577 return DRM_FORMAT_XRGB2101010; 2578 case DISPPLANE_RGBX101010: 2579 return DRM_FORMAT_XBGR2101010; 2580 } 2581} 2582 2583static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 2584{ 2585 switch (format) { 2586 case PLANE_CTL_FORMAT_RGB_565: 2587 return DRM_FORMAT_RGB565; 2588 default: 2589 case PLANE_CTL_FORMAT_XRGB_8888: 2590 if (rgb_order) { 2591 if (alpha) 2592 return DRM_FORMAT_ABGR8888; 2593 else 2594 return DRM_FORMAT_XBGR8888; 2595 } else { 2596 if (alpha) 2597 return DRM_FORMAT_ARGB8888; 2598 else 2599 return DRM_FORMAT_XRGB8888; 2600 } 2601 case PLANE_CTL_FORMAT_XRGB_2101010: 2602 if (rgb_order) 2603 return DRM_FORMAT_XBGR2101010; 2604 else 2605 return DRM_FORMAT_XRGB2101010; 2606 } 2607} 2608 2609static bool 2610intel_alloc_initial_plane_obj(struct intel_crtc *crtc, 2611 struct intel_initial_plane_config *plane_config) 2612{ 2613 struct drm_device *dev = crtc->base.dev; 2614 struct drm_i915_private *dev_priv = to_i915(dev); 2615 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2616 struct drm_i915_gem_object *obj = NULL; 2617 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 2618 struct drm_framebuffer *fb = &plane_config->fb->base; 2619 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); 2620 u32 size_aligned = round_up(plane_config->base + plane_config->size, 2621 PAGE_SIZE); 2622 2623 size_aligned -= base_aligned; 2624 2625 if (plane_config->size == 0) 2626 return false; 2627 2628 /* If the FB is too big, just don't use it since fbdev is not very 2629 * important and we should probably use that space with FBC or other 2630 * features. 
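 * The check below caps the inherited fb at half of the usable stolen space.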
*/ 2631 if (size_aligned * 2 > ggtt->stolen_usable_size) 2632 return false; 2633 2634 mutex_lock(&dev->struct_mutex); 2635 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, 2636 base_aligned, 2637 base_aligned, 2638 size_aligned); 2639 mutex_unlock(&dev->struct_mutex); 2640 if (!obj) 2641 return false; 2642 2643 if (plane_config->tiling == I915_TILING_X) 2644 obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X; 2645 2646 mode_cmd.pixel_format = fb->format->format; 2647 mode_cmd.width = fb->width; 2648 mode_cmd.height = fb->height; 2649 mode_cmd.pitches[0] = fb->pitches[0]; 2650 mode_cmd.modifier[0] = fb->modifier; 2651 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 2652 2653 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) { 2654 DRM_DEBUG_KMS("intel fb init failed\n"); 2655 goto out_unref_obj; 2656 } 2657 2658 2659 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); 2660 return true; 2661 2662out_unref_obj: 2663 i915_gem_object_put(obj); 2664 return false; 2665} 2666 2667/* Update plane->state->fb to match plane->fb after driver-internal updates */ 2668static void 2669update_state_fb(struct drm_plane *plane) 2670{ 2671 if (plane->fb == plane->state->fb) 2672 return; 2673 2674 if (plane->state->fb) 2675 drm_framebuffer_unreference(plane->state->fb); 2676 plane->state->fb = plane->fb; 2677 if (plane->state->fb) 2678 drm_framebuffer_reference(plane->state->fb); 2679} 2680 2681static void 2682intel_set_plane_visible(struct intel_crtc_state *crtc_state, 2683 struct intel_plane_state *plane_state, 2684 bool visible) 2685{ 2686 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2687 2688 plane_state->base.visible = visible; 2689 2690 /* FIXME pre-g4x don't work like this */ 2691 if (visible) { 2692 crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base)); 2693 crtc_state->active_planes |= BIT(plane->id); 2694 } else { 2695 crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base)); 2696 crtc_state->active_planes &= ~BIT(plane->id); 2697 } 2698 2699 DRM_DEBUG_KMS("%s active planes 0x%x\n", 2700 crtc_state->base.crtc->name, 2701 crtc_state->active_planes); 2702} 2703 2704static void 2705intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 2706 struct intel_initial_plane_config *plane_config) 2707{ 2708 struct drm_device *dev = intel_crtc->base.dev; 2709 struct drm_i915_private *dev_priv = to_i915(dev); 2710 struct drm_crtc *c; 2711 struct drm_i915_gem_object *obj; 2712 struct drm_plane *primary = intel_crtc->base.primary; 2713 struct drm_plane_state *plane_state = primary->state; 2714 struct drm_crtc_state *crtc_state = intel_crtc->base.state; 2715 struct intel_plane *intel_plane = to_intel_plane(primary); 2716 struct intel_plane_state *intel_state = 2717 to_intel_plane_state(plane_state); 2718 struct drm_framebuffer *fb; 2719 2720 if (!plane_config->fb) 2721 return; 2722 2723 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { 2724 fb = &plane_config->fb->base; 2725 goto valid_fb; 2726 } 2727 2728 kfree(plane_config->fb); 2729 2730 /* 2731 * Failed to alloc the obj, check to see if we should share 2732 * an fb with another CRTC instead 2733 */ 2734 for_each_crtc(dev, c) { 2735 struct intel_plane_state *state; 2736 2737 if (c == &intel_crtc->base) 2738 continue; 2739 2740 if (!to_intel_crtc(c)->active) 2741 continue; 2742 2743 state = to_intel_plane_state(c->primary->state); 2744 if (!state->vma) 2745 continue; 2746 2747 if (intel_plane_ggtt_offset(state) == plane_config->base) { 2748 fb = c->primary->fb; 2749 
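/* another pipe is already scanning out of this address; share its fb */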
drm_framebuffer_reference(fb); 2750 goto valid_fb; 2751 } 2752 } 2753 2754 /* 2755 * We've failed to reconstruct the BIOS FB. Current display state 2756 * indicates that the primary plane is visible, but has a NULL FB, 2757 * which will lead to problems later if we don't fix it up. The 2758 * simplest solution is to just disable the primary plane now and 2759 * pretend the BIOS never had it enabled. 2760 */ 2761 intel_set_plane_visible(to_intel_crtc_state(crtc_state), 2762 to_intel_plane_state(plane_state), 2763 false); 2764 intel_pre_disable_primary_noatomic(&intel_crtc->base); 2765 trace_intel_disable_plane(primary, intel_crtc); 2766 intel_plane->disable_plane(intel_plane, intel_crtc); 2767 2768 return; 2769 2770valid_fb: 2771 mutex_lock(&dev->struct_mutex); 2772 intel_state->vma = 2773 intel_pin_and_fence_fb_obj(fb, primary->state->rotation); 2774 mutex_unlock(&dev->struct_mutex); 2775 if (IS_ERR(intel_state->vma)) { 2776 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", 2777 intel_crtc->pipe, PTR_ERR(intel_state->vma)); 2778 2779 intel_state->vma = NULL; 2780 drm_framebuffer_unreference(fb); 2781 return; 2782 } 2783 2784 plane_state->src_x = 0; 2785 plane_state->src_y = 0; 2786 plane_state->src_w = fb->width << 16; 2787 plane_state->src_h = fb->height << 16; 2788 2789 plane_state->crtc_x = 0; 2790 plane_state->crtc_y = 0; 2791 plane_state->crtc_w = fb->width; 2792 plane_state->crtc_h = fb->height; 2793 2794 intel_state->base.src = drm_plane_state_src(plane_state); 2795 intel_state->base.dst = drm_plane_state_dest(plane_state); 2796 2797 obj = intel_fb_obj(fb); 2798 if (i915_gem_object_is_tiled(obj)) 2799 dev_priv->preserve_bios_swizzle = true; 2800 2801 drm_framebuffer_reference(fb); 2802 primary->fb = primary->state->fb = fb; 2803 primary->crtc = primary->state->crtc = &intel_crtc->base; 2804 2805 intel_set_plane_visible(to_intel_crtc_state(crtc_state), 2806 to_intel_plane_state(plane_state), 2807 true); 2808 2809 atomic_or(to_intel_plane(primary)->frontbuffer_bit, 2810 &obj->frontbuffer_bits); 2811} 2812 2813static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, 2814 unsigned int rotation) 2815{ 2816 int cpp = fb->format->cpp[plane]; 2817 2818 switch (fb->modifier) { 2819 case DRM_FORMAT_MOD_LINEAR: 2820 case I915_FORMAT_MOD_X_TILED: 2821 switch (cpp) { 2822 case 8: 2823 return 4096; 2824 case 4: 2825 case 2: 2826 case 1: 2827 return 8192; 2828 default: 2829 MISSING_CASE(cpp); 2830 break; 2831 } 2832 break; 2833 case I915_FORMAT_MOD_Y_TILED: 2834 case I915_FORMAT_MOD_Yf_TILED: 2835 switch (cpp) { 2836 case 8: 2837 return 2048; 2838 case 4: 2839 return 4096; 2840 case 2: 2841 case 1: 2842 return 8192; 2843 default: 2844 MISSING_CASE(cpp); 2845 break; 2846 } 2847 break; 2848 default: 2849 MISSING_CASE(fb->modifier); 2850 } 2851 2852 return 2048; 2853} 2854 2855static int skl_check_main_surface(struct intel_plane_state *plane_state) 2856{ 2857 const struct drm_framebuffer *fb = plane_state->base.fb; 2858 unsigned int rotation = plane_state->base.rotation; 2859 int x = plane_state->base.src.x1 >> 16; 2860 int y = plane_state->base.src.y1 >> 16; 2861 int w = drm_rect_width(&plane_state->base.src) >> 16; 2862 int h = drm_rect_height(&plane_state->base.src) >> 16; 2863 int max_width = skl_max_plane_width(fb, 0, rotation); 2864 int max_height = 4096; 2865 u32 alignment, offset, aux_offset = plane_state->aux.offset; 2866 2867 if (w > max_width || h > max_height) { 2868 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", 2869 w, h, max_width, 
max_height); 2870 return -EINVAL; 2871 } 2872 2873 intel_add_fb_offsets(&x, &y, plane_state, 0); 2874 offset = intel_compute_tile_offset(&x, &y, plane_state, 0); 2875 alignment = intel_surf_alignment(fb, 0); 2876 2877 /* 2878 * AUX surface offset is specified as the distance from the 2879 * main surface offset, and it must be non-negative. Make 2880 * sure that is what we will get. 2881 */ 2882 if (offset > aux_offset) 2883 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 2884 offset, aux_offset & ~(alignment - 1)); 2885 2886 /* 2887 * When using an X-tiled surface, the plane blows up 2888 * if the x offset + width exceed the stride. 2889 * 2890 * TODO: linear and Y-tiled seem fine, Yf untested, 2891 */ 2892 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 2893 int cpp = fb->format->cpp[0]; 2894 2895 while ((x + w) * cpp > fb->pitches[0]) { 2896 if (offset == 0) { 2897 DRM_DEBUG_KMS("Unable to find suitable display surface offset\n"); 2898 return -EINVAL; 2899 } 2900 2901 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 2902 offset, offset - alignment); 2903 } 2904 } 2905 2906 plane_state->main.offset = offset; 2907 plane_state->main.x = x; 2908 plane_state->main.y = y; 2909 2910 return 0; 2911} 2912 2913static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 2914{ 2915 const struct drm_framebuffer *fb = plane_state->base.fb; 2916 unsigned int rotation = plane_state->base.rotation; 2917 int max_width = skl_max_plane_width(fb, 1, rotation); 2918 int max_height = 4096; 2919 int x = plane_state->base.src.x1 >> 17; 2920 int y = plane_state->base.src.y1 >> 17; 2921 int w = drm_rect_width(&plane_state->base.src) >> 17; 2922 int h = drm_rect_height(&plane_state->base.src) >> 17; 2923 u32 offset; 2924 2925 intel_add_fb_offsets(&x, &y, plane_state, 1); 2926 offset = intel_compute_tile_offset(&x, &y, plane_state, 1); 2927 2928 /* FIXME not quite sure how/if these apply to the chroma plane */ 2929 if (w > max_width || h > max_height) { 2930 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", 2931 w, h, max_width, max_height); 2932 return -EINVAL; 2933 } 2934 2935 plane_state->aux.offset = offset; 2936 plane_state->aux.x = x; 2937 plane_state->aux.y = y; 2938 2939 return 0; 2940} 2941 2942int skl_check_plane_surface(struct intel_plane_state *plane_state) 2943{ 2944 const struct drm_framebuffer *fb = plane_state->base.fb; 2945 unsigned int rotation = plane_state->base.rotation; 2946 int ret; 2947 2948 if (!plane_state->base.visible) 2949 return 0; 2950 2951 /* Rotate src coordinates to match rotated GTT view */ 2952 if (drm_rotation_90_or_270(rotation)) 2953 drm_rect_rotate(&plane_state->base.src, 2954 fb->width << 16, fb->height << 16, 2955 DRM_MODE_ROTATE_270); 2956 2957 /* 2958 * Handle the AUX surface first since 2959 * the main surface setup depends on it. 
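 * (the AUX offset is programmed relative to the main surface and must be
 * non-negative, so skl_check_main_surface() needs aux.offset to be known
 * first).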
2960 */ 2961 if (fb->format->format == DRM_FORMAT_NV12) { 2962 ret = skl_check_nv12_aux_surface(plane_state); 2963 if (ret) 2964 return ret; 2965 } else { 2966 plane_state->aux.offset = ~0xfff; 2967 plane_state->aux.x = 0; 2968 plane_state->aux.y = 0; 2969 } 2970 2971 ret = skl_check_main_surface(plane_state); 2972 if (ret) 2973 return ret; 2974 2975 return 0; 2976} 2977 2978static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 2979 const struct intel_plane_state *plane_state) 2980{ 2981 struct drm_i915_private *dev_priv = 2982 to_i915(plane_state->base.plane->dev); 2983 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 2984 const struct drm_framebuffer *fb = plane_state->base.fb; 2985 unsigned int rotation = plane_state->base.rotation; 2986 u32 dspcntr; 2987 2988 dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE; 2989 2990 if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) || 2991 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) 2992 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2993 2994 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 2995 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 2996 2997 if (INTEL_GEN(dev_priv) < 4) 2998 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 2999 3000 switch (fb->format->format) { 3001 case DRM_FORMAT_C8: 3002 dspcntr |= DISPPLANE_8BPP; 3003 break; 3004 case DRM_FORMAT_XRGB1555: 3005 dspcntr |= DISPPLANE_BGRX555; 3006 break; 3007 case DRM_FORMAT_RGB565: 3008 dspcntr |= DISPPLANE_BGRX565; 3009 break; 3010 case DRM_FORMAT_XRGB8888: 3011 dspcntr |= DISPPLANE_BGRX888; 3012 break; 3013 case DRM_FORMAT_XBGR8888: 3014 dspcntr |= DISPPLANE_RGBX888; 3015 break; 3016 case DRM_FORMAT_XRGB2101010: 3017 dspcntr |= DISPPLANE_BGRX101010; 3018 break; 3019 case DRM_FORMAT_XBGR2101010: 3020 dspcntr |= DISPPLANE_RGBX101010; 3021 break; 3022 default: 3023 MISSING_CASE(fb->format->format); 3024 return 0; 3025 } 3026 3027 if (INTEL_GEN(dev_priv) >= 4 && 3028 fb->modifier == I915_FORMAT_MOD_X_TILED) 3029 dspcntr |= DISPPLANE_TILED; 3030 3031 if (rotation & DRM_MODE_ROTATE_180) 3032 dspcntr |= DISPPLANE_ROTATE_180; 3033 3034 if (rotation & DRM_MODE_REFLECT_X) 3035 dspcntr |= DISPPLANE_MIRROR; 3036 3037 return dspcntr; 3038} 3039 3040int i9xx_check_plane_surface(struct intel_plane_state *plane_state) 3041{ 3042 struct drm_i915_private *dev_priv = 3043 to_i915(plane_state->base.plane->dev); 3044 int src_x = plane_state->base.src.x1 >> 16; 3045 int src_y = plane_state->base.src.y1 >> 16; 3046 u32 offset; 3047 3048 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 3049 3050 if (INTEL_GEN(dev_priv) >= 4) 3051 offset = intel_compute_tile_offset(&src_x, &src_y, 3052 plane_state, 0); 3053 else 3054 offset = 0; 3055 3056 /* HSW/BDW do this automagically in hardware */ 3057 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 3058 unsigned int rotation = plane_state->base.rotation; 3059 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3060 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3061 3062 if (rotation & DRM_MODE_ROTATE_180) { 3063 src_x += src_w - 1; 3064 src_y += src_h - 1; 3065 } else if (rotation & DRM_MODE_REFLECT_X) { 3066 src_x += src_w - 1; 3067 } 3068 } 3069 3070 plane_state->main.offset = offset; 3071 plane_state->main.x = src_x; 3072 plane_state->main.y = src_y; 3073 3074 return 0; 3075} 3076 3077static void i9xx_update_primary_plane(struct intel_plane *primary, 3078 const struct intel_crtc_state *crtc_state, 3079 const struct intel_plane_state *plane_state) 3080{ 3081 struct drm_i915_private *dev_priv = to_i915(primary->base.dev); 
3082 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 3083 const struct drm_framebuffer *fb = plane_state->base.fb; 3084 enum plane plane = primary->plane; 3085 u32 linear_offset; 3086 u32 dspcntr = plane_state->ctl; 3087 i915_reg_t reg = DSPCNTR(plane); 3088 int x = plane_state->main.x; 3089 int y = plane_state->main.y; 3090 unsigned long irqflags; 3091 3092 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 3093 3094 if (INTEL_GEN(dev_priv) >= 4) 3095 crtc->dspaddr_offset = plane_state->main.offset; 3096 else 3097 crtc->dspaddr_offset = linear_offset; 3098 3099 crtc->adjusted_x = x; 3100 crtc->adjusted_y = y; 3101 3102 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3103 3104 if (INTEL_GEN(dev_priv) < 4) { 3105 /* pipesrc and dspsize control the size that is scaled from, 3106 * which should always be the user's requested size. 3107 */ 3108 I915_WRITE_FW(DSPSIZE(plane), 3109 ((crtc_state->pipe_src_h - 1) << 16) | 3110 (crtc_state->pipe_src_w - 1)); 3111 I915_WRITE_FW(DSPPOS(plane), 0); 3112 } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) { 3113 I915_WRITE_FW(PRIMSIZE(plane), 3114 ((crtc_state->pipe_src_h - 1) << 16) | 3115 (crtc_state->pipe_src_w - 1)); 3116 I915_WRITE_FW(PRIMPOS(plane), 0); 3117 I915_WRITE_FW(PRIMCNSTALPHA(plane), 0); 3118 } 3119 3120 I915_WRITE_FW(reg, dspcntr); 3121 3122 I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]); 3123 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3124 I915_WRITE_FW(DSPSURF(plane), 3125 intel_plane_ggtt_offset(plane_state) + 3126 crtc->dspaddr_offset); 3127 I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x); 3128 } else if (INTEL_GEN(dev_priv) >= 4) { 3129 I915_WRITE_FW(DSPSURF(plane), 3130 intel_plane_ggtt_offset(plane_state) + 3131 crtc->dspaddr_offset); 3132 I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x); 3133 I915_WRITE_FW(DSPLINOFF(plane), linear_offset); 3134 } else { 3135 I915_WRITE_FW(DSPADDR(plane), 3136 intel_plane_ggtt_offset(plane_state) + 3137 crtc->dspaddr_offset); 3138 } 3139 POSTING_READ_FW(reg); 3140 3141 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3142} 3143 3144static void i9xx_disable_primary_plane(struct intel_plane *primary, 3145 struct intel_crtc *crtc) 3146{ 3147 struct drm_i915_private *dev_priv = to_i915(primary->base.dev); 3148 enum plane plane = primary->plane; 3149 unsigned long irqflags; 3150 3151 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3152 3153 I915_WRITE_FW(DSPCNTR(plane), 0); 3154 if (INTEL_INFO(dev_priv)->gen >= 4) 3155 I915_WRITE_FW(DSPSURF(plane), 0); 3156 else 3157 I915_WRITE_FW(DSPADDR(plane), 0); 3158 POSTING_READ_FW(DSPCNTR(plane)); 3159 3160 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3161} 3162 3163static u32 3164intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) 3165{ 3166 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) 3167 return 64; 3168 else 3169 return intel_tile_width_bytes(fb, plane); 3170} 3171 3172static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 3173{ 3174 struct drm_device *dev = intel_crtc->base.dev; 3175 struct drm_i915_private *dev_priv = to_i915(dev); 3176 3177 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 3178 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 3179 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 3180} 3181 3182/* 3183 * This function detaches (aka. 
unbinds) unused scalers in hardware 3184 */ 3185static void skl_detach_scalers(struct intel_crtc *intel_crtc) 3186{ 3187 struct intel_crtc_scaler_state *scaler_state; 3188 int i; 3189 3190 scaler_state = &intel_crtc->config->scaler_state; 3191 3192 /* loop through and disable scalers that aren't in use */ 3193 for (i = 0; i < intel_crtc->num_scalers; i++) { 3194 if (!scaler_state->scalers[i].in_use) 3195 skl_detach_scaler(intel_crtc, i); 3196 } 3197} 3198 3199u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, 3200 unsigned int rotation) 3201{ 3202 u32 stride; 3203 3204 if (plane >= fb->format->num_planes) 3205 return 0; 3206 3207 stride = intel_fb_pitch(fb, plane, rotation); 3208 3209 /* 3210 * The stride is either expressed as a multiple of 64 byte chunks for 3211 * linear buffers or in number of tiles for tiled buffers. 3212 */ 3213 if (drm_rotation_90_or_270(rotation)) 3214 stride /= intel_tile_height(fb, plane); 3215 else 3216 stride /= intel_fb_stride_alignment(fb, plane); 3217 3218 return stride; 3219} 3220 3221static u32 skl_plane_ctl_format(uint32_t pixel_format) 3222{ 3223 switch (pixel_format) { 3224 case DRM_FORMAT_C8: 3225 return PLANE_CTL_FORMAT_INDEXED; 3226 case DRM_FORMAT_RGB565: 3227 return PLANE_CTL_FORMAT_RGB_565; 3228 case DRM_FORMAT_XBGR8888: 3229 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 3230 case DRM_FORMAT_XRGB8888: 3231 return PLANE_CTL_FORMAT_XRGB_8888; 3232 /* 3233 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers 3234 * to be already pre-multiplied. We need to add a knob (or a different 3235 * DRM_FORMAT) for user-space to configure that. 3236 */ 3237 case DRM_FORMAT_ABGR8888: 3238 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX | 3239 PLANE_CTL_ALPHA_SW_PREMULTIPLY; 3240 case DRM_FORMAT_ARGB8888: 3241 return PLANE_CTL_FORMAT_XRGB_8888 | 3242 PLANE_CTL_ALPHA_SW_PREMULTIPLY; 3243 case DRM_FORMAT_XRGB2101010: 3244 return PLANE_CTL_FORMAT_XRGB_2101010; 3245 case DRM_FORMAT_XBGR2101010: 3246 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010; 3247 case DRM_FORMAT_YUYV: 3248 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 3249 case DRM_FORMAT_YVYU: 3250 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 3251 case DRM_FORMAT_UYVY: 3252 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; 3253 case DRM_FORMAT_VYUY: 3254 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 3255 default: 3256 MISSING_CASE(pixel_format); 3257 } 3258 3259 return 0; 3260} 3261 3262static u32 skl_plane_ctl_tiling(uint64_t fb_modifier) 3263{ 3264 switch (fb_modifier) { 3265 case DRM_FORMAT_MOD_LINEAR: 3266 break; 3267 case I915_FORMAT_MOD_X_TILED: 3268 return PLANE_CTL_TILED_X; 3269 case I915_FORMAT_MOD_Y_TILED: 3270 return PLANE_CTL_TILED_Y; 3271 case I915_FORMAT_MOD_Yf_TILED: 3272 return PLANE_CTL_TILED_YF; 3273 default: 3274 MISSING_CASE(fb_modifier); 3275 } 3276 3277 return 0; 3278} 3279 3280static u32 skl_plane_ctl_rotation(unsigned int rotation) 3281{ 3282 switch (rotation) { 3283 case DRM_MODE_ROTATE_0: 3284 break; 3285 /* 3286 * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr 3287 * while i915 HW rotation is clockwise, which is why 90 and 270 are swapped below.
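 * E.g. a user-space DRM_MODE_ROTATE_90 request is programmed as
 * PLANE_CTL_ROTATE_270 (90 degrees CCW == 270 degrees CW).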
3288 */ 3289 case DRM_MODE_ROTATE_90: 3290 return PLANE_CTL_ROTATE_270; 3291 case DRM_MODE_ROTATE_180: 3292 return PLANE_CTL_ROTATE_180; 3293 case DRM_MODE_ROTATE_270: 3294 return PLANE_CTL_ROTATE_90; 3295 default: 3296 MISSING_CASE(rotation); 3297 } 3298 3299 return 0; 3300} 3301 3302u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, 3303 const struct intel_plane_state *plane_state) 3304{ 3305 struct drm_i915_private *dev_priv = 3306 to_i915(plane_state->base.plane->dev); 3307 const struct drm_framebuffer *fb = plane_state->base.fb; 3308 unsigned int rotation = plane_state->base.rotation; 3309 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 3310 u32 plane_ctl; 3311 3312 plane_ctl = PLANE_CTL_ENABLE; 3313 3314 if (!IS_GEMINILAKE(dev_priv)) { 3315 plane_ctl |= 3316 PLANE_CTL_PIPE_GAMMA_ENABLE | 3317 PLANE_CTL_PIPE_CSC_ENABLE | 3318 PLANE_CTL_PLANE_GAMMA_DISABLE; 3319 } 3320 3321 plane_ctl |= skl_plane_ctl_format(fb->format->format); 3322 plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 3323 plane_ctl |= skl_plane_ctl_rotation(rotation); 3324 3325 if (key->flags & I915_SET_COLORKEY_DESTINATION) 3326 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; 3327 else if (key->flags & I915_SET_COLORKEY_SOURCE) 3328 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 3329 3330 return plane_ctl; 3331} 3332 3333static void skylake_update_primary_plane(struct intel_plane *plane, 3334 const struct intel_crtc_state *crtc_state, 3335 const struct intel_plane_state *plane_state) 3336{ 3337 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 3338 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 3339 const struct drm_framebuffer *fb = plane_state->base.fb; 3340 enum plane_id plane_id = plane->id; 3341 enum pipe pipe = plane->pipe; 3342 u32 plane_ctl = plane_state->ctl; 3343 unsigned int rotation = plane_state->base.rotation; 3344 u32 stride = skl_plane_stride(fb, 0, rotation); 3345 u32 surf_addr = plane_state->main.offset; 3346 int scaler_id = plane_state->scaler_id; 3347 int src_x = plane_state->main.x; 3348 int src_y = plane_state->main.y; 3349 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3350 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3351 int dst_x = plane_state->base.dst.x1; 3352 int dst_y = plane_state->base.dst.y1; 3353 int dst_w = drm_rect_width(&plane_state->base.dst); 3354 int dst_h = drm_rect_height(&plane_state->base.dst); 3355 unsigned long irqflags; 3356 3357 /* Sizes are 0 based */ 3358 src_w--; 3359 src_h--; 3360 dst_w--; 3361 dst_h--; 3362 3363 crtc->dspaddr_offset = surf_addr; 3364 3365 crtc->adjusted_x = src_x; 3366 crtc->adjusted_y = src_y; 3367 3368 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3369 3370 if (IS_GEMINILAKE(dev_priv)) { 3371 I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), 3372 PLANE_COLOR_PIPE_GAMMA_ENABLE | 3373 PLANE_COLOR_PIPE_CSC_ENABLE | 3374 PLANE_COLOR_PLANE_GAMMA_DISABLE); 3375 } 3376 3377 I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); 3378 I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x); 3379 I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); 3380 I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); 3381 3382 if (scaler_id >= 0) { 3383 uint32_t ps_ctrl = 0; 3384 3385 WARN_ON(!dst_w || !dst_h); 3386 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) | 3387 crtc_state->scaler_state.scalers[scaler_id].mode; 3388 I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); 3389 I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0); 3390 I915_WRITE_FW(SKL_PS_WIN_POS(pipe, 
scaler_id), (dst_x << 16) | dst_y); 3391 I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h); 3392 I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0); 3393 } else { 3394 I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x); 3395 } 3396 3397 I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 3398 intel_plane_ggtt_offset(plane_state) + surf_addr); 3399 3400 POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); 3401 3402 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3403} 3404 3405static void skylake_disable_primary_plane(struct intel_plane *primary, 3406 struct intel_crtc *crtc) 3407{ 3408 struct drm_i915_private *dev_priv = to_i915(primary->base.dev); 3409 enum plane_id plane_id = primary->id; 3410 enum pipe pipe = primary->pipe; 3411 unsigned long irqflags; 3412 3413 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3414 3415 I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); 3416 I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0); 3417 POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); 3418 3419 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3420} 3421 3422static void intel_complete_page_flips(struct drm_i915_private *dev_priv) 3423{ 3424 struct intel_crtc *crtc; 3425 3426 for_each_intel_crtc(&dev_priv->drm, crtc) 3427 intel_finish_page_flip_cs(dev_priv, crtc->pipe); 3428} 3429 3430static int 3431__intel_display_resume(struct drm_device *dev, 3432 struct drm_atomic_state *state, 3433 struct drm_modeset_acquire_ctx *ctx) 3434{ 3435 struct drm_crtc_state *crtc_state; 3436 struct drm_crtc *crtc; 3437 int i, ret; 3438 3439 intel_modeset_setup_hw_state(dev, ctx); 3440 i915_redisable_vga(to_i915(dev)); 3441 3442 if (!state) 3443 return 0; 3444 3445 /* 3446 * We've duplicated the state, pointers to the old state are invalid. 3447 * 3448 * Don't attempt to use the old state until we commit the duplicated state. 3449 */ 3450 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 3451 /* 3452 * Force recalculation even if we restore 3453 * current state. With fast modeset this may not result 3454 * in a modeset when the state is compatible. 3455 */ 3456 crtc_state->mode_changed = true; 3457 } 3458 3459 /* ignore any reset values/BIOS leftovers in the WM registers */ 3460 if (!HAS_GMCH_DISPLAY(to_i915(dev))) 3461 to_intel_atomic_state(state)->skip_intermediate_wm = true; 3462 3463 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 3464 3465 WARN_ON(ret == -EDEADLK); 3466 return ret; 3467} 3468 3469static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 3470{ 3471 return intel_has_gpu_reset(dev_priv) && 3472 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv); 3473} 3474 3475void intel_prepare_reset(struct drm_i915_private *dev_priv) 3476{ 3477 struct drm_device *dev = &dev_priv->drm; 3478 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3479 struct drm_atomic_state *state; 3480 int ret; 3481 3482 3483 /* reset doesn't touch the display */ 3484 if (!i915.force_reset_modeset_test && 3485 !gpu_reset_clobbers_display(dev_priv)) 3486 return; 3487 3488 /* We have a modeset vs reset deadlock, defensively unbreak it. 3489 * 3490 * FIXME: We can do a _lot_ better, this is just a first iteration. 3491 */ 3492 i915_gem_set_wedged(dev_priv); 3493 DRM_DEBUG_DRIVER("Wedging GPU to avoid deadlocks with pending modeset updates\n"); 3494 3495 /* 3496 * Need mode_config.mutex so that we don't 3497 * trample ongoing ->detect() and whatnot. 
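 * The lock and the acquire context are dropped again in intel_finish_reset().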
3498 */ 3499 mutex_lock(&dev->mode_config.mutex); 3500 drm_modeset_acquire_init(ctx, 0); 3501 while (1) { 3502 ret = drm_modeset_lock_all_ctx(dev, ctx); 3503 if (ret != -EDEADLK) 3504 break; 3505 3506 drm_modeset_backoff(ctx); 3507 } 3508 /* 3509 * Disabling the crtcs gracefully seems nicer. Also the 3510 * g33 docs say we should at least disable all the planes. 3511 */ 3512 state = drm_atomic_helper_duplicate_state(dev, ctx); 3513 if (IS_ERR(state)) { 3514 ret = PTR_ERR(state); 3515 DRM_ERROR("Duplicating state failed with %i\n", ret); 3516 return; 3517 } 3518 3519 ret = drm_atomic_helper_disable_all(dev, ctx); 3520 if (ret) { 3521 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 3522 drm_atomic_state_put(state); 3523 return; 3524 } 3525 3526 dev_priv->modeset_restore_state = state; 3527 state->acquire_ctx = ctx; 3528} 3529 3530void intel_finish_reset(struct drm_i915_private *dev_priv) 3531{ 3532 struct drm_device *dev = &dev_priv->drm; 3533 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3534 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 3535 int ret; 3536 3537 /* reset doesn't touch the display */ 3538 if (!i915.force_reset_modeset_test && 3539 !gpu_reset_clobbers_display(dev_priv)) 3540 return; 3541 3542 if (!state) 3543 goto unlock; 3544 3545 /* 3546 * Flips in the rings will be nuked by the reset, 3547 * so complete all pending flips so that user space 3548 * will get its events and not get stuck. 3549 */ 3550 intel_complete_page_flips(dev_priv); 3551 3552 dev_priv->modeset_restore_state = NULL; 3553 3554 /* reset doesn't touch the display */ 3555 if (!gpu_reset_clobbers_display(dev_priv)) { 3556 /* for testing only restore the display */ 3557 ret = __intel_display_resume(dev, state, ctx); 3558 if (ret) 3559 DRM_ERROR("Restoring old state failed with %i\n", ret); 3560 } else { 3561 /* 3562 * The display has been reset as well, 3563 * so need a full re-initialization. 
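 * (interrupts, PPS workaround registers, hw state and hotplug are all
 * brought back up below).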
3564 */ 3565 intel_runtime_pm_disable_interrupts(dev_priv); 3566 intel_runtime_pm_enable_interrupts(dev_priv); 3567 3568 intel_pps_unlock_regs_wa(dev_priv); 3569 intel_modeset_init_hw(dev); 3570 3571 spin_lock_irq(&dev_priv->irq_lock); 3572 if (dev_priv->display.hpd_irq_setup) 3573 dev_priv->display.hpd_irq_setup(dev_priv); 3574 spin_unlock_irq(&dev_priv->irq_lock); 3575 3576 ret = __intel_display_resume(dev, state, ctx); 3577 if (ret) 3578 DRM_ERROR("Restoring old state failed with %i\n", ret); 3579 3580 intel_hpd_init(dev_priv); 3581 } 3582 3583 drm_atomic_state_put(state); 3584unlock: 3585 drm_modeset_drop_locks(ctx); 3586 drm_modeset_acquire_fini(ctx); 3587 mutex_unlock(&dev->mode_config.mutex); 3588} 3589 3590static bool abort_flip_on_reset(struct intel_crtc *crtc) 3591{ 3592 struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error; 3593 3594 if (i915_reset_backoff(error)) 3595 return true; 3596 3597 if (crtc->reset_count != i915_reset_count(error)) 3598 return true; 3599 3600 return false; 3601} 3602 3603static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3604{ 3605 struct drm_device *dev = crtc->dev; 3606 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3607 bool pending; 3608 3609 if (abort_flip_on_reset(intel_crtc)) 3610 return false; 3611 3612 spin_lock_irq(&dev->event_lock); 3613 pending = to_intel_crtc(crtc)->flip_work != NULL; 3614 spin_unlock_irq(&dev->event_lock); 3615 3616 return pending; 3617} 3618 3619static void intel_update_pipe_config(struct intel_crtc *crtc, 3620 struct intel_crtc_state *old_crtc_state) 3621{ 3622 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3623 struct intel_crtc_state *pipe_config = 3624 to_intel_crtc_state(crtc->base.state); 3625 3626 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ 3627 crtc->base.mode = crtc->base.state->mode; 3628 3629 /* 3630 * Update pipe size and adjust fitter if needed: the reason for this is 3631 * that in compute_mode_changes we check the native mode (not the pfit 3632 * mode) to see if we can flip rather than do a full mode set. In the 3633 * fastboot case, we'll flip, but if we don't update the pipesrc and 3634 * pfit state, we'll end up with a big fb scanned out into the wrong 3635 * sized surface. 
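 * PIPESRC is therefore rewritten below, followed by the gen-specific
 * panel fitter update.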
3636 */ 3637 3638 I915_WRITE(PIPESRC(crtc->pipe), 3639 ((pipe_config->pipe_src_w - 1) << 16) | 3640 (pipe_config->pipe_src_h - 1)); 3641 3642 /* on skylake this is done by detaching scalers */ 3643 if (INTEL_GEN(dev_priv) >= 9) { 3644 skl_detach_scalers(crtc); 3645 3646 if (pipe_config->pch_pfit.enabled) 3647 skylake_pfit_enable(crtc); 3648 } else if (HAS_PCH_SPLIT(dev_priv)) { 3649 if (pipe_config->pch_pfit.enabled) 3650 ironlake_pfit_enable(crtc); 3651 else if (old_crtc_state->pch_pfit.enabled) 3652 ironlake_pfit_disable(crtc, true); 3653 } 3654} 3655 3656static void intel_fdi_normal_train(struct intel_crtc *crtc) 3657{ 3658 struct drm_device *dev = crtc->base.dev; 3659 struct drm_i915_private *dev_priv = to_i915(dev); 3660 int pipe = crtc->pipe; 3661 i915_reg_t reg; 3662 u32 temp; 3663 3664 /* enable normal train */ 3665 reg = FDI_TX_CTL(pipe); 3666 temp = I915_READ(reg); 3667 if (IS_IVYBRIDGE(dev_priv)) { 3668 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 3669 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 3670 } else { 3671 temp &= ~FDI_LINK_TRAIN_NONE; 3672 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 3673 } 3674 I915_WRITE(reg, temp); 3675 3676 reg = FDI_RX_CTL(pipe); 3677 temp = I915_READ(reg); 3678 if (HAS_PCH_CPT(dev_priv)) { 3679 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3680 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 3681 } else { 3682 temp &= ~FDI_LINK_TRAIN_NONE; 3683 temp |= FDI_LINK_TRAIN_NONE; 3684 } 3685 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 3686 3687 /* wait one idle pattern time */ 3688 POSTING_READ(reg); 3689 udelay(1000); 3690 3691 /* IVB wants error correction enabled */ 3692 if (IS_IVYBRIDGE(dev_priv)) 3693 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 3694 FDI_FE_ERRC_ENABLE); 3695} 3696 3697/* The FDI link training functions for ILK/Ibexpeak. 
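 * Rough flow: unmask the lock bits in FDI_RX_IMR, enable TX/RX with
 * training pattern 1 and poll FDI_RX_IIR for FDI_RX_BIT_LOCK, then
 * switch both sides to pattern 2 and poll for FDI_RX_SYMBOL_LOCK.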

/* The FDI link training functions for ILK/Ibex Peak. */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");
}

static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
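
/*
 * A rough sketch of the sequence shared by the trainers below (an editorial
 * summary, not a quote from the PRM): emit training pattern 1 until the RX
 * reports bit lock, then pattern 2 until it reports symbol lock, stepping
 * through the vswing/pre-emphasis table above whenever lock is not reported.
 */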

/* The FDI link training functions for SNB/Cougar Point. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
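
/*
 * Illustrative mapping of the retry loop above (an editorial note, not
 * from the PRM): j/2 indexes snb_b_fdi_train_param[], so j = 0,1 try
 * entry 0, j = 2,3 try entry 1, and so on - each vswing/pre-emphasis
 * setting gets two attempts before the next one is tried.
 */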

static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}

static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off.
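	 * (An editorial note: the 100us settle below roughly mirrors the
	 * 100-200us warm-up delays used on the enable side in
	 * ironlake_fdi_pll_enable().)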
	 */
	POSTING_READ(reg);
	udelay(100);
}

static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}

bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->flip_work)
			intel_wait_for_vblank(dev_priv, crtc->pipe);

		return true;
	}

	return false;
}

static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_flip_work *work = intel_crtc->flip_work;

	intel_crtc->flip_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);

	queue_work(dev_priv->wq, &work->unpin_work);
}

static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = intel_crtc->flip_work;
		if (work && !is_mmio_work(work)) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}

void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
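	 *
	 * A worked example with illustrative numbers (editorial, not from
	 * the PRM): for clock = 108000 KHz and auxdiv = 0,
	 *   desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000) = 1600,
	 *   divsel = 1600 / 64 - 2 = 23, phaseinc = 1600 % 64 = 0,
	 * and 23 fits the 7-bit divisor, so the loop stops at auxdiv = 0.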
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock, auxdiv, divsel, phasedir, phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
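
/*
 * A sanity check of the readout above (editorial): it inverts the
 * decomposition done in lpt_program_iclkip(). Reusing the illustrative
 * numbers from that function, divsel = 23, phaseinc = 0, auxdiv = 0 gives
 * desired_divisor = (23 + 2) * 64 + 0 = 1600, and
 * 172800000 / 1600 = 108000 KHz - the clock we started from.
 */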

static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}

static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev, false);
		else
			cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	default:
		BUG();
	}
}

/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		if (encoder->type == INTEL_OUTPUT_DP ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	return -1;
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, train the FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL.
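	 *
	 * (A sketch of what follows, as an editorial note: on CPT the
	 * PCH_DPLL_SEL write routes either DPLL A or DPLL B to this pipe's
	 * transcoder; setting TRANS_DPLLB_SEL selects DPLL B, clearing it
	 * selects DPLL A.)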
	 */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already uses the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}

static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing.
	 */
	ironlake_pch_transcoder_set_timings(crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}

static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}

static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	need_scaling = src_w != dst_w || src_h != dst_h;

	/*
	 * if plane is being disabled or scaler is no longer required or force detach
	 *  - free scaler bound to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to another user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

	    src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	    dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
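
/*
 * A quick usage sketch of skl_update_scaler() (editorial, derived from the
 * two callers below): the crtc path passes SKL_CRTC_INDEX as scaler_user
 * and the pipe-src vs. hdisplay/vdisplay sizes, while the plane path
 * passes drm_plane_index() and the plane's src/dst rectangle sizes.
 */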

/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc's scaler state
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id,
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}

/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @crtc_state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{

	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	bool force_detach = !fb || !plane_state->base.visible;

	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				drm_rect_width(&plane_state->base.src) >> 16,
				drm_rect_height(&plane_state->base.src) >> 16,
				drm_rect_width(&plane_state->base.dst),
				drm_rect_height(&plane_state->base.dst));

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->format->format);
		return -EINVAL;
	}

	return 0;
}

static void skylake_scaler_disable(struct intel_crtc *crtc)
{
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(crtc, i);
}

static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	if (crtc->config->pch_pfit.enabled) {
		int id;

		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
			return;

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
	}
}

static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}

void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank.
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "it's not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}

void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev_priv, crtc->pipe);
}

static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;

		mutex_lock(&dev->struct_mutex);
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS. Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}

/* FIXME move all this to pre_plane_update() with proper state tracking */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}

/* FIXME get rid of this and use pre_plane_update */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	intel_pre_disable_primary(crtc);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, first disable
	 * the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
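	 *
	 * Spelled out as an ordering (an editorial summary of the above):
	 * disable self-refresh (cxsr) first, then wait for a vblank so the
	 * disable actually takes effect, before the plane and pipe are shut
	 * down - which is what the code below and its callers implement.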
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, pipe);
}

static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);

	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(crtc);

	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_post_update(crtc);

		if (primary_state->base.visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->base.visible))
			intel_post_enable_primary(&crtc->base);
	}
}

static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
				   struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_pre_update(crtc, pipe_config, primary_state);

		if (old_primary_state->base.visible &&
		    (modeset || !primary_state->base.visible))
			intel_pre_disable_primary(&crtc->base);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, first disable
	 * the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
	    pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev_priv, crtc->pipe);

	/*
	 * If we're doing a modeset, we're done. No need to do any pre-vblank
	 * watermark programming here.
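	 *
	 * (An editorial note: for a full modeset the watermarks are
	 * presumably programmed as part of the modeset sequence itself,
	 * so only the non-modeset, plane-update path continues below.)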
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks. For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(crtc);
}

static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;
	int pipe = intel_crtc->pipe;

	intel_crtc_dpms_overlay_disable(intel_crtc);

	drm_for_each_plane_mask(p, dev, plane_mask)
		to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
}

static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
					  struct intel_crtc_state *crtc_state,
					  struct drm_atomic_state *old_state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != crtc)
			continue;

		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_pre_enable(struct drm_crtc *crtc,
				      struct intel_crtc_state *crtc_state,
				      struct drm_atomic_state *old_state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != crtc)
			continue;

		if (encoder->pre_enable)
			encoder->pre_enable(encoder, crtc_state, conn_state);
	}
}

static void intel_encoders_enable(struct drm_crtc *crtc,
				  struct intel_crtc_state *crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_connector_state *conn_state;
	struct drm_connector *conn;
	int i;

	for_each_new_connector_in_state(old_state, conn, conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(conn_state->best_encoder);

		if (conn_state->crtc != crtc)
			continue;

		encoder->enable(encoder, crtc_state, conn_state);
		intel_opregion_notify_encoder(encoder, true);
	}
}

static void intel_encoders_disable(struct drm_crtc *crtc,
				   struct intel_crtc_state *old_crtc_state,
				   struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != crtc)
			continue;

		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder, old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_disable(struct drm_crtc *crtc,
					struct intel_crtc_state *old_crtc_state,
					struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != crtc)
			continue;

		if (encoder->post_disable)
			encoder->post_disable(encoder, old_crtc_state, old_conn_state);
	}
}

static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
					    struct intel_crtc_state *old_crtc_state,
					    struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *conn;
	int i;

	for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
		struct intel_encoder *encoder =
			to_intel_encoder(old_conn_state->best_encoder);

		if (old_conn_state->crtc != crtc)
			continue;

		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
	}
}

static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev_priv, pipe);
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* IPS only exists on ULT machines and is tied to pipe A.
 */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}

static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(intel_crtc, pipe_config);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_pipe_clock(pipe_config);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	/* XXX: Do the pipe assertions at the right place for BXT DSI.
	 */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(pipe_config);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}

static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell, only disable the pfit
	 * if it's in use. The hw state code will make sure we get this right. */
	if (force || crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}

static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	if (intel_crtc->config->has_pch_encoder) {
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
	}

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(intel_crtc);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_pipe_clock(intel_crtc->config);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (old_crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
}

static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

static u64 get_crtc_power_domains(struct drm_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	u64 mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_PLLS);

	return mask;
}

static u64
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;

	old_domains = intel_crtc->enabled_power_domains;
	intel_crtc->enabled_power_domains = new_domains =
		get_crtc_power_domains(crtc, crtc_state);

	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	return old_domains & ~new_domains;
}

static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      u64 domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}

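/*
 * Editor's illustration (hypothetical helper, not part of the driver):
 * the get/put pairing above is plain bitmask set algebra. On a modeset
 * we take references only on the domains that are newly needed and
 * return the ones that became stale, so domains present in both the old
 * and new masks keep their reference across the transition.
 */
static inline void example_power_domain_delta(u64 old_domains,
					      u64 new_domains,
					      u64 *to_get, u64 *to_put)
{
	/* e.g. old = {PIPE_A, PLLS}, new = {PIPE_A, AUDIO}:
	 * to_get = {AUDIO}, to_put = {PLLS}; PIPE_A is untouched. */
	*to_get = new_domains & ~old_domains;
	*to_put = old_domains & ~new_domains;
}
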
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
				   struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		struct drm_i915_private *dev_priv = to_i915(dev);

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_prepare_pll(intel_crtc, intel_crtc->config);
		chv_enable_pll(intel_crtc, intel_crtc->config);
	} else {
		vlv_prepare_pll(intel_crtc, intel_crtc->config);
		vlv_enable_pll(intel_crtc, intel_crtc->config);
	}

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	dev_priv->display.initial_watermarks(old_intel_state,
					     pipe_config);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}

static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}

static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
			     struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     intel_crtc->config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}

static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}

static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}

static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	if (crtc->primary->state->visible) {
		WARN_ON(intel_crtc->flip_work);

		intel_pre_disable_primary_noatomic(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		crtc->primary->state->visible = false;
	}

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory\n",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(intel_crtc);

	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}

/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
	else
		dev_priv->modeset_restore_state = state;
	return ret;
}

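/*
 * Editor's sketch of the resume counterpart (hypothetical helper; the
 * real restore path lives in the suspend/resume code elsewhere in i915
 * and does more bookkeeping): the state stashed above is handed back to
 * the atomic helpers on resume.
 */
static inline int example_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;

	dev_priv->modeset_restore_state = NULL;
	if (!state)
		return 0;

	return drm_atomic_helper_resume(dev, state);
}
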
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		I915_STATE_WARN(!crtc_state,
				"connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->active,
				"connector is active, but attached crtc isn't\n");

		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
				"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
				"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc_state && crtc_state->active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
				"best encoder set without crtc!\n");
	}
}

int intel_connector_init(struct intel_connector *connector)
{
	struct intel_digital_connector_state *conn_state;

	/*
	 * Allocate enough memory to hold intel_digital_connector_state.
	 * This might be a few bytes too many, but for connectors that don't
	 * need it we'll free the state and allocate a smaller one on the first
	 * successful commit anyway.
	 */
	conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
	if (!conn_state)
		return -ENOMEM;

	__drm_atomic_helper_connector_reset(&connector->base,
					    &conn_state->base);

	return 0;
}

struct intel_connector *intel_connector_alloc(void)
{
	struct intel_connector *connector;

	connector = kzalloc(sizeof *connector, GFP_KERNEL);
	if (!connector)
		return NULL;

	if (intel_connector_init(connector) < 0) {
		kfree(connector);
		return NULL;
	}

	return connector;
}

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}

static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}

#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}

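/*
 * Editor's worked example for the FDI math above (hypothetical helper,
 * assumed values): a 154000 kHz mode at 24 bpp needs
 * 154000 * 24 * 21 / 20 = 3,880,800 kbps after the ~5% spread spectrum
 * margin that ironlake_get_lanes_required() is assumed to apply. Each
 * lane carries link_bw * 8 = 270000 * 8 = 2,160,000 kbps, so two lanes
 * suffice.
 */
static inline int example_fdi_lane_count(void)
{
	int fdi_dotclock = 154000;	/* kHz, assumed mode clock */
	int pipe_bpp = 24;
	int link_bw = 270000;		/* kHz, ~2.7 GHz / 10 bits per octet */
	u32 bps = fdi_dotclock * pipe_bpp * 21 / 20;

	return DIV_ROUND_UP(bps, link_bw * 8);	/* == 2 lanes */
}
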
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
				     struct intel_crtc_state *pipe_config)
{
	if (pipe_config->pipe_bpp > 24)
		return false;

	/* HSW can handle pixel rate up to cdclk? */
	if (IS_HASWELL(dev_priv))
		return true;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	return pipe_config->pixel_rate <=
		dev_priv->max_cdclk_freq * 95 / 100;
}

static void hsw_compute_ips_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	pipe_config->ips_enabled = i915.enable_ips &&
		hsw_crtc_supports_ips(crtc) &&
		pipe_config_supports_ips(dev_priv, pipe_config);
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return INTEL_INFO(dev_priv)->gen < 4 &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}

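/*
 * Editor's worked example for ilk_pipe_pixel_rate() (hypothetical
 * helper, assumed values): downscaling a 3840x2160 pipe source into a
 * 1920x1080 pch pfit window multiplies the effective pixel rate by
 * (3840*2160)/(1920*1080) = 4. Upscaling leaves the rate unchanged,
 * because both dimensions are clamped up to the window size first.
 */
static inline u32 example_pfit_pixel_rate(void)
{
	u32 pixel_rate = 533000;		/* kHz, assumed mode clock */
	u64 pipe_w = 3840, pipe_h = 2160;	/* pipe source size */
	u64 pfit_w = 1920, pfit_h = 1080;	/* pfit window size */

	return div_u64((u64)pixel_rate * pipe_w * pipe_h,
		       pfit_w * pfit_h);	/* == 2132000 */
}
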
6183 */ 6184 6185 if (pipe_config->pch_pfit.enabled) { 6186 uint64_t pipe_w, pipe_h, pfit_w, pfit_h; 6187 uint32_t pfit_size = pipe_config->pch_pfit.size; 6188 6189 pipe_w = pipe_config->pipe_src_w; 6190 pipe_h = pipe_config->pipe_src_h; 6191 6192 pfit_w = (pfit_size >> 16) & 0xFFFF; 6193 pfit_h = pfit_size & 0xFFFF; 6194 if (pipe_w < pfit_w) 6195 pipe_w = pfit_w; 6196 if (pipe_h < pfit_h) 6197 pipe_h = pfit_h; 6198 6199 if (WARN_ON(!pfit_w || !pfit_h)) 6200 return pixel_rate; 6201 6202 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h, 6203 pfit_w * pfit_h); 6204 } 6205 6206 return pixel_rate; 6207} 6208 6209static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 6210{ 6211 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 6212 6213 if (HAS_GMCH_DISPLAY(dev_priv)) 6214 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 6215 crtc_state->pixel_rate = 6216 crtc_state->base.adjusted_mode.crtc_clock; 6217 else 6218 crtc_state->pixel_rate = 6219 ilk_pipe_pixel_rate(crtc_state); 6220} 6221 6222static int intel_crtc_compute_config(struct intel_crtc *crtc, 6223 struct intel_crtc_state *pipe_config) 6224{ 6225 struct drm_device *dev = crtc->base.dev; 6226 struct drm_i915_private *dev_priv = to_i915(dev); 6227 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6228 int clock_limit = dev_priv->max_dotclk_freq; 6229 6230 if (INTEL_GEN(dev_priv) < 4) { 6231 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 6232 6233 /* 6234 * Enable double wide mode when the dot clock 6235 * is > 90% of the (display) core speed. 6236 */ 6237 if (intel_crtc_supports_double_wide(crtc) && 6238 adjusted_mode->crtc_clock > clock_limit) { 6239 clock_limit = dev_priv->max_dotclk_freq; 6240 pipe_config->double_wide = true; 6241 } 6242 } 6243 6244 if (adjusted_mode->crtc_clock > clock_limit) { 6245 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 6246 adjusted_mode->crtc_clock, clock_limit, 6247 yesno(pipe_config->double_wide)); 6248 return -EINVAL; 6249 } 6250 6251 /* 6252 * Pipe horizontal size must be even in: 6253 * - DVO ganged mode 6254 * - LVDS dual channel mode 6255 * - Double wide pipe 6256 */ 6257 if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 6258 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 6259 pipe_config->pipe_src_w &= ~1; 6260 6261 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 6262 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 6263 */ 6264 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && 6265 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 6266 return -EINVAL; 6267 6268 intel_crtc_compute_pixel_rate(pipe_config); 6269 6270 if (HAS_IPS(dev_priv)) 6271 hsw_compute_ips_config(crtc, pipe_config); 6272 6273 if (pipe_config->has_pch_encoder) 6274 return ironlake_fdi_compute_config(crtc, pipe_config); 6275 6276 return 0; 6277} 6278 6279static void 6280intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) 6281{ 6282 while (*num > DATA_LINK_M_N_MASK || 6283 *den > DATA_LINK_M_N_MASK) { 6284 *num >>= 1; 6285 *den >>= 1; 6286 } 6287} 6288 6289static void compute_m_n(unsigned int m, unsigned int n, 6290 uint32_t *ret_m, uint32_t *ret_n, 6291 bool reduce_m_n) 6292{ 6293 /* 6294 * Reduce M/N as much as possible without loss in precision. Several DP 6295 * dongles in particular seem to be fussy about too large *link* M/N 6296 * values. 
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915.panel_use_ssc >= 0)
		return i915.panel_use_ssc != 0;
	return dev_priv->vbt.lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}

static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev_priv)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}

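/*
 * Editor's worked example for the FP encodings above (hypothetical
 * helper, assumed divider values): n = 2, m1 = 10, m2 = 8.
 */
static inline void example_fp_encoding(void)
{
	struct dpll d = { .n = 2, .m1 = 10, .m2 = 8 };
	u32 fp, fp_pnv;

	/* i9xx packs n << 16 | m1 << 8 | m2 == 0x00020a08 */
	fp = i9xx_dpll_compute_fp(&d);
	/* Pineview drops m1 and stores n one-hot: (1 << 2) << 16 | 8 == 0x00040008 */
	fp_pnv = pnv_dpll_compute_fp(&d);

	(void)fp;
	(void)fp_pnv;
}
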
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
				 pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}

static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}

static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
		    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
				   TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}

void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc->config->dp_m_n;
		dp_m2_n2 = &crtc->config->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc->config->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	if (crtc->config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}

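/*
 * Editor's usage sketch (hypothetical helper): a DRRS implementation
 * would drop to the panel's low refresh rate by programming the
 * precomputed dp_m2_n2 values through the M2_N2 path above; on hardware
 * without a separate M2_N2 register set those dividers simply land in
 * M1_N1, as the comment in intel_dp_set_m_n() explains.
 */
static inline void example_drrs_downclock(struct intel_crtc *crtc)
{
	/* assumes crtc->config->dp_m2_n2 was computed for the low
	 * refresh rate mode at modeset time */
	intel_dp_set_m_n(crtc, M2_N2);
}
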
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
			DPLL_EXT_BUFFER_ENABLE_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}

static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}

static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	/* Enable Refclk */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll &
		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
			       0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
			       0x00d0000f);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_crtc_has_dp_encoder(crtc->config))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}

static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
		       5 << DPIO_CHV_S1_DIV_SHIFT |
		       bestp1 << DPIO_CHV_P1_DIV_SHIFT |
		       bestp2 << DPIO_CHV_P2_DIV_SHIFT |
		       1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
		       DPIO_CHV_M1_DIV_BY_2 |
		       1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
		       vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
		       DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}

/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
		     const struct dpll *dpll)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_crtc_state *pipe_config;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return -ENOMEM;

	pipe_config->base.crtc = &crtc->base;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll = *dpll;

	if (IS_CHERRYVIEW(dev_priv)) {
		chv_compute_dpll(crtc, pipe_config);
		chv_prepare_pll(crtc, pipe_config);
		chv_enable_pll(crtc, pipe_config);
	} else {
		vlv_compute_dpll(crtc, pipe_config);
		vlv_prepare_pll(crtc, pipe_config);
		vlv_enable_pll(crtc, pipe_config);
	}

	kfree(pipe_config);

	return 0;
}

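/*
 * Editor's usage sketch for the force-on/off pair (hypothetical helper
 * and placeholder dividers, not validated settings): the CRT load
 * detection path is the typical user, needing the pipe A PLL running
 * without committing a full modeset.
 */
static inline void example_force_pll(struct drm_i915_private *dev_priv)
{
	const struct dpll clock = {
		.m1 = 10, .m2 = 8, .p1 = 1, .p2 = 2, .n = 2, .vco = 5400000,
	};

	if (vlv_force_pll_on(dev_priv, PIPE_A, &clock) == 0) {
		/* ... use the hardware that needed the PLL ... */
		vlv_force_pll_off(dev_priv, PIPE_A);
	}
}
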
/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. To be used in cases where we need
 * the PLL enabled even when @pipe is not going to be enabled.
 */
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	if (IS_CHERRYVIEW(dev_priv))
		chv_disable_pll(dev_priv, pipe);
	else
		vlv_disable_pll(dev_priv, pipe);
}

static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
	    IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev_priv))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev_priv) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_GEN(dev_priv) >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_GEN(dev_priv) >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}

static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (!IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}

static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}

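/*
 * Editor's worked example of the timing register layout above
 * (hypothetical helper): each register pairs two zero-based fields,
 * the active/start value in bits 15:0 and the total/end value in bits
 * 31:16. For 1920 active pixels and an htotal of 2200 the HTOTAL value
 * is (1920 - 1) | ((2200 - 1) << 16) == 0x0897077f.
 */
static inline u32 example_timing_pack(u32 active, u32 total)
{
	return (active - 1) | ((total - 1) << 16);
}
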
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = intel_crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}

static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}
}

static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}

void intel_mode_from_pipe_config(struct drm_display_mode *mode,
				 struct intel_crtc_state *pipe_config)
{
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;

	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;

	mode->flags = pipe_config->base.adjusted_mode.flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;

	mode->hsync = drm_mode_hsync(mode);
	mode->vrefresh = drm_mode_vrefresh(mode);
	drm_mode_set_name(mode);
}

static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev_priv)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}

static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 48000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_i8xx_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
		limit = &intel_limits_i8xx_dvo;
	} else {
		limit = &intel_limits_i8xx_dac;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i8xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_pineview_lvds;
	} else {
		limit = &intel_limits_pineview_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_i9xx_lvds;
	} else {
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

static int chv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_chv;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	chv_compute_dpll(crtc, crtc_state);

	return 0;
}

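/*
 * Editor's note: every *_crtc_compute_clock() variant above follows the
 * same recipe -- pick a limits table and reference clock for the output
 * type, then search for dividers hitting the requested port clock. The
 * sketch below (hypothetical helper) shows the generic relation those
 * searches solve for; the real per-platform calculators add fixed
 * offsets and fractional fields on top of this.
 */
static inline int example_dpll_dot_clock(int refclk, const struct dpll *clock)
{
	/* vco = refclk * m / n, then post-divide by p1 * p2 */
	int vco = refclk * clock->m / clock->n;

	return vco / (clock->p1 * clock->p2);
}
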
crtc_state); 7255 7256 return 0; 7257} 7258 7259static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 7260 struct intel_crtc_state *crtc_state) 7261{ 7262 int refclk = 100000; 7263 const struct intel_limit *limit = &intel_limits_vlv; 7264 7265 memset(&crtc_state->dpll_hw_state, 0, 7266 sizeof(crtc_state->dpll_hw_state)); 7267 7268 if (!crtc_state->clock_set && 7269 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7270 refclk, NULL, &crtc_state->dpll)) { 7271 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7272 return -EINVAL; 7273 } 7274 7275 vlv_compute_dpll(crtc, crtc_state); 7276 7277 return 0; 7278} 7279 7280static void i9xx_get_pfit_config(struct intel_crtc *crtc, 7281 struct intel_crtc_state *pipe_config) 7282{ 7283 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7284 uint32_t tmp; 7285 7286 if (INTEL_GEN(dev_priv) <= 3 && 7287 (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) 7288 return; 7289 7290 tmp = I915_READ(PFIT_CONTROL); 7291 if (!(tmp & PFIT_ENABLE)) 7292 return; 7293 7294 /* Check whether the pfit is attached to our pipe. */ 7295 if (INTEL_GEN(dev_priv) < 4) { 7296 if (crtc->pipe != PIPE_B) 7297 return; 7298 } else { 7299 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 7300 return; 7301 } 7302 7303 pipe_config->gmch_pfit.control = tmp; 7304 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 7305} 7306 7307static void vlv_crtc_clock_get(struct intel_crtc *crtc, 7308 struct intel_crtc_state *pipe_config) 7309{ 7310 struct drm_device *dev = crtc->base.dev; 7311 struct drm_i915_private *dev_priv = to_i915(dev); 7312 int pipe = pipe_config->cpu_transcoder; 7313 struct dpll clock; 7314 u32 mdiv; 7315 int refclk = 100000; 7316 7317 /* In case of DSI, DPLL will not be used */ 7318 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7319 return; 7320 7321 mutex_lock(&dev_priv->sb_lock); 7322 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 7323 mutex_unlock(&dev_priv->sb_lock); 7324 7325 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 7326 clock.m2 = mdiv & DPIO_M2DIV_MASK; 7327 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 7328 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 7329 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 7330 7331 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 7332} 7333 7334static void 7335i9xx_get_initial_plane_config(struct intel_crtc *crtc, 7336 struct intel_initial_plane_config *plane_config) 7337{ 7338 struct drm_device *dev = crtc->base.dev; 7339 struct drm_i915_private *dev_priv = to_i915(dev); 7340 u32 val, base, offset; 7341 int pipe = crtc->pipe, plane = crtc->plane; 7342 int fourcc, pixel_format; 7343 unsigned int aligned_height; 7344 struct drm_framebuffer *fb; 7345 struct intel_framebuffer *intel_fb; 7346 7347 val = I915_READ(DSPCNTR(plane)); 7348 if (!(val & DISPLAY_PLANE_ENABLE)) 7349 return; 7350 7351 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 7352 if (!intel_fb) { 7353 DRM_DEBUG_KMS("failed to alloc fb\n"); 7354 return; 7355 } 7356 7357 fb = &intel_fb->base; 7358 7359 fb->dev = dev; 7360 7361 if (INTEL_GEN(dev_priv) >= 4) { 7362 if (val & DISPPLANE_TILED) { 7363 plane_config->tiling = I915_TILING_X; 7364 fb->modifier = I915_FORMAT_MOD_X_TILED; 7365 } 7366 } 7367 7368 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 7369 fourcc = i9xx_format_to_fourcc(pixel_format); 7370 fb->format = drm_format_info(fourcc); 7371 7372 if (INTEL_GEN(dev_priv) >= 4) { 7373 if (plane_config->tiling) 7374 offset = I915_READ(DSPTILEOFF(plane)); 7375 else 7376 offset = 
I915_READ(DSPLINOFF(plane)); 7377 base = I915_READ(DSPSURF(plane)) & 0xfffff000; 7378 } else { 7379 base = I915_READ(DSPADDR(plane)); 7380 } 7381 plane_config->base = base; 7382 7383 val = I915_READ(PIPESRC(pipe)); 7384 fb->width = ((val >> 16) & 0xfff) + 1; 7385 fb->height = ((val >> 0) & 0xfff) + 1; 7386 7387 val = I915_READ(DSPSTRIDE(pipe)); 7388 fb->pitches[0] = val & 0xffffffc0; 7389 7390 aligned_height = intel_fb_align_height(fb, 0, fb->height); 7391 7392 plane_config->size = fb->pitches[0] * aligned_height; 7393 7394 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 7395 pipe_name(pipe), plane, fb->width, fb->height, 7396 fb->format->cpp[0] * 8, base, fb->pitches[0], 7397 plane_config->size); 7398 7399 plane_config->fb = intel_fb; 7400} 7401 7402static void chv_crtc_clock_get(struct intel_crtc *crtc, 7403 struct intel_crtc_state *pipe_config) 7404{ 7405 struct drm_device *dev = crtc->base.dev; 7406 struct drm_i915_private *dev_priv = to_i915(dev); 7407 int pipe = pipe_config->cpu_transcoder; 7408 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7409 struct dpll clock; 7410 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 7411 int refclk = 100000; 7412 7413 /* In case of DSI, DPLL will not be used */ 7414 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7415 return; 7416 7417 mutex_lock(&dev_priv->sb_lock); 7418 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 7419 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 7420 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 7421 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 7422 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 7423 mutex_unlock(&dev_priv->sb_lock); 7424 7425 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 
2 : 0; 7426 clock.m2 = (pll_dw0 & 0xff) << 22; 7427 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 7428 clock.m2 |= pll_dw2 & 0x3fffff; 7429 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 7430 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 7431 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 7432 7433 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 7434} 7435 7436static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 7437 struct intel_crtc_state *pipe_config) 7438{ 7439 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7440 enum intel_display_power_domain power_domain; 7441 uint32_t tmp; 7442 bool ret; 7443 7444 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 7445 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 7446 return false; 7447 7448 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 7449 pipe_config->shared_dpll = NULL; 7450 7451 ret = false; 7452 7453 tmp = I915_READ(PIPECONF(crtc->pipe)); 7454 if (!(tmp & PIPECONF_ENABLE)) 7455 goto out; 7456 7457 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 7458 IS_CHERRYVIEW(dev_priv)) { 7459 switch (tmp & PIPECONF_BPC_MASK) { 7460 case PIPECONF_6BPC: 7461 pipe_config->pipe_bpp = 18; 7462 break; 7463 case PIPECONF_8BPC: 7464 pipe_config->pipe_bpp = 24; 7465 break; 7466 case PIPECONF_10BPC: 7467 pipe_config->pipe_bpp = 30; 7468 break; 7469 default: 7470 break; 7471 } 7472 } 7473 7474 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 7475 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 7476 pipe_config->limited_color_range = true; 7477 7478 if (INTEL_GEN(dev_priv) < 4) 7479 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 7480 7481 intel_get_pipe_timings(crtc, pipe_config); 7482 intel_get_pipe_src_size(crtc, pipe_config); 7483 7484 i9xx_get_pfit_config(crtc, pipe_config); 7485 7486 if (INTEL_GEN(dev_priv) >= 4) { 7487 /* No way to read it out on pipes B and C */ 7488 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 7489 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 7490 else 7491 tmp = I915_READ(DPLL_MD(crtc->pipe)); 7492 pipe_config->pixel_multiplier = 7493 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 7494 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 7495 pipe_config->dpll_hw_state.dpll_md = tmp; 7496 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 7497 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 7498 tmp = I915_READ(DPLL(crtc->pipe)); 7499 pipe_config->pixel_multiplier = 7500 ((tmp & SDVO_MULTIPLIER_MASK) 7501 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 7502 } else { 7503 /* Note that on i915G/GM the pixel multiplier is in the sdvo 7504 * port and will be fixed up in the encoder->get_config 7505 * function. */ 7506 pipe_config->pixel_multiplier = 1; 7507 } 7508 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 7509 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 7510 /* 7511 * DPLL_DVO_2X_MODE must be enabled for both DPLLs 7512 * on 830. Filter it out here so that we don't 7513 * report errors due to that. 7514 */ 7515 if (IS_I830(dev_priv)) 7516 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE; 7517 7518 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 7519 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 7520 } else { 7521 /* Mask out read-only status bits. 
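		 * (DPLL_LOCK_VLV and the PORTB/PORTC ready flags are set by
		 * hardware; the state checker should only compare bits that
		 * software actually programs.)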
*/ 7522 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 7523 DPLL_PORTC_READY_MASK | 7524 DPLL_PORTB_READY_MASK); 7525 } 7526 7527 if (IS_CHERRYVIEW(dev_priv)) 7528 chv_crtc_clock_get(crtc, pipe_config); 7529 else if (IS_VALLEYVIEW(dev_priv)) 7530 vlv_crtc_clock_get(crtc, pipe_config); 7531 else 7532 i9xx_crtc_clock_get(crtc, pipe_config); 7533 7534 /* 7535 * Normally the dotclock is filled in by the encoder .get_config() 7536 * but in case the pipe is enabled w/o any ports we need a sane 7537 * default. 7538 */ 7539 pipe_config->base.adjusted_mode.crtc_clock = 7540 pipe_config->port_clock / pipe_config->pixel_multiplier; 7541 7542 ret = true; 7543 7544out: 7545 intel_display_power_put(dev_priv, power_domain); 7546 7547 return ret; 7548} 7549 7550static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) 7551{ 7552 struct intel_encoder *encoder; 7553 int i; 7554 u32 val, final; 7555 bool has_lvds = false; 7556 bool has_cpu_edp = false; 7557 bool has_panel = false; 7558 bool has_ck505 = false; 7559 bool can_ssc = false; 7560 bool using_ssc_source = false; 7561 7562 /* We need to take the global config into account */ 7563 for_each_intel_encoder(&dev_priv->drm, encoder) { 7564 switch (encoder->type) { 7565 case INTEL_OUTPUT_LVDS: 7566 has_panel = true; 7567 has_lvds = true; 7568 break; 7569 case INTEL_OUTPUT_EDP: 7570 has_panel = true; 7571 if (enc_to_dig_port(&encoder->base)->port == PORT_A) 7572 has_cpu_edp = true; 7573 break; 7574 default: 7575 break; 7576 } 7577 } 7578 7579 if (HAS_PCH_IBX(dev_priv)) { 7580 has_ck505 = dev_priv->vbt.display_clock_mode; 7581 can_ssc = has_ck505; 7582 } else { 7583 has_ck505 = false; 7584 can_ssc = true; 7585 } 7586 7587 /* Check if any DPLLs are using the SSC source */ 7588 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 7589 u32 temp = I915_READ(PCH_DPLL(i)); 7590 7591 if (!(temp & DPLL_VCO_ENABLE)) 7592 continue; 7593 7594 if ((temp & PLL_REF_INPUT_MASK) == 7595 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 7596 using_ssc_source = true; 7597 break; 7598 } 7599 } 7600 7601 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 7602 has_panel, has_lvds, has_ck505, using_ssc_source); 7603 7604 /* Ironlake: try to setup display ref clock before DPLL 7605 * enabling. This is only under driver's control after 7606 * PCH B stepping, previous chipset stepping should be 7607 * ignoring this setting. 7608 */ 7609 val = I915_READ(PCH_DREF_CONTROL); 7610 7611 /* As we must carefully and slowly disable/enable each source in turn, 7612 * compute the final state we want first and check if we need to 7613 * make any changes at all. 
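	 For example, on a CPT machine (no CK505 clock chip) driving an
	 LVDS panel with SSC and no CPU eDP, final works out to
	 DREF_NONSPREAD_SOURCE_ENABLE | DREF_SSC_SOURCE_ENABLE |
	 DREF_SSC1_ENABLE | DREF_CPU_SOURCE_OUTPUT_DISABLE; if
	 PCH_DREF_CONTROL already reads back exactly that value we
	 return early without touching the hardware.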
7614 */ 7615 final = val; 7616 final &= ~DREF_NONSPREAD_SOURCE_MASK; 7617 if (has_ck505) 7618 final |= DREF_NONSPREAD_CK505_ENABLE; 7619 else 7620 final |= DREF_NONSPREAD_SOURCE_ENABLE; 7621 7622 final &= ~DREF_SSC_SOURCE_MASK; 7623 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 7624 final &= ~DREF_SSC1_ENABLE; 7625 7626 if (has_panel) { 7627 final |= DREF_SSC_SOURCE_ENABLE; 7628 7629 if (intel_panel_use_ssc(dev_priv) && can_ssc) 7630 final |= DREF_SSC1_ENABLE; 7631 7632 if (has_cpu_edp) { 7633 if (intel_panel_use_ssc(dev_priv) && can_ssc) 7634 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 7635 else 7636 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 7637 } else 7638 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 7639 } else if (using_ssc_source) { 7640 final |= DREF_SSC_SOURCE_ENABLE; 7641 final |= DREF_SSC1_ENABLE; 7642 } 7643 7644 if (final == val) 7645 return; 7646 7647 /* Always enable nonspread source */ 7648 val &= ~DREF_NONSPREAD_SOURCE_MASK; 7649 7650 if (has_ck505) 7651 val |= DREF_NONSPREAD_CK505_ENABLE; 7652 else 7653 val |= DREF_NONSPREAD_SOURCE_ENABLE; 7654 7655 if (has_panel) { 7656 val &= ~DREF_SSC_SOURCE_MASK; 7657 val |= DREF_SSC_SOURCE_ENABLE; 7658 7659 /* SSC must be turned on before enabling the CPU output */ 7660 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 7661 DRM_DEBUG_KMS("Using SSC on panel\n"); 7662 val |= DREF_SSC1_ENABLE; 7663 } else 7664 val &= ~DREF_SSC1_ENABLE; 7665 7666 /* Get SSC going before enabling the outputs */ 7667 I915_WRITE(PCH_DREF_CONTROL, val); 7668 POSTING_READ(PCH_DREF_CONTROL); 7669 udelay(200); 7670 7671 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 7672 7673 /* Enable CPU source on CPU attached eDP */ 7674 if (has_cpu_edp) { 7675 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 7676 DRM_DEBUG_KMS("Using SSC on eDP\n"); 7677 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 7678 } else 7679 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 7680 } else 7681 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 7682 7683 I915_WRITE(PCH_DREF_CONTROL, val); 7684 POSTING_READ(PCH_DREF_CONTROL); 7685 udelay(200); 7686 } else { 7687 DRM_DEBUG_KMS("Disabling CPU source output\n"); 7688 7689 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 7690 7691 /* Turn off CPU output */ 7692 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 7693 7694 I915_WRITE(PCH_DREF_CONTROL, val); 7695 POSTING_READ(PCH_DREF_CONTROL); 7696 udelay(200); 7697 7698 if (!using_ssc_source) { 7699 DRM_DEBUG_KMS("Disabling SSC source\n"); 7700 7701 /* Turn off the SSC source */ 7702 val &= ~DREF_SSC_SOURCE_MASK; 7703 val |= DREF_SSC_SOURCE_DISABLE; 7704 7705 /* Turn off SSC1 */ 7706 val &= ~DREF_SSC1_ENABLE; 7707 7708 I915_WRITE(PCH_DREF_CONTROL, val); 7709 POSTING_READ(PCH_DREF_CONTROL); 7710 udelay(200); 7711 } 7712 } 7713 7714 BUG_ON(val != final); 7715} 7716 7717static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 7718{ 7719 uint32_t tmp; 7720 7721 tmp = I915_READ(SOUTH_CHICKEN2); 7722 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 7723 I915_WRITE(SOUTH_CHICKEN2, tmp); 7724 7725 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & 7726 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 7727 DRM_ERROR("FDI mPHY reset assert timeout\n"); 7728 7729 tmp = I915_READ(SOUTH_CHICKEN2); 7730 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 7731 I915_WRITE(SOUTH_CHICKEN2, tmp); 7732 7733 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & 7734 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 7735 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 7736} 7737 7738/* WaMPhyProgramming:hsw */ 7739static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 7740{ 7741 uint32_t tmp; 7742 7743 tmp = 
intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 7744 tmp &= ~(0xFF << 24); 7745 tmp |= (0x12 << 24); 7746 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 7747 7748 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 7749 tmp |= (1 << 11); 7750 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 7751 7752 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 7753 tmp |= (1 << 11); 7754 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 7755 7756 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 7757 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 7758 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 7759 7760 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 7761 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 7762 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 7763 7764 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 7765 tmp &= ~(7 << 13); 7766 tmp |= (5 << 13); 7767 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 7768 7769 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 7770 tmp &= ~(7 << 13); 7771 tmp |= (5 << 13); 7772 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 7773 7774 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 7775 tmp &= ~0xFF; 7776 tmp |= 0x1C; 7777 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 7778 7779 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 7780 tmp &= ~0xFF; 7781 tmp |= 0x1C; 7782 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 7783 7784 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 7785 tmp &= ~(0xFF << 16); 7786 tmp |= (0x1C << 16); 7787 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 7788 7789 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 7790 tmp &= ~(0xFF << 16); 7791 tmp |= (0x1C << 16); 7792 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 7793 7794 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 7795 tmp |= (1 << 27); 7796 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 7797 7798 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 7799 tmp |= (1 << 27); 7800 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 7801 7802 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 7803 tmp &= ~(0xF << 28); 7804 tmp |= (4 << 28); 7805 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 7806 7807 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 7808 tmp &= ~(0xF << 28); 7809 tmp |= (4 << 28); 7810 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 7811} 7812 7813/* Implements 3 different sequences from BSpec chapter "Display iCLK 7814 * Programming" based on the parameters passed: 7815 * - Sequence to enable CLKOUT_DP 7816 * - Sequence to enable CLKOUT_DP without spread 7817 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 7818 */ 7819static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 7820 bool with_spread, bool with_fdi) 7821{ 7822 uint32_t reg, tmp; 7823 7824 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 7825 with_spread = true; 7826 if (WARN(HAS_PCH_LPT_LP(dev_priv) && 7827 with_fdi, "LP PCH doesn't have FDI\n")) 7828 with_fdi = false; 7829 7830 mutex_lock(&dev_priv->sb_lock); 7831 7832 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 7833 tmp &= ~SBI_SSCCTL_DISABLE; 7834 tmp |= SBI_SSCCTL_PATHALT; 7835 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7836 7837 udelay(24); 7838 7839 if (with_spread) { 7840 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 7841 tmp &= ~SBI_SSCCTL_PATHALT; 7842 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7843 7844 if (with_fdi) { 7845 lpt_reset_fdi_mphy(dev_priv); 7846 lpt_program_fdi_mphy(dev_priv); 7847 } 7848 } 7849 7850 reg = 
HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 7851 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 7852 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 7853 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 7854 7855 mutex_unlock(&dev_priv->sb_lock); 7856} 7857 7858/* Sequence to disable CLKOUT_DP */ 7859static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 7860{ 7861 uint32_t reg, tmp; 7862 7863 mutex_lock(&dev_priv->sb_lock); 7864 7865 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 7866 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 7867 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 7868 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 7869 7870 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 7871 if (!(tmp & SBI_SSCCTL_DISABLE)) { 7872 if (!(tmp & SBI_SSCCTL_PATHALT)) { 7873 tmp |= SBI_SSCCTL_PATHALT; 7874 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7875 udelay(32); 7876 } 7877 tmp |= SBI_SSCCTL_DISABLE; 7878 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7879 } 7880 7881 mutex_unlock(&dev_priv->sb_lock); 7882} 7883 7884#define BEND_IDX(steps) ((50 + (steps)) / 5) 7885 7886static const uint16_t sscdivintphase[] = { 7887 [BEND_IDX( 50)] = 0x3B23, 7888 [BEND_IDX( 45)] = 0x3B23, 7889 [BEND_IDX( 40)] = 0x3C23, 7890 [BEND_IDX( 35)] = 0x3C23, 7891 [BEND_IDX( 30)] = 0x3D23, 7892 [BEND_IDX( 25)] = 0x3D23, 7893 [BEND_IDX( 20)] = 0x3E23, 7894 [BEND_IDX( 15)] = 0x3E23, 7895 [BEND_IDX( 10)] = 0x3F23, 7896 [BEND_IDX( 5)] = 0x3F23, 7897 [BEND_IDX( 0)] = 0x0025, 7898 [BEND_IDX( -5)] = 0x0025, 7899 [BEND_IDX(-10)] = 0x0125, 7900 [BEND_IDX(-15)] = 0x0125, 7901 [BEND_IDX(-20)] = 0x0225, 7902 [BEND_IDX(-25)] = 0x0225, 7903 [BEND_IDX(-30)] = 0x0325, 7904 [BEND_IDX(-35)] = 0x0325, 7905 [BEND_IDX(-40)] = 0x0425, 7906 [BEND_IDX(-45)] = 0x0425, 7907 [BEND_IDX(-50)] = 0x0525, 7908}; 7909 7910/* 7911 * Bend CLKOUT_DP 7912 * steps -50 to 50 inclusive, in steps of 5 7913 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 7914 * change in clock period = -(steps / 10) * 5.787 ps 7915 */ 7916static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 7917{ 7918 uint32_t tmp; 7919 int idx = BEND_IDX(steps); 7920 7921 if (WARN_ON(steps % 5 != 0)) 7922 return; 7923 7924 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) 7925 return; 7926 7927 mutex_lock(&dev_priv->sb_lock); 7928 7929 if (steps % 10 != 0) 7930 tmp = 0xAAAAAAAB; 7931 else 7932 tmp = 0x00000000; 7933 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 7934 7935 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 7936 tmp &= 0xffff0000; 7937 tmp |= sscdivintphase[idx]; 7938 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 7939 7940 mutex_unlock(&dev_priv->sb_lock); 7941} 7942 7943#undef BEND_IDX 7944 7945static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 7946{ 7947 struct intel_encoder *encoder; 7948 bool has_vga = false; 7949 7950 for_each_intel_encoder(&dev_priv->drm, encoder) { 7951 switch (encoder->type) { 7952 case INTEL_OUTPUT_ANALOG: 7953 has_vga = true; 7954 break; 7955 default: 7956 break; 7957 } 7958 } 7959 7960 if (has_vga) { 7961 lpt_bend_clkout_dp(dev_priv, 0); 7962 lpt_enable_clkout_dp(dev_priv, true, true); 7963 } else { 7964 lpt_disable_clkout_dp(dev_priv); 7965 } 7966} 7967 7968/* 7969 * Initialize reference clocks when the driver loads 7970 */ 7971void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 7972{ 7973 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 7974 ironlake_init_pch_refclk(dev_priv); 7975 else if 
(HAS_PCH_LPT(dev_priv)) 7976 lpt_init_pch_refclk(dev_priv); 7977} 7978 7979static void ironlake_set_pipeconf(struct drm_crtc *crtc) 7980{ 7981 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 7982 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7983 int pipe = intel_crtc->pipe; 7984 uint32_t val; 7985 7986 val = 0; 7987 7988 switch (intel_crtc->config->pipe_bpp) { 7989 case 18: 7990 val |= PIPECONF_6BPC; 7991 break; 7992 case 24: 7993 val |= PIPECONF_8BPC; 7994 break; 7995 case 30: 7996 val |= PIPECONF_10BPC; 7997 break; 7998 case 36: 7999 val |= PIPECONF_12BPC; 8000 break; 8001 default: 8002 /* Case prevented by intel_choose_pipe_bpp_dither. */ 8003 BUG(); 8004 } 8005 8006 if (intel_crtc->config->dither) 8007 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8008 8009 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8010 val |= PIPECONF_INTERLACED_ILK; 8011 else 8012 val |= PIPECONF_PROGRESSIVE; 8013 8014 if (intel_crtc->config->limited_color_range) 8015 val |= PIPECONF_COLOR_RANGE_SELECT; 8016 8017 I915_WRITE(PIPECONF(pipe), val); 8018 POSTING_READ(PIPECONF(pipe)); 8019} 8020 8021static void haswell_set_pipeconf(struct drm_crtc *crtc) 8022{ 8023 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8024 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8025 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 8026 u32 val = 0; 8027 8028 if (IS_HASWELL(dev_priv) && intel_crtc->config->dither) 8029 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8030 8031 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8032 val |= PIPECONF_INTERLACED_ILK; 8033 else 8034 val |= PIPECONF_PROGRESSIVE; 8035 8036 I915_WRITE(PIPECONF(cpu_transcoder), val); 8037 POSTING_READ(PIPECONF(cpu_transcoder)); 8038} 8039 8040static void haswell_set_pipemisc(struct drm_crtc *crtc) 8041{ 8042 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8043 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8044 8045 if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) { 8046 u32 val = 0; 8047 8048 switch (intel_crtc->config->pipe_bpp) { 8049 case 18: 8050 val |= PIPEMISC_DITHER_6_BPC; 8051 break; 8052 case 24: 8053 val |= PIPEMISC_DITHER_8_BPC; 8054 break; 8055 case 30: 8056 val |= PIPEMISC_DITHER_10_BPC; 8057 break; 8058 case 36: 8059 val |= PIPEMISC_DITHER_12_BPC; 8060 break; 8061 default: 8062 /* Case prevented by pipe_config_set_bpp. */ 8063 BUG(); 8064 } 8065 8066 if (intel_crtc->config->dither) 8067 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 8068 8069 I915_WRITE(PIPEMISC(intel_crtc->pipe), val); 8070 } 8071} 8072 8073int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 8074{ 8075 /* 8076 * Account for spread spectrum to avoid 8077 * oversubscribing the link. Max center spread 8078 * is 2.5%; use 5% for safety's sake. 
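 *
 * Worked example (mode numbers chosen for illustration): a 148500 kHz
 * 1920x1080@60 stream at 24 bpp on a 270000 kHz link gives
 * bps = 148500 * 24 * 21 / 20 = 3742200, so
 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes are required.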
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
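	 *
	 * (Sharing also presumes compatible PLL settings:
	 * intel_get_shared_dpll() only reuses a DPLL whose computed hw
	 * state matches exactly.)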
8153 */ 8154 if (INTEL_INFO(dev_priv)->num_pipes == 3 && 8155 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 8156 dpll |= DPLL_SDVO_HIGH_SPEED; 8157 8158 /* compute bitmask from p1 value */ 8159 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8160 /* also FPA1 */ 8161 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8162 8163 switch (crtc_state->dpll.p2) { 8164 case 5: 8165 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8166 break; 8167 case 7: 8168 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8169 break; 8170 case 10: 8171 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8172 break; 8173 case 14: 8174 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8175 break; 8176 } 8177 8178 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8179 intel_panel_use_ssc(dev_priv)) 8180 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8181 else 8182 dpll |= PLL_REF_INPUT_DREFCLK; 8183 8184 dpll |= DPLL_VCO_ENABLE; 8185 8186 crtc_state->dpll_hw_state.dpll = dpll; 8187 crtc_state->dpll_hw_state.fp0 = fp; 8188 crtc_state->dpll_hw_state.fp1 = fp2; 8189} 8190 8191static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, 8192 struct intel_crtc_state *crtc_state) 8193{ 8194 struct drm_device *dev = crtc->base.dev; 8195 struct drm_i915_private *dev_priv = to_i915(dev); 8196 const struct intel_limit *limit; 8197 int refclk = 120000; 8198 8199 memset(&crtc_state->dpll_hw_state, 0, 8200 sizeof(crtc_state->dpll_hw_state)); 8201 8202 crtc->lowfreq_avail = false; 8203 8204 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 8205 if (!crtc_state->has_pch_encoder) 8206 return 0; 8207 8208 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8209 if (intel_panel_use_ssc(dev_priv)) { 8210 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 8211 dev_priv->vbt.lvds_ssc_freq); 8212 refclk = dev_priv->vbt.lvds_ssc_freq; 8213 } 8214 8215 if (intel_is_dual_link_lvds(dev)) { 8216 if (refclk == 100000) 8217 limit = &intel_limits_ironlake_dual_lvds_100m; 8218 else 8219 limit = &intel_limits_ironlake_dual_lvds; 8220 } else { 8221 if (refclk == 100000) 8222 limit = &intel_limits_ironlake_single_lvds_100m; 8223 else 8224 limit = &intel_limits_ironlake_single_lvds; 8225 } 8226 } else { 8227 limit = &intel_limits_ironlake_dac; 8228 } 8229 8230 if (!crtc_state->clock_set && 8231 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8232 refclk, NULL, &crtc_state->dpll)) { 8233 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8234 return -EINVAL; 8235 } 8236 8237 ironlake_compute_dpll(crtc, crtc_state, NULL); 8238 8239 if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) { 8240 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 8241 pipe_name(crtc->pipe)); 8242 return -EINVAL; 8243 } 8244 8245 return 0; 8246} 8247 8248static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 8249 struct intel_link_m_n *m_n) 8250{ 8251 struct drm_device *dev = crtc->base.dev; 8252 struct drm_i915_private *dev_priv = to_i915(dev); 8253 enum pipe pipe = crtc->pipe; 8254 8255 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 8256 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 8257 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 8258 & ~TU_SIZE_MASK; 8259 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 8260 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 8261 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8262} 8263 8264static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 8265 enum transcoder transcoder, 8266 struct intel_link_m_n *m_n, 8267 struct intel_link_m_n *m2_n2) 8268{ 
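
	/*
	 * Rough summary (from the DP spec, not Bspec): the link M/N pair
	 * encodes the pixel-clock-to-link-clock ratio, the data ("gmch")
	 * M/N pair the payload-to-link-bandwidth ratio, and TU is the
	 * transfer unit size, programmed minus one in the DATA_M
	 * register, hence the "+ 1" on readback below.
	 */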
8269 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8270 enum pipe pipe = crtc->pipe; 8271 8272 if (INTEL_GEN(dev_priv) >= 5) { 8273 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); 8274 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); 8275 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 8276 & ~TU_SIZE_MASK; 8277 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 8278 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 8279 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8280 /* Read M2_N2 registers only for gen < 8 (M2_N2 available for 8281 * gen < 8) and if DRRS is supported (to make sure the 8282 * registers are not unnecessarily read). 8283 */ 8284 if (m2_n2 && INTEL_GEN(dev_priv) < 8 && 8285 crtc->config->has_drrs) { 8286 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 8287 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 8288 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 8289 & ~TU_SIZE_MASK; 8290 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 8291 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 8292 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8293 } 8294 } else { 8295 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 8296 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 8297 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 8298 & ~TU_SIZE_MASK; 8299 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 8300 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 8301 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8302 } 8303} 8304 8305void intel_dp_get_m_n(struct intel_crtc *crtc, 8306 struct intel_crtc_state *pipe_config) 8307{ 8308 if (pipe_config->has_pch_encoder) 8309 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 8310 else 8311 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 8312 &pipe_config->dp_m_n, 8313 &pipe_config->dp_m2_n2); 8314} 8315 8316static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 8317 struct intel_crtc_state *pipe_config) 8318{ 8319 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 8320 &pipe_config->fdi_m_n, NULL); 8321} 8322 8323static void skylake_get_pfit_config(struct intel_crtc *crtc, 8324 struct intel_crtc_state *pipe_config) 8325{ 8326 struct drm_device *dev = crtc->base.dev; 8327 struct drm_i915_private *dev_priv = to_i915(dev); 8328 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 8329 uint32_t ps_ctrl = 0; 8330 int id = -1; 8331 int i; 8332 8333 /* find scaler attached to this pipe */ 8334 for (i = 0; i < crtc->num_scalers; i++) { 8335 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 8336 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 8337 id = i; 8338 pipe_config->pch_pfit.enabled = true; 8339 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); 8340 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); 8341 break; 8342 } 8343 } 8344 8345 scaler_state->scaler_id = id; 8346 if (id >= 0) { 8347 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 8348 } else { 8349 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 8350 } 8351} 8352 8353static void 8354skylake_get_initial_plane_config(struct intel_crtc *crtc, 8355 struct intel_initial_plane_config *plane_config) 8356{ 8357 struct drm_device *dev = crtc->base.dev; 8358 struct drm_i915_private *dev_priv = to_i915(dev); 8359 u32 val, base, offset, stride_mult, tiling; 8360 int pipe = crtc->pipe; 8361 int fourcc, pixel_format; 8362 unsigned int aligned_height; 8363 struct drm_framebuffer *fb; 8364 struct intel_framebuffer *intel_fb; 8365 8366 intel_fb = 
kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->format = drm_format_info(fourcc);

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(fb, 0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}

static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiate them) so just WARN about this case for now.
*/ 8452 if (IS_GEN7(dev_priv)) { 8453 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 8454 PF_PIPE_SEL_IVB(crtc->pipe)); 8455 } 8456 } 8457} 8458 8459static void 8460ironlake_get_initial_plane_config(struct intel_crtc *crtc, 8461 struct intel_initial_plane_config *plane_config) 8462{ 8463 struct drm_device *dev = crtc->base.dev; 8464 struct drm_i915_private *dev_priv = to_i915(dev); 8465 u32 val, base, offset; 8466 int pipe = crtc->pipe; 8467 int fourcc, pixel_format; 8468 unsigned int aligned_height; 8469 struct drm_framebuffer *fb; 8470 struct intel_framebuffer *intel_fb; 8471 8472 val = I915_READ(DSPCNTR(pipe)); 8473 if (!(val & DISPLAY_PLANE_ENABLE)) 8474 return; 8475 8476 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8477 if (!intel_fb) { 8478 DRM_DEBUG_KMS("failed to alloc fb\n"); 8479 return; 8480 } 8481 8482 fb = &intel_fb->base; 8483 8484 fb->dev = dev; 8485 8486 if (INTEL_GEN(dev_priv) >= 4) { 8487 if (val & DISPPLANE_TILED) { 8488 plane_config->tiling = I915_TILING_X; 8489 fb->modifier = I915_FORMAT_MOD_X_TILED; 8490 } 8491 } 8492 8493 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 8494 fourcc = i9xx_format_to_fourcc(pixel_format); 8495 fb->format = drm_format_info(fourcc); 8496 8497 base = I915_READ(DSPSURF(pipe)) & 0xfffff000; 8498 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 8499 offset = I915_READ(DSPOFFSET(pipe)); 8500 } else { 8501 if (plane_config->tiling) 8502 offset = I915_READ(DSPTILEOFF(pipe)); 8503 else 8504 offset = I915_READ(DSPLINOFF(pipe)); 8505 } 8506 plane_config->base = base; 8507 8508 val = I915_READ(PIPESRC(pipe)); 8509 fb->width = ((val >> 16) & 0xfff) + 1; 8510 fb->height = ((val >> 0) & 0xfff) + 1; 8511 8512 val = I915_READ(DSPSTRIDE(pipe)); 8513 fb->pitches[0] = val & 0xffffffc0; 8514 8515 aligned_height = intel_fb_align_height(fb, 0, fb->height); 8516 8517 plane_config->size = fb->pitches[0] * aligned_height; 8518 8519 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 8520 pipe_name(pipe), fb->width, fb->height, 8521 fb->format->cpp[0] * 8, base, fb->pitches[0], 8522 plane_config->size); 8523 8524 plane_config->fb = intel_fb; 8525} 8526 8527static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 8528 struct intel_crtc_state *pipe_config) 8529{ 8530 struct drm_device *dev = crtc->base.dev; 8531 struct drm_i915_private *dev_priv = to_i915(dev); 8532 enum intel_display_power_domain power_domain; 8533 uint32_t tmp; 8534 bool ret; 8535 8536 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 8537 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 8538 return false; 8539 8540 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8541 pipe_config->shared_dpll = NULL; 8542 8543 ret = false; 8544 tmp = I915_READ(PIPECONF(crtc->pipe)); 8545 if (!(tmp & PIPECONF_ENABLE)) 8546 goto out; 8547 8548 switch (tmp & PIPECONF_BPC_MASK) { 8549 case PIPECONF_6BPC: 8550 pipe_config->pipe_bpp = 18; 8551 break; 8552 case PIPECONF_8BPC: 8553 pipe_config->pipe_bpp = 24; 8554 break; 8555 case PIPECONF_10BPC: 8556 pipe_config->pipe_bpp = 30; 8557 break; 8558 case PIPECONF_12BPC: 8559 pipe_config->pipe_bpp = 36; 8560 break; 8561 default: 8562 break; 8563 } 8564 8565 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 8566 pipe_config->limited_color_range = true; 8567 8568 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 8569 struct intel_shared_dpll *pll; 8570 enum intel_dpll_id pll_id; 8571 8572 pipe_config->has_pch_encoder = true; 8573 8574 tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); 8575 pipe_config->fdi_lanes = 
((FDI_DP_PORT_WIDTH_MASK & tmp) >>
			      FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
8654 */ 8655 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 8656} 8657 8658static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) 8659{ 8660 if (IS_HASWELL(dev_priv)) 8661 return I915_READ(D_COMP_HSW); 8662 else 8663 return I915_READ(D_COMP_BDW); 8664} 8665 8666static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) 8667{ 8668 if (IS_HASWELL(dev_priv)) { 8669 mutex_lock(&dev_priv->rps.hw_lock); 8670 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, 8671 val)) 8672 DRM_DEBUG_KMS("Failed to write to D_COMP\n"); 8673 mutex_unlock(&dev_priv->rps.hw_lock); 8674 } else { 8675 I915_WRITE(D_COMP_BDW, val); 8676 POSTING_READ(D_COMP_BDW); 8677 } 8678} 8679 8680/* 8681 * This function implements pieces of two sequences from BSpec: 8682 * - Sequence for display software to disable LCPLL 8683 * - Sequence for display software to allow package C8+ 8684 * The steps implemented here are just the steps that actually touch the LCPLL 8685 * register. Callers should take care of disabling all the display engine 8686 * functions, doing the mode unset, fixing interrupts, etc. 8687 */ 8688static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 8689 bool switch_to_fclk, bool allow_power_down) 8690{ 8691 uint32_t val; 8692 8693 assert_can_disable_lcpll(dev_priv); 8694 8695 val = I915_READ(LCPLL_CTL); 8696 8697 if (switch_to_fclk) { 8698 val |= LCPLL_CD_SOURCE_FCLK; 8699 I915_WRITE(LCPLL_CTL, val); 8700 8701 if (wait_for_us(I915_READ(LCPLL_CTL) & 8702 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 8703 DRM_ERROR("Switching to FCLK failed\n"); 8704 8705 val = I915_READ(LCPLL_CTL); 8706 } 8707 8708 val |= LCPLL_PLL_DISABLE; 8709 I915_WRITE(LCPLL_CTL, val); 8710 POSTING_READ(LCPLL_CTL); 8711 8712 if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1)) 8713 DRM_ERROR("LCPLL still locked\n"); 8714 8715 val = hsw_read_dcomp(dev_priv); 8716 val |= D_COMP_COMP_DISABLE; 8717 hsw_write_dcomp(dev_priv, val); 8718 ndelay(100); 8719 8720 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0, 8721 1)) 8722 DRM_ERROR("D_COMP RCOMP still in progress\n"); 8723 8724 if (allow_power_down) { 8725 val = I915_READ(LCPLL_CTL); 8726 val |= LCPLL_POWER_DOWN_ALLOW; 8727 I915_WRITE(LCPLL_CTL, val); 8728 POSTING_READ(LCPLL_CTL); 8729 } 8730} 8731 8732/* 8733 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 8734 * source. 8735 */ 8736static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 8737{ 8738 uint32_t val; 8739 8740 val = I915_READ(LCPLL_CTL); 8741 8742 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 8743 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 8744 return; 8745 8746 /* 8747 * Make sure we're not on PC8 state before disabling PC8, otherwise 8748 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 
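	 * Holding forcewake keeps the GT out of RC6, and, as the
	 * package-C8 comment below notes, we do not get into PC8+
	 * unless we are in RC6.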
8749 */ 8750 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 8751 8752 if (val & LCPLL_POWER_DOWN_ALLOW) { 8753 val &= ~LCPLL_POWER_DOWN_ALLOW; 8754 I915_WRITE(LCPLL_CTL, val); 8755 POSTING_READ(LCPLL_CTL); 8756 } 8757 8758 val = hsw_read_dcomp(dev_priv); 8759 val |= D_COMP_COMP_FORCE; 8760 val &= ~D_COMP_COMP_DISABLE; 8761 hsw_write_dcomp(dev_priv, val); 8762 8763 val = I915_READ(LCPLL_CTL); 8764 val &= ~LCPLL_PLL_DISABLE; 8765 I915_WRITE(LCPLL_CTL, val); 8766 8767 if (intel_wait_for_register(dev_priv, 8768 LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 8769 5)) 8770 DRM_ERROR("LCPLL not locked yet\n"); 8771 8772 if (val & LCPLL_CD_SOURCE_FCLK) { 8773 val = I915_READ(LCPLL_CTL); 8774 val &= ~LCPLL_CD_SOURCE_FCLK; 8775 I915_WRITE(LCPLL_CTL, val); 8776 8777 if (wait_for_us((I915_READ(LCPLL_CTL) & 8778 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 8779 DRM_ERROR("Switching back to LCPLL failed\n"); 8780 } 8781 8782 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 8783 intel_update_cdclk(dev_priv); 8784} 8785 8786/* 8787 * Package states C8 and deeper are really deep PC states that can only be 8788 * reached when all the devices on the system allow it, so even if the graphics 8789 * device allows PC8+, it doesn't mean the system will actually get to these 8790 * states. Our driver only allows PC8+ when going into runtime PM. 8791 * 8792 * The requirements for PC8+ are that all the outputs are disabled, the power 8793 * well is disabled and most interrupts are disabled, and these are also 8794 * requirements for runtime PM. When these conditions are met, we manually do 8795 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 8796 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard 8797 * hang the machine. 8798 * 8799 * When we really reach PC8 or deeper states (not just when we allow it) we lose 8800 * the state of some registers, so when we come back from PC8+ we need to 8801 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 8802 * need to take care of the registers kept by RC6. Notice that this happens even 8803 * if we don't put the device in PCI D3 state (which is what currently happens 8804 * because of the runtime PM support). 8805 * 8806 * For more, read "Display Sequences for Package C8" on the hardware 8807 * documentation. 
8808 */ 8809void hsw_enable_pc8(struct drm_i915_private *dev_priv) 8810{ 8811 uint32_t val; 8812 8813 DRM_DEBUG_KMS("Enabling package C8+\n"); 8814 8815 if (HAS_PCH_LPT_LP(dev_priv)) { 8816 val = I915_READ(SOUTH_DSPCLK_GATE_D); 8817 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 8818 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 8819 } 8820 8821 lpt_disable_clkout_dp(dev_priv); 8822 hsw_disable_lcpll(dev_priv, true, true); 8823} 8824 8825void hsw_disable_pc8(struct drm_i915_private *dev_priv) 8826{ 8827 uint32_t val; 8828 8829 DRM_DEBUG_KMS("Disabling package C8+\n"); 8830 8831 hsw_restore_lcpll(dev_priv); 8832 lpt_init_pch_refclk(dev_priv); 8833 8834 if (HAS_PCH_LPT_LP(dev_priv)) { 8835 val = I915_READ(SOUTH_DSPCLK_GATE_D); 8836 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 8837 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 8838 } 8839} 8840 8841static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 8842 struct intel_crtc_state *crtc_state) 8843{ 8844 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { 8845 struct intel_encoder *encoder = 8846 intel_ddi_get_crtc_new_encoder(crtc_state); 8847 8848 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) { 8849 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 8850 pipe_name(crtc->pipe)); 8851 return -EINVAL; 8852 } 8853 } 8854 8855 crtc->lowfreq_avail = false; 8856 8857 return 0; 8858} 8859 8860static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv, 8861 enum port port, 8862 struct intel_crtc_state *pipe_config) 8863{ 8864 enum intel_dpll_id id; 8865 u32 temp; 8866 8867 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); 8868 id = temp >> (port * 2); 8869 8870 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2)) 8871 return; 8872 8873 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 8874} 8875 8876static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 8877 enum port port, 8878 struct intel_crtc_state *pipe_config) 8879{ 8880 enum intel_dpll_id id; 8881 8882 switch (port) { 8883 case PORT_A: 8884 id = DPLL_ID_SKL_DPLL0; 8885 break; 8886 case PORT_B: 8887 id = DPLL_ID_SKL_DPLL1; 8888 break; 8889 case PORT_C: 8890 id = DPLL_ID_SKL_DPLL2; 8891 break; 8892 default: 8893 DRM_ERROR("Incorrect port type\n"); 8894 return; 8895 } 8896 8897 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 8898} 8899 8900static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, 8901 enum port port, 8902 struct intel_crtc_state *pipe_config) 8903{ 8904 enum intel_dpll_id id; 8905 u32 temp; 8906 8907 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 8908 id = temp >> (port * 3 + 1); 8909 8910 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3)) 8911 return; 8912 8913 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 8914} 8915 8916static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 8917 enum port port, 8918 struct intel_crtc_state *pipe_config) 8919{ 8920 enum intel_dpll_id id; 8921 uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 8922 8923 switch (ddi_pll_sel) { 8924 case PORT_CLK_SEL_WRPLL1: 8925 id = DPLL_ID_WRPLL1; 8926 break; 8927 case PORT_CLK_SEL_WRPLL2: 8928 id = DPLL_ID_WRPLL2; 8929 break; 8930 case PORT_CLK_SEL_SPLL: 8931 id = DPLL_ID_SPLL; 8932 break; 8933 case PORT_CLK_SEL_LCPLL_810: 8934 id = DPLL_ID_LCPLL_810; 8935 break; 8936 case PORT_CLK_SEL_LCPLL_1350: 8937 id = DPLL_ID_LCPLL_1350; 8938 break; 8939 case PORT_CLK_SEL_LCPLL_2700: 8940 id = DPLL_ID_LCPLL_2700; 8941 break; 8942 default: 8943 MISSING_CASE(ddi_pll_sel); 8944 /* fall 
through */ 8945 case PORT_CLK_SEL_NONE: 8946 return; 8947 } 8948 8949 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 8950} 8951 8952static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 8953 struct intel_crtc_state *pipe_config, 8954 u64 *power_domain_mask) 8955{ 8956 struct drm_device *dev = crtc->base.dev; 8957 struct drm_i915_private *dev_priv = to_i915(dev); 8958 enum intel_display_power_domain power_domain; 8959 u32 tmp; 8960 8961 /* 8962 * The pipe->transcoder mapping is fixed with the exception of the eDP 8963 * transcoder handled below. 8964 */ 8965 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8966 8967 /* 8968 * XXX: Do intel_display_power_get_if_enabled before reading this (for 8969 * consistency and less surprising code; it's in always on power). 8970 */ 8971 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 8972 if (tmp & TRANS_DDI_FUNC_ENABLE) { 8973 enum pipe trans_edp_pipe; 8974 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 8975 default: 8976 WARN(1, "unknown pipe linked to edp transcoder\n"); 8977 case TRANS_DDI_EDP_INPUT_A_ONOFF: 8978 case TRANS_DDI_EDP_INPUT_A_ON: 8979 trans_edp_pipe = PIPE_A; 8980 break; 8981 case TRANS_DDI_EDP_INPUT_B_ONOFF: 8982 trans_edp_pipe = PIPE_B; 8983 break; 8984 case TRANS_DDI_EDP_INPUT_C_ONOFF: 8985 trans_edp_pipe = PIPE_C; 8986 break; 8987 } 8988 8989 if (trans_edp_pipe == crtc->pipe) 8990 pipe_config->cpu_transcoder = TRANSCODER_EDP; 8991 } 8992 8993 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 8994 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 8995 return false; 8996 *power_domain_mask |= BIT_ULL(power_domain); 8997 8998 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 8999 9000 return tmp & PIPECONF_ENABLE; 9001} 9002 9003static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 9004 struct intel_crtc_state *pipe_config, 9005 u64 *power_domain_mask) 9006{ 9007 struct drm_device *dev = crtc->base.dev; 9008 struct drm_i915_private *dev_priv = to_i915(dev); 9009 enum intel_display_power_domain power_domain; 9010 enum port port; 9011 enum transcoder cpu_transcoder; 9012 u32 tmp; 9013 9014 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 9015 if (port == PORT_A) 9016 cpu_transcoder = TRANSCODER_DSI_A; 9017 else 9018 cpu_transcoder = TRANSCODER_DSI_C; 9019 9020 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 9021 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9022 continue; 9023 *power_domain_mask |= BIT_ULL(power_domain); 9024 9025 /* 9026 * The PLL needs to be enabled with a valid divider 9027 * configuration, otherwise accessing DSI registers will hang 9028 * the machine. See BSpec North Display Engine 9029 * registers/MIPI[BXT]. We can break out here early, since we 9030 * need the same DSI PLL to be enabled for both DSI ports. 
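		 * (This is why intel_dsi_pll_is_enabled() is tested below
		 * before the BXT_MIPI_PORT_CTRL/MIPI_CTRL reads are
		 * attempted.)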
9031 */ 9032 if (!intel_dsi_pll_is_enabled(dev_priv)) 9033 break; 9034 9035 /* XXX: this works for video mode only */ 9036 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port)); 9037 if (!(tmp & DPI_ENABLE)) 9038 continue; 9039 9040 tmp = I915_READ(MIPI_CTRL(port)); 9041 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 9042 continue; 9043 9044 pipe_config->cpu_transcoder = cpu_transcoder; 9045 break; 9046 } 9047 9048 return transcoder_is_dsi(pipe_config->cpu_transcoder); 9049} 9050 9051static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 9052 struct intel_crtc_state *pipe_config) 9053{ 9054 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9055 struct intel_shared_dpll *pll; 9056 enum port port; 9057 uint32_t tmp; 9058 9059 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 9060 9061 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 9062 9063 if (IS_CANNONLAKE(dev_priv)) 9064 cannonlake_get_ddi_pll(dev_priv, port, pipe_config); 9065 else if (IS_GEN9_BC(dev_priv)) 9066 skylake_get_ddi_pll(dev_priv, port, pipe_config); 9067 else if (IS_GEN9_LP(dev_priv)) 9068 bxt_get_ddi_pll(dev_priv, port, pipe_config); 9069 else 9070 haswell_get_ddi_pll(dev_priv, port, pipe_config); 9071 9072 pll = pipe_config->shared_dpll; 9073 if (pll) { 9074 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, 9075 &pipe_config->dpll_hw_state)); 9076 } 9077 9078 /* 9079 * Haswell has only FDI/PCH transcoder A. It is which is connected to 9080 * DDI E. So just check whether this pipe is wired to DDI E and whether 9081 * the PCH transcoder is on. 9082 */ 9083 if (INTEL_GEN(dev_priv) < 9 && 9084 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 9085 pipe_config->has_pch_encoder = true; 9086 9087 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 9088 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 9089 FDI_DP_PORT_WIDTH_SHIFT) + 1; 9090 9091 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9092 } 9093} 9094 9095static bool haswell_get_pipe_config(struct intel_crtc *crtc, 9096 struct intel_crtc_state *pipe_config) 9097{ 9098 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9099 enum intel_display_power_domain power_domain; 9100 u64 power_domain_mask; 9101 bool active; 9102 9103 if (INTEL_GEN(dev_priv) >= 9) { 9104 intel_crtc_init_scalers(crtc, pipe_config); 9105 9106 pipe_config->scaler_state.scaler_id = -1; 9107 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 9108 } 9109 9110 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9111 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9112 return false; 9113 power_domain_mask = BIT_ULL(power_domain); 9114 9115 pipe_config->shared_dpll = NULL; 9116 9117 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); 9118 9119 if (IS_GEN9_LP(dev_priv) && 9120 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) { 9121 WARN_ON(active); 9122 active = true; 9123 } 9124 9125 if (!active) 9126 goto out; 9127 9128 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 9129 haswell_get_ddi_port_state(crtc, pipe_config); 9130 intel_get_pipe_timings(crtc, pipe_config); 9131 } 9132 9133 intel_get_pipe_src_size(crtc, pipe_config); 9134 9135 pipe_config->gamma_mode = 9136 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; 9137 9138 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 9139 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 9140 power_domain_mask |= BIT_ULL(power_domain); 9141 if (INTEL_GEN(dev_priv) >= 9) 9142 
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev_priv))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}

static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 base;

	if (INTEL_INFO(dev_priv)->cursor_needs_physical)
		base = obj->phys_handle->busaddr;
	else
		base = intel_plane_ggtt_offset(plane_state);

	base += plane_state->main.offset;

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_180)
		base += (plane_state->base.crtc_h *
			 plane_state->base.crtc_w - 1) * fb->format->cpp[0];

	return base;
}

static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
{
	int x = plane_state->base.crtc_x;
	int y = plane_state->base.crtc_y;
	u32 pos = 0;

	if (x < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	return pos;
}

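/*
 * Worked example of the sign/magnitude encoding above (illustrative):
 * crtc_x = -16, crtc_y = 8 yields
 * pos = ((CURSOR_POS_SIGN | 16) << CURSOR_X_SHIFT) | (8 << CURSOR_Y_SHIFT),
 * i.e. the hardware is handed |x| and |y| plus per-axis sign bits rather
 * than two's complement coordinates.
 */
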
static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
{
	const struct drm_mode_config *config =
		&plane_state->base.plane->dev->mode_config;
	int width = plane_state->base.crtc_w;
	int height = plane_state->base.crtc_h;

	return width > 0 && width <= config->cursor_width &&
		height > 0 && height <= config->cursor_height;
}

static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x, src_y;
	u32 offset;
	int ret;

	ret = drm_plane_helper_check_state(&plane_state->base,
					   &plane_state->clip,
					   DRM_PLANE_HELPER_NO_SCALING,
					   DRM_PLANE_HELPER_NO_SCALING,
					   true, true);
	if (ret)
		return ret;

	if (!fb)
		return 0;

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_compute_tile_offset(&src_x, &src_y, plane_state, 0);

	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->main.offset = offset;

	return 0;
}

static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;

	return CURSOR_ENABLE |
		CURSOR_GAMMA_ENABLE |
		CURSOR_FORMAT_ARGB |
		CURSOR_STRIDE(fb->pitches[0]);
}

static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
{
	int width = plane_state->base.crtc_w;

	/*
	 * 845g/865g are only limited by the width of their cursors;
	 * the height is arbitrary up to the precision of the register.
	 */
	return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
}

static int i845_check_cursor(struct intel_plane *plane,
			     struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}

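/*
 * Illustration of how the i845 checks above line up: with ARGB8888 at
 * 4 bytes per pixel, the 64 pixel width alignment and the
 * 256/512/1024/2048 byte stride whitelist agree, e.g. a 64 pixel wide
 * cursor implies a 256 byte stride and a 512 pixel wide one 2048.
 */
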
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	POSTING_READ_FW(CURCNTR(PIPE_A));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void i845_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i845_update_cursor(plane, NULL, NULL);
}

static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	u32 cntl;

	cntl = MCURSOR_GAMMA_ENABLE;

	if (HAS_DDI(dev_priv))
		cntl |= CURSOR_PIPE_CSC_ENABLE;

	cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);

	switch (plane_state->base.crtc_w) {
	case 64:
		cntl |= CURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= CURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= CURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(plane_state->base.crtc_w);
		return 0;
	}

	if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
		cntl |= CURSOR_ROTATE_180;

	return cntl;
}

static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	int width = plane_state->base.crtc_w;
	int height = plane_state->base.crtc_h;

	if (!intel_cursor_size_ok(plane_state))
		return false;

	/* Cursor width is limited to a few power-of-two sizes */
	switch (width) {
	case 256:
	case 128:
	case 64:
		break;
	default:
		return false;
	}

	/*
	 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
	 * height from 8 lines up to the cursor width, when the
	 * cursor is not rotated. Everything else requires square
	 * cursors.
	 */
	if (HAS_CUR_FBC(dev_priv) &&
	    plane_state->base.rotation & DRM_MODE_ROTATE_0) {
		if (height < 8 || height > width)
			return false;
	} else {
		if (height != width)
			return false;
	}

	return true;
}

static int i9xx_check_cursor(struct intel_plane *plane,
			     struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}

static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl;

		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always start the full update
	 * with a CURCNTR write.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additionally
	 * a write to any of the cursor registers will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * CURCNTR and CUR_FBC_CTL are always
	 * armed by the CURBASE write only.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	POSTING_READ_FW(CURBASE(pipe));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void i9xx_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i9xx_update_cursor(plane, NULL, NULL);
}


/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
			 struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
	if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}

static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return PAGE_ALIGN(pitch * mode->vdisplay);
}

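/*
 * Worked example (illustrative): a 640x480 mode at 32 bpp gives
 * pitch = ALIGN(DIV_ROUND_UP(640 * 32, 8), 64) = 2560 bytes and
 * size = PAGE_ALIGN(2560 * 480) = 1228800 bytes (exactly 300 4K pages).
 */
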
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_object_create(to_i915(dev),
				     intel_framebuffer_size_for_mode(mode, bpp));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	if (IS_ERR(fb))
		i915_gem_object_put(obj);

	return fb;
}

static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->format->cpp[0] * 8))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	drm_framebuffer_reference(fb);
	return fb;
#else
	return NULL;
#endif
}

static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}

int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		ret = -ENODEV;
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		ret = PTR_ERR(fb);
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	if (ret == -EDEADLK)
		return ret;

	return false;
}

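/*
 * Typical usage (an illustrative sketch, not the only caller pattern):
 * a connector's ->detect() hook calls intel_get_load_detect_pipe() to
 * light up a free pipe with the VESA 640x480 mode above, probes the
 * hardware while the pipe is running, and then calls
 * intel_release_load_detect_pipe() to commit the saved restore_state.
 */
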
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}

static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev_priv))
		return 120000;
	else if (!IS_GEN2(dev_priv))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev_priv)) {
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				      "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and for the link M/N values it is simpler:
	 * pixel_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}

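/*
 * Worked example (illustrative values): with link_freq = 270000 (kHz)
 * and a link M/N ratio of 2/3 (link_m = 2, link_n = 3), the function
 * returns 270000 * 2 / 3 = 180000, i.e. a 180 MHz dotclock.
 */
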
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}

/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	kfree(pipe_config);

	return mode;
}

static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_flip_work *work;

	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->flip_work;
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (work) {
		cancel_work_sync(&work->mmio_work);
		cancel_work_sync(&work->unpin_work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_flip_work *work =
		container_of(__work, struct intel_flip_work, unpin_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	if (is_mmio_work(work))
		flush_work(&work->mmio_work);

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_vma(work->old_vma);
	i915_gem_object_put(work->pending_flip_obj);
	mutex_unlock(&dev->struct_mutex);

	i915_gem_request_put(work->flip_queued_req);

	intel_frontbuffer_flip_complete(to_i915(dev),
					to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}

/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	return !((a - b) & 0x80000000);
}

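/*
 * The unsigned subtraction above makes the comparison wraparound-safe:
 * e.g. for a = 0x00000001 and b = 0xffffffff, a - b == 2, the top bit
 * is clear, and 'a' is correctly treated as being after 'b'.
 */
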
static bool __pageflip_finished_cs(struct intel_crtc *crtc,
				   struct intel_flip_work *work)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (abort_flip_on_reset(crtc))
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->flip_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
					crtc->flip_work->flip_count);
}

static bool
__pageflip_finished_mmio(struct intel_crtc *crtc,
			 struct intel_flip_work *work)
{
	/*
	 * MMIO work completes when vblank is different from
	 * flip_queued_vblank.
	 *
	 * Reset counter value doesn't matter, this is handled by
	 * i915_wait_request finishing early, so no need to handle
	 * reset here.
	 */
	return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
}


static bool pageflip_finished(struct intel_crtc *crtc,
			      struct intel_flip_work *work)
{
	if (!atomic_read(&work->pending))
		return false;

	smp_rmb();

	if (is_mmio_work(work))
		return __pageflip_finished_mmio(crtc, work);
	else
		return __pageflip_finished_cs(crtc, work);
}

void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = crtc->flip_work;

	if (work != NULL &&
	    !is_mmio_work(work) &&
	    pageflip_finished(crtc, work))
		page_flip_completed(crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = crtc->flip_work;

	if (work != NULL &&
	    is_mmio_work(work) &&
	    pageflip_finished(crtc, work))
		page_flip_completed(crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
					       struct intel_flip_work *work)
{
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);

	/* Ensure that the work item is consistent when activating it ... */
	smp_mb__before_atomic();
	atomic_set(&work->pending, 1);
}

static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask, *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
	*cs++ = MI_NOOP;
	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = 0; /* aux display base address, unused */

	return 0;
}

static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask, *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
	*cs++ = MI_NOOP;
	*cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = MI_NOOP;

	return 0;
}

static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 pf, pipesrc, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset |
		intel_fb_modifier_to_tiling(fb->modifier);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ?
	 *	PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	*cs++ = pf | pipesrc;

	return 0;
}

static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 pf, pipesrc, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
	*cs++ = intel_crtc->flip_work->gtt_offset;

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	*cs++ = pf | pipesrc;

	return 0;
}

static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 *cs, plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	len = 4;
	if (req->engine->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM takes an extra dword to accommodate
		 * 48bit addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev_priv))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (req->engine->id == RCS) {
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(DERRMR);
		*cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
			  DERRMR_PIPEB_PRI_FLIP_DONE |
			  DERRMR_PIPEC_PRI_FLIP_DONE);
		if (IS_GEN8(dev_priv))
			*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
				MI_SRM_LRM_GLOBAL_GTT;
		else
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
		*cs++ = i915_mmio_reg_offset(DERRMR);
		*cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
		if (IS_GEN8(dev_priv)) {
			*cs++ = 0;
			*cs++ = MI_NOOP;
		}
	}

	*cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = MI_NOOP;

	return 0;
}

static bool use_mmio_flip(struct intel_engine_cs *engine,
			  struct drm_i915_gem_object *obj)
{
	/*
	 * This is not being used for older platforms, because
	 * non-availability of flip done interrupt forces us to use
	 * CS flips. Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */

	if (engine == NULL)
		return true;

	if (INTEL_GEN(engine->i915) < 5)
		return false;

	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;

	return engine != i915_gem_object_last_write_engine(obj);
}

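/*
 * Summary of the decision order above (restating the code, for
 * reference): no engine at all -> MMIO flip; gen < 5 -> CS flip;
 * i915.use_mmio_flip < 0 -> CS flip, > 0 -> MMIO flip; execlists ->
 * MMIO flip; otherwise flip via MMIO only if the last write to the
 * object came from a different engine than the one we would flip on.
 */
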
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride = skl_plane_stride(fb, 0, rotation);

	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}

static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	dspcntr = I915_READ(reg);

	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}

static void intel_mmio_flip_work_func(struct work_struct *w)
{
	struct intel_flip_work *work =
		container_of(w, struct intel_flip_work, mmio_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);

	intel_pipe_update_start(crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_do_mmio_flip(crtc, work->rotation, work);
	else
		/* use_mmio_flip() restricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc, work);
}

static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}

static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
				      struct intel_crtc *intel_crtc,
				      struct intel_flip_work *work)
{
	u32 addr, vblank;

	if (!atomic_read(&work->pending))
		return false;

	smp_rmb();

	vblank = intel_crtc_get_vblank_counter(intel_crtc);
	if (work->flip_ready_vblank == 0) {
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req))
			return false;

		work->flip_ready_vblank = vblank;
	}

	if (vblank - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}

void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;

	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	spin_lock(&dev->event_lock);
	work = crtc->flip_work;

	if (work != NULL && !is_mmio_work(work) &&
	    __pageflip_stall_check_cs(dev_priv, crtc, work)) {
		WARN_ONCE(1,
			  "Kicking stuck page flip: queued at %d, now %d\n",
			  work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
		page_flip_completed(crtc);
		work = NULL;
	}

	if (work != NULL && !is_mmio_work(work) &&
	    intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(work->flip_queued_req);
	spin_unlock(&dev->event_lock);
}

__maybe_unused
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum pipe pipe = intel_crtc->pipe;
	struct intel_flip_work *work;
	struct intel_engine_cs *engine;
	bool mmio_flip;
	struct drm_i915_gem_request *request;
	struct i915_vma *vma;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe. In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->format != crtc->primary->fb->format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these registers.
	 */
	if (INTEL_GEN(dev_priv) > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->unpin_work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->flip_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->flip_work = work;
	spin_unlock_irq(&dev->event_lock);

	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	work->pending_flip_obj = i915_gem_object_get(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
	if (i915_reset_backoff_or_wedged(&dev_priv->gpu_error)) {
		ret = -EIO;
		goto unlock;
	}

	atomic_inc(&intel_crtc->unpin_work_count);

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		engine = dev_priv->engine[BCS];
		if (fb->modifier != old_fb->modifier)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			engine = NULL;
	} else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		engine = dev_priv->engine[BCS];
	} else if (INTEL_GEN(dev_priv) >= 7) {
		engine = i915_gem_object_last_write_engine(obj);
		if (engine == NULL || engine->id != RCS)
			engine = dev_priv->engine[BCS];
	} else {
		engine = dev_priv->engine[RCS];
	}

	mmio_flip = use_mmio_flip(engine, obj);

	vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto cleanup_pending;
	}

	work->old_vma = to_intel_plane_state(primary->state)->vma;
	to_intel_plane_state(primary->state)->vma = vma;

	work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
	work->rotation = crtc->primary->state->rotation;

	/*
	 * There's the potential that the next frame will not be compatible with
	 * FBC, so we want to call pre_update() before the actual page flip.
	 * The problem is that pre_update() caches some information about the fb
	 * object, so we want to do this only after the object is pinned. Let's
	 * be on the safe side and do this immediately before scheduling the
	 * flip.
	 */
	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
			     to_intel_plane_state(primary->state));

	if (mmio_flip) {
		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
		queue_work(system_unbound_wq, &work->mmio_work);
	} else {
		request = i915_gem_request_alloc(engine,
						 dev_priv->kernel_context);
		if (IS_ERR(request)) {
			ret = PTR_ERR(request);
			goto cleanup_unpin;
		}

		ret = i915_gem_request_await_object(request, obj, false);
		if (ret)
			goto cleanup_request;

		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_request;

		intel_mark_page_flip_active(intel_crtc, work);

		work->flip_queued_req = i915_gem_request_get(request);
		i915_add_request(request);
	}

	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
	i915_gem_track_fb(intel_fb_obj(old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_prepare(to_i915(dev),
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_request:
	i915_add_request(request);
cleanup_unpin:
	to_intel_plane_state(primary->state)->vma = work->old_vma;
	intel_unpin_fb_vma(vma);
cleanup_pending:
	atomic_dec(&intel_crtc->unpin_work_count);
unlock:
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	i915_gem_object_put(obj);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

out_hang:
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = dev->mode_config.acquire_ctx;

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		drm_atomic_state_put(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}


/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @plane: drm plane
 * @state: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true or false.
 */
static bool intel_wm_need_update(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct intel_plane_state *new = to_intel_plane_state(state);
	struct intel_plane_state *cur = to_intel_plane_state(plane->state);

	/* Update watermarks on tiling or size changes. */
	if (new->base.visible != cur->base.visible)
		return true;

	if (!cur->base.fb || !new->base.fb)
		return false;

	if (cur->base.fb->modifier != new->base.fb->modifier ||
	    cur->base.rotation != new->base.rotation ||
	    drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
	    drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
	    drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
	    drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
		return true;

	return false;
}

static bool needs_scaling(struct intel_plane_state *state)
{
	int src_w = drm_rect_width(&state->base.src) >> 16;
	int src_h = drm_rect_height(&state->base.src) >> 16;
	int dst_w = drm_rect_width(&state->base.dst);
	int dst_h = drm_rect_height(&state->base.dst);

	return (src_w != dst_w || src_h != dst_h);
}

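/*
 * Note the >> 16 above: plane source rectangles are in 16.16 fixed
 * point, so e.g. a 1920x1080 source is stored as 1920<<16 by 1080<<16,
 * and scaling is needed whenever the integer source size differs from
 * the integer destination size.
 */
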
 */
	if (!is_crtc_enabled) {
		plane_state->visible = visible = false;
		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
	}

	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id, intel_crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
			pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(&plane->base, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
			/* FIXME bollocks */
			pipe_config->update_wm_pre = true;
			pipe_config->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		pipe_config->fb_bits |= plane->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * The flag set here is evaluated when we commit and program
	 * watermarks.
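	 *
	 * IVB must not use the LP watermarks while sprite scaling is
	 * enabled, hence disable_lp_wm is set below when sprite scaling
	 * is about to be turned on.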
	 */
	if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}

static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state. We can program these
		 * immediately.
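		 *
		 * In practice that usually means using the more conservative
		 * (higher) of the old and new values for each level, so the
		 * configuration is safe no matter which state the hardware
		 * is in mid-update.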
		 */
		ret = dev_priv->display.compute_intermediate_wm(dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = skl_check_pipe_max_pixel_rate(intel_crtc,
							    pipe_config);
		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	return ret;
}

static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};

static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->base.state->crtc)
			drm_connector_unreference(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			drm_connector_reference(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	const struct drm_display_info *info = &connector->base.display_info;
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
		      connector->base.base.id,
		      connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (info->bpc != 0 && info->bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, info->bpc * 3);
		pipe_config->pipe_bpp = info->bpc * 3;
	}

	/* Clamp pipe_bpp to 8 bpc (24 bpp) on screens without EDID 1.4 */
	if (info->bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}

static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	     IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (INTEL_GEN(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;

	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;
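
		/* each sink on this crtc may clamp pipe_bpp further */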
		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}

static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}

static inline void
intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
		      unsigned int lane_count, struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}

static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				      pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv,
				 &pipe_config->dpll_hw_state);

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}

static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
		unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: on DDI an unknown encoder still occupies a digital port */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << enc_to_mst(&encoder->base)->primary->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return true;
}

static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	struct intel_crtc_wm_state wm_state;
	bool force_thru;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}

static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive nor negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
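		 * output_types is a bitmask of 1 << INTEL_OUTPUT_*, i.e. one
		 * bit per output type driven by this crtc.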
		 */
		pipe_config->output_types |= 1 << encoder->type;
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass through bits correctly when it should, so
	 * only enable it on 6bpc panels and when it's not a compliance
	 * test requesting a 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}

static void
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	int i;

	/* Double check state. */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

		/*
		 * Update legacy state to satisfy fbc code. This can
		 * be removed when fbc uses the atomic state.
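		 * (crtc->primary->fb and crtc->x/y are the legacy, pre-atomic
		 * fields that the fbc code still reads.)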
		 */
		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
			struct drm_plane_state *plane_state = crtc->primary->state;

			crtc->primary->fb = plane_state->fb;
			crtc->x = plane_state->src_x >> 16;
			crtc->y = plane_state->src_y >> 16;
		}
	}
}

/*
 * Two clocks are considered equal if they differ by less than 5% of
 * their sum, i.e. by less than roughly 10% of their average. For
 * example, 100000 kHz vs 104000 kHz passes (diff 4000 < 0.05 * 204000).
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
		return true;

	return false;
}

/*
 * Compare two M/N ratios. In the inexact mode the smaller denominator
 * is scaled up by powers of two until it matches the other, then the
 * numerators are compared with the fuzzy clock check above.
 */
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	if (exact || !m || !n || !m2 || !n2)
		return false;

	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}

static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       struct intel_link_m_n *m2_n2,
		       bool adjust)
{
	if (m_n->tu == m2_n2->tu &&
	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
	    intel_compare_m_n(m_n->link_m, m_n->link_n,
			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
		if (adjust)
			*m2_n2 = *m_n;

		return true;
	}

	return false;
}

static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
	char *level;
	unsigned int category;
	struct va_format vaf;
	va_list args;

	if (adjust) {
		level = KERN_DEBUG;
		category = DRM_UT_KMS;
	} else {
		level = KERN_ERR;
		category = DRM_UT_NONE;
	}

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	drm_printk(level, category, "mismatch in %s %pV", name, &vaf);

	va_end(args);
}

static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config,
			  bool adjust)
{
	bool ret = true;

#define PIPE_CONF_CHECK_X(name) \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected 0x%08x, found 0x%08x)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_I(name) \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected %i, found %i)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_P(name) \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected %p, found %p)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_M_N(name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, \
				    adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected tu %i gmch %i/%i link %i/%i, " \
				"found tu %i, gmch %i/%i link %i/%i)\n", \
				current_config->name.tu, \
				current_config->name.gmch_m, \
				current_config->name.gmch_n, \
				current_config->name.link_m, \
				current_config->name.link_n, \
				pipe_config->name.tu, \
				pipe_config->name.gmch_m, \
				pipe_config->name.gmch_n, \
				pipe_config->name.link_m, \
				pipe_config->name.link_n); \
		ret = false; \
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
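 *
 * For example, the DP M/N values read back from the hardware are allowed
 * to match either the sw dp_m_n or the sw dp_m2_n2, since the same
 * registers serve both the high and the low refresh rate.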
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, adjust) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected tu %i gmch %i/%i link %i/%i, " \
				"or tu %i gmch %i/%i link %i/%i, " \
				"found tu %i, gmch %i/%i link %i/%i)\n", \
				current_config->name.tu, \
				current_config->name.gmch_m, \
				current_config->name.gmch_n, \
				current_config->name.link_m, \
				current_config->name.link_n, \
				current_config->alt_name.tu, \
				current_config->alt_name.gmch_m, \
				current_config->alt_name.gmch_n, \
				current_config->alt_name.link_m, \
				current_config->alt_name.link_n, \
				pipe_config->name.tu, \
				pipe_config->name.gmch_m, \
				pipe_config->name.gmch_n, \
				pipe_config->name.link_m, \
				pipe_config->name.link_n); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_FLAGS(name, mask) \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(%x) (expected %i, found %i)\n", \
				(mask), \
				current_config->name & (mask), \
				pipe_config->name & (mask)); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected %i, found %i)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_I(limited_color_range);

	PIPE_CONF_CHECK_I(hdmi_scrambling);
	PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_I(has_infoframe);

	PIPE_CONF_CHECK_I(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	if (!adjust) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_I(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
	}

	/* BDW+ don't expose a synchronous way to read the state */
	if (IS_HASWELL(dev_priv))
		PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(double_wide);

	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return ret;
}

static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
					   const struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder) {
		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
							    &pipe_config->fdi_m_n);
		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees.
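		 * (The FDI dotclock follows from the link frequency and the
		 * M/N values: dotclock = link_m * link_freq / link_n, see
		 * intel_dotclock_calculate().)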
		 */
		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
		     fdi_dotclock, dotclock);
	}
}

static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_pipe_wm hw_wm, *sw_wm;
	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
		return;

	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		hw_plane_wm = &hw_wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
		sw_ddb_entry = &sw_ddb->plane[pipe][plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated its ddb
	 * allocation. In that case, since the ddb allocation will be updated
	 * once the plane becomes visible, we could skip this check
	 * (currently it always runs).
	 */
	if (1) {
		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}
}

static void
verify_connector_state(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;

	for_each_new_connector_in_state(state, connector, new_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct drm_crtc_state *crtc_state = NULL;

		if (new_conn_state->crtc != crtc)
			continue;

		if (crtc)
			crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);

		intel_connector_verify_state(crtc_state, new_conn_state);

		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
				"connector's atomic encoder doesn't match legacy encoder\n");
	}
}

static void
verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state, *new_conn_state;
	int i;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false, found = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_oldnew_connector_in_state(state, connector, old_conn_state,
						   new_conn_state, i) {
			if (old_conn_state->best_encoder == &encoder->base)
				found = true;

			if (new_conn_state->best_encoder != &encoder->base)
				continue;
			found = enabled = true;

			I915_STATE_WARN(new_conn_state->crtc !=
					encoder->base.crtc,
					"connector's crtc doesn't match encoder crtc\n");
		}

		if (!found)
			continue;

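		/*
		 * 'enabled' reflects the new atomic state; it must agree
		 * with the legacy encoder->crtc link that the commit code
		 * keeps up to date.
		 */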
		I915_STATE_WARN(!!encoder->base.crtc != enabled,
				"encoder's enabled state mismatch "
				"(expected %i, found %i)\n",
				!!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
					"encoder detached but still enabled on pipe %c.\n",
					pipe_name(pipe));
		}
	}
}

static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
			"transitional active state does not match atomic hw state "
			"(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active) {
			pipe_config->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, pipe_config);
		}
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(new_crtc_state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}

static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->name);

	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);

	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
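		/*
		 * For PLLs that can actually be turned off, the sw on/off
		 * tracking, the per-crtc active mask and the hw state read
		 * back above must all agree.
		 */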
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = 1 << drm_crtc_index(crtc);

	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}

static void
verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
			 struct drm_crtc_state *old_crtc_state,
			 struct drm_crtc_state *new_crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);

	if (new_state->shared_dpll)
		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);

	if (old_state->shared_dpll &&
	    old_state->shared_dpll != new_state->shared_dpll) {
		unsigned crtc_mask = 1 << drm_crtc_index(crtc);
		struct intel_shared_dpll *pll = old_state->shared_dpll;

		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask)\n",
				pipe_name(drm_crtc_index(crtc)));
		I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
				"pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
				pipe_name(drm_crtc_index(crtc)));
	}
}

static void
intel_modeset_verify_crtc(struct drm_crtc *crtc,
			  struct drm_atomic_state *state,
			  struct drm_crtc_state *old_state,
			  struct drm_crtc_state *new_state)
{
	if (!needs_modeset(new_state) &&
	    !to_intel_crtc_state(new_state)->update_pipe)
		return;

	verify_wm_state(crtc, new_state);
	verify_connector_state(crtc->dev, state, crtc);
	verify_crtc_state(crtc, old_state, new_state);
	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
}

static void
verify_disabled_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++)
		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
}

static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	/* crtc == NULL: only the connectors not bound to any crtc */
	verify_connector_state(dev, state, NULL);
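	/* also cross-check every shared PLL's sw tracking against the hw */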
	verify_disabled_dpll_state(dev);
}

static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. I.e. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN2(dev_priv)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}

static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_shared_dpll *old_dpll =
			to_intel_crtc_state(old_crtc_state)->shared_dpll;

		if (!needs_modeset(new_crtc_state))
			continue;

		to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;

		if (!old_dpll)
			continue;

		intel_release_shared_dpll(old_dpll, intel_crtc, state);
	}
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
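 *
 * For example, if pipe A is already running and a commit enables pipe B,
 * pipe B records pipe A as its hsw_workaround_pipe; if both pipes come up
 * in the same commit, the second records the first. The commit code then
 * knows which pipe to wait on before enabling planes.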
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtcs that are going to be enabled during the modeset */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtcs are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

static int intel_lock_all_pipes(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;

	/* Add all pipes to the state */
	for_each_crtc(state->dev, crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	return 0;
}

static int intel_modeset_all_pipes(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;

	/*
	 * Add all pipes to the state, and force
	 * a modeset on all the active ones.
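	 * Forcing mode_changed on the active pipes turns e.g. a cdclk
	 * change into a full modeset of each of them, which guarantees
	 * they are off while the clock is reprogrammed.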
	 */
	for_each_crtc(state->dev, crtc) {
		struct drm_crtc_state *crtc_state;
		int ret;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->active || needs_modeset(crtc_state))
			continue;

		crtc_state->mode_changed = true;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}

static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must be protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware.
		 */
		if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical,
					       &intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual,
					       &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
	} else {
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}

/*
 * Handle calculation of various watermark data at the end of the atomic check
 * phase. The code here should be run after the per-crtc and per-plane 'check'
 * handlers to ensure that all derived state has been updated.
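 * (On SKL+, for example, this is where the global DDB allocation is
 * recomputed, which is why it has to see the final per-plane state.)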
 */
static int calc_watermark_data(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Is there platform-specific watermark information to calculate? */
	if (dev_priv->display.compute_global_watermarks)
		return dev_priv->display.compute_global_watermarks(state);

	return 0;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *crtc_state;
	int ret, i;
	bool any_ms = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		/* Catch I915_MODE_FLAG_INHERITED */
		if (crtc_state->mode.private_flags != old_crtc_state->mode.private_flags)
			crtc_state->mode_changed = true;

		if (!needs_modeset(crtc_state))
			continue;

		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		/* FIXME: For only active_changed we shouldn't need to do any
		 * state recomputation at all. */

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		if (i915.fastboot &&
		    intel_pipe_config_compare(dev_priv,
					      to_intel_crtc_state(old_crtc_state),
					      pipe_config, true)) {
			crtc_state->mode_changed = false;
			pipe_config->update_pipe = true;
		}

		if (needs_modeset(crtc_state))
			any_ms = true;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
12744 "[modeset]" : "[fastset]"); 12745 } 12746 12747 if (any_ms) { 12748 ret = intel_modeset_checks(state); 12749 12750 if (ret) 12751 return ret; 12752 } else { 12753 intel_state->cdclk.logical = dev_priv->cdclk.logical; 12754 } 12755 12756 ret = drm_atomic_helper_check_planes(dev, state); 12757 if (ret) 12758 return ret; 12759 12760 intel_fbc_choose_crtc(dev_priv, state); 12761 return calc_watermark_data(state); 12762} 12763 12764static int intel_atomic_prepare_commit(struct drm_device *dev, 12765 struct drm_atomic_state *state) 12766{ 12767 struct drm_i915_private *dev_priv = to_i915(dev); 12768 struct drm_crtc_state *crtc_state; 12769 struct drm_crtc *crtc; 12770 int i, ret; 12771 12772 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 12773 if (state->legacy_cursor_update) 12774 continue; 12775 12776 ret = intel_crtc_wait_for_pending_flips(crtc); 12777 if (ret) 12778 return ret; 12779 12780 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2) 12781 flush_workqueue(dev_priv->wq); 12782 } 12783 12784 ret = mutex_lock_interruptible(&dev->struct_mutex); 12785 if (ret) 12786 return ret; 12787 12788 ret = drm_atomic_helper_prepare_planes(dev, state); 12789 mutex_unlock(&dev->struct_mutex); 12790 12791 return ret; 12792} 12793 12794u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 12795{ 12796 struct drm_device *dev = crtc->base.dev; 12797 12798 if (!dev->max_vblank_count) 12799 return drm_accurate_vblank_count(&crtc->base); 12800 12801 return dev->driver->get_vblank_counter(dev, crtc->pipe); 12802} 12803 12804static void intel_atomic_wait_for_vblanks(struct drm_device *dev, 12805 struct drm_i915_private *dev_priv, 12806 unsigned crtc_mask) 12807{ 12808 unsigned last_vblank_count[I915_MAX_PIPES]; 12809 enum pipe pipe; 12810 int ret; 12811 12812 if (!crtc_mask) 12813 return; 12814 12815 for_each_pipe(dev_priv, pipe) { 12816 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, 12817 pipe); 12818 12819 if (!((1 << pipe) & crtc_mask)) 12820 continue; 12821 12822 ret = drm_crtc_vblank_get(&crtc->base); 12823 if (WARN_ON(ret != 0)) { 12824 crtc_mask &= ~(1 << pipe); 12825 continue; 12826 } 12827 12828 last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base); 12829 } 12830 12831 for_each_pipe(dev_priv, pipe) { 12832 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, 12833 pipe); 12834 long lret; 12835 12836 if (!((1 << pipe) & crtc_mask)) 12837 continue; 12838 12839 lret = wait_event_timeout(dev->vblank[pipe].queue, 12840 last_vblank_count[pipe] != 12841 drm_crtc_vblank_count(&crtc->base), 12842 msecs_to_jiffies(50)); 12843 12844 WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe)); 12845 12846 drm_crtc_vblank_put(&crtc->base); 12847 } 12848} 12849 12850static bool needs_vblank_wait(struct intel_crtc_state *crtc_state) 12851{ 12852 /* fb updated, need to unpin old fb */ 12853 if (crtc_state->fb_changed) 12854 return true; 12855 12856 /* wm changes, need vblank before final wm's */ 12857 if (crtc_state->update_wm_post) 12858 return true; 12859 12860 if (crtc_state->wm.need_postvbl_update) 12861 return true; 12862 12863 return false; 12864} 12865 12866static void intel_update_crtc(struct drm_crtc *crtc, 12867 struct drm_atomic_state *state, 12868 struct drm_crtc_state *old_crtc_state, 12869 struct drm_crtc_state *new_crtc_state, 12870 unsigned int *crtc_vblank_mask) 12871{ 12872 struct drm_device *dev = crtc->dev; 12873 struct drm_i915_private *dev_priv = to_i915(dev); 12874 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12875 struct 
intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
12876 bool modeset = needs_modeset(new_crtc_state);
12877
12878 if (modeset) {
12879 update_scanline_offset(intel_crtc);
12880 dev_priv->display.crtc_enable(pipe_config, state);
12881 } else {
12882 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
12883 pipe_config);
12884 }
12885
12886 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12887 intel_fbc_enable(
12888 intel_crtc, pipe_config,
12889 to_intel_plane_state(crtc->primary->state));
12890 }
12891
12892 drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
12893
12894 if (needs_vblank_wait(pipe_config))
12895 *crtc_vblank_mask |= drm_crtc_mask(crtc);
12896}
12897
12898static void intel_update_crtcs(struct drm_atomic_state *state,
12899 unsigned int *crtc_vblank_mask)
12900{
12901 struct drm_crtc *crtc;
12902 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12903 int i;
12904
12905 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12906 if (!new_crtc_state->active)
12907 continue;
12908
12909 intel_update_crtc(crtc, state, old_crtc_state,
12910 new_crtc_state, crtc_vblank_mask);
12911 }
12912}
12913
12914static void skl_update_crtcs(struct drm_atomic_state *state,
12915 unsigned int *crtc_vblank_mask)
12916{
12917 struct drm_i915_private *dev_priv = to_i915(state->dev);
12918 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12919 struct drm_crtc *crtc;
12920 struct intel_crtc *intel_crtc;
12921 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12922 struct intel_crtc_state *cstate;
12923 unsigned int updated = 0;
12924 bool progress;
12925 enum pipe pipe;
12926 int i;
12927
12928 const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
12929
12930 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
12931 /* ignore allocations for crtcs that have been turned off. */
12932 if (new_crtc_state->active)
12933 entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
12934
12935 /*
12936 * Whenever the number of active pipes changes, we need to make sure we
12937 * update the pipes in the right order so that their ddb allocations
12938 * never overlap with each other in between CRTC updates. Otherwise
12939 * we'll cause pipe underruns and other bad stuff.
12940 */
12941 do {
12942 progress = false;
12943
12944 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12945 bool vbl_wait = false;
12946 unsigned int cmask = drm_crtc_mask(crtc);
12947
12948 intel_crtc = to_intel_crtc(crtc);
12949 cstate = to_intel_crtc_state(crtc->state);
12950 pipe = intel_crtc->pipe;
12951
12952 if (updated & cmask || !cstate->base.active)
12953 continue;
12954
12955 if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
12956 continue;
12957
12958 updated |= cmask;
12959 entries[i] = &cstate->wm.skl.ddb;
12960
12961 /*
12962 * If this is an already active pipe, its DDB has changed,
12963 * and this isn't the last pipe that needs updating
12964 * then we need to wait for a vblank to pass for the
12965 * new ddb allocation to take effect.
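 *
 * (Illustrative example: shrinking pipe A from, say, DDB blocks
 * 0-511 to 0-255 so that pipe B can take over 256-511 - the
 * hardware keeps fetching from the old allocation until the next
 * vblank, so B may only be updated once A's smaller allocation
 * is live.)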
12966 */
12967 if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
12968 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
12969 !new_crtc_state->active_changed &&
12970 intel_state->wm_results.dirty_pipes != updated)
12971 vbl_wait = true;
12972
12973 intel_update_crtc(crtc, state, old_crtc_state,
12974 new_crtc_state, crtc_vblank_mask);
12975
12976 if (vbl_wait)
12977 intel_wait_for_vblank(dev_priv, pipe);
12978
12979 progress = true;
12980 }
12981 } while (progress);
12982}
12983
12984static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
12985{
12986 struct intel_atomic_state *state, *next;
12987 struct llist_node *freed;
12988
12989 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
12990 llist_for_each_entry_safe(state, next, freed, freed)
12991 drm_atomic_state_put(&state->base);
12992}
12993
12994static void intel_atomic_helper_free_state_worker(struct work_struct *work)
12995{
12996 struct drm_i915_private *dev_priv =
12997 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
12998
12999 intel_atomic_helper_free_state(dev_priv);
13000}
13001
13002static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13003{
13004 struct drm_device *dev = state->dev;
13005 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13006 struct drm_i915_private *dev_priv = to_i915(dev);
13007 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
13008 struct drm_crtc *crtc;
13009 struct intel_crtc_state *intel_cstate;
13010 bool hw_check = intel_state->modeset;
13011 u64 put_domains[I915_MAX_PIPES] = {};
13012 unsigned crtc_vblank_mask = 0;
13013 int i;
13014
13015 drm_atomic_helper_wait_for_dependencies(state);
13016
13017 if (intel_state->modeset)
13018 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13019
13020 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13021 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13022
13023 if (needs_modeset(new_crtc_state) ||
13024 to_intel_crtc_state(new_crtc_state)->update_pipe) {
13025 hw_check = true;
13026
13027 put_domains[to_intel_crtc(crtc)->pipe] =
13028 modeset_get_crtc_power_domains(crtc,
13029 to_intel_crtc_state(new_crtc_state));
13030 }
13031
13032 if (!needs_modeset(new_crtc_state))
13033 continue;
13034
13035 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
13036 to_intel_crtc_state(new_crtc_state));
13037
13038 if (old_crtc_state->active) {
13039 intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
13040 dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
13041 intel_crtc->active = false;
13042 intel_fbc_disable(intel_crtc);
13043 intel_disable_shared_dpll(intel_crtc);
13044
13045 /*
13046 * Underruns don't always raise
13047 * interrupts, so check manually.
13048 */
13049 intel_check_cpu_fifo_underruns(dev_priv);
13050 intel_check_pch_fifo_underruns(dev_priv);
13051
13052 if (!crtc->state->active) {
13053 /*
13054 * Make sure we don't call initial_watermarks
13055 * for ILK-style watermark updates.
13056 *
13057 * No clue what this is supposed to achieve.
13058 */
13059 if (INTEL_GEN(dev_priv) >= 9)
13060 dev_priv->display.initial_watermarks(intel_state,
13061 to_intel_crtc_state(crtc->state));
13062 }
13063 }
13064 }
13065
13066 /* Only after disabling all output pipelines that will be changed can we
13067 * update the output configuration.
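 * Otherwise the new configuration could be latched while encoders,
 * PLLs and the cdclk are still owned by the outgoing state.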
*/
13068 intel_modeset_update_crtc_state(state);
13069
13070 if (intel_state->modeset) {
13071 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13072
13073 intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);
13074
13075 /*
13076 * SKL workaround: bspec recommends we disable the SAGV when we
13077 * have more than one pipe enabled
13078 */
13079 if (!intel_can_enable_sagv(state))
13080 intel_disable_sagv(dev_priv);
13081
13082 intel_modeset_verify_disabled(dev, state);
13083 }
13084
13085 /* Complete the events for pipes that have now been disabled */
13086 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13087 bool modeset = needs_modeset(new_crtc_state);
13088
13089 /* Complete events for now disabled pipes here. */
13090 if (modeset && !new_crtc_state->active && new_crtc_state->event) {
13091 spin_lock_irq(&dev->event_lock);
13092 drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
13093 spin_unlock_irq(&dev->event_lock);
13094
13095 new_crtc_state->event = NULL;
13096 }
13097 }
13098
13099 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
13100 dev_priv->display.update_crtcs(state, &crtc_vblank_mask);
13101
13102 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13103 * already, but still need the state for the delayed optimization. To
13104 * fix this:
13105 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13106 * - schedule that vblank worker _before_ calling hw_done
13107 * - at the start of commit_tail, cancel it _synchronously
13108 * - switch over to the vblank wait helper in the core after that since
13109 * we don't need our special handling any more.
13110 */
13111 if (!state->legacy_cursor_update)
13112 intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
13113
13114 /*
13115 * Now that the vblank has passed, we can go ahead and program the
13116 * optimal watermarks on platforms that need two-step watermark
13117 * programming.
13118 *
13119 * TODO: Move this (and other cleanup) to an async worker eventually.
13120 */
13121 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13122 intel_cstate = to_intel_crtc_state(new_crtc_state);
13123
13124 if (dev_priv->display.optimize_watermarks)
13125 dev_priv->display.optimize_watermarks(intel_state,
13126 intel_cstate);
13127 }
13128
13129 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13130 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13131
13132 if (put_domains[i])
13133 modeset_put_power_domains(dev_priv, put_domains[i]);
13134
13135 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
13136 }
13137
13138 if (intel_state->modeset && intel_can_enable_sagv(state))
13139 intel_enable_sagv(dev_priv);
13140
13141 drm_atomic_helper_commit_hw_done(state);
13142
13143 if (intel_state->modeset) {
13144 /* As one of the primary mmio accessors, KMS has a high
13145 * likelihood of triggering bugs in unclaimed access. After we
13146 * finish modesetting, see if an error has been flagged, and if
13147 * so enable debugging for the next modeset - and hope we catch
13148 * the culprit.
13149 */ 13150 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 13151 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); 13152 } 13153 13154 mutex_lock(&dev->struct_mutex); 13155 drm_atomic_helper_cleanup_planes(dev, state); 13156 mutex_unlock(&dev->struct_mutex); 13157 13158 drm_atomic_helper_commit_cleanup_done(state); 13159 13160 drm_atomic_state_put(state); 13161 13162 intel_atomic_helper_free_state(dev_priv); 13163} 13164 13165static void intel_atomic_commit_work(struct work_struct *work) 13166{ 13167 struct drm_atomic_state *state = 13168 container_of(work, struct drm_atomic_state, commit_work); 13169 13170 intel_atomic_commit_tail(state); 13171} 13172 13173static int __i915_sw_fence_call 13174intel_atomic_commit_ready(struct i915_sw_fence *fence, 13175 enum i915_sw_fence_notify notify) 13176{ 13177 struct intel_atomic_state *state = 13178 container_of(fence, struct intel_atomic_state, commit_ready); 13179 13180 switch (notify) { 13181 case FENCE_COMPLETE: 13182 if (state->base.commit_work.func) 13183 queue_work(system_unbound_wq, &state->base.commit_work); 13184 break; 13185 13186 case FENCE_FREE: 13187 { 13188 struct intel_atomic_helper *helper = 13189 &to_i915(state->base.dev)->atomic_helper; 13190 13191 if (llist_add(&state->freed, &helper->free_list)) 13192 schedule_work(&helper->free_work); 13193 break; 13194 } 13195 } 13196 13197 return NOTIFY_DONE; 13198} 13199 13200static void intel_atomic_track_fbs(struct drm_atomic_state *state) 13201{ 13202 struct drm_plane_state *old_plane_state, *new_plane_state; 13203 struct drm_plane *plane; 13204 int i; 13205 13206 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) 13207 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb), 13208 intel_fb_obj(new_plane_state->fb), 13209 to_intel_plane(plane)->frontbuffer_bit); 13210} 13211 13212/** 13213 * intel_atomic_commit - commit validated state object 13214 * @dev: DRM device 13215 * @state: the top-level driver state object 13216 * @nonblock: nonblocking commit 13217 * 13218 * This function commits a top-level state object that has been validated 13219 * with drm_atomic_helper_check(). 13220 * 13221 * RETURNS 13222 * Zero for success or -errno. 13223 */ 13224static int intel_atomic_commit(struct drm_device *dev, 13225 struct drm_atomic_state *state, 13226 bool nonblock) 13227{ 13228 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13229 struct drm_i915_private *dev_priv = to_i915(dev); 13230 int ret = 0; 13231 13232 ret = drm_atomic_helper_setup_commit(state, nonblock); 13233 if (ret) 13234 return ret; 13235 13236 drm_atomic_state_get(state); 13237 i915_sw_fence_init(&intel_state->commit_ready, 13238 intel_atomic_commit_ready); 13239 13240 ret = intel_atomic_prepare_commit(dev, state); 13241 if (ret) { 13242 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); 13243 i915_sw_fence_commit(&intel_state->commit_ready); 13244 return ret; 13245 } 13246 13247 /* 13248 * The intel_legacy_cursor_update() fast path takes care 13249 * of avoiding the vblank waits for simple cursor 13250 * movement and flips. For cursor on/off and size changes, 13251 * we want to perform the vblank waits so that watermark 13252 * updates happen during the correct frames. Gen9+ have 13253 * double buffered watermarks and so shouldn't need this. 13254 * 13255 * Do this after drm_atomic_helper_setup_commit() and 13256 * intel_atomic_prepare_commit() because we still want 13257 * to skip the flip and fb cleanup waits. 
Although that
13258 * does risk yanking the mapping from under the display
13259 * engine.
13260 *
13261 * FIXME doing watermarks and fb cleanup from a vblank worker
13262 * (assuming we had any) would solve these problems.
13263 */
13264 if (INTEL_GEN(dev_priv) < 9)
13265 state->legacy_cursor_update = false;
13266
13267 drm_atomic_helper_swap_state(state, true);
13268 dev_priv->wm.distrust_bios_wm = false;
13269 intel_shared_dpll_swap_state(state);
13270 intel_atomic_track_fbs(state);
13271
13272 if (intel_state->modeset) {
13273 memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
13274 sizeof(intel_state->min_pixclk));
13275 dev_priv->active_crtcs = intel_state->active_crtcs;
13276 dev_priv->cdclk.logical = intel_state->cdclk.logical;
13277 dev_priv->cdclk.actual = intel_state->cdclk.actual;
13278 }
13279
13280 drm_atomic_state_get(state);
13281 INIT_WORK(&state->commit_work,
13282 nonblock ? intel_atomic_commit_work : NULL);
13283
13284 i915_sw_fence_commit(&intel_state->commit_ready);
13285 if (!nonblock) {
13286 i915_sw_fence_wait(&intel_state->commit_ready);
13287 intel_atomic_commit_tail(state);
13288 }
13289
13290 return 0;
13291}
13292
13293static const struct drm_crtc_funcs intel_crtc_funcs = {
13294 .gamma_set = drm_atomic_helper_legacy_gamma_set,
13295 .set_config = drm_atomic_helper_set_config,
13296 .set_property = drm_atomic_helper_crtc_set_property,
13297 .destroy = intel_crtc_destroy,
13298 .page_flip = drm_atomic_helper_page_flip,
13299 .atomic_duplicate_state = intel_crtc_duplicate_state,
13300 .atomic_destroy_state = intel_crtc_destroy_state,
13301 .set_crc_source = intel_crtc_set_crc_source,
13302};
13303
13304/**
13305 * intel_prepare_plane_fb - Prepare fb for usage on plane
13306 * @plane: drm plane to prepare for
13307 * @new_state: the plane state being prepared
13308 *
13309 * Prepares a framebuffer for usage on a display plane. Generally this
13310 * involves pinning the underlying object and updating the frontbuffer tracking
13311 * bits. Some older platforms need special physical address handling for
13312 * cursor planes.
13313 *
13314 * Must be called with struct_mutex held.
13315 *
13316 * Returns 0 on success, negative error code on failure.
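 *
 * Note that this function does not itself block on the various fences:
 * the waits are registered on the atomic state's commit_ready sw_fence,
 * so they are satisfied before the commit tail runs.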
13317 */
13318int
13319intel_prepare_plane_fb(struct drm_plane *plane,
13320 struct drm_plane_state *new_state)
13321{
13322 struct intel_atomic_state *intel_state =
13323 to_intel_atomic_state(new_state->state);
13324 struct drm_i915_private *dev_priv = to_i915(plane->dev);
13325 struct drm_framebuffer *fb = new_state->fb;
13326 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13327 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13328 int ret;
13329
13330 if (obj) {
13331 if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13332 INTEL_INFO(dev_priv)->cursor_needs_physical) {
13333 const int align = intel_cursor_alignment(dev_priv);
13334
13335 ret = i915_gem_object_attach_phys(obj, align);
13336 if (ret) {
13337 DRM_DEBUG_KMS("failed to attach phys object\n");
13338 return ret;
13339 }
13340 } else {
13341 struct i915_vma *vma;
13342
13343 vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
13344 if (IS_ERR(vma)) {
13345 DRM_DEBUG_KMS("failed to pin object\n");
13346 return PTR_ERR(vma);
13347 }
13348
13349 to_intel_plane_state(new_state)->vma = vma;
13350 }
13351 }
13352
13353 if (!obj && !old_obj)
13354 return 0;
13355
13356 if (old_obj) {
13357 struct drm_crtc_state *crtc_state =
13358 drm_atomic_get_existing_crtc_state(new_state->state,
13359 plane->state->crtc);
13360
13361 /* Big Hammer, we also need to ensure that any pending
13362 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13363 * current scanout is retired before unpinning the old
13364 * framebuffer. Note that we rely on userspace rendering
13365 * into the buffer attached to the pipe they are waiting
13366 * on. If not, userspace generates a GPU hang with IPEHR
13367 * pointing to the MI_WAIT_FOR_EVENT.
13368 *
13369 * This should only fail upon a hung GPU, in which case we
13370 * can safely continue.
13371 */
13372 if (needs_modeset(crtc_state)) {
13373 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13374 old_obj->resv, NULL,
13375 false, 0,
13376 GFP_KERNEL);
13377 if (ret < 0)
13378 return ret;
13379 }
13380 }
13381
13382 if (new_state->fence) { /* explicit fencing */
13383 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
13384 new_state->fence,
13385 I915_FENCE_TIMEOUT,
13386 GFP_KERNEL);
13387 if (ret < 0)
13388 return ret;
13389 }
13390
13391 if (!obj)
13392 return 0;
13393
13394 if (!new_state->fence) { /* implicit fencing */
13395 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13396 obj->resv, NULL,
13397 false, I915_FENCE_TIMEOUT,
13398 GFP_KERNEL);
13399 if (ret < 0)
13400 return ret;
13401
13402 i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
13403 }
13404
13405 return 0;
13406}
13407
13408/**
13409 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13410 * @plane: drm plane to clean up for
13411 * @old_state: the state from the previous commit
13412 *
13413 * Cleans up a framebuffer that has just been removed from a plane.
13414 *
13415 * Must be called with struct_mutex held.
13416 */
13417void
13418intel_cleanup_plane_fb(struct drm_plane *plane,
13419 struct drm_plane_state *old_state)
13420{
13421 struct i915_vma *vma;
13422
13423 /* Should only be called after a successful intel_prepare_plane_fb()!
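 * If preparation failed, the plane state never received a vma and
 * fetch_and_zero() below simply yields NULL, making this a no-op.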
*/
13424 vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
13425 if (vma)
13426 intel_unpin_fb_vma(vma);
13427}
13428
13429int
13430skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13431{
13432 struct drm_i915_private *dev_priv;
13433 int max_scale;
13434 int crtc_clock, max_dotclk;
13435
13436 if (!intel_crtc || !crtc_state->base.enable)
13437 return DRM_PLANE_HELPER_NO_SCALING;
13438
13439 dev_priv = to_i915(intel_crtc->base.dev);
13440
13441 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13442 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13443
13444 if (IS_GEMINILAKE(dev_priv))
13445 max_dotclk *= 2;
13446
13447 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
13448 return DRM_PLANE_HELPER_NO_SCALING;
13449
13450 /*
13451 * skl max scale is the lower of:
13452 * just under 3 (the -1 keeps it below 3.0 in 16.16 fixed point),
13453 * or
13454 * cdclk / crtc_clock
13455 */
13456 max_scale = min((1 << 16) * 3 - 1,
13457 (1 << 8) * ((max_dotclk << 8) / crtc_clock));
13458
13459 return max_scale;
13460}
13461
13462static int
13463intel_check_primary_plane(struct intel_plane *plane,
13464 struct intel_crtc_state *crtc_state,
13465 struct intel_plane_state *state)
13466{
13467 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13468 struct drm_crtc *crtc = state->base.crtc;
13469 int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13470 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13471 bool can_position = false;
13472 int ret;
13473
13474 if (INTEL_GEN(dev_priv) >= 9) {
13475 /* use scaler when colorkey is not required */
13476 if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13477 min_scale = 1;
13478 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13479 }
13480 can_position = true;
13481 }
13482
13483 ret = drm_plane_helper_check_state(&state->base,
13484 &state->clip,
13485 min_scale, max_scale,
13486 can_position, true);
13487 if (ret)
13488 return ret;
13489
13490 if (!state->base.fb)
13491 return 0;
13492
13493 if (INTEL_GEN(dev_priv) >= 9) {
13494 ret = skl_check_plane_surface(state);
13495 if (ret)
13496 return ret;
13497
13498 state->ctl = skl_plane_ctl(crtc_state, state);
13499 } else {
13500 ret = i9xx_check_plane_surface(state);
13501 if (ret)
13502 return ret;
13503
13504 state->ctl = i9xx_plane_ctl(crtc_state, state);
13505 }
13506
13507 return 0;
13508}
13509
13510static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13511 struct drm_crtc_state *old_crtc_state)
13512{
13513 struct drm_device *dev = crtc->dev;
13514 struct drm_i915_private *dev_priv = to_i915(dev);
13515 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13516 struct intel_crtc_state *intel_cstate =
13517 to_intel_crtc_state(crtc->state);
13518 struct intel_crtc_state *old_intel_cstate =
13519 to_intel_crtc_state(old_crtc_state);
13520 struct intel_atomic_state *old_intel_state =
13521 to_intel_atomic_state(old_crtc_state->state);
13522 bool modeset = needs_modeset(crtc->state);
13523
13524 if (!modeset &&
13525 (intel_cstate->base.color_mgmt_changed ||
13526 intel_cstate->update_pipe)) {
13527 intel_color_set_csc(crtc->state);
13528 intel_color_load_luts(crtc->state);
13529 }
13530
13531 /* Perform vblank evasion around commit operation */
13532 intel_pipe_update_start(intel_crtc);
13533
13534 if (modeset)
13535 goto out;
13536
13537 if (intel_cstate->update_pipe)
13538 intel_update_pipe_config(intel_crtc, old_intel_cstate);
13539 else if (INTEL_GEN(dev_priv) >= 9)
13540 skl_detach_scalers(intel_crtc);
13541
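 /*
 * The watermark write below (after the out label) still runs inside
 * the vblank-evasion window opened by intel_pipe_update_start()
 * above, also for full modesets.
 */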
13542out: 13543 if (dev_priv->display.atomic_update_watermarks) 13544 dev_priv->display.atomic_update_watermarks(old_intel_state, 13545 intel_cstate); 13546} 13547 13548static void intel_finish_crtc_commit(struct drm_crtc *crtc, 13549 struct drm_crtc_state *old_crtc_state) 13550{ 13551 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13552 13553 intel_pipe_update_end(intel_crtc, NULL); 13554} 13555 13556/** 13557 * intel_plane_destroy - destroy a plane 13558 * @plane: plane to destroy 13559 * 13560 * Common destruction function for all types of planes (primary, cursor, 13561 * sprite). 13562 */ 13563void intel_plane_destroy(struct drm_plane *plane) 13564{ 13565 drm_plane_cleanup(plane); 13566 kfree(to_intel_plane(plane)); 13567} 13568 13569const struct drm_plane_funcs intel_plane_funcs = { 13570 .update_plane = drm_atomic_helper_update_plane, 13571 .disable_plane = drm_atomic_helper_disable_plane, 13572 .destroy = intel_plane_destroy, 13573 .set_property = drm_atomic_helper_plane_set_property, 13574 .atomic_get_property = intel_plane_atomic_get_property, 13575 .atomic_set_property = intel_plane_atomic_set_property, 13576 .atomic_duplicate_state = intel_plane_duplicate_state, 13577 .atomic_destroy_state = intel_plane_destroy_state, 13578}; 13579 13580static int 13581intel_legacy_cursor_update(struct drm_plane *plane, 13582 struct drm_crtc *crtc, 13583 struct drm_framebuffer *fb, 13584 int crtc_x, int crtc_y, 13585 unsigned int crtc_w, unsigned int crtc_h, 13586 uint32_t src_x, uint32_t src_y, 13587 uint32_t src_w, uint32_t src_h, 13588 struct drm_modeset_acquire_ctx *ctx) 13589{ 13590 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 13591 int ret; 13592 struct drm_plane_state *old_plane_state, *new_plane_state; 13593 struct intel_plane *intel_plane = to_intel_plane(plane); 13594 struct drm_framebuffer *old_fb; 13595 struct drm_crtc_state *crtc_state = crtc->state; 13596 struct i915_vma *old_vma; 13597 13598 /* 13599 * When crtc is inactive or there is a modeset pending, 13600 * wait for it to complete in the slowpath 13601 */ 13602 if (!crtc_state->active || needs_modeset(crtc_state) || 13603 to_intel_crtc_state(crtc_state)->update_pipe) 13604 goto slow; 13605 13606 old_plane_state = plane->state; 13607 13608 /* 13609 * If any parameters change that may affect watermarks, 13610 * take the slowpath. Only changing fb or position should be 13611 * in the fastpath. 
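 * Concretely, the checks below drop to the slowpath whenever the
 * plane would move to another crtc, the source or on-screen size
 * would change, or the plane would gain or lose an fb; a pure
 * position change or a same-size fb swap stays on the fastpath.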
13612 */ 13613 if (old_plane_state->crtc != crtc || 13614 old_plane_state->src_w != src_w || 13615 old_plane_state->src_h != src_h || 13616 old_plane_state->crtc_w != crtc_w || 13617 old_plane_state->crtc_h != crtc_h || 13618 !old_plane_state->fb != !fb) 13619 goto slow; 13620 13621 new_plane_state = intel_plane_duplicate_state(plane); 13622 if (!new_plane_state) 13623 return -ENOMEM; 13624 13625 drm_atomic_set_fb_for_plane(new_plane_state, fb); 13626 13627 new_plane_state->src_x = src_x; 13628 new_plane_state->src_y = src_y; 13629 new_plane_state->src_w = src_w; 13630 new_plane_state->src_h = src_h; 13631 new_plane_state->crtc_x = crtc_x; 13632 new_plane_state->crtc_y = crtc_y; 13633 new_plane_state->crtc_w = crtc_w; 13634 new_plane_state->crtc_h = crtc_h; 13635 13636 ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state), 13637 to_intel_plane_state(new_plane_state)); 13638 if (ret) 13639 goto out_free; 13640 13641 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 13642 if (ret) 13643 goto out_free; 13644 13645 if (INTEL_INFO(dev_priv)->cursor_needs_physical) { 13646 int align = intel_cursor_alignment(dev_priv); 13647 13648 ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align); 13649 if (ret) { 13650 DRM_DEBUG_KMS("failed to attach phys object\n"); 13651 goto out_unlock; 13652 } 13653 } else { 13654 struct i915_vma *vma; 13655 13656 vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation); 13657 if (IS_ERR(vma)) { 13658 DRM_DEBUG_KMS("failed to pin object\n"); 13659 13660 ret = PTR_ERR(vma); 13661 goto out_unlock; 13662 } 13663 13664 to_intel_plane_state(new_plane_state)->vma = vma; 13665 } 13666 13667 old_fb = old_plane_state->fb; 13668 old_vma = to_intel_plane_state(old_plane_state)->vma; 13669 13670 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb), 13671 intel_plane->frontbuffer_bit); 13672 13673 /* Swap plane state */ 13674 new_plane_state->fence = old_plane_state->fence; 13675 *to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state); 13676 new_plane_state->fence = NULL; 13677 new_plane_state->fb = old_fb; 13678 to_intel_plane_state(new_plane_state)->vma = old_vma; 13679 13680 if (plane->state->visible) { 13681 trace_intel_update_plane(plane, to_intel_crtc(crtc)); 13682 intel_plane->update_plane(intel_plane, 13683 to_intel_crtc_state(crtc->state), 13684 to_intel_plane_state(plane->state)); 13685 } else { 13686 trace_intel_disable_plane(plane, to_intel_crtc(crtc)); 13687 intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc)); 13688 } 13689 13690 intel_cleanup_plane_fb(plane, new_plane_state); 13691 13692out_unlock: 13693 mutex_unlock(&dev_priv->drm.struct_mutex); 13694out_free: 13695 intel_plane_destroy_state(plane, new_plane_state); 13696 return ret; 13697 13698slow: 13699 return drm_atomic_helper_update_plane(plane, crtc, fb, 13700 crtc_x, crtc_y, crtc_w, crtc_h, 13701 src_x, src_y, src_w, src_h, ctx); 13702} 13703 13704static const struct drm_plane_funcs intel_cursor_plane_funcs = { 13705 .update_plane = intel_legacy_cursor_update, 13706 .disable_plane = drm_atomic_helper_disable_plane, 13707 .destroy = intel_plane_destroy, 13708 .set_property = drm_atomic_helper_plane_set_property, 13709 .atomic_get_property = intel_plane_atomic_get_property, 13710 .atomic_set_property = intel_plane_atomic_set_property, 13711 .atomic_duplicate_state = intel_plane_duplicate_state, 13712 .atomic_destroy_state = intel_plane_destroy_state, 13713}; 13714 13715static struct intel_plane * 13716intel_primary_plane_create(struct 
drm_i915_private *dev_priv, enum pipe pipe) 13717{ 13718 struct intel_plane *primary = NULL; 13719 struct intel_plane_state *state = NULL; 13720 const uint32_t *intel_primary_formats; 13721 unsigned int supported_rotations; 13722 unsigned int num_formats; 13723 int ret; 13724 13725 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 13726 if (!primary) { 13727 ret = -ENOMEM; 13728 goto fail; 13729 } 13730 13731 state = intel_create_plane_state(&primary->base); 13732 if (!state) { 13733 ret = -ENOMEM; 13734 goto fail; 13735 } 13736 13737 primary->base.state = &state->base; 13738 13739 primary->can_scale = false; 13740 primary->max_downscale = 1; 13741 if (INTEL_GEN(dev_priv) >= 9) { 13742 primary->can_scale = true; 13743 state->scaler_id = -1; 13744 } 13745 primary->pipe = pipe; 13746 /* 13747 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 13748 * port is hooked to pipe B. Hence we want plane A feeding pipe B. 13749 */ 13750 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) 13751 primary->plane = (enum plane) !pipe; 13752 else 13753 primary->plane = (enum plane) pipe; 13754 primary->id = PLANE_PRIMARY; 13755 primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); 13756 primary->check_plane = intel_check_primary_plane; 13757 13758 if (INTEL_GEN(dev_priv) >= 9) { 13759 intel_primary_formats = skl_primary_formats; 13760 num_formats = ARRAY_SIZE(skl_primary_formats); 13761 13762 primary->update_plane = skylake_update_primary_plane; 13763 primary->disable_plane = skylake_disable_primary_plane; 13764 } else if (INTEL_GEN(dev_priv) >= 4) { 13765 intel_primary_formats = i965_primary_formats; 13766 num_formats = ARRAY_SIZE(i965_primary_formats); 13767 13768 primary->update_plane = i9xx_update_primary_plane; 13769 primary->disable_plane = i9xx_disable_primary_plane; 13770 } else { 13771 intel_primary_formats = i8xx_primary_formats; 13772 num_formats = ARRAY_SIZE(i8xx_primary_formats); 13773 13774 primary->update_plane = i9xx_update_primary_plane; 13775 primary->disable_plane = i9xx_disable_primary_plane; 13776 } 13777 13778 if (INTEL_GEN(dev_priv) >= 9) 13779 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13780 0, &intel_plane_funcs, 13781 intel_primary_formats, num_formats, 13782 DRM_PLANE_TYPE_PRIMARY, 13783 "plane 1%c", pipe_name(pipe)); 13784 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 13785 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13786 0, &intel_plane_funcs, 13787 intel_primary_formats, num_formats, 13788 DRM_PLANE_TYPE_PRIMARY, 13789 "primary %c", pipe_name(pipe)); 13790 else 13791 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13792 0, &intel_plane_funcs, 13793 intel_primary_formats, num_formats, 13794 DRM_PLANE_TYPE_PRIMARY, 13795 "plane %c", plane_name(primary->plane)); 13796 if (ret) 13797 goto fail; 13798 13799 if (INTEL_GEN(dev_priv) >= 9) { 13800 supported_rotations = 13801 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | 13802 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; 13803 } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 13804 supported_rotations = 13805 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 13806 DRM_MODE_REFLECT_X; 13807 } else if (INTEL_GEN(dev_priv) >= 4) { 13808 supported_rotations = 13809 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 13810 } else { 13811 supported_rotations = DRM_MODE_ROTATE_0; 13812 } 13813 13814 if (INTEL_GEN(dev_priv) >= 4) 13815 drm_plane_create_rotation_property(&primary->base, 13816 DRM_MODE_ROTATE_0, 13817 supported_rotations); 13818 13819 
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs); 13820 13821 return primary; 13822 13823fail: 13824 kfree(state); 13825 kfree(primary); 13826 13827 return ERR_PTR(ret); 13828} 13829 13830static struct intel_plane * 13831intel_cursor_plane_create(struct drm_i915_private *dev_priv, 13832 enum pipe pipe) 13833{ 13834 struct intel_plane *cursor = NULL; 13835 struct intel_plane_state *state = NULL; 13836 int ret; 13837 13838 cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); 13839 if (!cursor) { 13840 ret = -ENOMEM; 13841 goto fail; 13842 } 13843 13844 state = intel_create_plane_state(&cursor->base); 13845 if (!state) { 13846 ret = -ENOMEM; 13847 goto fail; 13848 } 13849 13850 cursor->base.state = &state->base; 13851 13852 cursor->can_scale = false; 13853 cursor->max_downscale = 1; 13854 cursor->pipe = pipe; 13855 cursor->plane = pipe; 13856 cursor->id = PLANE_CURSOR; 13857 cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); 13858 13859 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 13860 cursor->update_plane = i845_update_cursor; 13861 cursor->disable_plane = i845_disable_cursor; 13862 cursor->check_plane = i845_check_cursor; 13863 } else { 13864 cursor->update_plane = i9xx_update_cursor; 13865 cursor->disable_plane = i9xx_disable_cursor; 13866 cursor->check_plane = i9xx_check_cursor; 13867 } 13868 13869 cursor->cursor.base = ~0; 13870 cursor->cursor.cntl = ~0; 13871 13872 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 13873 cursor->cursor.size = ~0; 13874 13875 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 13876 0, &intel_cursor_plane_funcs, 13877 intel_cursor_formats, 13878 ARRAY_SIZE(intel_cursor_formats), 13879 DRM_PLANE_TYPE_CURSOR, 13880 "cursor %c", pipe_name(pipe)); 13881 if (ret) 13882 goto fail; 13883 13884 if (INTEL_GEN(dev_priv) >= 4) 13885 drm_plane_create_rotation_property(&cursor->base, 13886 DRM_MODE_ROTATE_0, 13887 DRM_MODE_ROTATE_0 | 13888 DRM_MODE_ROTATE_180); 13889 13890 if (INTEL_GEN(dev_priv) >= 9) 13891 state->scaler_id = -1; 13892 13893 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 13894 13895 return cursor; 13896 13897fail: 13898 kfree(state); 13899 kfree(cursor); 13900 13901 return ERR_PTR(ret); 13902} 13903 13904static void intel_crtc_init_scalers(struct intel_crtc *crtc, 13905 struct intel_crtc_state *crtc_state) 13906{ 13907 struct intel_crtc_scaler_state *scaler_state = 13908 &crtc_state->scaler_state; 13909 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13910 int i; 13911 13912 crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe]; 13913 if (!crtc->num_scalers) 13914 return; 13915 13916 for (i = 0; i < crtc->num_scalers; i++) { 13917 struct intel_scaler *scaler = &scaler_state->scalers[i]; 13918 13919 scaler->in_use = 0; 13920 scaler->mode = PS_SCALER_MODE_DYN; 13921 } 13922 13923 scaler_state->scaler_id = -1; 13924} 13925 13926static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) 13927{ 13928 struct intel_crtc *intel_crtc; 13929 struct intel_crtc_state *crtc_state = NULL; 13930 struct intel_plane *primary = NULL; 13931 struct intel_plane *cursor = NULL; 13932 int sprite, ret; 13933 13934 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); 13935 if (!intel_crtc) 13936 return -ENOMEM; 13937 13938 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 13939 if (!crtc_state) { 13940 ret = -ENOMEM; 13941 goto fail; 13942 } 13943 intel_crtc->config = crtc_state; 13944 intel_crtc->base.state = &crtc_state->base; 13945 crtc_state->base.crtc = 
&intel_crtc->base; 13946 13947 primary = intel_primary_plane_create(dev_priv, pipe); 13948 if (IS_ERR(primary)) { 13949 ret = PTR_ERR(primary); 13950 goto fail; 13951 } 13952 intel_crtc->plane_ids_mask |= BIT(primary->id); 13953 13954 for_each_sprite(dev_priv, pipe, sprite) { 13955 struct intel_plane *plane; 13956 13957 plane = intel_sprite_plane_create(dev_priv, pipe, sprite); 13958 if (IS_ERR(plane)) { 13959 ret = PTR_ERR(plane); 13960 goto fail; 13961 } 13962 intel_crtc->plane_ids_mask |= BIT(plane->id); 13963 } 13964 13965 cursor = intel_cursor_plane_create(dev_priv, pipe); 13966 if (IS_ERR(cursor)) { 13967 ret = PTR_ERR(cursor); 13968 goto fail; 13969 } 13970 intel_crtc->plane_ids_mask |= BIT(cursor->id); 13971 13972 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, 13973 &primary->base, &cursor->base, 13974 &intel_crtc_funcs, 13975 "pipe %c", pipe_name(pipe)); 13976 if (ret) 13977 goto fail; 13978 13979 intel_crtc->pipe = pipe; 13980 intel_crtc->plane = primary->plane; 13981 13982 /* initialize shared scalers */ 13983 intel_crtc_init_scalers(intel_crtc, crtc_state); 13984 13985 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 13986 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 13987 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc; 13988 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc; 13989 13990 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 13991 13992 intel_color_init(&intel_crtc->base); 13993 13994 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 13995 13996 return 0; 13997 13998fail: 13999 /* 14000 * drm_mode_config_cleanup() will free up any 14001 * crtcs/planes already initialized. 14002 */ 14003 kfree(crtc_state); 14004 kfree(intel_crtc); 14005 14006 return ret; 14007} 14008 14009enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) 14010{ 14011 struct drm_device *dev = connector->base.dev; 14012 14013 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 14014 14015 if (!connector->base.state->crtc) 14016 return INVALID_PIPE; 14017 14018 return to_intel_crtc(connector->base.state->crtc)->pipe; 14019} 14020 14021int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 14022 struct drm_file *file) 14023{ 14024 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 14025 struct drm_crtc *drmmode_crtc; 14026 struct intel_crtc *crtc; 14027 14028 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 14029 if (!drmmode_crtc) 14030 return -ENOENT; 14031 14032 crtc = to_intel_crtc(drmmode_crtc); 14033 pipe_from_crtc_id->pipe = crtc->pipe; 14034 14035 return 0; 14036} 14037 14038static int intel_encoder_clones(struct intel_encoder *encoder) 14039{ 14040 struct drm_device *dev = encoder->base.dev; 14041 struct intel_encoder *source_encoder; 14042 int index_mask = 0; 14043 int entry = 0; 14044 14045 for_each_intel_encoder(dev, source_encoder) { 14046 if (encoders_cloneable(encoder, source_encoder)) 14047 index_mask |= (1 << entry); 14048 14049 entry++; 14050 } 14051 14052 return index_mask; 14053} 14054 14055static bool has_edp_a(struct drm_i915_private *dev_priv) 14056{ 14057 if (!IS_MOBILE(dev_priv)) 14058 return false; 14059 14060 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 14061 return false; 14062 14063 if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 14064 return false; 14065 14066 return true; 14067} 14068 14069static bool intel_crt_present(struct drm_i915_private *dev_priv) 14070{ 14071 if 
(INTEL_GEN(dev_priv) >= 9)
14072 return false;
14073
14074 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14075 return false;
14076
14077 if (IS_CHERRYVIEW(dev_priv))
14078 return false;
14079
14080 if (HAS_PCH_LPT_H(dev_priv) &&
14081 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14082 return false;
14083
14084 /* DDI E can't be used if DDI A requires 4 lanes */
14085 if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14086 return false;
14087
14088 if (!dev_priv->vbt.int_crt_support)
14089 return false;
14090
14091 return true;
14092}
14093
14094void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14095{
14096 int pps_num;
14097 int pps_idx;
14098
14099 if (HAS_DDI(dev_priv))
14100 return;
14101 /*
14102 * This w/a is needed at least on CPT/PPT, but to be sure apply it
14103 * everywhere where registers can be write protected.
14104 */
14105 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14106 pps_num = 2;
14107 else
14108 pps_num = 1;
14109
14110 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14111 u32 val = I915_READ(PP_CONTROL(pps_idx));
14112
14113 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14114 I915_WRITE(PP_CONTROL(pps_idx), val);
14115 }
14116}
14117
14118static void intel_pps_init(struct drm_i915_private *dev_priv)
14119{
14120 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
14121 dev_priv->pps_mmio_base = PCH_PPS_BASE;
14122 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14123 dev_priv->pps_mmio_base = VLV_PPS_BASE;
14124 else
14125 dev_priv->pps_mmio_base = PPS_BASE;
14126
14127 intel_pps_unlock_regs_wa(dev_priv);
14128}
14129
14130static void intel_setup_outputs(struct drm_i915_private *dev_priv)
14131{
14132 struct intel_encoder *encoder;
14133 bool dpd_is_edp = false;
14134
14135 intel_pps_init(dev_priv);
14136
14137 /*
14138 * intel_edp_init_connector() depends on this completing first, to
14139 * prevent the registration of both eDP and LVDS and the incorrect
14140 * sharing of the PPS.
14141 */
14142 intel_lvds_init(dev_priv);
14143
14144 if (intel_crt_present(dev_priv))
14145 intel_crt_init(dev_priv);
14146
14147 if (IS_GEN9_LP(dev_priv)) {
14148 /*
14149 * FIXME: Broxton doesn't support port detection via the
14150 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14151 * detect the ports.
14152 */
14153 intel_ddi_init(dev_priv, PORT_A);
14154 intel_ddi_init(dev_priv, PORT_B);
14155 intel_ddi_init(dev_priv, PORT_C);
14156
14157 intel_dsi_init(dev_priv);
14158 } else if (HAS_DDI(dev_priv)) {
14159 int found;
14160
14161 /*
14162 * Haswell uses DDI functions to detect digital outputs.
14163 * On SKL pre-D0 the strap isn't connected, so we assume
14164 * it's there.
14165 */
14166 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14167 /* WaIgnoreDDIAStrap: skl */
14168 if (found || IS_GEN9_BC(dev_priv))
14169 intel_ddi_init(dev_priv, PORT_A);
14170
14171 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
14172 * register */
14173 found = I915_READ(SFUSE_STRAP);
14174
14175 if (found & SFUSE_STRAP_DDIB_DETECTED)
14176 intel_ddi_init(dev_priv, PORT_B);
14177 if (found & SFUSE_STRAP_DDIC_DETECTED)
14178 intel_ddi_init(dev_priv, PORT_C);
14179 if (found & SFUSE_STRAP_DDID_DETECTED)
14180 intel_ddi_init(dev_priv, PORT_D);
14181 /*
14182 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
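 * Port E is therefore only registered when the VBT marks it as DP,
 * DVI or HDMI capable, as checked below.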
14183 */
14184 if (IS_GEN9_BC(dev_priv) &&
14185 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14186 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14187 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14188 intel_ddi_init(dev_priv, PORT_E);
14189
14190 } else if (HAS_PCH_SPLIT(dev_priv)) {
14191 int found;
14192 dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
14193
14194 if (has_edp_a(dev_priv))
14195 intel_dp_init(dev_priv, DP_A, PORT_A);
14196
14197 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14198 /* PCH SDVOB is multiplexed with HDMIB */
14199 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
14200 if (!found)
14201 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
14202 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14203 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
14204 }
14205
14206 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14207 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
14208
14209 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14210 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
14211
14212 if (I915_READ(PCH_DP_C) & DP_DETECTED)
14213 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
14214
14215 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14216 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
14217 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
14218 bool has_edp, has_port;
14219
14220 /*
14221 * The DP_DETECTED bit is the latched state of the DDC
14222 * SDA pin at boot. However since eDP doesn't require DDC
14223 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14224 * eDP ports may have been muxed to an alternate function.
14225 * Thus we can't rely on the DP_DETECTED bit alone to detect
14226 * eDP ports. Consult the VBT as well as DP_DETECTED to
14227 * detect eDP ports.
14228 *
14229 * Sadly the straps seem to be missing sometimes even for HDMI
14230 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
14231 * and VBT for the presence of the port. Additionally we can't
14232 * trust the port type the VBT declares as we've seen at least
14233 * HDMI ports that the VBT claims are DP or eDP.
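 *
 * In short: probe a port when either the strap or the VBT says it
 * is present, let the VBT eDP marking pick between eDP and DP
 * initialization, and only register HDMI when eDP has not claimed
 * the port.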
14234 */ 14235 has_edp = intel_dp_is_edp(dev_priv, PORT_B); 14236 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 14237 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) 14238 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); 14239 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 14240 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 14241 14242 has_edp = intel_dp_is_edp(dev_priv, PORT_C); 14243 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 14244 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) 14245 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); 14246 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 14247 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 14248 14249 if (IS_CHERRYVIEW(dev_priv)) { 14250 /* 14251 * eDP not supported on port D, 14252 * so no need to worry about it 14253 */ 14254 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 14255 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) 14256 intel_dp_init(dev_priv, CHV_DP_D, PORT_D); 14257 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) 14258 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 14259 } 14260 14261 intel_dsi_init(dev_priv); 14262 } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { 14263 bool found = false; 14264 14265 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14266 DRM_DEBUG_KMS("probing SDVOB\n"); 14267 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 14268 if (!found && IS_G4X(dev_priv)) { 14269 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 14270 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 14271 } 14272 14273 if (!found && IS_G4X(dev_priv)) 14274 intel_dp_init(dev_priv, DP_B, PORT_B); 14275 } 14276 14277 /* Before G4X SDVOC doesn't have its own detect register */ 14278 14279 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14280 DRM_DEBUG_KMS("probing SDVOC\n"); 14281 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 14282 } 14283 14284 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 14285 14286 if (IS_G4X(dev_priv)) { 14287 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 14288 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 14289 } 14290 if (IS_G4X(dev_priv)) 14291 intel_dp_init(dev_priv, DP_C, PORT_C); 14292 } 14293 14294 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) 14295 intel_dp_init(dev_priv, DP_D, PORT_D); 14296 } else if (IS_GEN2(dev_priv)) 14297 intel_dvo_init(dev_priv); 14298 14299 if (SUPPORTS_TV(dev_priv)) 14300 intel_tv_init(dev_priv); 14301 14302 intel_psr_init(dev_priv); 14303 14304 for_each_intel_encoder(&dev_priv->drm, encoder) { 14305 encoder->base.possible_crtcs = encoder->crtc_mask; 14306 encoder->base.possible_clones = 14307 intel_encoder_clones(encoder); 14308 } 14309 14310 intel_init_pch_refclk(dev_priv); 14311 14312 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 14313} 14314 14315static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 14316{ 14317 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14318 14319 drm_framebuffer_cleanup(fb); 14320 14321 i915_gem_object_lock(intel_fb->obj); 14322 WARN_ON(!intel_fb->obj->framebuffer_references--); 14323 i915_gem_object_unlock(intel_fb->obj); 14324 14325 i915_gem_object_put(intel_fb->obj); 14326 14327 kfree(intel_fb); 14328} 14329 14330static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 14331 struct drm_file *file, 14332 unsigned int *handle) 14333{ 14334 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14335 struct drm_i915_gem_object *obj = intel_fb->obj; 14336 14337 
if (obj->userptr.mm) {
14338 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14339 return -EINVAL;
14340 }
14341
14342 return drm_gem_handle_create(file, &obj->base, handle);
14343}
14344
14345static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14346 struct drm_file *file,
14347 unsigned flags, unsigned color,
14348 struct drm_clip_rect *clips,
14349 unsigned num_clips)
14350{
14351 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14352
14353 i915_gem_object_flush_if_display(obj);
14354 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14355
14356 return 0;
14357}
14358
14359static const struct drm_framebuffer_funcs intel_fb_funcs = {
14360 .destroy = intel_user_framebuffer_destroy,
14361 .create_handle = intel_user_framebuffer_create_handle,
14362 .dirty = intel_user_framebuffer_dirty,
14363};
14364
14365static
14366u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14367 uint64_t fb_modifier, uint32_t pixel_format)
14368{
14369 u32 gen = INTEL_GEN(dev_priv);
14370
14371 if (gen >= 9) {
14372 int cpp = drm_format_plane_cpp(pixel_format, 0);
14373
14374 /* "The stride in bytes must not exceed the size of 8K
14375 * pixels and 32K bytes."
14376 */
14377 return min(8192 * cpp, 32768);
14378 } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) {
14379 return 32*1024;
14380 } else if (gen >= 4) {
14381 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14382 return 16*1024;
14383 else
14384 return 32*1024;
14385 } else if (gen >= 3) {
14386 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14387 return 8*1024;
14388 else
14389 return 16*1024;
14390 } else {
14391 /* XXX DSPC is limited to 4k tiled */
14392 return 8*1024;
14393 }
14394}
14395
14396static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14397 struct drm_i915_gem_object *obj,
14398 struct drm_mode_fb_cmd2 *mode_cmd)
14399{
14400 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
14401 struct drm_format_name_buf format_name;
14402 u32 pitch_limit, stride_alignment;
14403 unsigned int tiling, stride;
14404 int ret = -EINVAL;
14405
14406 i915_gem_object_lock(obj);
14407 obj->framebuffer_references++;
14408 tiling = i915_gem_object_get_tiling(obj);
14409 stride = i915_gem_object_get_stride(obj);
14410 i915_gem_object_unlock(obj);
14411
14412 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14413 /*
14414 * If there's a fence, enforce that
14415 * the fb modifier and tiling mode match.
14416 */
14417 if (tiling != I915_TILING_NONE &&
14418 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14419 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
14420 goto err;
14421 }
14422 } else {
14423 if (tiling == I915_TILING_X) {
14424 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14425 } else if (tiling == I915_TILING_Y) {
14426 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
14427 goto err;
14428 }
14429 }
14430
14431 /* Passed-in modifier sanity checking. */
14432 switch (mode_cmd->modifier[0]) {
14433 case I915_FORMAT_MOD_Y_TILED:
14434 case I915_FORMAT_MOD_Yf_TILED:
14435 if (INTEL_GEN(dev_priv) < 9) {
14436 DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
14437 mode_cmd->modifier[0]);
14438 goto err;
14439 }
 /* fall through */
14440 case DRM_FORMAT_MOD_LINEAR:
14441 case I915_FORMAT_MOD_X_TILED:
14442 break;
14443 default:
14444 DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
14445 mode_cmd->modifier[0]);
14446 goto err;
14447 }
14448
14449 /*
14450 * gen2/3 display engine uses the fence if present,
14451 * so the tiling mode must match the fb modifier exactly.
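 * (E.g. an X-tiled object exposed with a linear modifier would have
 * the fence detiling the scanout behind the display engine's back.)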
14452 */ 14453 if (INTEL_INFO(dev_priv)->gen < 4 && 14454 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 14455 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n"); 14456 goto err; 14457 } 14458 14459 pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0], 14460 mode_cmd->pixel_format); 14461 if (mode_cmd->pitches[0] > pitch_limit) { 14462 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", 14463 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 14464 "tiled" : "linear", 14465 mode_cmd->pitches[0], pitch_limit); 14466 goto err; 14467 } 14468 14469 /* 14470 * If there's a fence, enforce that 14471 * the fb pitch and fence stride match. 14472 */ 14473 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { 14474 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n", 14475 mode_cmd->pitches[0], stride); 14476 goto err; 14477 } 14478 14479 /* Reject formats not supported by any plane early. */ 14480 switch (mode_cmd->pixel_format) { 14481 case DRM_FORMAT_C8: 14482 case DRM_FORMAT_RGB565: 14483 case DRM_FORMAT_XRGB8888: 14484 case DRM_FORMAT_ARGB8888: 14485 break; 14486 case DRM_FORMAT_XRGB1555: 14487 if (INTEL_GEN(dev_priv) > 3) { 14488 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14489 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14490 goto err; 14491 } 14492 break; 14493 case DRM_FORMAT_ABGR8888: 14494 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 14495 INTEL_GEN(dev_priv) < 9) { 14496 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14497 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14498 goto err; 14499 } 14500 break; 14501 case DRM_FORMAT_XBGR8888: 14502 case DRM_FORMAT_XRGB2101010: 14503 case DRM_FORMAT_XBGR2101010: 14504 if (INTEL_GEN(dev_priv) < 4) { 14505 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14506 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14507 goto err; 14508 } 14509 break; 14510 case DRM_FORMAT_ABGR2101010: 14511 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 14512 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14513 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14514 goto err; 14515 } 14516 break; 14517 case DRM_FORMAT_YUYV: 14518 case DRM_FORMAT_UYVY: 14519 case DRM_FORMAT_YVYU: 14520 case DRM_FORMAT_VYUY: 14521 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { 14522 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14523 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14524 goto err; 14525 } 14526 break; 14527 default: 14528 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14529 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14530 goto err; 14531 } 14532 14533 /* FIXME need to adjust LINOFF/TILEOFF accordingly. 
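 * Until then, any fb whose first plane does not start at offset 0 is
 * simply rejected below.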
*/ 14534 if (mode_cmd->offsets[0] != 0) 14535 goto err; 14536 14537 drm_helper_mode_fill_fb_struct(&dev_priv->drm, 14538 &intel_fb->base, mode_cmd); 14539 14540 stride_alignment = intel_fb_stride_alignment(&intel_fb->base, 0); 14541 if (mode_cmd->pitches[0] & (stride_alignment - 1)) { 14542 DRM_DEBUG_KMS("pitch (%d) must be at least %u byte aligned\n", 14543 mode_cmd->pitches[0], stride_alignment); 14544 goto err; 14545 } 14546 14547 intel_fb->obj = obj; 14548 14549 ret = intel_fill_fb_info(dev_priv, &intel_fb->base); 14550 if (ret) 14551 goto err; 14552 14553 ret = drm_framebuffer_init(obj->base.dev, 14554 &intel_fb->base, 14555 &intel_fb_funcs); 14556 if (ret) { 14557 DRM_ERROR("framebuffer init failed %d\n", ret); 14558 goto err; 14559 } 14560 14561 return 0; 14562 14563err: 14564 i915_gem_object_lock(obj); 14565 obj->framebuffer_references--; 14566 i915_gem_object_unlock(obj); 14567 return ret; 14568} 14569 14570static struct drm_framebuffer * 14571intel_user_framebuffer_create(struct drm_device *dev, 14572 struct drm_file *filp, 14573 const struct drm_mode_fb_cmd2 *user_mode_cmd) 14574{ 14575 struct drm_framebuffer *fb; 14576 struct drm_i915_gem_object *obj; 14577 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 14578 14579 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); 14580 if (!obj) 14581 return ERR_PTR(-ENOENT); 14582 14583 fb = intel_framebuffer_create(obj, &mode_cmd); 14584 if (IS_ERR(fb)) 14585 i915_gem_object_put(obj); 14586 14587 return fb; 14588} 14589 14590static void intel_atomic_state_free(struct drm_atomic_state *state) 14591{ 14592 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 14593 14594 drm_atomic_state_default_release(state); 14595 14596 i915_sw_fence_fini(&intel_state->commit_ready); 14597 14598 kfree(state); 14599} 14600 14601static const struct drm_mode_config_funcs intel_mode_funcs = { 14602 .fb_create = intel_user_framebuffer_create, 14603 .output_poll_changed = intel_fbdev_output_poll_changed, 14604 .atomic_check = intel_atomic_check, 14605 .atomic_commit = intel_atomic_commit, 14606 .atomic_state_alloc = intel_atomic_state_alloc, 14607 .atomic_state_clear = intel_atomic_state_clear, 14608 .atomic_state_free = intel_atomic_state_free, 14609}; 14610 14611/** 14612 * intel_init_display_hooks - initialize the display modesetting hooks 14613 * @dev_priv: device private 14614 */ 14615void intel_init_display_hooks(struct drm_i915_private *dev_priv) 14616{ 14617 intel_init_cdclk_hooks(dev_priv); 14618 14619 if (INTEL_INFO(dev_priv)->gen >= 9) { 14620 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14621 dev_priv->display.get_initial_plane_config = 14622 skylake_get_initial_plane_config; 14623 dev_priv->display.crtc_compute_clock = 14624 haswell_crtc_compute_clock; 14625 dev_priv->display.crtc_enable = haswell_crtc_enable; 14626 dev_priv->display.crtc_disable = haswell_crtc_disable; 14627 } else if (HAS_DDI(dev_priv)) { 14628 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14629 dev_priv->display.get_initial_plane_config = 14630 ironlake_get_initial_plane_config; 14631 dev_priv->display.crtc_compute_clock = 14632 haswell_crtc_compute_clock; 14633 dev_priv->display.crtc_enable = haswell_crtc_enable; 14634 dev_priv->display.crtc_disable = haswell_crtc_disable; 14635 } else if (HAS_PCH_SPLIT(dev_priv)) { 14636 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 14637 dev_priv->display.get_initial_plane_config = 14638 ironlake_get_initial_plane_config; 14639 dev_priv->display.crtc_compute_clock = 
14640 ironlake_crtc_compute_clock; 14641 dev_priv->display.crtc_enable = ironlake_crtc_enable; 14642 dev_priv->display.crtc_disable = ironlake_crtc_disable; 14643 } else if (IS_CHERRYVIEW(dev_priv)) { 14644 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14645 dev_priv->display.get_initial_plane_config = 14646 i9xx_get_initial_plane_config; 14647 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; 14648 dev_priv->display.crtc_enable = valleyview_crtc_enable; 14649 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14650 } else if (IS_VALLEYVIEW(dev_priv)) { 14651 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14652 dev_priv->display.get_initial_plane_config = 14653 i9xx_get_initial_plane_config; 14654 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; 14655 dev_priv->display.crtc_enable = valleyview_crtc_enable; 14656 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14657 } else if (IS_G4X(dev_priv)) { 14658 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14659 dev_priv->display.get_initial_plane_config = 14660 i9xx_get_initial_plane_config; 14661 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; 14662 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14663 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14664 } else if (IS_PINEVIEW(dev_priv)) { 14665 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14666 dev_priv->display.get_initial_plane_config = 14667 i9xx_get_initial_plane_config; 14668 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 14669 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14670 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14671 } else if (!IS_GEN2(dev_priv)) { 14672 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14673 dev_priv->display.get_initial_plane_config = 14674 i9xx_get_initial_plane_config; 14675 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 14676 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14677 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14678 } else { 14679 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14680 dev_priv->display.get_initial_plane_config = 14681 i9xx_get_initial_plane_config; 14682 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; 14683 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14684 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14685 } 14686 14687 if (IS_GEN5(dev_priv)) { 14688 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 14689 } else if (IS_GEN6(dev_priv)) { 14690 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 14691 } else if (IS_IVYBRIDGE(dev_priv)) { 14692 /* FIXME: detect B0+ stepping and use auto training */ 14693 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 14694 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 14695 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 14696 } 14697 14698 if (dev_priv->info.gen >= 9) 14699 dev_priv->display.update_crtcs = skl_update_crtcs; 14700 else 14701 dev_priv->display.update_crtcs = intel_update_crtcs; 14702 14703 switch (INTEL_INFO(dev_priv)->gen) { 14704 case 2: 14705 dev_priv->display.queue_flip = intel_gen2_queue_flip; 14706 break; 14707 14708 case 3: 14709 dev_priv->display.queue_flip = intel_gen3_queue_flip; 14710 break; 14711 14712 case 4: 14713 case 5: 14714 dev_priv->display.queue_flip = intel_gen4_queue_flip; 14715 break; 14716 14717 case 6: 14718 dev_priv->display.queue_flip = intel_gen6_queue_flip; 14719 break; 14720 case 7: 14721 case 8: /* 
FIXME(BDW): Check that the gen8 RCS flip works. */ 14722 dev_priv->display.queue_flip = intel_gen7_queue_flip; 14723 break; 14724 case 9: 14725 /* Drop through - unsupported since execlist only. */ 14726 default: 14727 /* Default just returns -ENODEV to indicate unsupported */ 14728 dev_priv->display.queue_flip = intel_default_queue_flip; 14729 } 14730} 14731 14732/* 14733 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason 14734 */ 14735static void quirk_ssc_force_disable(struct drm_device *dev) 14736{ 14737 struct drm_i915_private *dev_priv = to_i915(dev); 14738 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 14739 DRM_INFO("applying lvds SSC disable quirk\n"); 14740} 14741 14742/* 14743 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight 14744 * brightness value 14745 */ 14746static void quirk_invert_brightness(struct drm_device *dev) 14747{ 14748 struct drm_i915_private *dev_priv = to_i915(dev); 14749 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; 14750 DRM_INFO("applying inverted panel brightness quirk\n"); 14751} 14752 14753/* Some VBT's incorrectly indicate no backlight is present */ 14754static void quirk_backlight_present(struct drm_device *dev) 14755{ 14756 struct drm_i915_private *dev_priv = to_i915(dev); 14757 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; 14758 DRM_INFO("applying backlight present quirk\n"); 14759} 14760 14761struct intel_quirk { 14762 int device; 14763 int subsystem_vendor; 14764 int subsystem_device; 14765 void (*hook)(struct drm_device *dev); 14766}; 14767 14768/* For systems that don't have a meaningful PCI subdevice/subvendor ID */ 14769struct intel_dmi_quirk { 14770 void (*hook)(struct drm_device *dev); 14771 const struct dmi_system_id (*dmi_id_list)[]; 14772}; 14773 14774static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) 14775{ 14776 DRM_INFO("Backlight polarity reversed on %s\n", id->ident); 14777 return 1; 14778} 14779 14780static const struct intel_dmi_quirk intel_dmi_quirks[] = { 14781 { 14782 .dmi_id_list = &(const struct dmi_system_id[]) { 14783 { 14784 .callback = intel_dmi_reverse_brightness, 14785 .ident = "NCR Corporation", 14786 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), 14787 DMI_MATCH(DMI_PRODUCT_NAME, ""), 14788 }, 14789 }, 14790 { } /* terminating entry */ 14791 }, 14792 .hook = quirk_invert_brightness, 14793 }, 14794}; 14795 14796static struct intel_quirk intel_quirks[] = { 14797 /* Lenovo U160 cannot use SSC on LVDS */ 14798 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 14799 14800 /* Sony Vaio Y cannot use SSC on LVDS */ 14801 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 14802 14803 /* Acer Aspire 5734Z must invert backlight brightness */ 14804 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 14805 14806 /* Acer/eMachines G725 */ 14807 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, 14808 14809 /* Acer/eMachines e725 */ 14810 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, 14811 14812 /* Acer/Packard Bell NCL20 */ 14813 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, 14814 14815 /* Acer Aspire 4736Z */ 14816 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 14817 14818 /* Acer Aspire 5336 */ 14819 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, 14820 14821 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ 14822 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, 14823 14824 /* Acer C720 Chromebook (Core i3 4005U) */ 14825 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, 14826 14827 /* Apple 
Macbook 2,1 (Core 2 T7400) */ 14828 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, 14829 14830 /* Apple Macbook 4,1 */ 14831 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, 14832 14833 /* Toshiba CB35 Chromebook (Celeron 2955U) */ 14834 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, 14835 14836 /* HP Chromebook 14 (Celeron 2955U) */ 14837 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, 14838 14839 /* Dell Chromebook 11 */ 14840 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, 14841 14842 /* Dell Chromebook 11 (2015 version) */ 14843 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present }, 14844}; 14845 14846static void intel_init_quirks(struct drm_device *dev) 14847{ 14848 struct pci_dev *d = dev->pdev; 14849 int i; 14850 14851 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { 14852 struct intel_quirk *q = &intel_quirks[i]; 14853 14854 if (d->device == q->device && 14855 (d->subsystem_vendor == q->subsystem_vendor || 14856 q->subsystem_vendor == PCI_ANY_ID) && 14857 (d->subsystem_device == q->subsystem_device || 14858 q->subsystem_device == PCI_ANY_ID)) 14859 q->hook(dev); 14860 } 14861 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { 14862 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) 14863 intel_dmi_quirks[i].hook(dev); 14864 } 14865} 14866 14867/* Disable the VGA plane that we never use */ 14868static void i915_disable_vga(struct drm_i915_private *dev_priv) 14869{ 14870 struct pci_dev *pdev = dev_priv->drm.pdev; 14871 u8 sr1; 14872 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); 14873 14874 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ 14875 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); 14876 outb(SR01, VGA_SR_INDEX); 14877 sr1 = inb(VGA_SR_DATA); 14878 outb(sr1 | 1<<5, VGA_SR_DATA); 14879 vga_put(pdev, VGA_RSRC_LEGACY_IO); 14880 udelay(300); 14881 14882 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 14883 POSTING_READ(vga_reg); 14884} 14885 14886void intel_modeset_init_hw(struct drm_device *dev) 14887{ 14888 struct drm_i915_private *dev_priv = to_i915(dev); 14889 14890 intel_update_cdclk(dev_priv); 14891 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw; 14892 14893 intel_init_clock_gating(dev_priv); 14894} 14895 14896/* 14897 * Calculate what we think the watermarks should be for the state we've read 14898 * out of the hardware and then immediately program those watermarks so that 14899 * we ensure the hardware settings match our internal state. 14900 * 14901 * We can calculate what we think WM's should be by creating a duplicate of the 14902 * current state (which was constructed during hardware readout) and running it 14903 * through the atomic check code to calculate new watermark values in the 14904 * state object. 14905 */ 14906static void sanitize_watermarks(struct drm_device *dev) 14907{ 14908 struct drm_i915_private *dev_priv = to_i915(dev); 14909 struct drm_atomic_state *state; 14910 struct intel_atomic_state *intel_state; 14911 struct drm_crtc *crtc; 14912 struct drm_crtc_state *cstate; 14913 struct drm_modeset_acquire_ctx ctx; 14914 int ret; 14915 int i; 14916 14917 /* Only supported on platforms that use atomic watermark design */ 14918 if (!dev_priv->display.optimize_watermarks) 14919 return; 14920 14921 /* 14922 * We need to hold connection_mutex before calling duplicate_state so 14923 * that the connector loop is protected. 
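	 * The loop below is the usual drm_modeset_acquire dance: on -EDEADLK
	 * every lock is dropped, we back off and retry until the whole lock
	 * set is held.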
14924 */ 14925 drm_modeset_acquire_init(&ctx, 0); 14926retry: 14927 ret = drm_modeset_lock_all_ctx(dev, &ctx); 14928 if (ret == -EDEADLK) { 14929 drm_modeset_backoff(&ctx); 14930 goto retry; 14931 } else if (WARN_ON(ret)) { 14932 goto fail; 14933 } 14934 14935 state = drm_atomic_helper_duplicate_state(dev, &ctx); 14936 if (WARN_ON(IS_ERR(state))) 14937 goto fail; 14938 14939 intel_state = to_intel_atomic_state(state); 14940 14941 /* 14942 * Hardware readout is the only time we don't want to calculate 14943 * intermediate watermarks (since we don't trust the current 14944 * watermarks). 14945 */ 14946 if (!HAS_GMCH_DISPLAY(dev_priv)) 14947 intel_state->skip_intermediate_wm = true; 14948 14949 ret = intel_atomic_check(dev, state); 14950 if (ret) { 14951 /* 14952 * If we fail here, it means that the hardware appears to be 14953 * programmed in a way that shouldn't be possible, given our 14954 * understanding of watermark requirements. This might mean a 14955 * mistake in the hardware readout code or a mistake in the 14956 * watermark calculations for a given platform. Raise a WARN 14957 * so that this is noticeable. 14958 * 14959 * If this actually happens, we'll have to just leave the 14960 * BIOS-programmed watermarks untouched and hope for the best. 14961 */ 14962 WARN(true, "Could not determine valid watermarks for inherited state\n"); 14963 goto put_state; 14964 } 14965 14966 /* Write calculated watermark values back */ 14967 for_each_new_crtc_in_state(state, crtc, cstate, i) { 14968 struct intel_crtc_state *cs = to_intel_crtc_state(cstate); 14969 14970 cs->wm.need_postvbl_update = true; 14971 dev_priv->display.optimize_watermarks(intel_state, cs); 14972 } 14973 14974put_state: 14975 drm_atomic_state_put(state); 14976fail: 14977 drm_modeset_drop_locks(&ctx); 14978 drm_modeset_acquire_fini(&ctx); 14979} 14980 14981int intel_modeset_init(struct drm_device *dev) 14982{ 14983 struct drm_i915_private *dev_priv = to_i915(dev); 14984 struct i915_ggtt *ggtt = &dev_priv->ggtt; 14985 enum pipe pipe; 14986 struct intel_crtc *crtc; 14987 14988 drm_mode_config_init(dev); 14989 14990 dev->mode_config.min_width = 0; 14991 dev->mode_config.min_height = 0; 14992 14993 dev->mode_config.preferred_depth = 24; 14994 dev->mode_config.prefer_shadow = 1; 14995 14996 dev->mode_config.allow_fb_modifiers = true; 14997 14998 dev->mode_config.funcs = &intel_mode_funcs; 14999 15000 init_llist_head(&dev_priv->atomic_helper.free_list); 15001 INIT_WORK(&dev_priv->atomic_helper.free_work, 15002 intel_atomic_helper_free_state_worker); 15003 15004 intel_init_quirks(dev); 15005 15006 intel_init_pm(dev_priv); 15007 15008 if (INTEL_INFO(dev_priv)->num_pipes == 0) 15009 return 0; 15010 15011 /* 15012 * There may be no VBT; and if the BIOS enabled SSC we can 15013 * just keep using it to avoid unnecessary flicker. Whereas if the 15014 * BIOS isn't using it, don't assume it will work even if the VBT 15015 * indicates as much. 15016 */ 15017 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 15018 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & 15019 DREF_SSC1_ENABLE); 15020 15021 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 15022 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", 15023 bios_lvds_use_ssc ? "en" : "dis", 15024 dev_priv->vbt.lvds_use_ssc ? 
"en" : "dis"); 15025 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 15026 } 15027 } 15028 15029 if (IS_GEN2(dev_priv)) { 15030 dev->mode_config.max_width = 2048; 15031 dev->mode_config.max_height = 2048; 15032 } else if (IS_GEN3(dev_priv)) { 15033 dev->mode_config.max_width = 4096; 15034 dev->mode_config.max_height = 4096; 15035 } else { 15036 dev->mode_config.max_width = 8192; 15037 dev->mode_config.max_height = 8192; 15038 } 15039 15040 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 15041 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512; 15042 dev->mode_config.cursor_height = 1023; 15043 } else if (IS_GEN2(dev_priv)) { 15044 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 15045 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 15046 } else { 15047 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH; 15048 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; 15049 } 15050 15051 dev->mode_config.fb_base = ggtt->mappable_base; 15052 15053 DRM_DEBUG_KMS("%d display pipe%s available.\n", 15054 INTEL_INFO(dev_priv)->num_pipes, 15055 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : ""); 15056 15057 for_each_pipe(dev_priv, pipe) { 15058 int ret; 15059 15060 ret = intel_crtc_init(dev_priv, pipe); 15061 if (ret) { 15062 drm_mode_config_cleanup(dev); 15063 return ret; 15064 } 15065 } 15066 15067 intel_shared_dpll_init(dev); 15068 15069 intel_update_czclk(dev_priv); 15070 intel_modeset_init_hw(dev); 15071 15072 if (dev_priv->max_cdclk_freq == 0) 15073 intel_update_max_cdclk(dev_priv); 15074 15075 /* Just disable it once at startup */ 15076 i915_disable_vga(dev_priv); 15077 intel_setup_outputs(dev_priv); 15078 15079 drm_modeset_lock_all(dev); 15080 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 15081 drm_modeset_unlock_all(dev); 15082 15083 for_each_intel_crtc(dev, crtc) { 15084 struct intel_initial_plane_config plane_config = {}; 15085 15086 if (!crtc->active) 15087 continue; 15088 15089 /* 15090 * Note that reserving the BIOS fb up front prevents us 15091 * from stuffing other stolen allocations like the ring 15092 * on top. This prevents some ugliness at boot time, and 15093 * can even allow for smooth boot transitions if the BIOS 15094 * fb is large enough for the active pipe configuration. 15095 */ 15096 dev_priv->display.get_initial_plane_config(crtc, 15097 &plane_config); 15098 15099 /* 15100 * If the fb is shared between multiple heads, we'll 15101 * just get the first one. 15102 */ 15103 intel_find_initial_plane_obj(crtc, &plane_config); 15104 } 15105 15106 /* 15107 * Make sure hardware watermarks really match the state we read out. 15108 * Note that we need to do this after reconstructing the BIOS fb's 15109 * since the watermark calculation done here will use pstate->fb. 
15110 */ 15111 if (!HAS_GMCH_DISPLAY(dev_priv)) 15112 sanitize_watermarks(dev); 15113 15114 return 0; 15115} 15116 15117void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) 15118{ 15119 /* 640x480@60Hz, ~25175 kHz */ 15120 struct dpll clock = { 15121 .m1 = 18, 15122 .m2 = 7, 15123 .p1 = 13, 15124 .p2 = 4, 15125 .n = 2, 15126 }; 15127 u32 dpll, fp; 15128 int i; 15129 15130 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154); 15131 15132 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 15133 pipe_name(pipe), clock.vco, clock.dot); 15134 15135 fp = i9xx_dpll_compute_fp(&clock); 15136 dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) | 15137 DPLL_VGA_MODE_DIS | 15138 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 15139 PLL_P2_DIVIDE_BY_4 | 15140 PLL_REF_INPUT_DREFCLK | 15141 DPLL_VCO_ENABLE; 15142 15143 I915_WRITE(FP0(pipe), fp); 15144 I915_WRITE(FP1(pipe), fp); 15145 15146 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); 15147 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); 15148 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); 15149 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); 15150 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); 15151 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); 15152 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); 15153 15154 /* 15155 * Apparently we need to have VGA mode enabled prior to changing 15156 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 15157 * dividers, even though the register value does change. 15158 */ 15159 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); 15160 I915_WRITE(DPLL(pipe), dpll); 15161 15162 /* Wait for the clocks to stabilize. */ 15163 POSTING_READ(DPLL(pipe)); 15164 udelay(150); 15165 15166 /* The pixel multiplier can only be updated once the 15167 * DPLL is enabled and the clocks are stable. 15168 * 15169 * So write it again. 
 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));
}

void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	assert_plane_disabled(dev_priv, PLANE_A);
	assert_plane_disabled(dev_priv, PLANE_B);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
		DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe));

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 val;

	if (INTEL_INFO(dev_priv)->num_pipes == 1)
		return true;

	val = I915_READ(DSPCNTR(!crtc->plane));

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}

static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}

static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
			       enum transcoder pch_transcoder)
{
	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
}

static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			trace_intel_disable_plane(&plane->base, crtc);
			plane->disable_plane(plane, crtc);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.
 */
	if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
			      crtc->base.base.id, crtc->base.name);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		crtc->base.primary->state->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base, ctx);
		crtc->plane = plane;
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_transcoder(dev_priv, (enum transcoder)crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}

static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions.
Or someplace else 15367 * in our code, like the register restore mess on resume. Clamp 15368 * things to off as a safer default. */ 15369 15370 connector->base.dpms = DRM_MODE_DPMS_OFF; 15371 connector->base.encoder = NULL; 15372 } 15373 /* Enabled encoders without active connectors will be fixed in 15374 * the crtc fixup. */ 15375} 15376 15377void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) 15378{ 15379 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); 15380 15381 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 15382 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 15383 i915_disable_vga(dev_priv); 15384 } 15385} 15386 15387void i915_redisable_vga(struct drm_i915_private *dev_priv) 15388{ 15389 /* This function can be called both from intel_modeset_setup_hw_state or 15390 * at a very early point in our resume sequence, where the power well 15391 * structures are not yet restored. Since this function is at a very 15392 * paranoid "someone might have enabled VGA while we were not looking" 15393 * level, just check if the power well is enabled instead of trying to 15394 * follow the "don't touch the power well if we don't need it" policy 15395 * the rest of the driver uses. */ 15396 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA)) 15397 return; 15398 15399 i915_redisable_vga_power_on(dev_priv); 15400 15401 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); 15402} 15403 15404static bool primary_get_hw_state(struct intel_plane *plane) 15405{ 15406 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 15407 15408 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; 15409} 15410 15411/* FIXME read out full plane state for all planes */ 15412static void readout_plane_state(struct intel_crtc *crtc) 15413{ 15414 struct intel_plane *primary = to_intel_plane(crtc->base.primary); 15415 bool visible; 15416 15417 visible = crtc->active && primary_get_hw_state(primary); 15418 15419 intel_set_plane_visible(to_intel_crtc_state(crtc->base.state), 15420 to_intel_plane_state(primary->base.state), 15421 visible); 15422} 15423 15424static void intel_modeset_readout_hw_state(struct drm_device *dev) 15425{ 15426 struct drm_i915_private *dev_priv = to_i915(dev); 15427 enum pipe pipe; 15428 struct intel_crtc *crtc; 15429 struct intel_encoder *encoder; 15430 struct intel_connector *connector; 15431 struct drm_connector_list_iter conn_iter; 15432 int i; 15433 15434 dev_priv->active_crtcs = 0; 15435 15436 for_each_intel_crtc(dev, crtc) { 15437 struct intel_crtc_state *crtc_state = 15438 to_intel_crtc_state(crtc->base.state); 15439 15440 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); 15441 memset(crtc_state, 0, sizeof(*crtc_state)); 15442 crtc_state->base.crtc = &crtc->base; 15443 15444 crtc_state->base.active = crtc_state->base.enable = 15445 dev_priv->display.get_pipe_config(crtc, crtc_state); 15446 15447 crtc->base.enabled = crtc_state->base.enable; 15448 crtc->active = crtc_state->base.active; 15449 15450 if (crtc_state->base.active) 15451 dev_priv->active_crtcs |= 1 << crtc->pipe; 15452 15453 readout_plane_state(crtc); 15454 15455 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", 15456 crtc->base.base.id, crtc->base.name, 15457 enableddisabled(crtc_state->base.active)); 15458 } 15459 15460 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 15461 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 15462 15463 pll->on = pll->funcs.get_hw_state(dev_priv, pll, 15464 &pll->state.hw_state); 15465 pll->state.crtc_mask = 0; 
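		/*
		 * Rebuild the crtc mask for this PLL from the crtc states we
		 * just read out: each active crtc whose state points at this
		 * PLL contributes its pipe bit.
		 */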
15466 for_each_intel_crtc(dev, crtc) { 15467 struct intel_crtc_state *crtc_state = 15468 to_intel_crtc_state(crtc->base.state); 15469 15470 if (crtc_state->base.active && 15471 crtc_state->shared_dpll == pll) 15472 pll->state.crtc_mask |= 1 << crtc->pipe; 15473 } 15474 pll->active_mask = pll->state.crtc_mask; 15475 15476 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 15477 pll->name, pll->state.crtc_mask, pll->on); 15478 } 15479 15480 for_each_intel_encoder(dev, encoder) { 15481 pipe = 0; 15482 15483 if (encoder->get_hw_state(encoder, &pipe)) { 15484 struct intel_crtc_state *crtc_state; 15485 15486 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15487 crtc_state = to_intel_crtc_state(crtc->base.state); 15488 15489 encoder->base.crtc = &crtc->base; 15490 crtc_state->output_types |= 1 << encoder->type; 15491 encoder->get_config(encoder, crtc_state); 15492 } else { 15493 encoder->base.crtc = NULL; 15494 } 15495 15496 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 15497 encoder->base.base.id, encoder->base.name, 15498 enableddisabled(encoder->base.crtc), 15499 pipe_name(pipe)); 15500 } 15501 15502 drm_connector_list_iter_begin(dev, &conn_iter); 15503 for_each_intel_connector_iter(connector, &conn_iter) { 15504 if (connector->get_hw_state(connector)) { 15505 connector->base.dpms = DRM_MODE_DPMS_ON; 15506 15507 encoder = connector->encoder; 15508 connector->base.encoder = &encoder->base; 15509 15510 if (encoder->base.crtc && 15511 encoder->base.crtc->state->active) { 15512 /* 15513 * This has to be done during hardware readout 15514 * because anything calling .crtc_disable may 15515 * rely on the connector_mask being accurate. 15516 */ 15517 encoder->base.crtc->state->connector_mask |= 15518 1 << drm_connector_index(&connector->base); 15519 encoder->base.crtc->state->encoder_mask |= 15520 1 << drm_encoder_index(&encoder->base); 15521 } 15522 15523 } else { 15524 connector->base.dpms = DRM_MODE_DPMS_OFF; 15525 connector->base.encoder = NULL; 15526 } 15527 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 15528 connector->base.base.id, connector->base.name, 15529 enableddisabled(connector->base.encoder)); 15530 } 15531 drm_connector_list_iter_end(&conn_iter); 15532 15533 for_each_intel_crtc(dev, crtc) { 15534 struct intel_crtc_state *crtc_state = 15535 to_intel_crtc_state(crtc->base.state); 15536 int pixclk = 0; 15537 15538 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 15539 if (crtc_state->base.active) { 15540 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state); 15541 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state); 15542 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); 15543 15544 /* 15545 * The initial mode needs to be set in order to keep 15546 * the atomic core happy. It wants a valid mode if the 15547 * crtc's enabled, so we do the above call. 15548 * 15549 * But we don't set all the derived state fully, hence 15550 * set a flag to indicate that a full recalculation is 15551 * needed on the next commit. 
 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) ||
			    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				pixclk = crtc_state->pixel_rate;
			else
				WARN_ON(dev_priv->display.modeset_calc_cdclk);

			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
			if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
				pixclk = DIV_ROUND_UP(pixclk * 100, 95);

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc);
		}

		dev_priv->min_pixclk[crtc->pipe] = pixclk;

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}

static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		u64 get_domains;
		enum intel_display_power_domain domain;

		if (!encoder->get_power_domains)
			continue;

		get_domains = encoder->get_power_domains(encoder);
		for_each_power_domain(domain, get_domains)
			intel_display_power_get(dev_priv, domain);
	}
}

/* Scan out the current hw modeset state
 * and sanitize it to the current state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out; now we need to sanitize this mess.
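	 * Sanitizing covers the encoders, the crtcs, the connector atomic
	 * state, any unused DPLLs, the watermark state and finally the
	 * power domain bookkeeping, in the order of the calls below.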
*/ 15612 get_encoder_power_domains(dev_priv); 15613 15614 for_each_intel_encoder(dev, encoder) { 15615 intel_sanitize_encoder(encoder); 15616 } 15617 15618 for_each_pipe(dev_priv, pipe) { 15619 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15620 15621 intel_sanitize_crtc(crtc, ctx); 15622 intel_dump_pipe_config(crtc, crtc->config, 15623 "[setup_hw_state]"); 15624 } 15625 15626 intel_modeset_update_connector_atomic_state(dev); 15627 15628 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 15629 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 15630 15631 if (!pll->on || pll->active_mask) 15632 continue; 15633 15634 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); 15635 15636 pll->funcs.disable(dev_priv, pll); 15637 pll->on = false; 15638 } 15639 15640 if (IS_G4X(dev_priv)) { 15641 g4x_wm_get_hw_state(dev); 15642 g4x_wm_sanitize(dev_priv); 15643 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15644 vlv_wm_get_hw_state(dev); 15645 vlv_wm_sanitize(dev_priv); 15646 } else if (IS_GEN9(dev_priv)) { 15647 skl_wm_get_hw_state(dev); 15648 } else if (HAS_PCH_SPLIT(dev_priv)) { 15649 ilk_wm_get_hw_state(dev); 15650 } 15651 15652 for_each_intel_crtc(dev, crtc) { 15653 u64 put_domains; 15654 15655 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config); 15656 if (WARN_ON(put_domains)) 15657 modeset_put_power_domains(dev_priv, put_domains); 15658 } 15659 intel_display_set_init_power(dev_priv, false); 15660 15661 intel_power_domains_verify_state(dev_priv); 15662 15663 intel_fbc_init_pipe_state(dev_priv); 15664} 15665 15666void intel_display_resume(struct drm_device *dev) 15667{ 15668 struct drm_i915_private *dev_priv = to_i915(dev); 15669 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 15670 struct drm_modeset_acquire_ctx ctx; 15671 int ret; 15672 15673 dev_priv->modeset_restore_state = NULL; 15674 if (state) 15675 state->acquire_ctx = &ctx; 15676 15677 drm_modeset_acquire_init(&ctx, 0); 15678 15679 while (1) { 15680 ret = drm_modeset_lock_all_ctx(dev, &ctx); 15681 if (ret != -EDEADLK) 15682 break; 15683 15684 drm_modeset_backoff(&ctx); 15685 } 15686 15687 if (!ret) 15688 ret = __intel_display_resume(dev, state, &ctx); 15689 15690 drm_modeset_drop_locks(&ctx); 15691 drm_modeset_acquire_fini(&ctx); 15692 15693 if (ret) 15694 DRM_ERROR("Restoring old state failed with %i\n", ret); 15695 if (state) 15696 drm_atomic_state_put(state); 15697} 15698 15699void intel_modeset_gem_init(struct drm_device *dev) 15700{ 15701 struct drm_i915_private *dev_priv = to_i915(dev); 15702 15703 intel_init_gt_powersave(dev_priv); 15704 15705 intel_setup_overlay(dev_priv); 15706} 15707 15708int intel_connector_register(struct drm_connector *connector) 15709{ 15710 struct intel_connector *intel_connector = to_intel_connector(connector); 15711 int ret; 15712 15713 ret = intel_backlight_device_register(intel_connector); 15714 if (ret) 15715 goto err; 15716 15717 return 0; 15718 15719err: 15720 return ret; 15721} 15722 15723void intel_connector_unregister(struct drm_connector *connector) 15724{ 15725 struct intel_connector *intel_connector = to_intel_connector(connector); 15726 15727 intel_backlight_device_unregister(intel_connector); 15728 intel_panel_destroy_backlight(connector); 15729} 15730 15731void intel_modeset_cleanup(struct drm_device *dev) 15732{ 15733 struct drm_i915_private *dev_priv = to_i915(dev); 15734 15735 flush_work(&dev_priv->atomic_helper.free_work); 15736 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); 15737 15738 
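	/* Take down GT power management while interrupts are still installed. */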
intel_disable_gt_powersave(dev_priv);

	/*
	 * Disable interrupts and polling first, to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	intel_teardown_gmbus(dev_priv);
}

void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * Set VGA decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
	unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};

struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on
= 15876 __intel_display_power_is_enabled(dev_priv, 15877 POWER_DOMAIN_PIPE(i)); 15878 if (!error->pipe[i].power_domain_on) 15879 continue; 15880 15881 error->cursor[i].control = I915_READ(CURCNTR(i)); 15882 error->cursor[i].position = I915_READ(CURPOS(i)); 15883 error->cursor[i].base = I915_READ(CURBASE(i)); 15884 15885 error->plane[i].control = I915_READ(DSPCNTR(i)); 15886 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 15887 if (INTEL_GEN(dev_priv) <= 3) { 15888 error->plane[i].size = I915_READ(DSPSIZE(i)); 15889 error->plane[i].pos = I915_READ(DSPPOS(i)); 15890 } 15891 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 15892 error->plane[i].addr = I915_READ(DSPADDR(i)); 15893 if (INTEL_GEN(dev_priv) >= 4) { 15894 error->plane[i].surface = I915_READ(DSPSURF(i)); 15895 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 15896 } 15897 15898 error->pipe[i].source = I915_READ(PIPESRC(i)); 15899 15900 if (HAS_GMCH_DISPLAY(dev_priv)) 15901 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 15902 } 15903 15904 /* Note: this does not include DSI transcoders. */ 15905 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes; 15906 if (HAS_DDI(dev_priv)) 15907 error->num_transcoders++; /* Account for eDP. */ 15908 15909 for (i = 0; i < error->num_transcoders; i++) { 15910 enum transcoder cpu_transcoder = transcoders[i]; 15911 15912 error->transcoder[i].power_domain_on = 15913 __intel_display_power_is_enabled(dev_priv, 15914 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 15915 if (!error->transcoder[i].power_domain_on) 15916 continue; 15917 15918 error->transcoder[i].cpu_transcoder = cpu_transcoder; 15919 15920 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 15921 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 15922 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 15923 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 15924 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 15925 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 15926 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 15927 } 15928 15929 return error; 15930} 15931 15932#define err_printf(e, ...) 
i915_error_printf(e, __VA_ARGS__) 15933 15934void 15935intel_display_print_error_state(struct drm_i915_error_state_buf *m, 15936 struct intel_display_error_state *error) 15937{ 15938 struct drm_i915_private *dev_priv = m->i915; 15939 int i; 15940 15941 if (!error) 15942 return; 15943 15944 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes); 15945 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 15946 err_printf(m, "PWR_WELL_CTL2: %08x\n", 15947 error->power_well_driver); 15948 for_each_pipe(dev_priv, i) { 15949 err_printf(m, "Pipe [%d]:\n", i); 15950 err_printf(m, " Power: %s\n", 15951 onoff(error->pipe[i].power_domain_on)); 15952 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 15953 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 15954 15955 err_printf(m, "Plane [%d]:\n", i); 15956 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 15957 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 15958 if (INTEL_GEN(dev_priv) <= 3) { 15959 err_printf(m, " SIZE: %08x\n", error->plane[i].size); 15960 err_printf(m, " POS: %08x\n", error->plane[i].pos); 15961 } 15962 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 15963 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 15964 if (INTEL_GEN(dev_priv) >= 4) { 15965 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 15966 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 15967 } 15968 15969 err_printf(m, "Cursor [%d]:\n", i); 15970 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 15971 err_printf(m, " POS: %08x\n", error->cursor[i].position); 15972 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 15973 } 15974 15975 for (i = 0; i < error->num_transcoders; i++) { 15976 err_printf(m, "CPU transcoder: %s\n", 15977 transcoder_name(error->transcoder[i].cpu_transcoder)); 15978 err_printf(m, " Power: %s\n", 15979 onoff(error->transcoder[i].power_domain_on)); 15980 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 15981 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 15982 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 15983 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 15984 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 15985 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 15986 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 15987 } 15988} 15989 15990#endif