Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v4.12 15906 lines 458 kB view raw
1/* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>

/*
 * True when the queued flip work is executed via the mmio_work function
 * (work->mmio_work.func set), as opposed to a command-streamer based flip.
 */
static bool is_mmio_work(struct intel_flip_work *work)
{
	return work->mmio_work.func;
}

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for SKL+; adds alpha and packed-YUV variants. */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

/*
 * Per-platform DPLL divisor limits: min/max for each divider and for the
 * derived dot/vco clocks, plus the p2 post divider selection thresholds.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	/* Fused HPLL frequency selector indexes this table (values in MHz). */
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		    CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}

/*
 * Read a CCK clock divider register and derive the resulting clock rate
 * from @ref_freq. Warns if the hardware reports a divider change still
 * in progress (status field disagrees with the programmed divider).
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	/* rate = 2 * ref / (divider + 1), rounded to nearest */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

/* As vlv_get_cck_clock(), using the (lazily cached) HPLL VCO as reference. */
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}

/* Cache the CZ clock rate; only meaningful on VLV/CHV. */
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else if (IS_GEN5(dev_priv))
		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
	else
		return 270000;
}

/*
 * Hardware-derived DPLL divisor limit tables below. Values come from the
 * platform documentation; do not tweak without the corresponding PRM.
 */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored in 10.22 fixed point on CHV, hence the << 22 */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* Thin wrapper; see drm_atomic_crtc_needs_modeset() for the real test. */
static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

/* Effective loopback divider for i9xx: m = 5*(m1+2) + (m2+2). */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	/* register value n encodes n+2, hence the +2 below */
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	/* the PLL runs at 5x the pipe clock, hence the /5 */
	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	/* m2 is 10.22 fixed point on CHV, so divide the extra 2^22 out via n */
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* the m1 > m2 requirement does not apply to PNV/VLV/CHV/BXT */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT limit tables carry no m/p ranges, so skip them there */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

/* Pick the slow or fast p2 post divider for the given target dot clock. */
static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* hardware requires m1 > m2 on these platforms */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* no m1 > m2 restriction here: PNV reserves m1 as 0 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* cap n so later iterations only improve */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve m2 from the target fast clock */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	uint64_t m2;
	/* NOTE(review): declared int but used as a bool; the vlv variant uses bool */
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on hardware doc, the n always set to 1, and m1 always
	 * set to 2.  If requires to support 200Mhz refclk, we need to
	 * revisit this because n may not 1 anymore.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			/* m2 in 10.22 fixed point, solved from the fast clock */
			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			/* guard the narrowing store into clock.m2 below */
			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

/* BXT reuses the CHV search with a fixed 100 MHz refclk and BXT limits. */
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  target_clock, refclk, NULL, best_clock);
}

bool intel_crtc_active(struct intel_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return crtc->active && crtc->base.primary->state->fb &&
		crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	return crtc->config->cpu_transcoder;
}

/*
 * Sample the pipe's display scanline counter twice, 5 ms apart; equal
 * readings mean the scanline is no longer advancing, i.e. the pipe stopped.
 */
static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev_priv))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 4) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN5(dev_priv))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
1136 bool locked = true; 1137 1138 if (WARN_ON(HAS_DDI(dev_priv))) 1139 return; 1140 1141 if (HAS_PCH_SPLIT(dev_priv)) { 1142 u32 port_sel; 1143 1144 pp_reg = PP_CONTROL(0); 1145 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; 1146 1147 if (port_sel == PANEL_PORT_SELECT_LVDS && 1148 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) 1149 panel_pipe = PIPE_B; 1150 /* XXX: else fix for eDP */ 1151 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1152 /* presumably write lock depends on pipe, not port select */ 1153 pp_reg = PP_CONTROL(pipe); 1154 panel_pipe = pipe; 1155 } else { 1156 pp_reg = PP_CONTROL(0); 1157 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) 1158 panel_pipe = PIPE_B; 1159 } 1160 1161 val = I915_READ(pp_reg); 1162 if (!(val & PANEL_POWER_ON) || 1163 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) 1164 locked = false; 1165 1166 I915_STATE_WARN(panel_pipe == pipe && locked, 1167 "panel assertion failure, pipe %c regs locked\n", 1168 pipe_name(pipe)); 1169} 1170 1171static void assert_cursor(struct drm_i915_private *dev_priv, 1172 enum pipe pipe, bool state) 1173{ 1174 bool cur_state; 1175 1176 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 1177 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 1178 else 1179 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 1180 1181 I915_STATE_WARN(cur_state != state, 1182 "cursor on pipe %c assertion failure (expected %s, current %s)\n", 1183 pipe_name(pipe), onoff(state), onoff(cur_state)); 1184} 1185#define assert_cursor_enabled(d, p) assert_cursor(d, p, true) 1186#define assert_cursor_disabled(d, p) assert_cursor(d, p, false) 1187 1188void assert_pipe(struct drm_i915_private *dev_priv, 1189 enum pipe pipe, bool state) 1190{ 1191 bool cur_state; 1192 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1193 pipe); 1194 enum intel_display_power_domain power_domain; 1195 1196 /* if we need the pipe quirk it must be always on */ 1197 if ((pipe == PIPE_A && dev_priv->quirks & 
QUIRK_PIPEA_FORCE) || 1198 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1199 state = true; 1200 1201 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 1202 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 1203 u32 val = I915_READ(PIPECONF(cpu_transcoder)); 1204 cur_state = !!(val & PIPECONF_ENABLE); 1205 1206 intel_display_power_put(dev_priv, power_domain); 1207 } else { 1208 cur_state = false; 1209 } 1210 1211 I915_STATE_WARN(cur_state != state, 1212 "pipe %c assertion failure (expected %s, current %s)\n", 1213 pipe_name(pipe), onoff(state), onoff(cur_state)); 1214} 1215 1216static void assert_plane(struct drm_i915_private *dev_priv, 1217 enum plane plane, bool state) 1218{ 1219 u32 val; 1220 bool cur_state; 1221 1222 val = I915_READ(DSPCNTR(plane)); 1223 cur_state = !!(val & DISPLAY_PLANE_ENABLE); 1224 I915_STATE_WARN(cur_state != state, 1225 "plane %c assertion failure (expected %s, current %s)\n", 1226 plane_name(plane), onoff(state), onoff(cur_state)); 1227} 1228 1229#define assert_plane_enabled(d, p) assert_plane(d, p, true) 1230#define assert_plane_disabled(d, p) assert_plane(d, p, false) 1231 1232static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1233 enum pipe pipe) 1234{ 1235 int i; 1236 1237 /* Primary planes are fixed to pipes on gen4+ */ 1238 if (INTEL_GEN(dev_priv) >= 4) { 1239 u32 val = I915_READ(DSPCNTR(pipe)); 1240 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, 1241 "plane %c assertion failure, should be disabled but not\n", 1242 plane_name(pipe)); 1243 return; 1244 } 1245 1246 /* Need to check both planes against the pipe */ 1247 for_each_pipe(dev_priv, i) { 1248 u32 val = I915_READ(DSPCNTR(i)); 1249 enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1250 DISPPLANE_SEL_PIPE_SHIFT; 1251 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, 1252 "plane %c assertion failure, should be off on pipe %c but is still active\n", 1253 plane_name(i), pipe_name(pipe)); 1254 } 1255} 1256 
/*
 * Warn if any sprite/universal plane attached to @pipe is still enabled.
 * The register layout differs per generation, hence the cascade.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int sprite;

	if (INTEL_GEN(dev_priv) >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_GEN(dev_priv) >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_GEN(dev_priv) >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}

/*
 * Warn if vblank interrupts are still enabled on @crtc. A successful
 * drm_crtc_vblank_get() (return 0) means they were, so the reference
 * taken by the check itself must be dropped again.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

/*
 * Return true if a DP port (register value @val, port select @port_sel)
 * is enabled and feeding @pipe. Pipe selection is encoded differently on
 * CPT PCH, CHV, and older parts.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

/* Return true if an SDVO/HDMI port (register value @val) feeds @pipe. */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

/* Return true if the LVDS port (register value @val) feeds @pipe. */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

/* Return true if the VGA DAC (ADPA register value @val) feeds @pipe. */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);

	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

/* Warn if any PCH port (DP, VGA, LVDS, HDMI) is still driving @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

/* Write the precomputed DPLL value and wait for the PLL to report lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}


static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

/* Count CRTCs that are active and driving a DVO output. */
static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		count += crtc->base.state->active &&
			intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
	}

	return count;
}

static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * i9xx_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}

/*
 * Poll the per-port PHY ready bits until they match @expected_mask,
 * warning on timeout. Ports B/C share DPLL(0); port D reports through
 * DPIO_PHY_STATUS.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C bits sit 4 positions above the port B bits */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}

static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

/*
 * Return the PCH transcoder feeding @crtc's PCH encoder: LPT has a
 * single transcoder (A); otherwise it follows the pipe.
 */
enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	WARN_ON(!crtc->config->has_pch_encoder);

	if (HAS_PCH_LPT(dev_priv))
		return TRANSCODER_A;
	else
		return (enum transcoder) crtc->pipe;
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  (enum pipe) intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* already on: only expected under the pipe-force quirks */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts DSL will read as 0, which would cause
	 * an apparent vblank timestamp jump, which messes up also the
	 * frame count when it's derived from the timestamps. So let's
	 * wait for the pipe to start properly before we call
	 * drm_crtc_vblank_on()
	 */
	if (dev->max_vblank_count == 0 &&
	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}

/* Size in bytes of one tile: 2 KiB on gen2, 4 KiB on later gens. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	return IS_GEN2(dev_priv) ? 2048 : 4096;
}

/*
 * Return the tile row width in bytes for the given fb plane, based on
 * the fb's tiling modifier and the plane's bytes-per-pixel. A linear
 * fb has no tiling, so the "tile" is a single pixel (cpp bytes).
 */
static unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return cpp;
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN2(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}

/* Tile height in rows: tile size divided by the tile row width. */
static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int plane)
{
	if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
		return 1;
	else
		return intel_tile_size(to_i915(fb->dev)) /
			intel_tile_width_bytes(fb, plane);
}

/* Return the tile dimensions in pixel units */
static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
			    unsigned int *tile_width,
			    unsigned int *tile_height)
{
	unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane);
	unsigned int cpp = fb->format->cpp[plane];

	*tile_width = tile_width_bytes / cpp;
	*tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
}

/* Round @height up to a whole number of tile rows for this fb plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int plane, unsigned int height)
{
	unsigned int tile_height = intel_tile_height(fb, plane);

	return ALIGN(height, tile_height);
}

/* Total size (in tiles) of a rotated view: sum of per-plane w*h. */
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].width * rot_info->plane[i].height;

	return size;
}

/* Pick the GGTT view for an fb: rotated for 90/270 scanout, else normal. */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
			const struct drm_framebuffer *fb,
			unsigned int rotation)
{
	view->type = I915_GGTT_VIEW_NORMAL;
	if (drm_rotation_90_or_270(rotation)) {
		view->type = I915_GGTT_VIEW_ROTATED;
		view->rotated = to_intel_framebuffer(fb)->rot_info;
	}
}

/* GGTT alignment required for a linear (untiled) scanout surface. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_INFO(dev_priv)->gen >= 4)
		return 4 * 1024;
	else
		return 0;
}

/* GGTT alignment required for a scanout surface with the fb's modifier. */
static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if (fb->format->format == DRM_FORMAT_NV12 && plane == 1)
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}

/* (head of intel_pin_and_fence_fb_obj -- body continues beyond this view) */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

alignment = intel_surf_alignment(fb, 0); 2139 2140 intel_fill_fb_ggtt_view(&view, fb, rotation); 2141 2142 /* Note that the w/a also requires 64 PTE of padding following the 2143 * bo. We currently fill all unused PTE with the shadow page and so 2144 * we should always have valid PTE following the scanout preventing 2145 * the VT-d warning. 2146 */ 2147 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) 2148 alignment = 256 * 1024; 2149 2150 /* 2151 * Global gtt pte registers are special registers which actually forward 2152 * writes to a chunk of system memory. Which means that there is no risk 2153 * that the register values disappear as soon as we call 2154 * intel_runtime_pm_put(), so it is correct to wrap only the 2155 * pin/unpin/fence and not more. 2156 */ 2157 intel_runtime_pm_get(dev_priv); 2158 2159 vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view); 2160 if (IS_ERR(vma)) 2161 goto err; 2162 2163 if (i915_vma_is_map_and_fenceable(vma)) { 2164 /* Install a fence for tiled scan-out. Pre-i965 always needs a 2165 * fence, whereas 965+ only requires a fence if using 2166 * framebuffer compression. For simplicity, we always, when 2167 * possible, install a fence as the cost is not that onerous. 2168 * 2169 * If we fail to fence the tiled scanout, then either the 2170 * modeset will reject the change (which is highly unlikely as 2171 * the affected systems, all but one, do not have unmappable 2172 * space) or we will not be able to enable full powersaving 2173 * techniques (also likely not to apply due to various limits 2174 * FBC and the like impose on the size of the buffer, which 2175 * presumably we violated anyway with this unmappable buffer). 2176 * Anyway, it is presumably better to stumble onwards with 2177 * something and try to run the system in a "less than optimal" 2178 * mode that matches the user configuration. 
2179 */ 2180 if (i915_vma_get_fence(vma) == 0) 2181 i915_vma_pin_fence(vma); 2182 } 2183 2184 i915_vma_get(vma); 2185err: 2186 intel_runtime_pm_put(dev_priv); 2187 return vma; 2188} 2189 2190void intel_unpin_fb_vma(struct i915_vma *vma) 2191{ 2192 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 2193 2194 i915_vma_unpin_fence(vma); 2195 i915_gem_object_unpin_from_display_plane(vma); 2196 i915_vma_put(vma); 2197} 2198 2199static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, 2200 unsigned int rotation) 2201{ 2202 if (drm_rotation_90_or_270(rotation)) 2203 return to_intel_framebuffer(fb)->rotated[plane].pitch; 2204 else 2205 return fb->pitches[plane]; 2206} 2207 2208/* 2209 * Convert the x/y offsets into a linear offset. 2210 * Only valid with 0/180 degree rotation, which is fine since linear 2211 * offset is only used with linear buffers on pre-hsw and tiled buffers 2212 * with gen2/3, and 90/270 degree rotations isn't supported on any of them. 2213 */ 2214u32 intel_fb_xy_to_linear(int x, int y, 2215 const struct intel_plane_state *state, 2216 int plane) 2217{ 2218 const struct drm_framebuffer *fb = state->base.fb; 2219 unsigned int cpp = fb->format->cpp[plane]; 2220 unsigned int pitch = fb->pitches[plane]; 2221 2222 return y * pitch + x * cpp; 2223} 2224 2225/* 2226 * Add the x/y offsets derived from fb->offsets[] to the user 2227 * specified plane src x/y offsets. The resulting x/y offsets 2228 * specify the start of scanout from the beginning of the gtt mapping. 
2229 */ 2230void intel_add_fb_offsets(int *x, int *y, 2231 const struct intel_plane_state *state, 2232 int plane) 2233 2234{ 2235 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb); 2236 unsigned int rotation = state->base.rotation; 2237 2238 if (drm_rotation_90_or_270(rotation)) { 2239 *x += intel_fb->rotated[plane].x; 2240 *y += intel_fb->rotated[plane].y; 2241 } else { 2242 *x += intel_fb->normal[plane].x; 2243 *y += intel_fb->normal[plane].y; 2244 } 2245} 2246 2247/* 2248 * Input tile dimensions and pitch must already be 2249 * rotated to match x and y, and in pixel units. 2250 */ 2251static u32 _intel_adjust_tile_offset(int *x, int *y, 2252 unsigned int tile_width, 2253 unsigned int tile_height, 2254 unsigned int tile_size, 2255 unsigned int pitch_tiles, 2256 u32 old_offset, 2257 u32 new_offset) 2258{ 2259 unsigned int pitch_pixels = pitch_tiles * tile_width; 2260 unsigned int tiles; 2261 2262 WARN_ON(old_offset & (tile_size - 1)); 2263 WARN_ON(new_offset & (tile_size - 1)); 2264 WARN_ON(new_offset > old_offset); 2265 2266 tiles = (old_offset - new_offset) / tile_size; 2267 2268 *y += tiles / pitch_tiles * tile_height; 2269 *x += tiles % pitch_tiles * tile_width; 2270 2271 /* minimize x in case it got needlessly big */ 2272 *y += *x / pitch_pixels * tile_height; 2273 *x %= pitch_pixels; 2274 2275 return new_offset; 2276} 2277 2278/* 2279 * Adjust the tile offset by moving the difference into 2280 * the x/y offsets. 
2281 */ 2282static u32 intel_adjust_tile_offset(int *x, int *y, 2283 const struct intel_plane_state *state, int plane, 2284 u32 old_offset, u32 new_offset) 2285{ 2286 const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); 2287 const struct drm_framebuffer *fb = state->base.fb; 2288 unsigned int cpp = fb->format->cpp[plane]; 2289 unsigned int rotation = state->base.rotation; 2290 unsigned int pitch = intel_fb_pitch(fb, plane, rotation); 2291 2292 WARN_ON(new_offset > old_offset); 2293 2294 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2295 unsigned int tile_size, tile_width, tile_height; 2296 unsigned int pitch_tiles; 2297 2298 tile_size = intel_tile_size(dev_priv); 2299 intel_tile_dims(fb, plane, &tile_width, &tile_height); 2300 2301 if (drm_rotation_90_or_270(rotation)) { 2302 pitch_tiles = pitch / tile_height; 2303 swap(tile_width, tile_height); 2304 } else { 2305 pitch_tiles = pitch / (tile_width * cpp); 2306 } 2307 2308 _intel_adjust_tile_offset(x, y, tile_width, tile_height, 2309 tile_size, pitch_tiles, 2310 old_offset, new_offset); 2311 } else { 2312 old_offset += *y * pitch + *x * cpp; 2313 2314 *y = (old_offset - new_offset) / pitch; 2315 *x = ((old_offset - new_offset) - *y * pitch) / cpp; 2316 } 2317 2318 return new_offset; 2319} 2320 2321/* 2322 * Computes the linear offset to the base tile and adjusts 2323 * x, y. bytes per pixel is assumed to be a power-of-two. 2324 * 2325 * In the 90/270 rotated case, x and y are assumed 2326 * to be already rotated to match the rotated GTT view, and 2327 * pitch is the tile_height aligned framebuffer height. 2328 * 2329 * This function is used when computing the derived information 2330 * under intel_framebuffer, so using any of that information 2331 * here is not allowed. Anything under drm_framebuffer can be 2332 * used. This is why the user has to pass in the pitch since it 2333 * is specified in the rotated orientation. 
2334 */ 2335static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, 2336 int *x, int *y, 2337 const struct drm_framebuffer *fb, int plane, 2338 unsigned int pitch, 2339 unsigned int rotation, 2340 u32 alignment) 2341{ 2342 uint64_t fb_modifier = fb->modifier; 2343 unsigned int cpp = fb->format->cpp[plane]; 2344 u32 offset, offset_aligned; 2345 2346 if (alignment) 2347 alignment--; 2348 2349 if (fb_modifier != DRM_FORMAT_MOD_LINEAR) { 2350 unsigned int tile_size, tile_width, tile_height; 2351 unsigned int tile_rows, tiles, pitch_tiles; 2352 2353 tile_size = intel_tile_size(dev_priv); 2354 intel_tile_dims(fb, plane, &tile_width, &tile_height); 2355 2356 if (drm_rotation_90_or_270(rotation)) { 2357 pitch_tiles = pitch / tile_height; 2358 swap(tile_width, tile_height); 2359 } else { 2360 pitch_tiles = pitch / (tile_width * cpp); 2361 } 2362 2363 tile_rows = *y / tile_height; 2364 *y %= tile_height; 2365 2366 tiles = *x / tile_width; 2367 *x %= tile_width; 2368 2369 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2370 offset_aligned = offset & ~alignment; 2371 2372 _intel_adjust_tile_offset(x, y, tile_width, tile_height, 2373 tile_size, pitch_tiles, 2374 offset, offset_aligned); 2375 } else { 2376 offset = *y * pitch + *x * cpp; 2377 offset_aligned = offset & ~alignment; 2378 2379 *y = (offset & alignment) / pitch; 2380 *x = ((offset & alignment) - *y * pitch) / cpp; 2381 } 2382 2383 return offset_aligned; 2384} 2385 2386u32 intel_compute_tile_offset(int *x, int *y, 2387 const struct intel_plane_state *state, 2388 int plane) 2389{ 2390 const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); 2391 const struct drm_framebuffer *fb = state->base.fb; 2392 unsigned int rotation = state->base.rotation; 2393 int pitch = intel_fb_pitch(fb, plane, rotation); 2394 u32 alignment = intel_surf_alignment(fb, plane); 2395 2396 return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch, 2397 rotation, alignment); 2398} 2399 
/* Convert the fb->offset[] linear offset into x/y offsets */
static void intel_fb_offset_to_xy(int *x, int *y,
				  const struct drm_framebuffer *fb, int plane)
{
	unsigned int cpp = fb->format->cpp[plane];
	unsigned int pitch = fb->pitches[plane];
	u32 linear_offset = fb->offsets[plane];

	*y = linear_offset / pitch;
	*x = linear_offset % pitch / cpp;
}

/* Map an fb modifier to the object tiling mode it implies. */
static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case I915_FORMAT_MOD_X_TILED:
		return I915_TILING_X;
	case I915_FORMAT_MOD_Y_TILED:
		return I915_TILING_Y;
	default:
		return I915_TILING_NONE;
	}
}

/*
 * Precompute the layout of each fb plane: the x/y offsets of the first
 * pixel in both the normal and the rotated GGTT view, and the rotation
 * info (offset/stride/width/height in tiles) used to build the rotated
 * mapping. Returns -EINVAL if the fb layout cannot be supported.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
		   struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_rotation_info *rot_info = &intel_fb->rot_info;
	u32 gtt_offset_rotated = 0;
	unsigned int max_size = 0;
	int i, num_planes = fb->format->num_planes;
	unsigned int tile_size = intel_tile_size(dev_priv);

	for (i = 0; i < num_planes; i++) {
		unsigned int width, height;
		unsigned int cpp, size;
		u32 offset;
		int x, y;

		cpp = fb->format->cpp[i];
		width = drm_framebuffer_plane_width(fb->width, fb, i);
		height = drm_framebuffer_plane_height(fb->height, fb, i);

		intel_fb_offset_to_xy(&x, &y, fb, i);

		/*
		 * The fence (if used) is aligned to the start of the object
		 * so having the framebuffer wrap around across the edge of the
		 * fenced region doesn't really work. We have no API to configure
		 * the fence start offset within the object (nor could we probably
		 * on gen2/3). So it's just easier if we just require that the
		 * fb layout agrees with the fence layout. We already check that the
		 * fb stride matches the fence stride elsewhere.
		 */
		if (i915_gem_object_is_tiled(intel_fb->obj) &&
		    (x + width) * cpp > fb->pitches[i]) {
			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
				      i, fb->offsets[i]);
			return -EINVAL;
		}

		/*
		 * First pixel of the framebuffer from
		 * the start of the normal gtt mapping.
		 */
		intel_fb->normal[i].x = x;
		intel_fb->normal[i].y = y;

		/* base tile offset in whole tiles; x/y get the remainder */
		offset = _intel_compute_tile_offset(dev_priv, &x, &y,
						    fb, i, fb->pitches[i],
						    DRM_ROTATE_0, tile_size);
		offset /= tile_size;

		if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
			unsigned int tile_width, tile_height;
			unsigned int pitch_tiles;
			struct drm_rect r;

			intel_tile_dims(fb, i, &tile_width, &tile_height);

			rot_info->plane[i].offset = offset;
			rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
			rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
			rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

			intel_fb->rotated[i].pitch =
				rot_info->plane[i].height * tile_height;

			/* how many tiles does this plane need */
			size = rot_info->plane[i].stride * rot_info->plane[i].height;
			/*
			 * If the plane isn't horizontally tile aligned,
			 * we need one more tile.
			 */
			if (x != 0)
				size++;

			/* rotate the x/y offsets to match the GTT view */
			r.x1 = x;
			r.y1 = y;
			r.x2 = x + width;
			r.y2 = y + height;
			drm_rect_rotate(&r,
					rot_info->plane[i].width * tile_width,
					rot_info->plane[i].height * tile_height,
					DRM_ROTATE_270);
			x = r.x1;
			y = r.y1;

			/* rotate the tile dimensions to match the GTT view */
			pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
			swap(tile_width, tile_height);

			/*
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			_intel_adjust_tile_offset(&x, &y,
						  tile_width, tile_height,
						  tile_size, pitch_tiles,
						  gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

			/*
			 * First pixel of the framebuffer from
			 * the start of the rotated gtt mapping.
			 */
			intel_fb->rotated[i].x = x;
			intel_fb->rotated[i].y = y;
		} else {
			size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
					    x * cpp, tile_size);
		}

		/* how many tiles in total needed in the bo */
		max_size = max(max_size, offset + size);
	}

	if (max_size * tile_size > intel_fb->obj->base.size) {
		DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
			      max_size * tile_size, intel_fb->obj->base.size);
		return -EINVAL;
	}

	return 0;
}

/*
 * Translate a gen2-8 DSPCNTR pixel format field to a DRM fourcc.
 * Note the default: shares the XRGB8888 return on purpose.
 */
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

/*
 * Translate a SKL+ PLANE_CTL format field (plus channel order and alpha
 * bits) to a DRM fourcc. Unknown formats fall through to the 8888 family.
 */
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	}
}

/*
 * Try to wrap the scanout buffer the BIOS left behind (pre-allocated in
 * stolen memory) into a GEM object and initialize plane_config->fb around
 * it. Returns false if the BIOS fb can't be reused.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > ggtt->stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);
	obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	mutex_unlock(&dev->struct_mutex);
	if (!obj)
		return false;

	if (plane_config->tiling == I915_TILING_X)
		obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}


	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	i915_gem_object_put(obj);
	return false;
}

/* Update plane->state->fb to match plane->fb after driver-internal updates */
2656static void 2657update_state_fb(struct drm_plane *plane) 2658{ 2659 if (plane->fb == plane->state->fb) 2660 return; 2661 2662 if (plane->state->fb) 2663 drm_framebuffer_unreference(plane->state->fb); 2664 plane->state->fb = plane->fb; 2665 if (plane->state->fb) 2666 drm_framebuffer_reference(plane->state->fb); 2667} 2668 2669static void 2670intel_set_plane_visible(struct intel_crtc_state *crtc_state, 2671 struct intel_plane_state *plane_state, 2672 bool visible) 2673{ 2674 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2675 2676 plane_state->base.visible = visible; 2677 2678 /* FIXME pre-g4x don't work like this */ 2679 if (visible) { 2680 crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base)); 2681 crtc_state->active_planes |= BIT(plane->id); 2682 } else { 2683 crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base)); 2684 crtc_state->active_planes &= ~BIT(plane->id); 2685 } 2686 2687 DRM_DEBUG_KMS("%s active planes 0x%x\n", 2688 crtc_state->base.crtc->name, 2689 crtc_state->active_planes); 2690} 2691 2692static void 2693intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 2694 struct intel_initial_plane_config *plane_config) 2695{ 2696 struct drm_device *dev = intel_crtc->base.dev; 2697 struct drm_i915_private *dev_priv = to_i915(dev); 2698 struct drm_crtc *c; 2699 struct drm_i915_gem_object *obj; 2700 struct drm_plane *primary = intel_crtc->base.primary; 2701 struct drm_plane_state *plane_state = primary->state; 2702 struct drm_crtc_state *crtc_state = intel_crtc->base.state; 2703 struct intel_plane *intel_plane = to_intel_plane(primary); 2704 struct intel_plane_state *intel_state = 2705 to_intel_plane_state(plane_state); 2706 struct drm_framebuffer *fb; 2707 2708 if (!plane_config->fb) 2709 return; 2710 2711 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { 2712 fb = &plane_config->fb->base; 2713 goto valid_fb; 2714 } 2715 2716 kfree(plane_config->fb); 2717 2718 /* 2719 * Failed to alloc the 
obj, check to see if we should share 2720 * an fb with another CRTC instead 2721 */ 2722 for_each_crtc(dev, c) { 2723 struct intel_plane_state *state; 2724 2725 if (c == &intel_crtc->base) 2726 continue; 2727 2728 if (!to_intel_crtc(c)->active) 2729 continue; 2730 2731 state = to_intel_plane_state(c->primary->state); 2732 if (!state->vma) 2733 continue; 2734 2735 if (intel_plane_ggtt_offset(state) == plane_config->base) { 2736 fb = c->primary->fb; 2737 drm_framebuffer_reference(fb); 2738 goto valid_fb; 2739 } 2740 } 2741 2742 /* 2743 * We've failed to reconstruct the BIOS FB. Current display state 2744 * indicates that the primary plane is visible, but has a NULL FB, 2745 * which will lead to problems later if we don't fix it up. The 2746 * simplest solution is to just disable the primary plane now and 2747 * pretend the BIOS never had it enabled. 2748 */ 2749 intel_set_plane_visible(to_intel_crtc_state(crtc_state), 2750 to_intel_plane_state(plane_state), 2751 false); 2752 intel_pre_disable_primary_noatomic(&intel_crtc->base); 2753 trace_intel_disable_plane(primary, intel_crtc); 2754 intel_plane->disable_plane(primary, &intel_crtc->base); 2755 2756 return; 2757 2758valid_fb: 2759 mutex_lock(&dev->struct_mutex); 2760 intel_state->vma = 2761 intel_pin_and_fence_fb_obj(fb, primary->state->rotation); 2762 mutex_unlock(&dev->struct_mutex); 2763 if (IS_ERR(intel_state->vma)) { 2764 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", 2765 intel_crtc->pipe, PTR_ERR(intel_state->vma)); 2766 2767 intel_state->vma = NULL; 2768 drm_framebuffer_unreference(fb); 2769 return; 2770 } 2771 2772 plane_state->src_x = 0; 2773 plane_state->src_y = 0; 2774 plane_state->src_w = fb->width << 16; 2775 plane_state->src_h = fb->height << 16; 2776 2777 plane_state->crtc_x = 0; 2778 plane_state->crtc_y = 0; 2779 plane_state->crtc_w = fb->width; 2780 plane_state->crtc_h = fb->height; 2781 2782 intel_state->base.src = drm_plane_state_src(plane_state); 2783 intel_state->base.dst = 
drm_plane_state_dest(plane_state); 2784 2785 obj = intel_fb_obj(fb); 2786 if (i915_gem_object_is_tiled(obj)) 2787 dev_priv->preserve_bios_swizzle = true; 2788 2789 drm_framebuffer_reference(fb); 2790 primary->fb = primary->state->fb = fb; 2791 primary->crtc = primary->state->crtc = &intel_crtc->base; 2792 2793 intel_set_plane_visible(to_intel_crtc_state(crtc_state), 2794 to_intel_plane_state(plane_state), 2795 true); 2796 2797 atomic_or(to_intel_plane(primary)->frontbuffer_bit, 2798 &obj->frontbuffer_bits); 2799} 2800 2801static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, 2802 unsigned int rotation) 2803{ 2804 int cpp = fb->format->cpp[plane]; 2805 2806 switch (fb->modifier) { 2807 case DRM_FORMAT_MOD_LINEAR: 2808 case I915_FORMAT_MOD_X_TILED: 2809 switch (cpp) { 2810 case 8: 2811 return 4096; 2812 case 4: 2813 case 2: 2814 case 1: 2815 return 8192; 2816 default: 2817 MISSING_CASE(cpp); 2818 break; 2819 } 2820 break; 2821 case I915_FORMAT_MOD_Y_TILED: 2822 case I915_FORMAT_MOD_Yf_TILED: 2823 switch (cpp) { 2824 case 8: 2825 return 2048; 2826 case 4: 2827 return 4096; 2828 case 2: 2829 case 1: 2830 return 8192; 2831 default: 2832 MISSING_CASE(cpp); 2833 break; 2834 } 2835 break; 2836 default: 2837 MISSING_CASE(fb->modifier); 2838 } 2839 2840 return 2048; 2841} 2842 2843static int skl_check_main_surface(struct intel_plane_state *plane_state) 2844{ 2845 const struct drm_framebuffer *fb = plane_state->base.fb; 2846 unsigned int rotation = plane_state->base.rotation; 2847 int x = plane_state->base.src.x1 >> 16; 2848 int y = plane_state->base.src.y1 >> 16; 2849 int w = drm_rect_width(&plane_state->base.src) >> 16; 2850 int h = drm_rect_height(&plane_state->base.src) >> 16; 2851 int max_width = skl_max_plane_width(fb, 0, rotation); 2852 int max_height = 4096; 2853 u32 alignment, offset, aux_offset = plane_state->aux.offset; 2854 2855 if (w > max_width || h > max_height) { 2856 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit 
%dx%d)\n", 2857 w, h, max_width, max_height); 2858 return -EINVAL; 2859 } 2860 2861 intel_add_fb_offsets(&x, &y, plane_state, 0); 2862 offset = intel_compute_tile_offset(&x, &y, plane_state, 0); 2863 alignment = intel_surf_alignment(fb, 0); 2864 2865 /* 2866 * AUX surface offset is specified as the distance from the 2867 * main surface offset, and it must be non-negative. Make 2868 * sure that is what we will get. 2869 */ 2870 if (offset > aux_offset) 2871 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 2872 offset, aux_offset & ~(alignment - 1)); 2873 2874 /* 2875 * When using an X-tiled surface, the plane blows up 2876 * if the x offset + width exceed the stride. 2877 * 2878 * TODO: linear and Y-tiled seem fine, Yf untested, 2879 */ 2880 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 2881 int cpp = fb->format->cpp[0]; 2882 2883 while ((x + w) * cpp > fb->pitches[0]) { 2884 if (offset == 0) { 2885 DRM_DEBUG_KMS("Unable to find suitable display surface offset\n"); 2886 return -EINVAL; 2887 } 2888 2889 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 2890 offset, offset - alignment); 2891 } 2892 } 2893 2894 plane_state->main.offset = offset; 2895 plane_state->main.x = x; 2896 plane_state->main.y = y; 2897 2898 return 0; 2899} 2900 2901static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 2902{ 2903 const struct drm_framebuffer *fb = plane_state->base.fb; 2904 unsigned int rotation = plane_state->base.rotation; 2905 int max_width = skl_max_plane_width(fb, 1, rotation); 2906 int max_height = 4096; 2907 int x = plane_state->base.src.x1 >> 17; 2908 int y = plane_state->base.src.y1 >> 17; 2909 int w = drm_rect_width(&plane_state->base.src) >> 17; 2910 int h = drm_rect_height(&plane_state->base.src) >> 17; 2911 u32 offset; 2912 2913 intel_add_fb_offsets(&x, &y, plane_state, 1); 2914 offset = intel_compute_tile_offset(&x, &y, plane_state, 1); 2915 2916 /* FIXME not quite sure how/if these apply to the chroma plane */ 2917 if (w > 
max_width || h > max_height) { 2918 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", 2919 w, h, max_width, max_height); 2920 return -EINVAL; 2921 } 2922 2923 plane_state->aux.offset = offset; 2924 plane_state->aux.x = x; 2925 plane_state->aux.y = y; 2926 2927 return 0; 2928} 2929 2930int skl_check_plane_surface(struct intel_plane_state *plane_state) 2931{ 2932 const struct drm_framebuffer *fb = plane_state->base.fb; 2933 unsigned int rotation = plane_state->base.rotation; 2934 int ret; 2935 2936 if (!plane_state->base.visible) 2937 return 0; 2938 2939 /* Rotate src coordinates to match rotated GTT view */ 2940 if (drm_rotation_90_or_270(rotation)) 2941 drm_rect_rotate(&plane_state->base.src, 2942 fb->width << 16, fb->height << 16, 2943 DRM_ROTATE_270); 2944 2945 /* 2946 * Handle the AUX surface first since 2947 * the main surface setup depends on it. 2948 */ 2949 if (fb->format->format == DRM_FORMAT_NV12) { 2950 ret = skl_check_nv12_aux_surface(plane_state); 2951 if (ret) 2952 return ret; 2953 } else { 2954 plane_state->aux.offset = ~0xfff; 2955 plane_state->aux.x = 0; 2956 plane_state->aux.y = 0; 2957 } 2958 2959 ret = skl_check_main_surface(plane_state); 2960 if (ret) 2961 return ret; 2962 2963 return 0; 2964} 2965 2966static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 2967 const struct intel_plane_state *plane_state) 2968{ 2969 struct drm_i915_private *dev_priv = 2970 to_i915(plane_state->base.plane->dev); 2971 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 2972 const struct drm_framebuffer *fb = plane_state->base.fb; 2973 unsigned int rotation = plane_state->base.rotation; 2974 u32 dspcntr; 2975 2976 dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE; 2977 2978 if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) || 2979 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) 2980 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2981 2982 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 2983 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 
2984 2985 if (INTEL_GEN(dev_priv) < 4) { 2986 if (crtc->pipe == PIPE_B) 2987 dspcntr |= DISPPLANE_SEL_PIPE_B; 2988 } 2989 2990 switch (fb->format->format) { 2991 case DRM_FORMAT_C8: 2992 dspcntr |= DISPPLANE_8BPP; 2993 break; 2994 case DRM_FORMAT_XRGB1555: 2995 dspcntr |= DISPPLANE_BGRX555; 2996 break; 2997 case DRM_FORMAT_RGB565: 2998 dspcntr |= DISPPLANE_BGRX565; 2999 break; 3000 case DRM_FORMAT_XRGB8888: 3001 dspcntr |= DISPPLANE_BGRX888; 3002 break; 3003 case DRM_FORMAT_XBGR8888: 3004 dspcntr |= DISPPLANE_RGBX888; 3005 break; 3006 case DRM_FORMAT_XRGB2101010: 3007 dspcntr |= DISPPLANE_BGRX101010; 3008 break; 3009 case DRM_FORMAT_XBGR2101010: 3010 dspcntr |= DISPPLANE_RGBX101010; 3011 break; 3012 default: 3013 MISSING_CASE(fb->format->format); 3014 return 0; 3015 } 3016 3017 if (INTEL_GEN(dev_priv) >= 4 && 3018 fb->modifier == I915_FORMAT_MOD_X_TILED) 3019 dspcntr |= DISPPLANE_TILED; 3020 3021 if (rotation & DRM_ROTATE_180) 3022 dspcntr |= DISPPLANE_ROTATE_180; 3023 3024 if (rotation & DRM_REFLECT_X) 3025 dspcntr |= DISPPLANE_MIRROR; 3026 3027 return dspcntr; 3028} 3029 3030int i9xx_check_plane_surface(struct intel_plane_state *plane_state) 3031{ 3032 struct drm_i915_private *dev_priv = 3033 to_i915(plane_state->base.plane->dev); 3034 int src_x = plane_state->base.src.x1 >> 16; 3035 int src_y = plane_state->base.src.y1 >> 16; 3036 u32 offset; 3037 3038 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 3039 3040 if (INTEL_GEN(dev_priv) >= 4) 3041 offset = intel_compute_tile_offset(&src_x, &src_y, 3042 plane_state, 0); 3043 else 3044 offset = 0; 3045 3046 /* HSW/BDW do this automagically in hardware */ 3047 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 3048 unsigned int rotation = plane_state->base.rotation; 3049 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3050 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3051 3052 if (rotation & DRM_ROTATE_180) { 3053 src_x += src_w - 1; 3054 src_y += src_h - 1; 3055 } else if (rotation & 
DRM_REFLECT_X) { 3056 src_x += src_w - 1; 3057 } 3058 } 3059 3060 plane_state->main.offset = offset; 3061 plane_state->main.x = src_x; 3062 plane_state->main.y = src_y; 3063 3064 return 0; 3065} 3066 3067static void i9xx_update_primary_plane(struct drm_plane *primary, 3068 const struct intel_crtc_state *crtc_state, 3069 const struct intel_plane_state *plane_state) 3070{ 3071 struct drm_i915_private *dev_priv = to_i915(primary->dev); 3072 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 3073 struct drm_framebuffer *fb = plane_state->base.fb; 3074 int plane = intel_crtc->plane; 3075 u32 linear_offset; 3076 u32 dspcntr = plane_state->ctl; 3077 i915_reg_t reg = DSPCNTR(plane); 3078 int x = plane_state->main.x; 3079 int y = plane_state->main.y; 3080 unsigned long irqflags; 3081 3082 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 3083 3084 if (INTEL_GEN(dev_priv) >= 4) 3085 intel_crtc->dspaddr_offset = plane_state->main.offset; 3086 else 3087 intel_crtc->dspaddr_offset = linear_offset; 3088 3089 intel_crtc->adjusted_x = x; 3090 intel_crtc->adjusted_y = y; 3091 3092 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3093 3094 if (INTEL_GEN(dev_priv) < 4) { 3095 /* pipesrc and dspsize control the size that is scaled from, 3096 * which should always be the user's requested size. 
3097 */ 3098 I915_WRITE_FW(DSPSIZE(plane), 3099 ((crtc_state->pipe_src_h - 1) << 16) | 3100 (crtc_state->pipe_src_w - 1)); 3101 I915_WRITE_FW(DSPPOS(plane), 0); 3102 } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) { 3103 I915_WRITE_FW(PRIMSIZE(plane), 3104 ((crtc_state->pipe_src_h - 1) << 16) | 3105 (crtc_state->pipe_src_w - 1)); 3106 I915_WRITE_FW(PRIMPOS(plane), 0); 3107 I915_WRITE_FW(PRIMCNSTALPHA(plane), 0); 3108 } 3109 3110 I915_WRITE_FW(reg, dspcntr); 3111 3112 I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]); 3113 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3114 I915_WRITE_FW(DSPSURF(plane), 3115 intel_plane_ggtt_offset(plane_state) + 3116 intel_crtc->dspaddr_offset); 3117 I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x); 3118 } else if (INTEL_GEN(dev_priv) >= 4) { 3119 I915_WRITE_FW(DSPSURF(plane), 3120 intel_plane_ggtt_offset(plane_state) + 3121 intel_crtc->dspaddr_offset); 3122 I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x); 3123 I915_WRITE_FW(DSPLINOFF(plane), linear_offset); 3124 } else { 3125 I915_WRITE_FW(DSPADDR(plane), 3126 intel_plane_ggtt_offset(plane_state) + 3127 intel_crtc->dspaddr_offset); 3128 } 3129 POSTING_READ_FW(reg); 3130 3131 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3132} 3133 3134static void i9xx_disable_primary_plane(struct drm_plane *primary, 3135 struct drm_crtc *crtc) 3136{ 3137 struct drm_device *dev = crtc->dev; 3138 struct drm_i915_private *dev_priv = to_i915(dev); 3139 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3140 int plane = intel_crtc->plane; 3141 unsigned long irqflags; 3142 3143 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3144 3145 I915_WRITE_FW(DSPCNTR(plane), 0); 3146 if (INTEL_INFO(dev_priv)->gen >= 4) 3147 I915_WRITE_FW(DSPSURF(plane), 0); 3148 else 3149 I915_WRITE_FW(DSPADDR(plane), 0); 3150 POSTING_READ_FW(DSPCNTR(plane)); 3151 3152 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3153} 3154 3155static u32 3156intel_fb_stride_alignment(const struct 
drm_framebuffer *fb, int plane)
{
	/*
	 * Stride alignment unit: 64 bytes for linear framebuffers,
	 * otherwise one tile width worth of bytes.
	 */
	if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
		return 64;
	else
		return intel_tile_width_bytes(fb, plane);
}

/* Disable scaler @id on @intel_crtc and clear its window position/size. */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}

/*
 * This function detaches (aka. unbinds) unused scalers in hardware
 */
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
	struct intel_crtc_scaler_state *scaler_state;
	int i;

	scaler_state = &intel_crtc->config->scaler_state;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}

/*
 * Return the value to program into PLANE_STRIDE for color plane @plane of
 * @fb, in the hardware's units (see the comment below). Returns 0 if @fb
 * has no such color plane.
 */
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
		     unsigned int rotation)
{
	u32 stride;

	if (plane >= fb->format->num_planes)
		return 0;

	stride = intel_fb_pitch(fb, plane, rotation);

	/*
	 * The stride is either expressed as a multiple of 64 byte chunks for
	 * linear buffers or in number of tiles for tiled buffers.
3204 */ 3205 if (drm_rotation_90_or_270(rotation)) 3206 stride /= intel_tile_height(fb, plane); 3207 else 3208 stride /= intel_fb_stride_alignment(fb, plane); 3209 3210 return stride; 3211} 3212 3213static u32 skl_plane_ctl_format(uint32_t pixel_format) 3214{ 3215 switch (pixel_format) { 3216 case DRM_FORMAT_C8: 3217 return PLANE_CTL_FORMAT_INDEXED; 3218 case DRM_FORMAT_RGB565: 3219 return PLANE_CTL_FORMAT_RGB_565; 3220 case DRM_FORMAT_XBGR8888: 3221 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 3222 case DRM_FORMAT_XRGB8888: 3223 return PLANE_CTL_FORMAT_XRGB_8888; 3224 /* 3225 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers 3226 * to be already pre-multiplied. We need to add a knob (or a different 3227 * DRM_FORMAT) for user-space to configure that. 3228 */ 3229 case DRM_FORMAT_ABGR8888: 3230 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX | 3231 PLANE_CTL_ALPHA_SW_PREMULTIPLY; 3232 case DRM_FORMAT_ARGB8888: 3233 return PLANE_CTL_FORMAT_XRGB_8888 | 3234 PLANE_CTL_ALPHA_SW_PREMULTIPLY; 3235 case DRM_FORMAT_XRGB2101010: 3236 return PLANE_CTL_FORMAT_XRGB_2101010; 3237 case DRM_FORMAT_XBGR2101010: 3238 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010; 3239 case DRM_FORMAT_YUYV: 3240 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 3241 case DRM_FORMAT_YVYU: 3242 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 3243 case DRM_FORMAT_UYVY: 3244 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; 3245 case DRM_FORMAT_VYUY: 3246 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 3247 default: 3248 MISSING_CASE(pixel_format); 3249 } 3250 3251 return 0; 3252} 3253 3254static u32 skl_plane_ctl_tiling(uint64_t fb_modifier) 3255{ 3256 switch (fb_modifier) { 3257 case DRM_FORMAT_MOD_LINEAR: 3258 break; 3259 case I915_FORMAT_MOD_X_TILED: 3260 return PLANE_CTL_TILED_X; 3261 case I915_FORMAT_MOD_Y_TILED: 3262 return PLANE_CTL_TILED_Y; 3263 case I915_FORMAT_MOD_Yf_TILED: 3264 return PLANE_CTL_TILED_YF; 3265 
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

/*
 * Translate a DRM rotation property value into the PLANE_CTL rotation
 * bits. Unknown values hit MISSING_CASE() and map to 0 (no rotation).
 */
static u32 skl_plane_ctl_rotation(unsigned int rotation)
{
	switch (rotation) {
	case DRM_ROTATE_0:
		break;
	/*
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	case DRM_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotation);
	}

	return 0;
}

/*
 * Compute the full PLANE_CTL register value for @plane_state: enable bit,
 * pipe gamma/CSC defaults (skipped on GLK, which programs PLANE_COLOR_CTL
 * instead), pixel format, tiling, rotation and color keying mode.
 */
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	unsigned int rotation = plane_state->base.rotation;
	const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
	u32 plane_ctl;

	plane_ctl = PLANE_CTL_ENABLE;

	if (!IS_GEMINILAKE(dev_priv)) {
		plane_ctl |=
			PLANE_CTL_PIPE_GAMMA_ENABLE |
			PLANE_CTL_PIPE_CSC_ENABLE |
			PLANE_CTL_PLANE_GAMMA_DISABLE;
	}

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	/* Destination keying takes precedence over source keying. */
	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;

	return plane_ctl;
}

static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc =
to_intel_crtc(crtc_state->base.crtc); 3332 struct drm_framebuffer *fb = plane_state->base.fb; 3333 enum plane_id plane_id = to_intel_plane(plane)->id; 3334 enum pipe pipe = to_intel_plane(plane)->pipe; 3335 u32 plane_ctl = plane_state->ctl; 3336 unsigned int rotation = plane_state->base.rotation; 3337 u32 stride = skl_plane_stride(fb, 0, rotation); 3338 u32 surf_addr = plane_state->main.offset; 3339 int scaler_id = plane_state->scaler_id; 3340 int src_x = plane_state->main.x; 3341 int src_y = plane_state->main.y; 3342 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3343 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3344 int dst_x = plane_state->base.dst.x1; 3345 int dst_y = plane_state->base.dst.y1; 3346 int dst_w = drm_rect_width(&plane_state->base.dst); 3347 int dst_h = drm_rect_height(&plane_state->base.dst); 3348 unsigned long irqflags; 3349 3350 /* Sizes are 0 based */ 3351 src_w--; 3352 src_h--; 3353 dst_w--; 3354 dst_h--; 3355 3356 intel_crtc->dspaddr_offset = surf_addr; 3357 3358 intel_crtc->adjusted_x = src_x; 3359 intel_crtc->adjusted_y = src_y; 3360 3361 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3362 3363 if (IS_GEMINILAKE(dev_priv)) { 3364 I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), 3365 PLANE_COLOR_PIPE_GAMMA_ENABLE | 3366 PLANE_COLOR_PIPE_CSC_ENABLE | 3367 PLANE_COLOR_PLANE_GAMMA_DISABLE); 3368 } 3369 3370 I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); 3371 I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x); 3372 I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); 3373 I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); 3374 3375 if (scaler_id >= 0) { 3376 uint32_t ps_ctrl = 0; 3377 3378 WARN_ON(!dst_w || !dst_h); 3379 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) | 3380 crtc_state->scaler_state.scalers[scaler_id].mode; 3381 I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); 3382 I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0); 3383 I915_WRITE_FW(SKL_PS_WIN_POS(pipe, 
scaler_id), (dst_x << 16) | dst_y); 3384 I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h); 3385 I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0); 3386 } else { 3387 I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x); 3388 } 3389 3390 I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 3391 intel_plane_ggtt_offset(plane_state) + surf_addr); 3392 3393 POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); 3394 3395 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3396} 3397 3398static void skylake_disable_primary_plane(struct drm_plane *primary, 3399 struct drm_crtc *crtc) 3400{ 3401 struct drm_device *dev = crtc->dev; 3402 struct drm_i915_private *dev_priv = to_i915(dev); 3403 enum plane_id plane_id = to_intel_plane(primary)->id; 3404 enum pipe pipe = to_intel_plane(primary)->pipe; 3405 unsigned long irqflags; 3406 3407 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3408 3409 I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); 3410 I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0); 3411 POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); 3412 3413 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3414} 3415 3416static void intel_complete_page_flips(struct drm_i915_private *dev_priv) 3417{ 3418 struct intel_crtc *crtc; 3419 3420 for_each_intel_crtc(&dev_priv->drm, crtc) 3421 intel_finish_page_flip_cs(dev_priv, crtc->pipe); 3422} 3423 3424static void intel_update_primary_planes(struct drm_device *dev) 3425{ 3426 struct drm_crtc *crtc; 3427 3428 for_each_crtc(dev, crtc) { 3429 struct intel_plane *plane = to_intel_plane(crtc->primary); 3430 struct intel_plane_state *plane_state = 3431 to_intel_plane_state(plane->base.state); 3432 3433 if (plane_state->base.visible) { 3434 trace_intel_update_plane(&plane->base, 3435 to_intel_crtc(crtc)); 3436 3437 plane->update_plane(&plane->base, 3438 to_intel_crtc_state(crtc->state), 3439 plane_state); 3440 } 3441 } 3442} 3443 3444static int 3445__intel_display_resume(struct drm_device *dev, 3446 struct drm_atomic_state 
*state, 3447 struct drm_modeset_acquire_ctx *ctx) 3448{ 3449 struct drm_crtc_state *crtc_state; 3450 struct drm_crtc *crtc; 3451 int i, ret; 3452 3453 intel_modeset_setup_hw_state(dev, ctx); 3454 i915_redisable_vga(to_i915(dev)); 3455 3456 if (!state) 3457 return 0; 3458 3459 /* 3460 * We've duplicated the state, pointers to the old state are invalid. 3461 * 3462 * Don't attempt to use the old state until we commit the duplicated state. 3463 */ 3464 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 3465 /* 3466 * Force recalculation even if we restore 3467 * current state. With fast modeset this may not result 3468 * in a modeset when the state is compatible. 3469 */ 3470 crtc_state->mode_changed = true; 3471 } 3472 3473 /* ignore any reset values/BIOS leftovers in the WM registers */ 3474 if (!HAS_GMCH_DISPLAY(to_i915(dev))) 3475 to_intel_atomic_state(state)->skip_intermediate_wm = true; 3476 3477 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 3478 3479 WARN_ON(ret == -EDEADLK); 3480 return ret; 3481} 3482 3483static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 3484{ 3485 return intel_has_gpu_reset(dev_priv) && 3486 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv); 3487} 3488 3489void intel_prepare_reset(struct drm_i915_private *dev_priv) 3490{ 3491 struct drm_device *dev = &dev_priv->drm; 3492 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3493 struct drm_atomic_state *state; 3494 int ret; 3495 3496 /* 3497 * Need mode_config.mutex so that we don't 3498 * trample ongoing ->detect() and whatnot. 
3499 */ 3500 mutex_lock(&dev->mode_config.mutex); 3501 drm_modeset_acquire_init(ctx, 0); 3502 while (1) { 3503 ret = drm_modeset_lock_all_ctx(dev, ctx); 3504 if (ret != -EDEADLK) 3505 break; 3506 3507 drm_modeset_backoff(ctx); 3508 } 3509 3510 /* reset doesn't touch the display, but flips might get nuked anyway, */ 3511 if (!i915.force_reset_modeset_test && 3512 !gpu_reset_clobbers_display(dev_priv)) 3513 return; 3514 3515 /* 3516 * Disabling the crtcs gracefully seems nicer. Also the 3517 * g33 docs say we should at least disable all the planes. 3518 */ 3519 state = drm_atomic_helper_duplicate_state(dev, ctx); 3520 if (IS_ERR(state)) { 3521 ret = PTR_ERR(state); 3522 DRM_ERROR("Duplicating state failed with %i\n", ret); 3523 return; 3524 } 3525 3526 ret = drm_atomic_helper_disable_all(dev, ctx); 3527 if (ret) { 3528 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 3529 drm_atomic_state_put(state); 3530 return; 3531 } 3532 3533 dev_priv->modeset_restore_state = state; 3534 state->acquire_ctx = ctx; 3535} 3536 3537void intel_finish_reset(struct drm_i915_private *dev_priv) 3538{ 3539 struct drm_device *dev = &dev_priv->drm; 3540 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3541 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 3542 int ret; 3543 3544 /* 3545 * Flips in the rings will be nuked by the reset, 3546 * so complete all pending flips so that user space 3547 * will get its events and not get stuck. 3548 */ 3549 intel_complete_page_flips(dev_priv); 3550 3551 dev_priv->modeset_restore_state = NULL; 3552 3553 /* reset doesn't touch the display */ 3554 if (!gpu_reset_clobbers_display(dev_priv)) { 3555 if (!state) { 3556 /* 3557 * Flips in the rings have been nuked by the reset, 3558 * so update the base address of all primary 3559 * planes to the the last fb to make sure we're 3560 * showing the correct fb after a reset. 
3561 * 3562 * FIXME: Atomic will make this obsolete since we won't schedule 3563 * CS-based flips (which might get lost in gpu resets) any more. 3564 */ 3565 intel_update_primary_planes(dev); 3566 } else { 3567 ret = __intel_display_resume(dev, state, ctx); 3568 if (ret) 3569 DRM_ERROR("Restoring old state failed with %i\n", ret); 3570 } 3571 } else { 3572 /* 3573 * The display has been reset as well, 3574 * so need a full re-initialization. 3575 */ 3576 intel_runtime_pm_disable_interrupts(dev_priv); 3577 intel_runtime_pm_enable_interrupts(dev_priv); 3578 3579 intel_pps_unlock_regs_wa(dev_priv); 3580 intel_modeset_init_hw(dev); 3581 3582 spin_lock_irq(&dev_priv->irq_lock); 3583 if (dev_priv->display.hpd_irq_setup) 3584 dev_priv->display.hpd_irq_setup(dev_priv); 3585 spin_unlock_irq(&dev_priv->irq_lock); 3586 3587 ret = __intel_display_resume(dev, state, ctx); 3588 if (ret) 3589 DRM_ERROR("Restoring old state failed with %i\n", ret); 3590 3591 intel_hpd_init(dev_priv); 3592 } 3593 3594 if (state) 3595 drm_atomic_state_put(state); 3596 drm_modeset_drop_locks(ctx); 3597 drm_modeset_acquire_fini(ctx); 3598 mutex_unlock(&dev->mode_config.mutex); 3599} 3600 3601static bool abort_flip_on_reset(struct intel_crtc *crtc) 3602{ 3603 struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error; 3604 3605 if (i915_reset_backoff(error)) 3606 return true; 3607 3608 if (crtc->reset_count != i915_reset_count(error)) 3609 return true; 3610 3611 return false; 3612} 3613 3614static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3615{ 3616 struct drm_device *dev = crtc->dev; 3617 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3618 bool pending; 3619 3620 if (abort_flip_on_reset(intel_crtc)) 3621 return false; 3622 3623 spin_lock_irq(&dev->event_lock); 3624 pending = to_intel_crtc(crtc)->flip_work != NULL; 3625 spin_unlock_irq(&dev->event_lock); 3626 3627 return pending; 3628} 3629 3630static void intel_update_pipe_config(struct intel_crtc *crtc, 3631 struct 
intel_crtc_state *old_crtc_state) 3632{ 3633 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3634 struct intel_crtc_state *pipe_config = 3635 to_intel_crtc_state(crtc->base.state); 3636 3637 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ 3638 crtc->base.mode = crtc->base.state->mode; 3639 3640 /* 3641 * Update pipe size and adjust fitter if needed: the reason for this is 3642 * that in compute_mode_changes we check the native mode (not the pfit 3643 * mode) to see if we can flip rather than do a full mode set. In the 3644 * fastboot case, we'll flip, but if we don't update the pipesrc and 3645 * pfit state, we'll end up with a big fb scanned out into the wrong 3646 * sized surface. 3647 */ 3648 3649 I915_WRITE(PIPESRC(crtc->pipe), 3650 ((pipe_config->pipe_src_w - 1) << 16) | 3651 (pipe_config->pipe_src_h - 1)); 3652 3653 /* on skylake this is done by detaching scalers */ 3654 if (INTEL_GEN(dev_priv) >= 9) { 3655 skl_detach_scalers(crtc); 3656 3657 if (pipe_config->pch_pfit.enabled) 3658 skylake_pfit_enable(crtc); 3659 } else if (HAS_PCH_SPLIT(dev_priv)) { 3660 if (pipe_config->pch_pfit.enabled) 3661 ironlake_pfit_enable(crtc); 3662 else if (old_crtc_state->pch_pfit.enabled) 3663 ironlake_pfit_disable(crtc, true); 3664 } 3665} 3666 3667static void intel_fdi_normal_train(struct intel_crtc *crtc) 3668{ 3669 struct drm_device *dev = crtc->base.dev; 3670 struct drm_i915_private *dev_priv = to_i915(dev); 3671 int pipe = crtc->pipe; 3672 i915_reg_t reg; 3673 u32 temp; 3674 3675 /* enable normal train */ 3676 reg = FDI_TX_CTL(pipe); 3677 temp = I915_READ(reg); 3678 if (IS_IVYBRIDGE(dev_priv)) { 3679 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 3680 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 3681 } else { 3682 temp &= ~FDI_LINK_TRAIN_NONE; 3683 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 3684 } 3685 I915_WRITE(reg, temp); 3686 3687 reg = FDI_RX_CTL(pipe); 3688 temp = I915_READ(reg); 3689 if 
(HAS_PCH_CPT(dev_priv)) { 3690 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3691 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 3692 } else { 3693 temp &= ~FDI_LINK_TRAIN_NONE; 3694 temp |= FDI_LINK_TRAIN_NONE; 3695 } 3696 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 3697 3698 /* wait one idle pattern time */ 3699 POSTING_READ(reg); 3700 udelay(1000); 3701 3702 /* IVB wants error correction enabled */ 3703 if (IS_IVYBRIDGE(dev_priv)) 3704 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 3705 FDI_FE_ERRC_ENABLE); 3706} 3707 3708/* The FDI link training functions for ILK/Ibexpeak. */ 3709static void ironlake_fdi_link_train(struct intel_crtc *crtc, 3710 const struct intel_crtc_state *crtc_state) 3711{ 3712 struct drm_device *dev = crtc->base.dev; 3713 struct drm_i915_private *dev_priv = to_i915(dev); 3714 int pipe = crtc->pipe; 3715 i915_reg_t reg; 3716 u32 temp, tries; 3717 3718 /* FDI needs bits from pipe first */ 3719 assert_pipe_enabled(dev_priv, pipe); 3720 3721 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3722 for train result */ 3723 reg = FDI_RX_IMR(pipe); 3724 temp = I915_READ(reg); 3725 temp &= ~FDI_RX_SYMBOL_LOCK; 3726 temp &= ~FDI_RX_BIT_LOCK; 3727 I915_WRITE(reg, temp); 3728 I915_READ(reg); 3729 udelay(150); 3730 3731 /* enable CPU FDI TX and PCH FDI RX */ 3732 reg = FDI_TX_CTL(pipe); 3733 temp = I915_READ(reg); 3734 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3735 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 3736 temp &= ~FDI_LINK_TRAIN_NONE; 3737 temp |= FDI_LINK_TRAIN_PATTERN_1; 3738 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3739 3740 reg = FDI_RX_CTL(pipe); 3741 temp = I915_READ(reg); 3742 temp &= ~FDI_LINK_TRAIN_NONE; 3743 temp |= FDI_LINK_TRAIN_PATTERN_1; 3744 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3745 3746 POSTING_READ(reg); 3747 udelay(150); 3748 3749 /* Ironlake workaround, enable clock pointer after FDI enable*/ 3750 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 3751 I915_WRITE(FDI_RX_CHICKEN(pipe), 
FDI_RX_PHASE_SYNC_POINTER_OVR | 3752 FDI_RX_PHASE_SYNC_POINTER_EN); 3753 3754 reg = FDI_RX_IIR(pipe); 3755 for (tries = 0; tries < 5; tries++) { 3756 temp = I915_READ(reg); 3757 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3758 3759 if ((temp & FDI_RX_BIT_LOCK)) { 3760 DRM_DEBUG_KMS("FDI train 1 done.\n"); 3761 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3762 break; 3763 } 3764 } 3765 if (tries == 5) 3766 DRM_ERROR("FDI train 1 fail!\n"); 3767 3768 /* Train 2 */ 3769 reg = FDI_TX_CTL(pipe); 3770 temp = I915_READ(reg); 3771 temp &= ~FDI_LINK_TRAIN_NONE; 3772 temp |= FDI_LINK_TRAIN_PATTERN_2; 3773 I915_WRITE(reg, temp); 3774 3775 reg = FDI_RX_CTL(pipe); 3776 temp = I915_READ(reg); 3777 temp &= ~FDI_LINK_TRAIN_NONE; 3778 temp |= FDI_LINK_TRAIN_PATTERN_2; 3779 I915_WRITE(reg, temp); 3780 3781 POSTING_READ(reg); 3782 udelay(150); 3783 3784 reg = FDI_RX_IIR(pipe); 3785 for (tries = 0; tries < 5; tries++) { 3786 temp = I915_READ(reg); 3787 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3788 3789 if (temp & FDI_RX_SYMBOL_LOCK) { 3790 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3791 DRM_DEBUG_KMS("FDI train 2 done.\n"); 3792 break; 3793 } 3794 } 3795 if (tries == 5) 3796 DRM_ERROR("FDI train 2 fail!\n"); 3797 3798 DRM_DEBUG_KMS("FDI train done\n"); 3799 3800} 3801 3802static const int snb_b_fdi_train_param[] = { 3803 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 3804 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 3805 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 3806 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 3807}; 3808 3809/* The FDI link training functions for SNB/Cougarpoint. 
*/ 3810static void gen6_fdi_link_train(struct intel_crtc *crtc, 3811 const struct intel_crtc_state *crtc_state) 3812{ 3813 struct drm_device *dev = crtc->base.dev; 3814 struct drm_i915_private *dev_priv = to_i915(dev); 3815 int pipe = crtc->pipe; 3816 i915_reg_t reg; 3817 u32 temp, i, retry; 3818 3819 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3820 for train result */ 3821 reg = FDI_RX_IMR(pipe); 3822 temp = I915_READ(reg); 3823 temp &= ~FDI_RX_SYMBOL_LOCK; 3824 temp &= ~FDI_RX_BIT_LOCK; 3825 I915_WRITE(reg, temp); 3826 3827 POSTING_READ(reg); 3828 udelay(150); 3829 3830 /* enable CPU FDI TX and PCH FDI RX */ 3831 reg = FDI_TX_CTL(pipe); 3832 temp = I915_READ(reg); 3833 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3834 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 3835 temp &= ~FDI_LINK_TRAIN_NONE; 3836 temp |= FDI_LINK_TRAIN_PATTERN_1; 3837 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3838 /* SNB-B */ 3839 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 3840 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3841 3842 I915_WRITE(FDI_RX_MISC(pipe), 3843 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 3844 3845 reg = FDI_RX_CTL(pipe); 3846 temp = I915_READ(reg); 3847 if (HAS_PCH_CPT(dev_priv)) { 3848 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3849 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3850 } else { 3851 temp &= ~FDI_LINK_TRAIN_NONE; 3852 temp |= FDI_LINK_TRAIN_PATTERN_1; 3853 } 3854 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3855 3856 POSTING_READ(reg); 3857 udelay(150); 3858 3859 for (i = 0; i < 4; i++) { 3860 reg = FDI_TX_CTL(pipe); 3861 temp = I915_READ(reg); 3862 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3863 temp |= snb_b_fdi_train_param[i]; 3864 I915_WRITE(reg, temp); 3865 3866 POSTING_READ(reg); 3867 udelay(500); 3868 3869 for (retry = 0; retry < 5; retry++) { 3870 reg = FDI_RX_IIR(pipe); 3871 temp = I915_READ(reg); 3872 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3873 if (temp & FDI_RX_BIT_LOCK) { 3874 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3875 DRM_DEBUG_KMS("FDI train 1 done.\n"); 
3876 break; 3877 } 3878 udelay(50); 3879 } 3880 if (retry < 5) 3881 break; 3882 } 3883 if (i == 4) 3884 DRM_ERROR("FDI train 1 fail!\n"); 3885 3886 /* Train 2 */ 3887 reg = FDI_TX_CTL(pipe); 3888 temp = I915_READ(reg); 3889 temp &= ~FDI_LINK_TRAIN_NONE; 3890 temp |= FDI_LINK_TRAIN_PATTERN_2; 3891 if (IS_GEN6(dev_priv)) { 3892 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3893 /* SNB-B */ 3894 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 3895 } 3896 I915_WRITE(reg, temp); 3897 3898 reg = FDI_RX_CTL(pipe); 3899 temp = I915_READ(reg); 3900 if (HAS_PCH_CPT(dev_priv)) { 3901 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3902 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 3903 } else { 3904 temp &= ~FDI_LINK_TRAIN_NONE; 3905 temp |= FDI_LINK_TRAIN_PATTERN_2; 3906 } 3907 I915_WRITE(reg, temp); 3908 3909 POSTING_READ(reg); 3910 udelay(150); 3911 3912 for (i = 0; i < 4; i++) { 3913 reg = FDI_TX_CTL(pipe); 3914 temp = I915_READ(reg); 3915 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3916 temp |= snb_b_fdi_train_param[i]; 3917 I915_WRITE(reg, temp); 3918 3919 POSTING_READ(reg); 3920 udelay(500); 3921 3922 for (retry = 0; retry < 5; retry++) { 3923 reg = FDI_RX_IIR(pipe); 3924 temp = I915_READ(reg); 3925 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3926 if (temp & FDI_RX_SYMBOL_LOCK) { 3927 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3928 DRM_DEBUG_KMS("FDI train 2 done.\n"); 3929 break; 3930 } 3931 udelay(50); 3932 } 3933 if (retry < 5) 3934 break; 3935 } 3936 if (i == 4) 3937 DRM_ERROR("FDI train 2 fail!\n"); 3938 3939 DRM_DEBUG_KMS("FDI train done.\n"); 3940} 3941 3942/* Manual link training for Ivy Bridge A0 parts */ 3943static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, 3944 const struct intel_crtc_state *crtc_state) 3945{ 3946 struct drm_device *dev = crtc->base.dev; 3947 struct drm_i915_private *dev_priv = to_i915(dev); 3948 int pipe = crtc->pipe; 3949 i915_reg_t reg; 3950 u32 temp, i, j; 3951 3952 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3953 for train result */ 3954 
reg = FDI_RX_IMR(pipe); 3955 temp = I915_READ(reg); 3956 temp &= ~FDI_RX_SYMBOL_LOCK; 3957 temp &= ~FDI_RX_BIT_LOCK; 3958 I915_WRITE(reg, temp); 3959 3960 POSTING_READ(reg); 3961 udelay(150); 3962 3963 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 3964 I915_READ(FDI_RX_IIR(pipe))); 3965 3966 /* Try each vswing and preemphasis setting twice before moving on */ 3967 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { 3968 /* disable first in case we need to retry */ 3969 reg = FDI_TX_CTL(pipe); 3970 temp = I915_READ(reg); 3971 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 3972 temp &= ~FDI_TX_ENABLE; 3973 I915_WRITE(reg, temp); 3974 3975 reg = FDI_RX_CTL(pipe); 3976 temp = I915_READ(reg); 3977 temp &= ~FDI_LINK_TRAIN_AUTO; 3978 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3979 temp &= ~FDI_RX_ENABLE; 3980 I915_WRITE(reg, temp); 3981 3982 /* enable CPU FDI TX and PCH FDI RX */ 3983 reg = FDI_TX_CTL(pipe); 3984 temp = I915_READ(reg); 3985 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3986 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 3987 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 3988 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3989 temp |= snb_b_fdi_train_param[j/2]; 3990 temp |= FDI_COMPOSITE_SYNC; 3991 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3992 3993 I915_WRITE(FDI_RX_MISC(pipe), 3994 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 3995 3996 reg = FDI_RX_CTL(pipe); 3997 temp = I915_READ(reg); 3998 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3999 temp |= FDI_COMPOSITE_SYNC; 4000 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4001 4002 POSTING_READ(reg); 4003 udelay(1); /* should be 0.5us */ 4004 4005 for (i = 0; i < 4; i++) { 4006 reg = FDI_RX_IIR(pipe); 4007 temp = I915_READ(reg); 4008 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4009 4010 if (temp & FDI_RX_BIT_LOCK || 4011 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 4012 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4013 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", 4014 i); 4015 break; 4016 } 4017 udelay(1); /* should be 0.5us */ 4018 } 
4019 if (i == 4) { 4020 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); 4021 continue; 4022 } 4023 4024 /* Train 2 */ 4025 reg = FDI_TX_CTL(pipe); 4026 temp = I915_READ(reg); 4027 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4028 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 4029 I915_WRITE(reg, temp); 4030 4031 reg = FDI_RX_CTL(pipe); 4032 temp = I915_READ(reg); 4033 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4034 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 4035 I915_WRITE(reg, temp); 4036 4037 POSTING_READ(reg); 4038 udelay(2); /* should be 1.5us */ 4039 4040 for (i = 0; i < 4; i++) { 4041 reg = FDI_RX_IIR(pipe); 4042 temp = I915_READ(reg); 4043 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4044 4045 if (temp & FDI_RX_SYMBOL_LOCK || 4046 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { 4047 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4048 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", 4049 i); 4050 goto train_done; 4051 } 4052 udelay(2); /* should be 1.5us */ 4053 } 4054 if (i == 4) 4055 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); 4056 } 4057 4058train_done: 4059 DRM_DEBUG_KMS("FDI train done.\n"); 4060} 4061 4062static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) 4063{ 4064 struct drm_device *dev = intel_crtc->base.dev; 4065 struct drm_i915_private *dev_priv = to_i915(dev); 4066 int pipe = intel_crtc->pipe; 4067 i915_reg_t reg; 4068 u32 temp; 4069 4070 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 4071 reg = FDI_RX_CTL(pipe); 4072 temp = I915_READ(reg); 4073 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 4074 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 4075 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4076 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 4077 4078 POSTING_READ(reg); 4079 udelay(200); 4080 4081 /* Switch from Rawclk to PCDclk */ 4082 temp = I915_READ(reg); 4083 I915_WRITE(reg, temp | FDI_PCDCLK); 4084 4085 POSTING_READ(reg); 4086 udelay(200); 4087 4088 /* Enable CPU FDI TX PLL, always on for Ironlake */ 4089 
reg = FDI_TX_CTL(pipe); 4090 temp = I915_READ(reg); 4091 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 4092 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 4093 4094 POSTING_READ(reg); 4095 udelay(100); 4096 } 4097} 4098 4099static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 4100{ 4101 struct drm_device *dev = intel_crtc->base.dev; 4102 struct drm_i915_private *dev_priv = to_i915(dev); 4103 int pipe = intel_crtc->pipe; 4104 i915_reg_t reg; 4105 u32 temp; 4106 4107 /* Switch from PCDclk to Rawclk */ 4108 reg = FDI_RX_CTL(pipe); 4109 temp = I915_READ(reg); 4110 I915_WRITE(reg, temp & ~FDI_PCDCLK); 4111 4112 /* Disable CPU FDI TX PLL */ 4113 reg = FDI_TX_CTL(pipe); 4114 temp = I915_READ(reg); 4115 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 4116 4117 POSTING_READ(reg); 4118 udelay(100); 4119 4120 reg = FDI_RX_CTL(pipe); 4121 temp = I915_READ(reg); 4122 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 4123 4124 /* Wait for the clocks to turn off. */ 4125 POSTING_READ(reg); 4126 udelay(100); 4127} 4128 4129static void ironlake_fdi_disable(struct drm_crtc *crtc) 4130{ 4131 struct drm_device *dev = crtc->dev; 4132 struct drm_i915_private *dev_priv = to_i915(dev); 4133 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4134 int pipe = intel_crtc->pipe; 4135 i915_reg_t reg; 4136 u32 temp; 4137 4138 /* disable CPU FDI tx and PCH FDI rx */ 4139 reg = FDI_TX_CTL(pipe); 4140 temp = I915_READ(reg); 4141 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 4142 POSTING_READ(reg); 4143 4144 reg = FDI_RX_CTL(pipe); 4145 temp = I915_READ(reg); 4146 temp &= ~(0x7 << 16); 4147 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4148 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 4149 4150 POSTING_READ(reg); 4151 udelay(100); 4152 4153 /* Ironlake workaround, disable clock pointer after downing FDI */ 4154 if (HAS_PCH_IBX(dev_priv)) 4155 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 4156 4157 /* still set train pattern 1 */ 4158 reg = FDI_TX_CTL(pipe); 4159 temp = 
I915_READ(reg); 4160 temp &= ~FDI_LINK_TRAIN_NONE; 4161 temp |= FDI_LINK_TRAIN_PATTERN_1; 4162 I915_WRITE(reg, temp); 4163 4164 reg = FDI_RX_CTL(pipe); 4165 temp = I915_READ(reg); 4166 if (HAS_PCH_CPT(dev_priv)) { 4167 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4168 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4169 } else { 4170 temp &= ~FDI_LINK_TRAIN_NONE; 4171 temp |= FDI_LINK_TRAIN_PATTERN_1; 4172 } 4173 /* BPC in FDI rx is consistent with that in PIPECONF */ 4174 temp &= ~(0x07 << 16); 4175 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4176 I915_WRITE(reg, temp); 4177 4178 POSTING_READ(reg); 4179 udelay(100); 4180} 4181 4182bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 4183{ 4184 struct intel_crtc *crtc; 4185 4186 /* Note that we don't need to be called with mode_config.lock here 4187 * as our list of CRTC objects is static for the lifetime of the 4188 * device and so cannot disappear as we iterate. Similarly, we can 4189 * happily treat the predicates as racy, atomic checks as userspace 4190 * cannot claim and pin a new fb without at least acquring the 4191 * struct_mutex and so serialising with us. 
4192 */ 4193 for_each_intel_crtc(&dev_priv->drm, crtc) { 4194 if (atomic_read(&crtc->unpin_work_count) == 0) 4195 continue; 4196 4197 if (crtc->flip_work) 4198 intel_wait_for_vblank(dev_priv, crtc->pipe); 4199 4200 return true; 4201 } 4202 4203 return false; 4204} 4205 4206static void page_flip_completed(struct intel_crtc *intel_crtc) 4207{ 4208 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 4209 struct intel_flip_work *work = intel_crtc->flip_work; 4210 4211 intel_crtc->flip_work = NULL; 4212 4213 if (work->event) 4214 drm_crtc_send_vblank_event(&intel_crtc->base, work->event); 4215 4216 drm_crtc_vblank_put(&intel_crtc->base); 4217 4218 wake_up_all(&dev_priv->pending_flip_queue); 4219 trace_i915_flip_complete(intel_crtc->plane, 4220 work->pending_flip_obj); 4221 4222 queue_work(dev_priv->wq, &work->unpin_work); 4223} 4224 4225static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 4226{ 4227 struct drm_device *dev = crtc->dev; 4228 struct drm_i915_private *dev_priv = to_i915(dev); 4229 long ret; 4230 4231 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 4232 4233 ret = wait_event_interruptible_timeout( 4234 dev_priv->pending_flip_queue, 4235 !intel_crtc_has_pending_flip(crtc), 4236 60*HZ); 4237 4238 if (ret < 0) 4239 return ret; 4240 4241 if (ret == 0) { 4242 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4243 struct intel_flip_work *work; 4244 4245 spin_lock_irq(&dev->event_lock); 4246 work = intel_crtc->flip_work; 4247 if (work && !is_mmio_work(work)) { 4248 WARN_ONCE(1, "Removing stuck page flip\n"); 4249 page_flip_completed(intel_crtc); 4250 } 4251 spin_unlock_irq(&dev->event_lock); 4252 } 4253 4254 return 0; 4255} 4256 4257void lpt_disable_iclkip(struct drm_i915_private *dev_priv) 4258{ 4259 u32 temp; 4260 4261 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); 4262 4263 mutex_lock(&dev_priv->sb_lock); 4264 4265 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 4266 temp |= SBI_SSCCTL_DISABLE; 4267 
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Modulator must be disabled while it is reprogrammed. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

/*
 * Inverse of lpt_program_iclkip(): read back the currently programmed
 * iCLKIP frequency (in kHz, same unit as crtc_clock above), or 0 if the
 * pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Undo the divisor decomposition done when programming. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}

/* Copy the CPU pipe timings into the PCH transcoder so the two agree. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

/*
 * Set or clear FDI B/C lane bifurcation in SOUTH_CHICKEN1.  Must only be
 * changed while both FDI RX B and C are disabled (WARNed on below).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;		/* already in the requested state */

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}

/*
 * Choose FDI B/C bifurcation for an IVB crtc: pipe B disables it only
 * when it needs more than 2 FDI lanes for itself; pipe C always enables
 * it; pipe A does not touch it.
 */
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev, false);
		else
			cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	default:
		BUG();
	}
}

/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
		if (encoder->type == INTEL_OUTPUT_DP ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	/*
	 * NOTE(review): -1 is returned through an enum port.  The only
	 * visible caller BUG()s on any unhandled value in its switch, so
	 * this effectively means "no DP encoder found" — a named sentinel
	 * would be cleaner once one exists in enum port.
	 */
	return -1;
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev_priv))
		ivybridge_update_fdi_bc_bifurcation(crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc, crtc_state);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev_priv)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (crtc_state->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence.
	 */
	intel_enable_shared_dpll(crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev_priv) &&
	    intel_crtc_has_dp_encoder(crtc_state)) {
		const struct drm_display_mode *adjusted_mode =
			&crtc_state->base.adjusted_mode;
		/* BPC field of PIPECONF is reused for TRANS_DP_CTL. */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}

/*
 * LPT variant of the above: only iCLKIP programming, timing copy and
 * transcoder enable are needed (always transcoder/pipe A on LPT).
 */
static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing.
	 */
	ironlake_pch_transcoder_set_timings(crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}

/*
 * Post-modeset sanity check: the pipe's scanline counter (PIPEDSL) should
 * be advancing.  Sample it, wait, and error out if it is still stuck after
 * two 5ms checks.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}

/*
 * Stage a scaler allocation (or release) for one scaler user of the crtc.
 * Only crtc_state->scaler_state bookkeeping is updated here; the actual
 * scaler registers are programmed later, during plane/panel-fitter setup.
 *
 * Returns 0 on success, -EINVAL if the requested src/dst sizes are outside
 * the hardware scaler limits.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	need_scaling = src_w != dst_w || src_h != dst_h;

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}

/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4666 * 4667 * @state: crtc's scaler state 4668 * 4669 * Return 4670 * 0 - scaler_usage updated successfully 4671 * error - requested scaling cannot be supported or other error condition 4672 */ 4673int skl_update_scaler_crtc(struct intel_crtc_state *state) 4674{ 4675 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4676 4677 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4678 &state->scaler_state.scaler_id, 4679 state->pipe_src_w, state->pipe_src_h, 4680 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); 4681} 4682 4683/** 4684 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 4685 * 4686 * @state: crtc's scaler state 4687 * @plane_state: atomic plane state to update 4688 * 4689 * Return 4690 * 0 - scaler_usage updated successfully 4691 * error - requested scaling cannot be supported or other error condition 4692 */ 4693static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 4694 struct intel_plane_state *plane_state) 4695{ 4696 4697 struct intel_plane *intel_plane = 4698 to_intel_plane(plane_state->base.plane); 4699 struct drm_framebuffer *fb = plane_state->base.fb; 4700 int ret; 4701 4702 bool force_detach = !fb || !plane_state->base.visible; 4703 4704 ret = skl_update_scaler(crtc_state, force_detach, 4705 drm_plane_index(&intel_plane->base), 4706 &plane_state->scaler_id, 4707 drm_rect_width(&plane_state->base.src) >> 16, 4708 drm_rect_height(&plane_state->base.src) >> 16, 4709 drm_rect_width(&plane_state->base.dst), 4710 drm_rect_height(&plane_state->base.dst)); 4711 4712 if (ret || plane_state->scaler_id < 0) 4713 return ret; 4714 4715 /* check colorkey */ 4716 if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) { 4717 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", 4718 intel_plane->base.base.id, 4719 intel_plane->base.name); 4720 return -EINVAL; 4721 } 4722 4723 /* Check src format */ 4724 switch (fb->format->format) { 4725 case 
DRM_FORMAT_RGB565: 4726 case DRM_FORMAT_XBGR8888: 4727 case DRM_FORMAT_XRGB8888: 4728 case DRM_FORMAT_ABGR8888: 4729 case DRM_FORMAT_ARGB8888: 4730 case DRM_FORMAT_XRGB2101010: 4731 case DRM_FORMAT_XBGR2101010: 4732 case DRM_FORMAT_YUYV: 4733 case DRM_FORMAT_YVYU: 4734 case DRM_FORMAT_UYVY: 4735 case DRM_FORMAT_VYUY: 4736 break; 4737 default: 4738 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", 4739 intel_plane->base.base.id, intel_plane->base.name, 4740 fb->base.id, fb->format->format); 4741 return -EINVAL; 4742 } 4743 4744 return 0; 4745} 4746 4747static void skylake_scaler_disable(struct intel_crtc *crtc) 4748{ 4749 int i; 4750 4751 for (i = 0; i < crtc->num_scalers; i++) 4752 skl_detach_scaler(crtc, i); 4753} 4754 4755static void skylake_pfit_enable(struct intel_crtc *crtc) 4756{ 4757 struct drm_device *dev = crtc->base.dev; 4758 struct drm_i915_private *dev_priv = to_i915(dev); 4759 int pipe = crtc->pipe; 4760 struct intel_crtc_scaler_state *scaler_state = 4761 &crtc->config->scaler_state; 4762 4763 if (crtc->config->pch_pfit.enabled) { 4764 int id; 4765 4766 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) 4767 return; 4768 4769 id = scaler_state->scaler_id; 4770 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 4771 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 4772 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); 4773 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); 4774 } 4775} 4776 4777static void ironlake_pfit_enable(struct intel_crtc *crtc) 4778{ 4779 struct drm_device *dev = crtc->base.dev; 4780 struct drm_i915_private *dev_priv = to_i915(dev); 4781 int pipe = crtc->pipe; 4782 4783 if (crtc->config->pch_pfit.enabled) { 4784 /* Force use of hard-coded filter coefficients 4785 * as some pre-programmed values are broken, 4786 * e.g. x201. 
4787 */ 4788 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 4789 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 4790 PF_PIPE_SEL_IVB(pipe)); 4791 else 4792 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 4793 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos); 4794 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size); 4795 } 4796} 4797 4798void hsw_enable_ips(struct intel_crtc *crtc) 4799{ 4800 struct drm_device *dev = crtc->base.dev; 4801 struct drm_i915_private *dev_priv = to_i915(dev); 4802 4803 if (!crtc->config->ips_enabled) 4804 return; 4805 4806 /* 4807 * We can only enable IPS after we enable a plane and wait for a vblank 4808 * This function is called from post_plane_update, which is run after 4809 * a vblank wait. 4810 */ 4811 4812 assert_plane_enabled(dev_priv, crtc->plane); 4813 if (IS_BROADWELL(dev_priv)) { 4814 mutex_lock(&dev_priv->rps.hw_lock); 4815 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); 4816 mutex_unlock(&dev_priv->rps.hw_lock); 4817 /* Quoting Art Runyan: "its not safe to expect any particular 4818 * value in IPS_CTL bit 31 after enabling IPS through the 4819 * mailbox." Moreover, the mailbox may return a bogus state, 4820 * so we need to just enable it and continue on. 4821 */ 4822 } else { 4823 I915_WRITE(IPS_CTL, IPS_ENABLE); 4824 /* The bit only becomes 1 in the next vblank, so this wait here 4825 * is essentially intel_wait_for_vblank. If we don't have this 4826 * and don't wait for vblanks until the end of crtc_enable, then 4827 * the HW state readout code will complain that the expected 4828 * IPS_CTL value is not the one we read. 
*/ 4829 if (intel_wait_for_register(dev_priv, 4830 IPS_CTL, IPS_ENABLE, IPS_ENABLE, 4831 50)) 4832 DRM_ERROR("Timed out waiting for IPS enable\n"); 4833 } 4834} 4835 4836void hsw_disable_ips(struct intel_crtc *crtc) 4837{ 4838 struct drm_device *dev = crtc->base.dev; 4839 struct drm_i915_private *dev_priv = to_i915(dev); 4840 4841 if (!crtc->config->ips_enabled) 4842 return; 4843 4844 assert_plane_enabled(dev_priv, crtc->plane); 4845 if (IS_BROADWELL(dev_priv)) { 4846 mutex_lock(&dev_priv->rps.hw_lock); 4847 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 4848 mutex_unlock(&dev_priv->rps.hw_lock); 4849 /* wait for pcode to finish disabling IPS, which may take up to 42ms */ 4850 if (intel_wait_for_register(dev_priv, 4851 IPS_CTL, IPS_ENABLE, 0, 4852 42)) 4853 DRM_ERROR("Timed out waiting for IPS disable\n"); 4854 } else { 4855 I915_WRITE(IPS_CTL, 0); 4856 POSTING_READ(IPS_CTL); 4857 } 4858 4859 /* We need to wait for a vblank before we can disable the plane. */ 4860 intel_wait_for_vblank(dev_priv, crtc->pipe); 4861} 4862 4863static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 4864{ 4865 if (intel_crtc->overlay) { 4866 struct drm_device *dev = intel_crtc->base.dev; 4867 struct drm_i915_private *dev_priv = to_i915(dev); 4868 4869 mutex_lock(&dev->struct_mutex); 4870 dev_priv->mm.interruptible = false; 4871 (void) intel_overlay_switch_off(intel_crtc->overlay); 4872 dev_priv->mm.interruptible = true; 4873 mutex_unlock(&dev->struct_mutex); 4874 } 4875 4876 /* Let userspace switch the overlay on again. In most cases userspace 4877 * has to recompute where to put it anyway. 4878 */ 4879} 4880 4881/** 4882 * intel_post_enable_primary - Perform operations after enabling primary plane 4883 * @crtc: the CRTC whose primary plane was just enabled 4884 * 4885 * Performs potentially sleeping operations that must be done after the primary 4886 * plane is enabled, such as updating FBC and IPS. 
Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}

/* FIXME move all this to pre_plane_update() with proper state tracking */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}

/* FIXME get rid of this and use pre_plane_update */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	intel_pre_disable_primary(crtc);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
4968 */ 4969 if (HAS_GMCH_DISPLAY(dev_priv) && 4970 intel_set_memory_cxsr(dev_priv, false)) 4971 intel_wait_for_vblank(dev_priv, pipe); 4972} 4973 4974static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) 4975{ 4976 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 4977 struct drm_atomic_state *old_state = old_crtc_state->base.state; 4978 struct intel_crtc_state *pipe_config = 4979 to_intel_crtc_state(crtc->base.state); 4980 struct drm_plane *primary = crtc->base.primary; 4981 struct drm_plane_state *old_pri_state = 4982 drm_atomic_get_existing_plane_state(old_state, primary); 4983 4984 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); 4985 4986 if (pipe_config->update_wm_post && pipe_config->base.active) 4987 intel_update_watermarks(crtc); 4988 4989 if (old_pri_state) { 4990 struct intel_plane_state *primary_state = 4991 to_intel_plane_state(primary->state); 4992 struct intel_plane_state *old_primary_state = 4993 to_intel_plane_state(old_pri_state); 4994 4995 intel_fbc_post_update(crtc); 4996 4997 if (primary_state->base.visible && 4998 (needs_modeset(&pipe_config->base) || 4999 !old_primary_state->base.visible)) 5000 intel_post_enable_primary(&crtc->base); 5001 } 5002} 5003 5004static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, 5005 struct intel_crtc_state *pipe_config) 5006{ 5007 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 5008 struct drm_device *dev = crtc->base.dev; 5009 struct drm_i915_private *dev_priv = to_i915(dev); 5010 struct drm_atomic_state *old_state = old_crtc_state->base.state; 5011 struct drm_plane *primary = crtc->base.primary; 5012 struct drm_plane_state *old_pri_state = 5013 drm_atomic_get_existing_plane_state(old_state, primary); 5014 bool modeset = needs_modeset(&pipe_config->base); 5015 struct intel_atomic_state *old_intel_state = 5016 to_intel_atomic_state(old_state); 5017 5018 if (old_pri_state) { 5019 struct intel_plane_state 
*primary_state = 5020 to_intel_plane_state(primary->state); 5021 struct intel_plane_state *old_primary_state = 5022 to_intel_plane_state(old_pri_state); 5023 5024 intel_fbc_pre_update(crtc, pipe_config, primary_state); 5025 5026 if (old_primary_state->base.visible && 5027 (modeset || !primary_state->base.visible)) 5028 intel_pre_disable_primary(&crtc->base); 5029 } 5030 5031 /* 5032 * Vblank time updates from the shadow to live plane control register 5033 * are blocked if the memory self-refresh mode is active at that 5034 * moment. So to make sure the plane gets truly disabled, disable 5035 * first the self-refresh mode. The self-refresh enable bit in turn 5036 * will be checked/applied by the HW only at the next frame start 5037 * event which is after the vblank start event, so we need to have a 5038 * wait-for-vblank between disabling the plane and the pipe. 5039 */ 5040 if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active && 5041 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 5042 intel_wait_for_vblank(dev_priv, crtc->pipe); 5043 5044 /* 5045 * IVB workaround: must disable low power watermarks for at least 5046 * one frame before enabling scaling. LP watermarks can be re-enabled 5047 * when scaling is disabled. 5048 * 5049 * WaCxSRDisabledForSpriteScaling:ivb 5050 */ 5051 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev)) 5052 intel_wait_for_vblank(dev_priv, crtc->pipe); 5053 5054 /* 5055 * If we're doing a modeset, we're done. No need to do any pre-vblank 5056 * watermark programming here. 5057 */ 5058 if (needs_modeset(&pipe_config->base)) 5059 return; 5060 5061 /* 5062 * For platforms that support atomic watermarks, program the 5063 * 'intermediate' watermarks immediately. 
On pre-gen9 platforms, these 5064 * will be the intermediate values that are safe for both pre- and 5065 * post- vblank; when vblank happens, the 'active' values will be set 5066 * to the final 'target' values and we'll do this again to get the 5067 * optimal watermarks. For gen9+ platforms, the values we program here 5068 * will be the final target values which will get automatically latched 5069 * at vblank time; no further programming will be necessary. 5070 * 5071 * If a platform hasn't been transitioned to atomic watermarks yet, 5072 * we'll continue to update watermarks the old way, if flags tell 5073 * us to. 5074 */ 5075 if (dev_priv->display.initial_watermarks != NULL) 5076 dev_priv->display.initial_watermarks(old_intel_state, 5077 pipe_config); 5078 else if (pipe_config->update_wm_pre) 5079 intel_update_watermarks(crtc); 5080} 5081 5082static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) 5083{ 5084 struct drm_device *dev = crtc->dev; 5085 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5086 struct drm_plane *p; 5087 int pipe = intel_crtc->pipe; 5088 5089 intel_crtc_dpms_overlay_disable(intel_crtc); 5090 5091 drm_for_each_plane_mask(p, dev, plane_mask) 5092 to_intel_plane(p)->disable_plane(p, crtc); 5093 5094 /* 5095 * FIXME: Once we grow proper nuclear flip support out of this we need 5096 * to compute the mask of flip planes precisely. For the time being 5097 * consider this a flip to a NULL plane. 
5098 */ 5099 intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe)); 5100} 5101 5102static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, 5103 struct intel_crtc_state *crtc_state, 5104 struct drm_atomic_state *old_state) 5105{ 5106 struct drm_connector_state *conn_state; 5107 struct drm_connector *conn; 5108 int i; 5109 5110 for_each_new_connector_in_state(old_state, conn, conn_state, i) { 5111 struct intel_encoder *encoder = 5112 to_intel_encoder(conn_state->best_encoder); 5113 5114 if (conn_state->crtc != crtc) 5115 continue; 5116 5117 if (encoder->pre_pll_enable) 5118 encoder->pre_pll_enable(encoder, crtc_state, conn_state); 5119 } 5120} 5121 5122static void intel_encoders_pre_enable(struct drm_crtc *crtc, 5123 struct intel_crtc_state *crtc_state, 5124 struct drm_atomic_state *old_state) 5125{ 5126 struct drm_connector_state *conn_state; 5127 struct drm_connector *conn; 5128 int i; 5129 5130 for_each_new_connector_in_state(old_state, conn, conn_state, i) { 5131 struct intel_encoder *encoder = 5132 to_intel_encoder(conn_state->best_encoder); 5133 5134 if (conn_state->crtc != crtc) 5135 continue; 5136 5137 if (encoder->pre_enable) 5138 encoder->pre_enable(encoder, crtc_state, conn_state); 5139 } 5140} 5141 5142static void intel_encoders_enable(struct drm_crtc *crtc, 5143 struct intel_crtc_state *crtc_state, 5144 struct drm_atomic_state *old_state) 5145{ 5146 struct drm_connector_state *conn_state; 5147 struct drm_connector *conn; 5148 int i; 5149 5150 for_each_new_connector_in_state(old_state, conn, conn_state, i) { 5151 struct intel_encoder *encoder = 5152 to_intel_encoder(conn_state->best_encoder); 5153 5154 if (conn_state->crtc != crtc) 5155 continue; 5156 5157 encoder->enable(encoder, crtc_state, conn_state); 5158 intel_opregion_notify_encoder(encoder, true); 5159 } 5160} 5161 5162static void intel_encoders_disable(struct drm_crtc *crtc, 5163 struct intel_crtc_state *old_crtc_state, 5164 struct drm_atomic_state *old_state) 5165{ 5166 
struct drm_connector_state *old_conn_state; 5167 struct drm_connector *conn; 5168 int i; 5169 5170 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { 5171 struct intel_encoder *encoder = 5172 to_intel_encoder(old_conn_state->best_encoder); 5173 5174 if (old_conn_state->crtc != crtc) 5175 continue; 5176 5177 intel_opregion_notify_encoder(encoder, false); 5178 encoder->disable(encoder, old_crtc_state, old_conn_state); 5179 } 5180} 5181 5182static void intel_encoders_post_disable(struct drm_crtc *crtc, 5183 struct intel_crtc_state *old_crtc_state, 5184 struct drm_atomic_state *old_state) 5185{ 5186 struct drm_connector_state *old_conn_state; 5187 struct drm_connector *conn; 5188 int i; 5189 5190 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { 5191 struct intel_encoder *encoder = 5192 to_intel_encoder(old_conn_state->best_encoder); 5193 5194 if (old_conn_state->crtc != crtc) 5195 continue; 5196 5197 if (encoder->post_disable) 5198 encoder->post_disable(encoder, old_crtc_state, old_conn_state); 5199 } 5200} 5201 5202static void intel_encoders_post_pll_disable(struct drm_crtc *crtc, 5203 struct intel_crtc_state *old_crtc_state, 5204 struct drm_atomic_state *old_state) 5205{ 5206 struct drm_connector_state *old_conn_state; 5207 struct drm_connector *conn; 5208 int i; 5209 5210 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { 5211 struct intel_encoder *encoder = 5212 to_intel_encoder(old_conn_state->best_encoder); 5213 5214 if (old_conn_state->crtc != crtc) 5215 continue; 5216 5217 if (encoder->post_pll_disable) 5218 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state); 5219 } 5220} 5221 5222static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, 5223 struct drm_atomic_state *old_state) 5224{ 5225 struct drm_crtc *crtc = pipe_config->base.crtc; 5226 struct drm_device *dev = crtc->dev; 5227 struct drm_i915_private *dev_priv = to_i915(dev); 5228 struct intel_crtc *intel_crtc = 
		to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		/* Program the FDI M/N values for the CPU transcoder. */
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev_priv, pipe);
	/* Re-arm the underrun reporting suppressed at the top of this function. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* IPS only exists on ULT machines and is tied to pipe A.
 */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
}

/*
 * Modeset enable sequence for HSW+ (DDI) platforms. DSI transcoders skip
 * the pipe-level programming steps (guarded by transcoder_is_dsi()); a PCH
 * encoder here means LPT FDI (VGA).
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
				struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	if (WARN_ON(intel_crtc->active))
		return;

	/* On LPT the PCH transcoder is always transcoder A. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		/* PIPE_MULT holds pixel_multiplier - 1 (0 based). */
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	/* Suppress CPU underrun reporting while FDI training runs below. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(intel_crtc, pipe_config);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_pipe_clock(pipe_config);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(pipe_config);

	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/* Two vblank waits before re-arming underrun reporting —
		 * intentional, not a copy/paste error. */
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround.
 */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}

/*
 * Disable the ILK-style panel fitter for @crtc.
 * @force: clear the registers even if our state says the pfit is unused.
 */
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (force || crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}

/*
 * Modeset disable sequence for ILK-IVB (PCH based) platforms — the
 * inverse of ironlake_crtc_enable(); statement order mirrors it.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
				  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
	}

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Re-arm the underrun reporting suppressed above. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/*
 * Modeset disable sequence for HSW+ (DDI) platforms — the inverse of
 * haswell_crtc_enable(). DSI transcoders skip the pipe-level steps.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
				 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* On LPT the PCH transcoder is always transcoder A. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc); 5523 5524 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 5525 if (!transcoder_is_dsi(cpu_transcoder)) 5526 intel_disable_pipe(intel_crtc); 5527 5528 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) 5529 intel_ddi_set_vc_payload_alloc(intel_crtc->config, false); 5530 5531 if (!transcoder_is_dsi(cpu_transcoder)) 5532 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 5533 5534 if (INTEL_GEN(dev_priv) >= 9) 5535 skylake_scaler_disable(intel_crtc); 5536 else 5537 ironlake_pfit_disable(intel_crtc, false); 5538 5539 if (!transcoder_is_dsi(cpu_transcoder)) 5540 intel_ddi_disable_pipe_clock(intel_crtc->config); 5541 5542 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 5543 5544 if (old_crtc_state->has_pch_encoder) 5545 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 5546 true); 5547} 5548 5549static void i9xx_pfit_enable(struct intel_crtc *crtc) 5550{ 5551 struct drm_device *dev = crtc->base.dev; 5552 struct drm_i915_private *dev_priv = to_i915(dev); 5553 struct intel_crtc_state *pipe_config = crtc->config; 5554 5555 if (!pipe_config->gmch_pfit.control) 5556 return; 5557 5558 /* 5559 * The panel fitter should only be adjusted whilst the pipe is disabled, 5560 * according to register description and PRM. 5561 */ 5562 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 5563 assert_pipe_disabled(dev_priv, crtc->pipe); 5564 5565 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios); 5566 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control); 5567 5568 /* Border color in case we don't scale up to the full screen. Black by 5569 * default, change to something else for debugging. 
*/ 5570 I915_WRITE(BCLRPAT(crtc->pipe), 0); 5571} 5572 5573enum intel_display_power_domain intel_port_to_power_domain(enum port port) 5574{ 5575 switch (port) { 5576 case PORT_A: 5577 return POWER_DOMAIN_PORT_DDI_A_LANES; 5578 case PORT_B: 5579 return POWER_DOMAIN_PORT_DDI_B_LANES; 5580 case PORT_C: 5581 return POWER_DOMAIN_PORT_DDI_C_LANES; 5582 case PORT_D: 5583 return POWER_DOMAIN_PORT_DDI_D_LANES; 5584 case PORT_E: 5585 return POWER_DOMAIN_PORT_DDI_E_LANES; 5586 default: 5587 MISSING_CASE(port); 5588 return POWER_DOMAIN_PORT_OTHER; 5589 } 5590} 5591 5592static u64 get_crtc_power_domains(struct drm_crtc *crtc, 5593 struct intel_crtc_state *crtc_state) 5594{ 5595 struct drm_device *dev = crtc->dev; 5596 struct drm_i915_private *dev_priv = to_i915(dev); 5597 struct drm_encoder *encoder; 5598 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5599 enum pipe pipe = intel_crtc->pipe; 5600 u64 mask; 5601 enum transcoder transcoder = crtc_state->cpu_transcoder; 5602 5603 if (!crtc_state->base.active) 5604 return 0; 5605 5606 mask = BIT(POWER_DOMAIN_PIPE(pipe)); 5607 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); 5608 if (crtc_state->pch_pfit.enabled || 5609 crtc_state->pch_pfit.force_thru) 5610 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 5611 5612 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) { 5613 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 5614 5615 mask |= BIT_ULL(intel_encoder->power_domain); 5616 } 5617 5618 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 5619 mask |= BIT(POWER_DOMAIN_AUDIO); 5620 5621 if (crtc_state->shared_dpll) 5622 mask |= BIT_ULL(POWER_DOMAIN_PLLS); 5623 5624 return mask; 5625} 5626 5627static u64 5628modeset_get_crtc_power_domains(struct drm_crtc *crtc, 5629 struct intel_crtc_state *crtc_state) 5630{ 5631 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5632 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5633 enum intel_display_power_domain domain; 5634 u64 domains, 
new_domains, old_domains; 5635 5636 old_domains = intel_crtc->enabled_power_domains; 5637 intel_crtc->enabled_power_domains = new_domains = 5638 get_crtc_power_domains(crtc, crtc_state); 5639 5640 domains = new_domains & ~old_domains; 5641 5642 for_each_power_domain(domain, domains) 5643 intel_display_power_get(dev_priv, domain); 5644 5645 return old_domains & ~new_domains; 5646} 5647 5648static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 5649 u64 domains) 5650{ 5651 enum intel_display_power_domain domain; 5652 5653 for_each_power_domain(domain, domains) 5654 intel_display_power_put(dev_priv, domain); 5655} 5656 5657static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, 5658 struct drm_atomic_state *old_state) 5659{ 5660 struct intel_atomic_state *old_intel_state = 5661 to_intel_atomic_state(old_state); 5662 struct drm_crtc *crtc = pipe_config->base.crtc; 5663 struct drm_device *dev = crtc->dev; 5664 struct drm_i915_private *dev_priv = to_i915(dev); 5665 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5666 int pipe = intel_crtc->pipe; 5667 5668 if (WARN_ON(intel_crtc->active)) 5669 return; 5670 5671 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5672 intel_dp_set_m_n(intel_crtc, M1_N1); 5673 5674 intel_set_pipe_timings(intel_crtc); 5675 intel_set_pipe_src_size(intel_crtc); 5676 5677 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 5678 struct drm_i915_private *dev_priv = to_i915(dev); 5679 5680 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 5681 I915_WRITE(CHV_CANVAS(pipe), 0); 5682 } 5683 5684 i9xx_set_pipeconf(intel_crtc); 5685 5686 intel_crtc->active = true; 5687 5688 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5689 5690 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); 5691 5692 if (IS_CHERRYVIEW(dev_priv)) { 5693 chv_prepare_pll(intel_crtc, intel_crtc->config); 5694 chv_enable_pll(intel_crtc, intel_crtc->config); 5695 } else { 5696 vlv_prepare_pll(intel_crtc, intel_crtc->config); 5697 
vlv_enable_pll(intel_crtc, intel_crtc->config); 5698 } 5699 5700 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5701 5702 i9xx_pfit_enable(intel_crtc); 5703 5704 intel_color_load_luts(&pipe_config->base); 5705 5706 dev_priv->display.initial_watermarks(old_intel_state, 5707 pipe_config); 5708 intel_enable_pipe(intel_crtc); 5709 5710 assert_vblank_disabled(crtc); 5711 drm_crtc_vblank_on(crtc); 5712 5713 intel_encoders_enable(crtc, pipe_config, old_state); 5714} 5715 5716static void i9xx_set_pll_dividers(struct intel_crtc *crtc) 5717{ 5718 struct drm_device *dev = crtc->base.dev; 5719 struct drm_i915_private *dev_priv = to_i915(dev); 5720 5721 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); 5722 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); 5723} 5724 5725static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, 5726 struct drm_atomic_state *old_state) 5727{ 5728 struct drm_crtc *crtc = pipe_config->base.crtc; 5729 struct drm_device *dev = crtc->dev; 5730 struct drm_i915_private *dev_priv = to_i915(dev); 5731 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5732 enum pipe pipe = intel_crtc->pipe; 5733 5734 if (WARN_ON(intel_crtc->active)) 5735 return; 5736 5737 i9xx_set_pll_dividers(intel_crtc); 5738 5739 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5740 intel_dp_set_m_n(intel_crtc, M1_N1); 5741 5742 intel_set_pipe_timings(intel_crtc); 5743 intel_set_pipe_src_size(intel_crtc); 5744 5745 i9xx_set_pipeconf(intel_crtc); 5746 5747 intel_crtc->active = true; 5748 5749 if (!IS_GEN2(dev_priv)) 5750 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5751 5752 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5753 5754 i9xx_enable_pll(intel_crtc); 5755 5756 i9xx_pfit_enable(intel_crtc); 5757 5758 intel_color_load_luts(&pipe_config->base); 5759 5760 intel_update_watermarks(intel_crtc); 5761 intel_enable_pipe(intel_crtc); 5762 5763 assert_vblank_disabled(crtc); 5764 drm_crtc_vblank_on(crtc); 5765 5766 
	intel_encoders_enable(crtc, pipe_config, old_state);
}

/* Turn off the GMCH panel fitter. Requires the pipe to be disabled. */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}

/*
 * Modeset disable sequence for GMCH (pre-ILK and VLV/CHV) platforms.
 * The PLL is left running for DSI outputs (the DSI code owns it).
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
			      struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);
}

/*
 * Force a crtc off outside of a normal atomic commit (e.g. while sanitizing
 * hardware state at load/resume). Builds a throwaway atomic state just to
 * drive the platform ->crtc_disable() hook, then clears our software
 * tracking and drops the crtc's power domain references.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	if (!intel_crtc->active)
		return;

	if (crtc->primary->state->visible) {
		WARN_ON(intel_crtc->flip_work);

		intel_pre_disable_primary_noatomic(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		crtc->primary->state->visible = false;
	}

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Scrub the software state to match the now-disabled hardware. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Drop every power domain reference this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}

/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	/* The helper returns the duplicated state to restore on resume. */
	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
	else
		dev_priv->modeset_restore_state = state;
	return ret;
}

/* Generic destroy hook: clean up the base encoder and free our wrapper. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_verify_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		I915_STATE_WARN(!crtc,
			 "connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST connectors juggle encoders dynamically; skip the
		 * encoder cross checks for them. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc && crtc->state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
			"best encoder set without crtc!\n");
	}
}

int
intel_connector_init(struct intel_connector *connector)
{
	/* Allocates and attaches the initial atomic connector state. */
	drm_atomic_helper_connector_reset(&connector->base);

	if (!connector->base.state)
		return -ENOMEM;

	return 0;
}

/*
 * Allocate and initialize an intel_connector. Returns NULL on allocation
 * failure (including failure to allocate the initial atomic state).
 */
struct intel_connector *intel_connector_alloc(void)
{
	struct intel_connector *connector;

	connector = kzalloc(sizeof *connector, GFP_KERNEL);
	if (!connector)
		return NULL;

	if (intel_connector_init(connector) < 0) {
		kfree(connector);
		return NULL;
	}

	return connector;
}

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}

/* FDI lanes a pipe will consume: its configured lane count, or 0 if the
 * pipe is off or not driving a PCH encoder. */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

/*
 * Validate @pipe_config's FDI lane requirement against the per-platform
 * limits and against the other pipes sharing FDI bandwidth.
 * Returns 0 on success, -EINVAL if the config cannot fit (the caller may
 * retry at a lower bpp), or a PTR_ERR from fetching another pipe's state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* Pipe B using >2 lanes steals pipe C's FDI lanes. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}

#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits.
 The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		/* Doesn't fit: drop bpp by one step (2 bits per channel,
		 * down to a floor of 6 bpc) and try again. */
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	/* RETRY tells the caller the config changed and must be recomputed. */
	if (needs_recompute)
		return RETRY;

	return ret;
}

/*
 * Can IPS (Intermediate Pixel Storage) be used with this pipe config?
 * Requires bpp <= 24; on BDW the pixel rate must also leave ~5% cdclk
 * headroom.
 */
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
				     struct intel_crtc_state *pipe_config)
{
	if (pipe_config->pipe_bpp > 24)
		return false;

	/* HSW can handle pixel rate up to cdclk? */
	if (IS_HASWELL(dev_priv))
		return true;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	return pipe_config->pixel_rate <=
		dev_priv->max_cdclk_freq * 95 / 100;
}

/* Decide whether IPS will be enabled: module parameter AND crtc support
 * (ULT, pipe A) AND config support must all agree. */
static void hsw_compute_ips_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	pipe_config->ips_enabled = i915.enable_ips &&
		hsw_crtc_supports_ips(crtc) &&
		pipe_config_supports_ips(dev_priv, pipe_config);
}

static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return INTEL_INFO(dev_priv)->gen < 4 &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

/*
 * Effective pipe pixel rate on ILK+: the mode's dot clock, scaled up when
 * the PCH panel fitter is downscaling (the pipe then reads more pixels
 * per frame than the mode emits).
 */
static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
6177 */ 6178 6179 if (pipe_config->pch_pfit.enabled) { 6180 uint64_t pipe_w, pipe_h, pfit_w, pfit_h; 6181 uint32_t pfit_size = pipe_config->pch_pfit.size; 6182 6183 pipe_w = pipe_config->pipe_src_w; 6184 pipe_h = pipe_config->pipe_src_h; 6185 6186 pfit_w = (pfit_size >> 16) & 0xFFFF; 6187 pfit_h = pfit_size & 0xFFFF; 6188 if (pipe_w < pfit_w) 6189 pipe_w = pfit_w; 6190 if (pipe_h < pfit_h) 6191 pipe_h = pfit_h; 6192 6193 if (WARN_ON(!pfit_w || !pfit_h)) 6194 return pixel_rate; 6195 6196 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h, 6197 pfit_w * pfit_h); 6198 } 6199 6200 return pixel_rate; 6201} 6202 6203static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 6204{ 6205 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 6206 6207 if (HAS_GMCH_DISPLAY(dev_priv)) 6208 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 6209 crtc_state->pixel_rate = 6210 crtc_state->base.adjusted_mode.crtc_clock; 6211 else 6212 crtc_state->pixel_rate = 6213 ilk_pipe_pixel_rate(crtc_state); 6214} 6215 6216static int intel_crtc_compute_config(struct intel_crtc *crtc, 6217 struct intel_crtc_state *pipe_config) 6218{ 6219 struct drm_device *dev = crtc->base.dev; 6220 struct drm_i915_private *dev_priv = to_i915(dev); 6221 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6222 int clock_limit = dev_priv->max_dotclk_freq; 6223 6224 if (INTEL_GEN(dev_priv) < 4) { 6225 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 6226 6227 /* 6228 * Enable double wide mode when the dot clock 6229 * is > 90% of the (display) core speed. 
6230 */ 6231 if (intel_crtc_supports_double_wide(crtc) && 6232 adjusted_mode->crtc_clock > clock_limit) { 6233 clock_limit = dev_priv->max_dotclk_freq; 6234 pipe_config->double_wide = true; 6235 } 6236 } 6237 6238 if (adjusted_mode->crtc_clock > clock_limit) { 6239 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 6240 adjusted_mode->crtc_clock, clock_limit, 6241 yesno(pipe_config->double_wide)); 6242 return -EINVAL; 6243 } 6244 6245 /* 6246 * Pipe horizontal size must be even in: 6247 * - DVO ganged mode 6248 * - LVDS dual channel mode 6249 * - Double wide pipe 6250 */ 6251 if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 6252 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 6253 pipe_config->pipe_src_w &= ~1; 6254 6255 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 6256 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 6257 */ 6258 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && 6259 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 6260 return -EINVAL; 6261 6262 intel_crtc_compute_pixel_rate(pipe_config); 6263 6264 if (HAS_IPS(dev_priv)) 6265 hsw_compute_ips_config(crtc, pipe_config); 6266 6267 if (pipe_config->has_pch_encoder) 6268 return ironlake_fdi_compute_config(crtc, pipe_config); 6269 6270 return 0; 6271} 6272 6273static void 6274intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) 6275{ 6276 while (*num > DATA_LINK_M_N_MASK || 6277 *den > DATA_LINK_M_N_MASK) { 6278 *num >>= 1; 6279 *den >>= 1; 6280 } 6281} 6282 6283static void compute_m_n(unsigned int m, unsigned int n, 6284 uint32_t *ret_m, uint32_t *ret_n, 6285 bool reduce_m_n) 6286{ 6287 /* 6288 * Reduce M/N as much as possible without loss in precision. Several DP 6289 * dongles in particular seem to be fussy about too large *link* M/N 6290 * values. The passed in values are more likely to have the least 6291 * significant bits zero than M after rounding below, so do this first. 
6292 */ 6293 if (reduce_m_n) { 6294 while ((m & 1) == 0 && (n & 1) == 0) { 6295 m >>= 1; 6296 n >>= 1; 6297 } 6298 } 6299 6300 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 6301 *ret_m = div_u64((uint64_t) m * *ret_n, n); 6302 intel_reduce_m_n_ratio(ret_m, ret_n); 6303} 6304 6305void 6306intel_link_compute_m_n(int bits_per_pixel, int nlanes, 6307 int pixel_clock, int link_clock, 6308 struct intel_link_m_n *m_n, 6309 bool reduce_m_n) 6310{ 6311 m_n->tu = 64; 6312 6313 compute_m_n(bits_per_pixel * pixel_clock, 6314 link_clock * nlanes * 8, 6315 &m_n->gmch_m, &m_n->gmch_n, 6316 reduce_m_n); 6317 6318 compute_m_n(pixel_clock, link_clock, 6319 &m_n->link_m, &m_n->link_n, 6320 reduce_m_n); 6321} 6322 6323static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 6324{ 6325 if (i915.panel_use_ssc >= 0) 6326 return i915.panel_use_ssc != 0; 6327 return dev_priv->vbt.lvds_use_ssc 6328 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 6329} 6330 6331static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) 6332{ 6333 return (1 << dpll->n) << 16 | dpll->m2; 6334} 6335 6336static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) 6337{ 6338 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 6339} 6340 6341static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 6342 struct intel_crtc_state *crtc_state, 6343 struct dpll *reduced_clock) 6344{ 6345 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6346 u32 fp, fp2 = 0; 6347 6348 if (IS_PINEVIEW(dev_priv)) { 6349 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 6350 if (reduced_clock) 6351 fp2 = pnv_dpll_compute_fp(reduced_clock); 6352 } else { 6353 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 6354 if (reduced_clock) 6355 fp2 = i9xx_dpll_compute_fp(reduced_clock); 6356 } 6357 6358 crtc_state->dpll_hw_state.fp0 = fp; 6359 6360 crtc->lowfreq_avail = false; 6361 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 6362 reduced_clock) { 6363 crtc_state->dpll_hw_state.fp1 = fp2; 6364 
crtc->lowfreq_avail = true; 6365 } else { 6366 crtc_state->dpll_hw_state.fp1 = fp; 6367 } 6368} 6369 6370static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe 6371 pipe) 6372{ 6373 u32 reg_val; 6374 6375 /* 6376 * PLLB opamp always calibrates to max value of 0x3f, force enable it 6377 * and set it to a reasonable value instead. 6378 */ 6379 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 6380 reg_val &= 0xffffff00; 6381 reg_val |= 0x00000030; 6382 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 6383 6384 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 6385 reg_val &= 0x8cffffff; 6386 reg_val = 0x8c000000; 6387 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 6388 6389 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 6390 reg_val &= 0xffffff00; 6391 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 6392 6393 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 6394 reg_val &= 0x00ffffff; 6395 reg_val |= 0xb0000000; 6396 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 6397} 6398 6399static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 6400 struct intel_link_m_n *m_n) 6401{ 6402 struct drm_device *dev = crtc->base.dev; 6403 struct drm_i915_private *dev_priv = to_i915(dev); 6404 int pipe = crtc->pipe; 6405 6406 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 6407 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 6408 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 6409 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 6410} 6411 6412static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 6413 struct intel_link_m_n *m_n, 6414 struct intel_link_m_n *m2_n2) 6415{ 6416 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6417 int pipe = crtc->pipe; 6418 enum transcoder transcoder = crtc->config->cpu_transcoder; 6419 6420 if (INTEL_GEN(dev_priv) >= 5) { 6421 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 6422 
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 6423 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 6424 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 6425 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available 6426 * for gen < 8) and if DRRS is supported (to make sure the 6427 * registers are not unnecessarily accessed). 6428 */ 6429 if (m2_n2 && (IS_CHERRYVIEW(dev_priv) || 6430 INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) { 6431 I915_WRITE(PIPE_DATA_M2(transcoder), 6432 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 6433 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 6434 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 6435 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 6436 } 6437 } else { 6438 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 6439 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 6440 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 6441 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 6442 } 6443} 6444 6445void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) 6446{ 6447 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 6448 6449 if (m_n == M1_N1) { 6450 dp_m_n = &crtc->config->dp_m_n; 6451 dp_m2_n2 = &crtc->config->dp_m2_n2; 6452 } else if (m_n == M2_N2) { 6453 6454 /* 6455 * M2_N2 registers are not supported. Hence m2_n2 divider value 6456 * needs to be programmed into M1_N1. 
6457 */ 6458 dp_m_n = &crtc->config->dp_m2_n2; 6459 } else { 6460 DRM_ERROR("Unsupported divider value\n"); 6461 return; 6462 } 6463 6464 if (crtc->config->has_pch_encoder) 6465 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); 6466 else 6467 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); 6468} 6469 6470static void vlv_compute_dpll(struct intel_crtc *crtc, 6471 struct intel_crtc_state *pipe_config) 6472{ 6473 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 6474 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 6475 if (crtc->pipe != PIPE_A) 6476 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 6477 6478 /* DPLL not used with DSI, but still need the rest set up */ 6479 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 6480 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 6481 DPLL_EXT_BUFFER_ENABLE_VLV; 6482 6483 pipe_config->dpll_hw_state.dpll_md = 6484 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6485} 6486 6487static void chv_compute_dpll(struct intel_crtc *crtc, 6488 struct intel_crtc_state *pipe_config) 6489{ 6490 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 6491 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 6492 if (crtc->pipe != PIPE_A) 6493 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 6494 6495 /* DPLL not used with DSI, but still need the rest set up */ 6496 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 6497 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 6498 6499 pipe_config->dpll_hw_state.dpll_md = 6500 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6501} 6502 6503static void vlv_prepare_pll(struct intel_crtc *crtc, 6504 const struct intel_crtc_state *pipe_config) 6505{ 6506 struct drm_device *dev = crtc->base.dev; 6507 struct drm_i915_private *dev_priv = to_i915(dev); 6508 enum pipe pipe = crtc->pipe; 6509 u32 mdiv; 6510 u32 bestn, bestm1, bestm2, bestp1, bestp2; 6511 u32 coreclk, reg_val; 6512 6513 /* Enable 
Refclk */ 6514 I915_WRITE(DPLL(pipe), 6515 pipe_config->dpll_hw_state.dpll & 6516 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 6517 6518 /* No need to actually set up the DPLL with DSI */ 6519 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 6520 return; 6521 6522 mutex_lock(&dev_priv->sb_lock); 6523 6524 bestn = pipe_config->dpll.n; 6525 bestm1 = pipe_config->dpll.m1; 6526 bestm2 = pipe_config->dpll.m2; 6527 bestp1 = pipe_config->dpll.p1; 6528 bestp2 = pipe_config->dpll.p2; 6529 6530 /* See eDP HDMI DPIO driver vbios notes doc */ 6531 6532 /* PLL B needs special handling */ 6533 if (pipe == PIPE_B) 6534 vlv_pllb_recal_opamp(dev_priv, pipe); 6535 6536 /* Set up Tx target for periodic Rcomp update */ 6537 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 6538 6539 /* Disable target IRef on PLL */ 6540 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 6541 reg_val &= 0x00ffffff; 6542 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 6543 6544 /* Disable fast lock */ 6545 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 6546 6547 /* Set idtafcrecal before PLL is enabled */ 6548 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 6549 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 6550 mdiv |= ((bestn << DPIO_N_SHIFT)); 6551 mdiv |= (1 << DPIO_K_SHIFT); 6552 6553 /* 6554 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 6555 * but we don't support that). 6556 * Note: don't use the DAC post divider as it seems unstable. 
6557 */ 6558 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 6559 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 6560 6561 mdiv |= DPIO_ENABLE_CALIBRATION; 6562 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 6563 6564 /* Set HBR and RBR LPF coefficients */ 6565 if (pipe_config->port_clock == 162000 || 6566 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) || 6567 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) 6568 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 6569 0x009f0003); 6570 else 6571 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 6572 0x00d0000f); 6573 6574 if (intel_crtc_has_dp_encoder(pipe_config)) { 6575 /* Use SSC source */ 6576 if (pipe == PIPE_A) 6577 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6578 0x0df40000); 6579 else 6580 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6581 0x0df70000); 6582 } else { /* HDMI or VGA */ 6583 /* Use bend source */ 6584 if (pipe == PIPE_A) 6585 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6586 0x0df70000); 6587 else 6588 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6589 0x0df40000); 6590 } 6591 6592 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 6593 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 6594 if (intel_crtc_has_dp_encoder(crtc->config)) 6595 coreclk |= 0x01000000; 6596 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 6597 6598 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 6599 mutex_unlock(&dev_priv->sb_lock); 6600} 6601 6602static void chv_prepare_pll(struct intel_crtc *crtc, 6603 const struct intel_crtc_state *pipe_config) 6604{ 6605 struct drm_device *dev = crtc->base.dev; 6606 struct drm_i915_private *dev_priv = to_i915(dev); 6607 enum pipe pipe = crtc->pipe; 6608 enum dpio_channel port = vlv_pipe_to_channel(pipe); 6609 u32 loopfilter, tribuf_calcntr; 6610 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 6611 u32 dpio_val; 6612 int vco; 6613 6614 /* Enable Refclk and SSC */ 6615 
I915_WRITE(DPLL(pipe), 6616 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 6617 6618 /* No need to actually set up the DPLL with DSI */ 6619 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 6620 return; 6621 6622 bestn = pipe_config->dpll.n; 6623 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 6624 bestm1 = pipe_config->dpll.m1; 6625 bestm2 = pipe_config->dpll.m2 >> 22; 6626 bestp1 = pipe_config->dpll.p1; 6627 bestp2 = pipe_config->dpll.p2; 6628 vco = pipe_config->dpll.vco; 6629 dpio_val = 0; 6630 loopfilter = 0; 6631 6632 mutex_lock(&dev_priv->sb_lock); 6633 6634 /* p1 and p2 divider */ 6635 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 6636 5 << DPIO_CHV_S1_DIV_SHIFT | 6637 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 6638 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 6639 1 << DPIO_CHV_K_DIV_SHIFT); 6640 6641 /* Feedback post-divider - m2 */ 6642 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 6643 6644 /* Feedback refclk divider - n and m1 */ 6645 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 6646 DPIO_CHV_M1_DIV_BY_2 | 6647 1 << DPIO_CHV_N_DIV_SHIFT); 6648 6649 /* M2 fraction division */ 6650 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 6651 6652 /* M2 fraction division enable */ 6653 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 6654 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 6655 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 6656 if (bestm2_frac) 6657 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 6658 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 6659 6660 /* Program digital lock detect threshold */ 6661 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 6662 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 6663 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 6664 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 6665 if (!bestm2_frac) 6666 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 6667 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 6668 6669 /* Loop 
filter */ 6670 if (vco == 5400000) { 6671 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 6672 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 6673 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 6674 tribuf_calcntr = 0x9; 6675 } else if (vco <= 6200000) { 6676 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 6677 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 6678 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 6679 tribuf_calcntr = 0x9; 6680 } else if (vco <= 6480000) { 6681 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 6682 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 6683 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 6684 tribuf_calcntr = 0x8; 6685 } else { 6686 /* Not supported. Apply the same limits as in the max case */ 6687 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 6688 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 6689 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 6690 tribuf_calcntr = 0; 6691 } 6692 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 6693 6694 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 6695 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 6696 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 6697 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 6698 6699 /* AFC Recal */ 6700 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 6701 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 6702 DPIO_AFC_RECAL); 6703 6704 mutex_unlock(&dev_priv->sb_lock); 6705} 6706 6707/** 6708 * vlv_force_pll_on - forcibly enable just the PLL 6709 * @dev_priv: i915 private structure 6710 * @pipe: pipe PLL to enable 6711 * @dpll: PLL configuration 6712 * 6713 * Enable the PLL for @pipe using the supplied @dpll config. To be used 6714 * in cases where we need the PLL enabled even when @pipe is not going to 6715 * be enabled. 
6716 */ 6717int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, 6718 const struct dpll *dpll) 6719{ 6720 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 6721 struct intel_crtc_state *pipe_config; 6722 6723 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); 6724 if (!pipe_config) 6725 return -ENOMEM; 6726 6727 pipe_config->base.crtc = &crtc->base; 6728 pipe_config->pixel_multiplier = 1; 6729 pipe_config->dpll = *dpll; 6730 6731 if (IS_CHERRYVIEW(dev_priv)) { 6732 chv_compute_dpll(crtc, pipe_config); 6733 chv_prepare_pll(crtc, pipe_config); 6734 chv_enable_pll(crtc, pipe_config); 6735 } else { 6736 vlv_compute_dpll(crtc, pipe_config); 6737 vlv_prepare_pll(crtc, pipe_config); 6738 vlv_enable_pll(crtc, pipe_config); 6739 } 6740 6741 kfree(pipe_config); 6742 6743 return 0; 6744} 6745 6746/** 6747 * vlv_force_pll_off - forcibly disable just the PLL 6748 * @dev_priv: i915 private structure 6749 * @pipe: pipe PLL to disable 6750 * 6751 * Disable the PLL for @pipe. To be used in cases where we need 6752 * the PLL enabled even when @pipe is not going to be enabled. 
6753 */ 6754void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) 6755{ 6756 if (IS_CHERRYVIEW(dev_priv)) 6757 chv_disable_pll(dev_priv, pipe); 6758 else 6759 vlv_disable_pll(dev_priv, pipe); 6760} 6761 6762static void i9xx_compute_dpll(struct intel_crtc *crtc, 6763 struct intel_crtc_state *crtc_state, 6764 struct dpll *reduced_clock) 6765{ 6766 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6767 u32 dpll; 6768 struct dpll *clock = &crtc_state->dpll; 6769 6770 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 6771 6772 dpll = DPLL_VGA_MODE_DIS; 6773 6774 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 6775 dpll |= DPLLB_MODE_LVDS; 6776 else 6777 dpll |= DPLLB_MODE_DAC_SERIAL; 6778 6779 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 6780 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 6781 dpll |= (crtc_state->pixel_multiplier - 1) 6782 << SDVO_MULTIPLIER_SHIFT_HIRES; 6783 } 6784 6785 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 6786 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 6787 dpll |= DPLL_SDVO_HIGH_SPEED; 6788 6789 if (intel_crtc_has_dp_encoder(crtc_state)) 6790 dpll |= DPLL_SDVO_HIGH_SPEED; 6791 6792 /* compute bitmask from p1 value */ 6793 if (IS_PINEVIEW(dev_priv)) 6794 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 6795 else { 6796 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6797 if (IS_G4X(dev_priv) && reduced_clock) 6798 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 6799 } 6800 switch (clock->p2) { 6801 case 5: 6802 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 6803 break; 6804 case 7: 6805 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 6806 break; 6807 case 10: 6808 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 6809 break; 6810 case 14: 6811 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 6812 break; 6813 } 6814 if (INTEL_GEN(dev_priv) >= 4) 6815 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 6816 6817 if (crtc_state->sdvo_tv_clock) 6818 dpll |= 
PLL_REF_INPUT_TVCLKINBC; 6819 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 6820 intel_panel_use_ssc(dev_priv)) 6821 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6822 else 6823 dpll |= PLL_REF_INPUT_DREFCLK; 6824 6825 dpll |= DPLL_VCO_ENABLE; 6826 crtc_state->dpll_hw_state.dpll = dpll; 6827 6828 if (INTEL_GEN(dev_priv) >= 4) { 6829 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 6830 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6831 crtc_state->dpll_hw_state.dpll_md = dpll_md; 6832 } 6833} 6834 6835static void i8xx_compute_dpll(struct intel_crtc *crtc, 6836 struct intel_crtc_state *crtc_state, 6837 struct dpll *reduced_clock) 6838{ 6839 struct drm_device *dev = crtc->base.dev; 6840 struct drm_i915_private *dev_priv = to_i915(dev); 6841 u32 dpll; 6842 struct dpll *clock = &crtc_state->dpll; 6843 6844 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 6845 6846 dpll = DPLL_VGA_MODE_DIS; 6847 6848 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 6849 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6850 } else { 6851 if (clock->p1 == 2) 6852 dpll |= PLL_P1_DIVIDE_BY_TWO; 6853 else 6854 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6855 if (clock->p2 == 4) 6856 dpll |= PLL_P2_DIVIDE_BY_4; 6857 } 6858 6859 if (!IS_I830(dev_priv) && 6860 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) 6861 dpll |= DPLL_DVO_2X_MODE; 6862 6863 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 6864 intel_panel_use_ssc(dev_priv)) 6865 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6866 else 6867 dpll |= PLL_REF_INPUT_DREFCLK; 6868 6869 dpll |= DPLL_VCO_ENABLE; 6870 crtc_state->dpll_hw_state.dpll = dpll; 6871} 6872 6873static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 6874{ 6875 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 6876 enum pipe pipe = intel_crtc->pipe; 6877 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 6878 const struct drm_display_mode *adjusted_mode = 
&intel_crtc->config->base.adjusted_mode; 6879 uint32_t crtc_vtotal, crtc_vblank_end; 6880 int vsyncshift = 0; 6881 6882 /* We need to be careful not to changed the adjusted mode, for otherwise 6883 * the hw state checker will get angry at the mismatch. */ 6884 crtc_vtotal = adjusted_mode->crtc_vtotal; 6885 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 6886 6887 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 6888 /* the chip adds 2 halflines automatically */ 6889 crtc_vtotal -= 1; 6890 crtc_vblank_end -= 1; 6891 6892 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 6893 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 6894 else 6895 vsyncshift = adjusted_mode->crtc_hsync_start - 6896 adjusted_mode->crtc_htotal / 2; 6897 if (vsyncshift < 0) 6898 vsyncshift += adjusted_mode->crtc_htotal; 6899 } 6900 6901 if (INTEL_GEN(dev_priv) > 3) 6902 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 6903 6904 I915_WRITE(HTOTAL(cpu_transcoder), 6905 (adjusted_mode->crtc_hdisplay - 1) | 6906 ((adjusted_mode->crtc_htotal - 1) << 16)); 6907 I915_WRITE(HBLANK(cpu_transcoder), 6908 (adjusted_mode->crtc_hblank_start - 1) | 6909 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 6910 I915_WRITE(HSYNC(cpu_transcoder), 6911 (adjusted_mode->crtc_hsync_start - 1) | 6912 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 6913 6914 I915_WRITE(VTOTAL(cpu_transcoder), 6915 (adjusted_mode->crtc_vdisplay - 1) | 6916 ((crtc_vtotal - 1) << 16)); 6917 I915_WRITE(VBLANK(cpu_transcoder), 6918 (adjusted_mode->crtc_vblank_start - 1) | 6919 ((crtc_vblank_end - 1) << 16)); 6920 I915_WRITE(VSYNC(cpu_transcoder), 6921 (adjusted_mode->crtc_vsync_start - 1) | 6922 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 6923 6924 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 6925 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 6926 * documented on the DDI_FUNC_CTL register description, EDP Input Select 6927 * bits. 
*/ 6928 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 6929 (pipe == PIPE_B || pipe == PIPE_C)) 6930 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 6931 6932} 6933 6934static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) 6935{ 6936 struct drm_device *dev = intel_crtc->base.dev; 6937 struct drm_i915_private *dev_priv = to_i915(dev); 6938 enum pipe pipe = intel_crtc->pipe; 6939 6940 /* pipesrc controls the size that is scaled from, which should 6941 * always be the user's requested size. 6942 */ 6943 I915_WRITE(PIPESRC(pipe), 6944 ((intel_crtc->config->pipe_src_w - 1) << 16) | 6945 (intel_crtc->config->pipe_src_h - 1)); 6946} 6947 6948static void intel_get_pipe_timings(struct intel_crtc *crtc, 6949 struct intel_crtc_state *pipe_config) 6950{ 6951 struct drm_device *dev = crtc->base.dev; 6952 struct drm_i915_private *dev_priv = to_i915(dev); 6953 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 6954 uint32_t tmp; 6955 6956 tmp = I915_READ(HTOTAL(cpu_transcoder)); 6957 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 6958 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 6959 tmp = I915_READ(HBLANK(cpu_transcoder)); 6960 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; 6961 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; 6962 tmp = I915_READ(HSYNC(cpu_transcoder)); 6963 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 6964 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 6965 6966 tmp = I915_READ(VTOTAL(cpu_transcoder)); 6967 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 6968 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 6969 tmp = I915_READ(VBLANK(cpu_transcoder)); 6970 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; 6971 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 
1; 6972 tmp = I915_READ(VSYNC(cpu_transcoder)); 6973 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 6974 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 6975 6976 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { 6977 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 6978 pipe_config->base.adjusted_mode.crtc_vtotal += 1; 6979 pipe_config->base.adjusted_mode.crtc_vblank_end += 1; 6980 } 6981} 6982 6983static void intel_get_pipe_src_size(struct intel_crtc *crtc, 6984 struct intel_crtc_state *pipe_config) 6985{ 6986 struct drm_device *dev = crtc->base.dev; 6987 struct drm_i915_private *dev_priv = to_i915(dev); 6988 u32 tmp; 6989 6990 tmp = I915_READ(PIPESRC(crtc->pipe)); 6991 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 6992 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 6993 6994 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; 6995 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; 6996} 6997 6998void intel_mode_from_pipe_config(struct drm_display_mode *mode, 6999 struct intel_crtc_state *pipe_config) 7000{ 7001 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; 7002 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; 7003 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; 7004 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end; 7005 7006 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; 7007 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; 7008 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; 7009 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; 7010 7011 mode->flags = pipe_config->base.adjusted_mode.flags; 7012 mode->type = DRM_MODE_TYPE_DRIVER; 7013 7014 mode->clock = pipe_config->base.adjusted_mode.crtc_clock; 7015 7016 mode->hsync = drm_mode_hsync(mode); 7017 mode->vrefresh = drm_mode_vrefresh(mode); 7018 drm_mode_set_name(mode); 
7019} 7020 7021static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 7022{ 7023 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 7024 uint32_t pipeconf; 7025 7026 pipeconf = 0; 7027 7028 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 7029 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 7030 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE; 7031 7032 if (intel_crtc->config->double_wide) 7033 pipeconf |= PIPECONF_DOUBLE_WIDE; 7034 7035 /* only g4x and later have fancy bpc/dither controls */ 7036 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 7037 IS_CHERRYVIEW(dev_priv)) { 7038 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 7039 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) 7040 pipeconf |= PIPECONF_DITHER_EN | 7041 PIPECONF_DITHER_TYPE_SP; 7042 7043 switch (intel_crtc->config->pipe_bpp) { 7044 case 18: 7045 pipeconf |= PIPECONF_6BPC; 7046 break; 7047 case 24: 7048 pipeconf |= PIPECONF_8BPC; 7049 break; 7050 case 30: 7051 pipeconf |= PIPECONF_10BPC; 7052 break; 7053 default: 7054 /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ 7055 BUG(); 7056 } 7057 } 7058 7059 if (HAS_PIPE_CXSR(dev_priv)) { 7060 if (intel_crtc->lowfreq_avail) { 7061 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 7062 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 7063 } else { 7064 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 7065 } 7066 } 7067 7068 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 7069 if (INTEL_GEN(dev_priv) < 4 || 7070 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 7071 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 7072 else 7073 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 7074 } else 7075 pipeconf |= PIPECONF_PROGRESSIVE; 7076 7077 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 7078 intel_crtc->config->limited_color_range) 7079 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 7080 7081 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); 7082 POSTING_READ(PIPECONF(intel_crtc->pipe)); 7083} 7084 7085static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 7086 struct intel_crtc_state *crtc_state) 7087{ 7088 struct drm_device *dev = crtc->base.dev; 7089 struct drm_i915_private *dev_priv = to_i915(dev); 7090 const struct intel_limit *limit; 7091 int refclk = 48000; 7092 7093 memset(&crtc_state->dpll_hw_state, 0, 7094 sizeof(crtc_state->dpll_hw_state)); 7095 7096 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7097 if (intel_panel_use_ssc(dev_priv)) { 7098 refclk = dev_priv->vbt.lvds_ssc_freq; 7099 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7100 } 7101 7102 limit = &intel_limits_i8xx_lvds; 7103 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 7104 limit = &intel_limits_i8xx_dvo; 7105 } else { 7106 limit = &intel_limits_i8xx_dac; 7107 } 7108 7109 if (!crtc_state->clock_set && 7110 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7111 refclk, NULL, &crtc_state->dpll)) { 7112 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7113 return -EINVAL; 7114 } 7115 7116 i8xx_compute_dpll(crtc, 
			  crtc_state, NULL);

	return 0;
}

/*
 * g4x_crtc_compute_clock - find DPLL dividers for a g4x crtc.
 *
 * Chooses divider limits by output type (single/dual-link LVDS, HDMI,
 * analog, SDVO), using the VBT SSC reference for LVDS when enabled,
 * then searches for dividers and computes the DPLL register values.
 */
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

/*
 * pnv_crtc_compute_clock - find DPLL dividers for a Pineview crtc
 * (LVDS vs. everything else), then fill in the DPLL register state.
 */
static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_pineview_lvds;
	} else {
		limit = &intel_limits_pineview_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

/*
 * i9xx_crtc_compute_clock - find DPLL dividers for a gen3/4 crtc
 * (LVDS vs. everything else), then fill in the DPLL register state.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_i9xx_lvds;
	} else {
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

/*
 * chv_crtc_compute_clock - find DPLL dividers for a Cherryview crtc
 * against the fixed 100000 kHz reference, unless the clock was set
 * explicitly, and compute the CHV DPLL register state.
 */
static int chv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_chv;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	chv_compute_dpll(crtc, crtc_state);

	return 0;
}

/*
 * vlv_crtc_compute_clock - find DPLL dividers for a Valleyview crtc
 * against the fixed 100000 kHz reference, unless the clock was set
 * explicitly, and compute the VLV DPLL register state.
 */
static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_vlv;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	vlv_compute_dpll(crtc, crtc_state);

	return 0;
}

/*
 * i9xx_get_pfit_config - read back the panel fitter state into the
 * crtc state, but only if the fitter is enabled and attached to this
 * crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	uint32_t tmp;

	/* NOTE(review): gen <= 3 desktop parts and 830 are skipped --
	 * presumably no usable panel fitter there; confirm against Bspec */
	if (INTEL_GEN(dev_priv) <= 3 &&
	    (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_GEN(dev_priv) < 4) {
		/* pre-gen4: only pipe B is checked against the fitter */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}

/*
 * vlv_crtc_clock_get - read back the VLV DPLL dividers via the sideband
 * interface and convert them to a port clock in the crtc state.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the divider fields from PLL_DW3 */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}

/*
 * i9xx_get_initial_plane_config - read back the framebuffer currently
 * programmed on the primary plane (e.g. by firmware) and allocate an
 * intel_framebuffer describing it in @plane_config.  Bails silently if
 * the plane is disabled or the allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	if
	    (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	/* gen4+ reads the surface base plus a tiling-dependent offset
	 * register; pre-gen4 only has DSPADDR */
	if (INTEL_GEN(dev_priv) >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	/* Pipe source size and stride determine the fb dimensions */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

/*
 * chv_crtc_clock_get - read back the CHV DPLL dividers via the sideband
 * interface and convert them to a port clock in the crtc state.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/* m2 has an integer part in DW0 and, when fractional dividers
	 * are enabled, a 22-bit fraction in DW2 */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}

/*
 * i9xx_get_pipe_config - read back the full pipe configuration for a
 * gmch (pre-ILK / VLV / CHV) pipe.
 *
 * Takes the pipe power domain for the duration of the readout.
 * Returns true iff the pipe is enabled and @pipe_config was filled in.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	/* bpc is only encoded in PIPECONF on g4x/VLV/CHV */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_GEN(dev_priv) < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier readout differs per platform generation */
	if (INTEL_GEN(dev_priv) >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev_priv))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev_priv))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

/*
 * ironlake_init_pch_refclk - configure the PCH display reference clock
 * (DREF) tree on IBX/CPT.
 *
 * Computes the desired PCH_DREF_CONTROL value from the connected
 * outputs (LVDS/eDP), the VBT SSC settings, and any DPLLs already
 * using the SSC source, then steps the hardware from the current value
 * to the final one in the required slow enable/disable sequence.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* On IBX the external CK505 (and thus SSC capability) depends on
	 * the VBT; later PCHs can always do SSC without a CK505 */
	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep SSC alive for any DPLL still referencing it */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Nothing to do if the hardware already matches the target */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The stepwise updates above must have converged on the
	 * precomputed target value */
	BUG_ON(val != final);
}

/*
 * lpt_reset_fdi_mphy - pulse the FDI mPHY reset via SOUTH_CHICKEN2,
 * waiting up to 100 us for the status bit on both assert and de-assert.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset
 de-assert timeout\n");
}

/* WaMPhyProgramming:hsw
 *
 * lpt_program_fdi_mphy - program the FDI mPHY tuning values over the
 * sideband interface.  Each setting is written twice, once per channel
 * (the 0x20xx and the mirrored 0x21xx offsets).
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	uint32_t reg, tmp;

	/* Sanitize impossible flag combinations: FDI requires spread,
	 * and LP PCH has no FDI at all */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ?
	      SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Assert PATHALT (and settle) before disabling the SSC */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

/* Map a bend amount in steps (-50..50, multiples of 5) to a table index */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SBI_SSCDIVINTPHASE values per bend step, indexed by BEND_IDX() */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	uint32_t tmp;
	int idx = BEND_IDX(steps);

	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/* Odd multiples of 5 steps get a non-zero dither phase */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX

/*
 * lpt_init_pch_refclk - set up CLKOUT_DP on LPT: enabled with spread
 * and FDI when a VGA (analog) encoder is present, disabled otherwise.
 */
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_vga = false;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		default:
			break;
		}
	}

	if (has_vga) {
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ironlake_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}

/*
 * ironlake_set_pipeconf - program PIPECONF (bpc, dither, interlace,
 * limited color range) for an ILK-style pipe.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}

/*
 * haswell_set_pipeconf - program PIPECONF (dither on HSW only, plus
 * interlace mode) for the crtc's cpu transcoder.
 */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	u32 val = 0;

	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));
}

/*
 * haswell_set_pipemisc - program PIPEMISC (bpc and dithering) on
 * Broadwell and gen9+; no-op on other platforms.
 */
static void haswell_set_pipemisc(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
		u32 val = 0;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
	}
}

/*
 * ironlake_get_lanes_required - number of FDI lanes needed to carry
 * @target_clock at @bpp over a link of @link_bw.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

/* Does the m/n divider ratio call for the FP_CB_TUNE bit? */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

/*
 * ironlake_compute_dpll - compute the ILK DPLL/FP register values in
 * crtc_state->dpll_hw_state from the chosen dividers (and the optional
 * @reduced_clock used for downclocking).
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<<
PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 8126 8127 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 8128 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 8129 dpll |= DPLL_SDVO_HIGH_SPEED; 8130 8131 if (intel_crtc_has_dp_encoder(crtc_state)) 8132 dpll |= DPLL_SDVO_HIGH_SPEED; 8133 8134 /* 8135 * The high speed IO clock is only really required for 8136 * SDVO/HDMI/DP, but we also enable it for CRT to make it 8137 * possible to share the DPLL between CRT and HDMI. Enabling 8138 * the clock needlessly does no real harm, except use up a 8139 * bit of power potentially. 8140 * 8141 * We'll limit this to IVB with 3 pipes, since it has only two 8142 * DPLLs and so DPLL sharing is the only way to get three pipes 8143 * driving PCH ports at the same time. On SNB we could do this, 8144 * and potentially avoid enabling the second DPLL, but it's not 8145 * clear if it''s a win or loss power wise. No point in doing 8146 * this on ILK at all since it has a fixed DPLL<->pipe mapping. 8147 */ 8148 if (INTEL_INFO(dev_priv)->num_pipes == 3 && 8149 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 8150 dpll |= DPLL_SDVO_HIGH_SPEED; 8151 8152 /* compute bitmask from p1 value */ 8153 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8154 /* also FPA1 */ 8155 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8156 8157 switch (crtc_state->dpll.p2) { 8158 case 5: 8159 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8160 break; 8161 case 7: 8162 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8163 break; 8164 case 10: 8165 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8166 break; 8167 case 14: 8168 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8169 break; 8170 } 8171 8172 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8173 intel_panel_use_ssc(dev_priv)) 8174 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8175 else 8176 dpll |= PLL_REF_INPUT_DREFCLK; 8177 8178 dpll |= DPLL_VCO_ENABLE; 8179 8180 crtc_state->dpll_hw_state.dpll = dpll; 8181 
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}

/*
 * Compute the PCH PLL configuration for an ILK-style CRTC: pick the divider
 * limits by output type/refclk, find the best dividers, program
 * dpll_hw_state, and reserve a shared DPLL. Returns 0 on success or
 * -EINVAL when no suitable PLL settings or shared DPLL can be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct dpll reduced_clock;
	/*
	 * NOTE(review): has_reduced_clock is never set to true in this
	 * function, so the reduced_clock/lowfreq_avail paths below are
	 * effectively dead code (leftover from removed downclocking support).
	 */
	bool has_reduced_clock = false;
	struct intel_shared_dpll *pll;
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc->lowfreq_avail = false;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		/* Divider limits depend on single vs dual link and refclk. */
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state,
			      has_reduced_clock ? &reduced_clock : NULL);

	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
	if (pll == NULL) {
		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
				 pipe_name(crtc->pipe));
		return -EINVAL;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    has_reduced_clock)
		crtc->lowfreq_avail = true;

	return 0;
}

/* Read back link M/N values from the PCH transcoder registers. */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* TU size is stored minus one in the high bits of DATA_M1. */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

/*
 * Read back link M/N (and optionally the second M2/N2 set used by DRRS)
 * from the CPU transcoder registers; pre-gen5 uses the G4X register layout.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_GEN(dev_priv) >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
		    crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Pre-ILK: M/N live in per-pipe G4X-layout registers. */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

/*
 * Read back the DP link M/N state, from the PCH transcoder when a PCH
 * encoder is in use, otherwise from the CPU transcoder (including M2/N2).
 */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}

/* Read back the FDI link M/N configuration from the CPU transcoder. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}

/*
 * Read back the SKL+ panel fitter (pipe scaler) state: find the scaler
 * bound to this pipe (enabled and not assigned to a plane) and record its
 * id, position and size in pipe_config.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	uint32_t ps_ctrl = 0;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			break;
		}
	}

	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}

/*
 * Reconstruct an intel_framebuffer describing the firmware/BIOS-programmed
 * primary plane on SKL+, so the boot framebuffer can be inherited. On any
 * failure the partially-built fb is freed and plane_config->fb stays NULL.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->format = drm_format_info(fourcc);

	/* Translate the hardware tiling mode into a DRM format modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	/* NOTE(review): offset is read but never used on this path. */
	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in units of the format/tiling stride multiple. */
	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(fb, 0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}

/*
 * Read back the ILK+ panel fitter state for this pipe into pipe_config.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now.
		 */
		if (IS_GEN7(dev_priv)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}

/*
 * Reconstruct an intel_framebuffer describing the firmware/BIOS-programmed
 * primary plane on ILK-style hardware, so the boot framebuffer can be
 * inherited.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	if (INTEL_GEN(dev_priv) >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->format = drm_format_info(fourcc);

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe),
		      fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

/*
 * Read back the full hardware state of an ILK-style pipe into pipe_config.
 * Returns true if the pipe is enabled. Takes (and releases) the pipe power
 * domain reference for the duration of the readout.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

/*
 * Sanity-check (via I915_STATE_WARN) that nothing that depends on LCPLL is
 * still enabled before the PLL is disabled: CRTCs, power well, SPLL/WRPLLs,
 * panel power, PWMs, utility pin, GTC and interrupts.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) &
			BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

/* Read the D_COMP register; it lives at different offsets on HSW vs BDW. */
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return I915_READ(D_COMP_HSW);
	else
		return I915_READ(D_COMP_BDW);
}

/*
 * Write the D_COMP register. On HSW this must go through the pcode mailbox
 * (under rps.hw_lock); on BDW it is a plain MMIO write.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	if (IS_HASWELL(dev_priv)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_DEBUG_KMS("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		/* Move the CD clock to FCLK before the PLL goes down. */
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is already locked and fully restored. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		/* Switch the CD clock back from FCLK to LCPLL. */
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_update_cdclk(dev_priv);
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state.
 * We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

/* Undo hsw_enable_pc8(): restore LCPLL, PCH refclk and clock gating. */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

/*
 * HSW+ clock computation: everything is handled by the shared DPLL
 * framework, except DSI which has its own PLL.
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
		struct intel_encoder *encoder =
			intel_ddi_get_crtc_new_encoder(crtc_state);

		if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	crtc->lowfreq_avail = false;

	return 0;
}

/*
 * BXT: the port->DPLL mapping is fixed, so derive the shared DPLL directly
 * from the port.
 */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
			    enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;

	switch (port) {
	case PORT_A:
		id = DPLL_ID_SKL_DPLL0;
		break;
	case PORT_B:
		id = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_C:
		id = DPLL_ID_SKL_DPLL2;
		break;
	default:
		DRM_ERROR("Incorrect port type\n");
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

/* SKL: read back which DPLL the port's clock is selected from DPLL_CTRL2. */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* The per-port 3-bit field holds enable + 2-bit DPLL selector. */
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	id = temp >> (port * 3 + 1);

	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

/* HSW/BDW: translate PORT_CLK_SEL into the corresponding shared DPLL. */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}

/*
 * Determine which CPU transcoder drives this pipe (including the special
 * eDP transcoder) and whether it is enabled. On success the transcoder's
 * power domain reference is added to *power_domain_mask for the caller to
 * release.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the
	 * exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}

/*
 * BXT DSI: find out whether a DSI transcoder (port A or C) is driving this
 * pipe. Acquired power domain references are added to *power_domain_mask.
 * Returns true when pipe_config->cpu_transcoder ends up set to a DSI
 * transcoder.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!intel_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}

/*
 * Read back which DDI port/DPLL drives this transcoder, the DPLL hardware
 * state, and — on HSW/BDW port E — the FDI/PCH state.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A; it is the one connected
	 * to DDI E. So just check whether this pipe is wired to DDI E and
	 * whether the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}

/*
 * Read back the full hardware state of a HSW+ pipe into pipe_config.
 * Returns true if the pipe is active. All power domain references taken
 * during readout are collected in a mask and dropped before returning.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	if (INTEL_GEN(dev_priv) >= 9) {
		intel_crtc_init_scalers(crtc, pipe_config);

		pipe_config->scaler_state.scaler_id = -1;
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}

	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT_ULL(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev_priv))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every power domain reference taken during readout. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}

/*
 * Compute the CURCNTR value for the i845/i865 cursor. The stride must be a
 * power-of-two between 256 and 2048 bytes; anything else warns and falls
 * back to 256.
 */
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	unsigned int width = plane_state->base.crtc_w;
	unsigned int stride = roundup_pow_of_two(width) * 4;

	switch (stride) {
	default:
		WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
			  width, stride);
		stride = 256;
		/* fallthrough */
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	}

	return CURSOR_ENABLE |
		CURSOR_GAMMA_ENABLE |
		CURSOR_FORMAT_ARGB |
		CURSOR_STRIDE(stride);
}

/*
 * Program the i845/i865 cursor registers. Base/size/stride may only change
 * while the cursor is disabled, so the cursor is turned off first when any
 * of them (or the control value) differs from the cached state.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		POSTING_READ_FW(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE_FW(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
		POSTING_READ_FW(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}

/*
 * Compute the CURCNTR value for i9xx+ cursors. Only 64/128/256 square ARGB
 * cursor modes are supported; an unexpected width returns 0 (cursor off).
 */
static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
			   const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(plane_state->base.plane->dev);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 cntl;

	cntl = MCURSOR_GAMMA_ENABLE;

	if (HAS_DDI(dev_priv))
		cntl |= CURSOR_PIPE_CSC_ENABLE;

	cntl |= pipe << 28; /* Connect to correct pipe */

	switch (plane_state->base.crtc_w) {
	case 64:
		cntl |= CURSOR_MODE_64_ARGB_AX;
		break;
	case 128:
		cntl |= CURSOR_MODE_128_ARGB_AX;
		break;
	case 256:
		cntl |= CURSOR_MODE_256_ARGB_AX;
		break;
	default:
		MISSING_CASE(plane_state->base.crtc_w);
		return 0;
	}

	if (plane_state->base.rotation & DRM_ROTATE_180)
		cntl |= CURSOR_ROTATE_180;

	return cntl;
}

/*
 * Program the i9xx+ cursor registers; the CURBASE write latches all cursor
 * state and takes effect on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (plane_state && plane_state->base.visible)
		cntl = plane_state->ctl;

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		POSTING_READ_FW(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE_FW(CURBASE(pipe), base);
	POSTING_READ_FW(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}

/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 base = intel_crtc->cursor_addr;
	unsigned long irqflags;
	u32 pos = 0;

	if (plane_state) {
		int x = plane_state->base.crtc_x;
		int y = plane_state->base.crtc_y;

		/* Negative coordinates are encoded as sign bit + magnitude. */
		if (x < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
			x = -x;
		}
		pos |= x << CURSOR_X_SHIFT;

		if (y < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
			y = -y;
		}
		pos |= y << CURSOR_Y_SHIFT;

		/* ILK+ do this automagically */
		if (HAS_GMCH_DISPLAY(dev_priv) &&
		    plane_state->base.rotation & DRM_ROTATE_180) {
			base += (plane_state->base.crtc_h *
				 plane_state->base.crtc_w - 1) * 4;
		}
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(CURPOS(pipe), pos);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		i845_update_cursor(crtc, base, plane_state);
	else
		i9xx_update_cursor(crtc, base, plane_state);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* Validate a requested cursor width/height for this platform. */
static bool cursor_size_ok(struct drm_i915_private *dev_priv,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;
	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_I845G(dev_priv) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/*
		 * For square cursors width == height, so (width | height)
		 * equals the common size; any non-square combination or
		 * unsupported size lands in the default case.
		 */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev_priv))
				return false;
			/* fallthrough */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Allocate and initialize an intel_framebuffer wrapping @obj.
 * Returns the embedded drm_framebuffer, or an ERR_PTR on failure
 * (the allocation is freed on the error path).
 */
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
			 struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
	if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}

/* Bytes per scanline for @width pixels at @bpp, aligned up to 64 bytes. */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

/* Page-aligned buffer size needed to scan out @mode at @bpp. */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return PAGE_ALIGN(pitch * mode->vdisplay);
}

/*
 * Allocate a GEM object sized for @mode and wrap it in a framebuffer.
 * On framebuffer creation failure the object reference is dropped and
 * the ERR_PTR is propagated to the caller.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_object_create(to_i915(dev),
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	if (IS_ERR(fb))
		i915_gem_object_put(obj);

	return fb;
}

/*
 * If the fbdev framebuffer exists and is large enough (pitch and total
 * size) for @mode, take a reference and return it; otherwise NULL.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->format->cpp[0] * 8))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	drm_framebuffer_reference(fb);
	return fb;
#else
	return NULL;
#endif
}

/*
 * Point the CRTC's primary plane at @fb, full-screen for @mode
 * (or disabled when @fb is NULL), in the given atomic @state.
 */
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_mode_get_hv_timing(mode,
				       &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	/* src coordinates are 16.16 fixed point */
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}

/*
 * Light up a pipe on @connector for load detection, saving enough atomic
 * state in @old to undo it later via intel_release_load_detect_pipe().
 *
 * Despite the int return type this returns true on success, false on
 * (non-retryable) failure, or -EDEADLK when the caller must back off
 * and retry the locking sequence.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	/* The plane state above now holds its own reference. */
	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Duplicate the pre-load-detect state so it can be restored later. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
	return true;

fail:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}
	if (restore_state) {
		drm_atomic_state_put(restore_state);
		restore_state = NULL;
	}

	/* -EDEADLK tells the caller to drop locks and retry. */
	if (ret == -EDEADLK)
		return ret;

	return false;
}

void intel_release_load_detect_pipe(struct
				    drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	/* Nothing to restore if load detection never committed state. */
	if (!state)
		return;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
	if (ret)
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
	drm_atomic_state_put(state);
}

/*
 * Reference clock (in kHz) feeding the DPLL, derived from the programmed
 * DPLL state and the platform.
 */
static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev_priv))
		return 120000;
	else if (!IS_GEN2(dev_priv))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick whichever FP register the DPLL is currently using. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev_priv)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev_priv)) {
		/* P1 is encoded as a one-hot bitfield; ffs() recovers the divisor. */
		if (IS_PINEVIEW(dev_priv))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev_priv))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* i830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precison if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}

static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}

/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	/* Timing registers store (value - 1); low half active, high half total. */
	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	kfree(pipe_config);

	return mode;
}

/*
 * CRTC destructor: detach any in-flight page flip work under the event
 * lock, cancel it outside the lock, then free the CRTC.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_flip_work *work;

	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->flip_work;
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (work) {
		cancel_work_sync(&work->mmio_work);
		cancel_work_sync(&work->unpin_work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

/*
 * Deferred completion of a page flip: unpin the old framebuffer, drop
 * the object/request/fb references taken when the flip was queued, and
 * signal frontbuffer/FBC that the flip finished.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_flip_work *work =
		container_of(__work, struct intel_flip_work, unpin_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	/* For mmio flips the mmio work must have run before we can unpin. */
	if (is_mmio_work(work))
		flush_work(&work->mmio_work);

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_vma(work->old_vma);
	i915_gem_object_put(work->pending_flip_obj);
	mutex_unlock(&dev->struct_mutex);

	i915_gem_request_put(work->flip_queued_req);

	intel_frontbuffer_flip_complete(to_i915(dev),
					to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}

/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	return !((a - b) & 0x80000000);
}

static bool __pageflip_finished_cs(struct intel_crtc *crtc,
				   struct intel_flip_work *work)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (abort_flip_on_reset(crtc))
		return true;

	/*
	 * The relevant registers doen't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result.
	 * It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->flip_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
					crtc->flip_work->flip_count);
}

static bool
__pageflip_finished_mmio(struct intel_crtc *crtc,
			 struct intel_flip_work *work)
{
	/*
	 * MMIO work completes when vblank is different from
	 * flip_queued_vblank.
	 *
	 * Reset counter value doesn't matter, this is handled by
	 * i915_wait_request finishing early, so no need to handle
	 * reset here.
	 */
	return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
}


/*
 * Has @work's flip completed? Dispatches to the mmio or CS variant;
 * the smp_rmb() pairs with intel_mark_page_flip_active().
 */
static bool pageflip_finished(struct intel_crtc *crtc,
			      struct intel_flip_work *work)
{
	if (!atomic_read(&work->pending))
		return false;

	smp_rmb();

	if (is_mmio_work(work))
		return __pageflip_finished_mmio(crtc, work);
	else
		return __pageflip_finished_cs(crtc, work);
}

void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = crtc->flip_work;

	if (work != NULL &&
	    !is_mmio_work(work) &&
	    pageflip_finished(crtc, work))
		page_flip_completed(crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = crtc->flip_work;

	if (work != NULL &&
	    is_mmio_work(work) &&
	    pageflip_finished(crtc, work))
		page_flip_completed(crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
					       struct intel_flip_work *work)
{
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);

	/* Ensure that the work item is consistent when activating it ... */
	smp_mb__before_atomic();
	atomic_set(&work->pending, 1);
}

/*
 * Emit a gen2 MI_DISPLAY_FLIP to the ring: wait for any previous flip on
 * the plane, then issue the flip with the new surface base address.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask, *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
	*cs++ = MI_NOOP;
	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = 0; /* aux display base address, unused */

	return 0;
}

/* Gen3 variant of the above; uses the I915 flavour of MI_DISPLAY_FLIP. */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask, *cs;

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
	*cs++ = MI_NOOP;
	*cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = MI_NOOP;

	return 0;
}

static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 pf, pipesrc, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0];
	*cs++ = intel_crtc->flip_work->gtt_offset |
		intel_fb_modifier_to_tiling(fb->modifier);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	*cs++ = pf | pipesrc;

	return 0;
}

static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 pf, pipesrc, *cs;

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* On gen6 the tiling mode is encoded in the pitch dword. */
	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
	*cs++ = intel_crtc->flip_work->gtt_offset;

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	*cs++ = pf | pipesrc;

	return 0;
}

static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 *cs, plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	/* 4 dwords for the flip itself; the RCS additionally needs LRI+SRM. */
	len = 4;
	if (req->engine->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev_priv))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (req->engine->id == RCS) {
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(DERRMR);
		*cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
			  DERRMR_PIPEB_PRI_FLIP_DONE |
			  DERRMR_PIPEC_PRI_FLIP_DONE);
		if (IS_GEN8(dev_priv))
			*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
				MI_SRM_LRM_GLOBAL_GTT;
		else
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
		*cs++ = i915_mmio_reg_offset(DERRMR);
		*cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
		if (IS_GEN8(dev_priv)) {
			*cs++ = 0;
			*cs++ = MI_NOOP;
		}
	}

	*cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
	*cs++ = intel_crtc->flip_work->gtt_offset;
	*cs++ = MI_NOOP;

	return 0;
}

/*
 * Decide whether to flip via MMIO (true) or via a CS command (false),
 * based on platform generation, the use_mmio_flip modparam, execlists,
 * and whether the object is still being written by another engine.
 */
static bool use_mmio_flip(struct intel_engine_cs *engine,
			  struct drm_i915_gem_object *obj)
{
	/*
	 * This is not being used for older platforms, because
	 * non-availability of flip done interrupt forces us to use
	 * CS flips. Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */

	if (engine == NULL)
		return true;

	if (INTEL_GEN(engine->i915) < 5)
		return false;

	/*
	 * Module parameter override: < 0 forces CS flips, > 0 forces MMIO
	 * flips; 0 leaves the decision to the execlists/last-writer checks.
	 */
	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;

	/* Avoid a CS flip on an engine still writing to the object. */
	return engine != i915_gem_object_last_write_engine(obj);
}

/*
 * Perform an MMIO flip on a gen9+ universal plane: reprogram PLANE_CTL
 * tiling and PLANE_STRIDE, then latch everything with the PLANE_SURF write.
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride = skl_plane_stride(fb, 0, rotation);

	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}

/*
 * Perform an MMIO flip on an ilk-style primary plane: update the tiling
 * bit in DSPCNTR, then latch with the DSPSURF write.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	dspcntr = I915_READ(reg);

	if (fb->modifier == I915_FORMAT_MOD_X_TILED)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}

/*
 * Deferred-work handler for MMIO flips: wait for rendering to the new
 * framebuffer object to finish, then program the flip between the vblank
 * evasion start/end markers.
 */
static void intel_mmio_flip_work_func(struct work_struct *w)
{
	struct intel_flip_work *work =
		container_of(w, struct intel_flip_work, mmio_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);

	intel_pipe_update_start(crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_do_mmio_flip(crtc, work->rotation, work);
	else
		/* use_mmio_flip() restricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc, work);
}

/*
 * Fallback .queue_flip hook for platforms without a CS flip
 * implementation: always reject with -ENODEV.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}

/*
 * Heuristic stall detector for CS flips, called from vblank interrupt
 * context with the event lock held: report a stuck flip when the request
 * has completed, at least 3 vblanks have passed since it became ready,
 * and the scanned-out surface base address already matches the new fb.
 */
static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
				      struct intel_crtc *intel_crtc,
				      struct intel_flip_work *work)
{
	u32 addr, vblank;

	if (!atomic_read(&work->pending))
		return false;

	/* Order reads of the work fields after the pending flag. */
	smp_rmb();

	vblank = intel_crtc_get_vblank_counter(intel_crtc);
	if (work->flip_ready_vblank == 0) {
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req))
			return false;

		work->flip_ready_vblank = vblank;
	}

	if (vblank - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}

/*
 * Vblank-interrupt hook: kick a CS page flip that appears stuck
 * (missed flip-done interrupt) and RPS-boost flips that are lagging.
 */
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	struct intel_flip_work *work;

	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	spin_lock(&dev->event_lock);
	work = crtc->flip_work;

	if (work != NULL && !is_mmio_work(work) &&
	    __pageflip_stall_check_cs(dev_priv, crtc, work)) {
		WARN_ONCE(1,
			  "Kicking stuck page flip: queued at %d, now %d\n",
			  work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc));
		page_flip_completed(crtc);
		work = NULL;
	}

	/* Boost the GPU if the flip has been waiting more than one vblank. */
	if (work != NULL && !is_mmio_work(work) &&
	    intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(work->flip_queued_req);
	spin_unlock(&dev->event_lock);
}

/*
 * Legacy (pre-atomic) page-flip implementation: pin the new framebuffer,
 * then either queue an MMIO flip worker or emit a CS flip via the
 * per-platform .queue_flip hook.  On -EIO (wedged GPU) falls back to a
 * full atomic commit so the flip still completes.
 */
__maybe_unused
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum pipe pipe = intel_crtc->pipe;
	struct intel_flip_work *work;
	struct intel_engine_cs *engine;
	bool mmio_flip;
	struct drm_i915_gem_request *request;
	struct i915_vma *vma;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe. In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->format != crtc->primary->fb->format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_GEN(dev_priv) > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->unpin_work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->flip_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->flip_work = work;
	spin_unlock_irq(&dev->event_lock);

	/* Throttle: don't let unpin work pile up more than two deep. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	work->pending_flip_obj = i915_gem_object_get(obj);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
	if (i915_reset_backoff_or_wedged(&dev_priv->gpu_error)) {
		ret = -EIO;
		goto unlock;
	}

	atomic_inc(&intel_crtc->unpin_work_count);

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	/* Pick the engine to emit the CS flip on (NULL forces MMIO). */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		engine = dev_priv->engine[BCS];
		if (fb->modifier != old_fb->modifier)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			engine = NULL;
	} else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
		engine = dev_priv->engine[BCS];
	} else if (INTEL_GEN(dev_priv) >= 7) {
		engine = i915_gem_object_last_write_engine(obj);
		if (engine == NULL || engine->id != RCS)
			engine = dev_priv->engine[BCS];
	} else {
		engine = dev_priv->engine[RCS];
	}

	mmio_flip = use_mmio_flip(engine, obj);

	vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto cleanup_pending;
	}

	work->old_vma = to_intel_plane_state(primary->state)->vma;
	to_intel_plane_state(primary->state)->vma = vma;

	work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
	work->rotation = crtc->primary->state->rotation;

	/*
	 * There's the potential that the next frame will not be compatible with
	 * FBC, so we want to call pre_update() before the actual page flip.
	 * The problem is that pre_update() caches some information about the fb
	 * object, so we want to do this only after the object is pinned. Let's
	 * be on the safe side and do this immediately before scheduling the
	 * flip.
	 */
	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
			     to_intel_plane_state(primary->state));

	if (mmio_flip) {
		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
		queue_work(system_unbound_wq, &work->mmio_work);
	} else {
		request = i915_gem_request_alloc(engine,
						 dev_priv->kernel_context);
		if (IS_ERR(request)) {
			ret = PTR_ERR(request);
			goto cleanup_unpin;
		}

		ret = i915_gem_request_await_object(request, obj, false);
		if (ret)
			goto cleanup_request;

		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_request;

		intel_mark_page_flip_active(intel_crtc, work);

		work->flip_queued_req = i915_gem_request_get(request);
		i915_add_request(request);
	}

	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
	i915_gem_track_fb(intel_fb_obj(old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_prepare(to_i915(dev),
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

	/* Error unwind: undo the steps above in reverse order. */
cleanup_request:
	i915_add_request(request);
cleanup_unpin:
	to_intel_plane_state(primary->state)->vma = work->old_vma;
	intel_unpin_fb_vma(vma);
cleanup_pending:
	atomic_dec(&intel_crtc->unpin_work_count);
unlock:
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	i915_gem_object_put(obj);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	/* GPU wedged: fall back to a full atomic commit for the flip. */
	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

out_hang:
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = dev->mode_config.acquire_ctx;

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		drm_atomic_state_put(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}


/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @plane: drm plane
 * @state: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true or false.
 */
static bool intel_wm_need_update(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct intel_plane_state *new = to_intel_plane_state(state);
	struct intel_plane_state *cur = to_intel_plane_state(plane->state);

	/* Update watermarks on tiling or size changes.
	 */
	if (new->base.visible != cur->base.visible)
		return true;

	if (!cur->base.fb || !new->base.fb)
		return false;

	if (cur->base.fb->modifier != new->base.fb->modifier ||
	    cur->base.rotation != new->base.rotation ||
	    drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
	    drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
	    drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
	    drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
		return true;

	return false;
}

/*
 * Does the plane state require the pipe scaler, i.e. do the 16.16
 * fixed-point source dimensions differ from the destination dimensions?
 */
static bool needs_scaling(struct intel_plane_state *state)
{
	int src_w = drm_rect_width(&state->base.src) >> 16;
	int src_h = drm_rect_height(&state->base.src) >> 16;
	int dst_w = drm_rect_width(&state->base.dst);
	int dst_h = drm_rect_height(&state->base.dst);

	return (src_w != dst_w || src_h != dst_h);
}

/*
 * Derive the crtc-state flags (watermark updates, cxsr disable,
 * frontbuffer bits, fb_changed, ...) implied by a plane state change
 * during atomic check.  Returns 0 or a negative errno from scaler setup.
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *plane = to_intel_plane(plane_state->plane);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->base.state);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->base.visible;
	visible = plane_state->visible;

	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled) {
		plane_state->visible = visible = false;
		to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
	}

	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id, intel_crtc->base.name,
			 plane->base.base.id, plane->base.name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.base.id, plane->base.name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		if (INTEL_GEN(dev_priv) < 5)
			pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		if (INTEL_GEN(dev_priv) < 5)
			pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->id != PLANE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(&plane->base, plane_state)) {
		if (INTEL_GEN(dev_priv) < 5) {
			/* FIXME bollocks */
			pipe_config->update_wm_pre = true;
			pipe_config->update_wm_post = true;
		}
	}

	if (visible || was_visible)
		pipe_config->fb_bits |= plane->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}

static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

/*
 * Verify that @encoder can be cloned with every other encoder that the
 * new state attaches to @crtc.  Returns false on an invalid combination.
 */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/*
 * Per-crtc atomic check hook: compute clocks, validate color management,
 * compute pipe and intermediate watermarks, and set up gen9+ scalers.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state. We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	if (INTEL_GEN(dev_priv) >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
							 pipe_config);
	}

	return ret;
}

static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};

/*
 * Resynchronize every connector's atomic state (best_encoder, crtc and
 * the associated reference) with its current legacy encoder pointer.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->base.state->crtc)
			drm_connector_unreference(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			drm_connector_reference(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

/*
 * Clamp pipe_config->pipe_bpp to what the sink can accept, based on the
 * connector's EDID-reported bits per channel.
 */
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	const struct drm_display_info *info = &connector->base.display_info;
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		      connector->base.base.id,
		      connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (info->bpc != 0 && info->bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, info->bpc * 3);
		pipe_config->pipe_bpp = info->bpc * 3;
	}

	/* Clamp bpp to 8 on screens without EDID 1.4 */
	if (info->bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}

/*
 * Pick the platform's maximum pipe bpp, store it in the crtc state and
 * clamp it per connected sink.  Returns the unclamped baseline bpp.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	     IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (INTEL_GEN(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;


	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}

/* Dump the raw crtc_* timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}

/* Dump one set of link M/N values, tagged with @id, to the KMS debug log. */
static inline void
intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
		      unsigned int lane_count, struct intel_link_m_n *m_n)
{
	DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      id, lane_count,
		      m_n->gmch_m, m_n->gmch_n,
		      m_n->link_m, m_n->link_n, m_n->tu);
}

/*
 * Dump the full contents of @pipe_config (and the state of every plane on
 * the crtc's pipe) to the KMS debug log; @context labels the dump.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				      pipe_config->lane_count, &pipe_config->dp_m_n);
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		if (INTEL_GEN(dev_priv) >= 9)
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}

/*
 * Reject configurations that drive the same digital port from more than
 * one connector, or that mix MST with SST/HDMI on one port.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
11236 */ 11237 drm_for_each_connector(connector, dev) { 11238 struct drm_connector_state *connector_state; 11239 struct intel_encoder *encoder; 11240 11241 connector_state = drm_atomic_get_existing_connector_state(state, connector); 11242 if (!connector_state) 11243 connector_state = connector->state; 11244 11245 if (!connector_state->best_encoder) 11246 continue; 11247 11248 encoder = to_intel_encoder(connector_state->best_encoder); 11249 11250 WARN_ON(!connector_state->crtc); 11251 11252 switch (encoder->type) { 11253 unsigned int port_mask; 11254 case INTEL_OUTPUT_UNKNOWN: 11255 if (WARN_ON(!HAS_DDI(to_i915(dev)))) 11256 break; 11257 case INTEL_OUTPUT_DP: 11258 case INTEL_OUTPUT_HDMI: 11259 case INTEL_OUTPUT_EDP: 11260 port_mask = 1 << enc_to_dig_port(&encoder->base)->port; 11261 11262 /* the same port mustn't appear more than once */ 11263 if (used_ports & port_mask) 11264 return false; 11265 11266 used_ports |= port_mask; 11267 break; 11268 case INTEL_OUTPUT_DP_MST: 11269 used_mst_ports |= 11270 1 << enc_to_mst(&encoder->base)->primary->port; 11271 break; 11272 default: 11273 break; 11274 } 11275 } 11276 11277 /* can't mix MST and SST/HDMI on the same port */ 11278 if (used_ports & used_mst_ports) 11279 return false; 11280 11281 return true; 11282} 11283 11284static void 11285clear_intel_crtc_state(struct intel_crtc_state *crtc_state) 11286{ 11287 struct drm_i915_private *dev_priv = 11288 to_i915(crtc_state->base.crtc->dev); 11289 struct intel_crtc_scaler_state scaler_state; 11290 struct intel_dpll_hw_state dpll_hw_state; 11291 struct intel_shared_dpll *shared_dpll; 11292 struct intel_crtc_wm_state wm_state; 11293 bool force_thru; 11294 11295 /* FIXME: before the switch to atomic started, a new pipe_config was 11296 * kzalloc'd. Code that depends on any field being zero should be 11297 * fixed, so that the crtc_state can be safely duplicated. For now, 11298 * only fields that are know to not cause problems are preserved. 
	 */

	/* Save the fields that must survive the wipe below. */
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	/* On VLV/CHV the watermark state is part of the crtc state and must
	 * also be preserved. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	/* Restore the preserved fields. */
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}

/*
 * Compute the full pipe configuration for @crtc from the atomic state:
 * sanitize sync polarity flags, derive the pipe source size, let every
 * encoder on the CRTC adjust the mode, then fix up the CRTC clock/bpp.
 * May retry the encoder pass once if the CRTC compute step requests it.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		pipe_config->output_types |= 1 << encoder->type;
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them.
	 */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* Only one retry pass is allowed, to avoid looping forever. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
		{
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	if (n != n2)
		return false;

	/* Denominators now match; compare the numerators fuzzily. */
	return intel_fuzzy_clock_check(m, m2);
}

/*
 * Compare two link m/n structures. With @adjust set a fuzzy match is
 * accepted and, on a match, *m2_n2 is overwritten with *m_n so the
 * software state converges on the expected values.
 */
static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       struct intel_link_m_n *m2_n2,
		       bool adjust)
{
	if (m_n->tu == m2_n2->tu &&
	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
	    intel_compare_m_n(m_n->link_m, m_n->link_n,
			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
		if (adjust)
			*m2_n2 = *m_n;

		return true;
	}

	return false;
}

/*
 * Report a pipe config mismatch: a KMS debug message while fastset
 * adjusting (@adjust), a kernel error otherwise.
 */
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
	char *level;
	unsigned int category;
	struct va_format vaf;
	va_list args;

	if (adjust) {
		level = KERN_DEBUG;
		category = DRM_UT_KMS;
	} else {
		level = KERN_ERR;
		category = DRM_UT_NONE;
	}

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	drm_printk(level, category, "mismatch in %s %pV", name, &vaf);

	va_end(args);
}

/*
 * Compare the software-computed @current_config against @pipe_config as
 * read back from the hardware. With @adjust set, small differences are
 * tolerated (and the sw state fixed up) for fastset; without it, any
 * mismatch makes the check fail.
 *
 * Returns true when the two states match.
 */
static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config,
			  bool adjust)
{
	bool ret = true;

#define PIPE_CONF_CHECK_X(name) \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected 0x%08x, found 0x%08x)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_I(name) \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected %i, found %i)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_P(name) \
	if (current_config->name != pipe_config->name) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected %p, found %p)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_M_N(name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected tu %i gmch %i/%i link %i/%i, " \
				"found tu %i, gmch %i/%i link %i/%i)\n", \
				current_config->name.tu, \
				current_config->name.gmch_m, \
				current_config->name.gmch_n, \
				current_config->name.link_m, \
				current_config->name.link_n, \
				pipe_config->name.tu, \
				pipe_config->name.gmch_m, \
				pipe_config->name.gmch_n, \
				pipe_config->name.link_m, \
				pipe_config->name.link_n); \
		ret = false; \
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, adjust) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, adjust)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected tu %i gmch %i/%i link %i/%i, " \
				"or tu %i gmch %i/%i link %i/%i, " \
				"found tu %i, gmch %i/%i link %i/%i)\n", \
				current_config->name.tu, \
				current_config->name.gmch_m, \
				current_config->name.gmch_n, \
				current_config->name.link_m, \
				current_config->name.link_n, \
				current_config->alt_name.tu, \
				current_config->alt_name.gmch_m, \
				current_config->alt_name.gmch_n, \
				current_config->alt_name.link_m, \
				current_config->alt_name.link_n, \
				pipe_config->name.tu, \
				pipe_config->name.gmch_m, \
				pipe_config->name.gmch_n, \
				pipe_config->name.link_m, \
				pipe_config->name.link_n); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_FLAGS(name, mask) \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(%x) (expected %i, found %i)\n", \
				(mask), \
				current_config->name & (mask), \
				pipe_config->name & (mask)); \
		ret = false; \
	}

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		pipe_config_err(adjust, __stringify(name), \
				"(expected %i, found %i)\n", \
				current_config->name, \
				pipe_config->name); \
		ret = false; \
	}

#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_GEN(dev_priv) < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_X(output_types);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(has_hdmi_sink);
	if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		PIPE_CONF_CHECK_I(limited_color_range);

	PIPE_CONF_CHECK_I(hdmi_scrambling);
	PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
	PIPE_CONF_CHECK_I(has_infoframe);

	PIPE_CONF_CHECK_I(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_GEN(dev_priv) < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	if (!adjust) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_I(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
		PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
	}

	/* BDW+ don't expose a synchronous way to read the state */
	if (IS_HASWELL(dev_priv))
		PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(double_wide);

	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return ret;
}

/*
 * Cross-check the FDI-derived dotclock against the encoder's idea of the
 * dotclock when a PCH encoder is present; they must agree (fuzzily).
 */
static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
					   const struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder) {
		int
		fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
							&pipe_config->fdi_m_n);
		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees.
		 */
		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
		     fdi_dotclock, dotclock);
	}
}

/*
 * Compare the software-tracked SKL+ watermark and DDB state against the
 * values read back from the hardware for one CRTC, complaining on any
 * mismatch. A no-op on pre-gen9 or inactive CRTCs.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_pipe_wm hw_wm, *sw_wm;
	struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane, level, max_level = ilk_wm_max_level(dev_priv);

	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
		return;

	/* Read the hw watermark/DDB state to compare against the sw state. */
	skl_pipe_wm_get_hw_state(crtc, &hw_wm);
	sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* planes */
	for_each_universal_plane(dev_priv, pipe, plane) {
		hw_plane_wm = &hw_wm.planes[plane];
		sw_plane_wm = &sw_wm->planes[plane];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1, level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), plane + 1,
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][plane];
		sw_ddb_entry = &sw_ddb->plane[pipe][plane];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}

	/*
	 * cursor
	 * If the cursor plane isn't active, we may not have updated it's ddb
	 * allocation. In that case since the ddb allocation will be updated
	 * once the plane becomes visible, we can skip this check
	 */
	if (intel_crtc->cursor_addr) {
		hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];

		/* Watermarks */
		for (level = 0; level <= max_level; level++) {
			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
						&sw_plane_wm->wm[level]))
				continue;

			DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe), level,
				  sw_plane_wm->wm[level].plane_en,
				  sw_plane_wm->wm[level].plane_res_b,
				  sw_plane_wm->wm[level].plane_res_l,
				  hw_plane_wm->wm[level].plane_en,
				  hw_plane_wm->wm[level].plane_res_b,
				  hw_plane_wm->wm[level].plane_res_l);
		}

		if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
					 &sw_plane_wm->trans_wm)) {
			DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
				  pipe_name(pipe),
				  sw_plane_wm->trans_wm.plane_en,
				  sw_plane_wm->trans_wm.plane_res_b,
				  sw_plane_wm->trans_wm.plane_res_l,
				  hw_plane_wm->trans_wm.plane_en,
				  hw_plane_wm->trans_wm.plane_res_b,
				  hw_plane_wm->trans_wm.plane_res_l);
		}

		/* DDB */
		hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
			DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe),
				  sw_ddb_entry->start, sw_ddb_entry->end,
				  hw_ddb_entry->start, hw_ddb_entry->end);
		}
	}
}

/*
 * Verify that every connector assigned to @crtc in @state has a consistent
 * atomic vs legacy encoder. With @crtc == NULL this verifies the connectors
 * that are not bound to any crtc.
 */
static void
verify_connector_state(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_state *new_conn_state;
	int i;
11937 11938 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 11939 struct drm_encoder *encoder = connector->encoder; 11940 11941 if (new_conn_state->crtc != crtc) 11942 continue; 11943 11944 intel_connector_verify_state(to_intel_connector(connector)); 11945 11946 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 11947 "connector's atomic encoder doesn't match legacy encoder\n"); 11948 } 11949} 11950 11951static void 11952verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state) 11953{ 11954 struct intel_encoder *encoder; 11955 struct drm_connector *connector; 11956 struct drm_connector_state *old_conn_state, *new_conn_state; 11957 int i; 11958 11959 for_each_intel_encoder(dev, encoder) { 11960 bool enabled = false, found = false; 11961 enum pipe pipe; 11962 11963 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 11964 encoder->base.base.id, 11965 encoder->base.name); 11966 11967 for_each_oldnew_connector_in_state(state, connector, old_conn_state, 11968 new_conn_state, i) { 11969 if (old_conn_state->best_encoder == &encoder->base) 11970 found = true; 11971 11972 if (new_conn_state->best_encoder != &encoder->base) 11973 continue; 11974 found = enabled = true; 11975 11976 I915_STATE_WARN(new_conn_state->crtc != 11977 encoder->base.crtc, 11978 "connector's crtc doesn't match encoder crtc\n"); 11979 } 11980 11981 if (!found) 11982 continue; 11983 11984 I915_STATE_WARN(!!encoder->base.crtc != enabled, 11985 "encoder's enabled state mismatch " 11986 "(expected %i, found %i)\n", 11987 !!encoder->base.crtc, enabled); 11988 11989 if (!encoder->base.crtc) { 11990 bool active; 11991 11992 active = encoder->get_hw_state(encoder, &pipe); 11993 I915_STATE_WARN(active, 11994 "encoder detached but still enabled on pipe %c.\n", 11995 pipe_name(pipe)); 11996 } 11997 } 11998} 11999 12000static void 12001verify_crtc_state(struct drm_crtc *crtc, 12002 struct drm_crtc_state *old_crtc_state, 12003 struct drm_crtc_state *new_crtc_state) 12004{ 12005 struct 
drm_device *dev = crtc->dev; 12006 struct drm_i915_private *dev_priv = to_i915(dev); 12007 struct intel_encoder *encoder; 12008 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12009 struct intel_crtc_state *pipe_config, *sw_config; 12010 struct drm_atomic_state *old_state; 12011 bool active; 12012 12013 old_state = old_crtc_state->state; 12014 __drm_atomic_helper_crtc_destroy_state(old_crtc_state); 12015 pipe_config = to_intel_crtc_state(old_crtc_state); 12016 memset(pipe_config, 0, sizeof(*pipe_config)); 12017 pipe_config->base.crtc = crtc; 12018 pipe_config->base.state = old_state; 12019 12020 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); 12021 12022 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); 12023 12024 /* hw state is inconsistent with the pipe quirk */ 12025 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 12026 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 12027 active = new_crtc_state->active; 12028 12029 I915_STATE_WARN(new_crtc_state->active != active, 12030 "crtc active state doesn't match with hw state " 12031 "(expected %i, found %i)\n", new_crtc_state->active, active); 12032 12033 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active, 12034 "transitional active state does not match atomic hw state " 12035 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active); 12036 12037 for_each_encoder_on_crtc(dev, crtc, encoder) { 12038 enum pipe pipe; 12039 12040 active = encoder->get_hw_state(encoder, &pipe); 12041 I915_STATE_WARN(active != new_crtc_state->active, 12042 "[ENCODER:%i] active %i with crtc active %i\n", 12043 encoder->base.base.id, active, new_crtc_state->active); 12044 12045 I915_STATE_WARN(active && intel_crtc->pipe != pipe, 12046 "Encoder connected to wrong pipe %c\n", 12047 pipe_name(pipe)); 12048 12049 if (active) { 12050 pipe_config->output_types |= 1 << encoder->type; 12051 encoder->get_config(encoder, pipe_config); 12052 } 
12053 } 12054 12055 intel_crtc_compute_pixel_rate(pipe_config); 12056 12057 if (!new_crtc_state->active) 12058 return; 12059 12060 intel_pipe_config_sanity_check(dev_priv, pipe_config); 12061 12062 sw_config = to_intel_crtc_state(crtc->state); 12063 if (!intel_pipe_config_compare(dev_priv, sw_config, 12064 pipe_config, false)) { 12065 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 12066 intel_dump_pipe_config(intel_crtc, pipe_config, 12067 "[hw state]"); 12068 intel_dump_pipe_config(intel_crtc, sw_config, 12069 "[sw state]"); 12070 } 12071} 12072 12073static void 12074verify_single_dpll_state(struct drm_i915_private *dev_priv, 12075 struct intel_shared_dpll *pll, 12076 struct drm_crtc *crtc, 12077 struct drm_crtc_state *new_state) 12078{ 12079 struct intel_dpll_hw_state dpll_hw_state; 12080 unsigned crtc_mask; 12081 bool active; 12082 12083 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 12084 12085 DRM_DEBUG_KMS("%s\n", pll->name); 12086 12087 active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state); 12088 12089 if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) { 12090 I915_STATE_WARN(!pll->on && pll->active_mask, 12091 "pll in active use but not on in sw tracking\n"); 12092 I915_STATE_WARN(pll->on && !pll->active_mask, 12093 "pll is on but not used by any active crtc\n"); 12094 I915_STATE_WARN(pll->on != active, 12095 "pll on state mismatch (expected %i, found %i)\n", 12096 pll->on, active); 12097 } 12098 12099 if (!crtc) { 12100 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, 12101 "more active pll users than references: %x vs %x\n", 12102 pll->active_mask, pll->state.crtc_mask); 12103 12104 return; 12105 } 12106 12107 crtc_mask = 1 << drm_crtc_index(crtc); 12108 12109 if (new_state->active) 12110 I915_STATE_WARN(!(pll->active_mask & crtc_mask), 12111 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", 12112 pipe_name(drm_crtc_index(crtc)), pll->active_mask); 12113 else 12114 I915_STATE_WARN(pll->active_mask & crtc_mask, 12115 "pll 
active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", 12116 pipe_name(drm_crtc_index(crtc)), pll->active_mask); 12117 12118 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), 12119 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", 12120 crtc_mask, pll->state.crtc_mask); 12121 12122 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 12123 &dpll_hw_state, 12124 sizeof(dpll_hw_state)), 12125 "pll hw state mismatch\n"); 12126} 12127 12128static void 12129verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, 12130 struct drm_crtc_state *old_crtc_state, 12131 struct drm_crtc_state *new_crtc_state) 12132{ 12133 struct drm_i915_private *dev_priv = to_i915(dev); 12134 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state); 12135 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state); 12136 12137 if (new_state->shared_dpll) 12138 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state); 12139 12140 if (old_state->shared_dpll && 12141 old_state->shared_dpll != new_state->shared_dpll) { 12142 unsigned crtc_mask = 1 << drm_crtc_index(crtc); 12143 struct intel_shared_dpll *pll = old_state->shared_dpll; 12144 12145 I915_STATE_WARN(pll->active_mask & crtc_mask, 12146 "pll active mismatch (didn't expect pipe %c in active mask)\n", 12147 pipe_name(drm_crtc_index(crtc))); 12148 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, 12149 "pll enabled crtcs mismatch (found %x in enabled mask)\n", 12150 pipe_name(drm_crtc_index(crtc))); 12151 } 12152} 12153 12154static void 12155intel_modeset_verify_crtc(struct drm_crtc *crtc, 12156 struct drm_atomic_state *state, 12157 struct drm_crtc_state *old_state, 12158 struct drm_crtc_state *new_state) 12159{ 12160 if (!needs_modeset(new_state) && 12161 !to_intel_crtc_state(new_state)->update_pipe) 12162 return; 12163 12164 verify_wm_state(crtc, new_state); 12165 verify_connector_state(crtc->dev, state, crtc); 12166 verify_crtc_state(crtc, 
			  old_state, new_state);
	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
}

/* Verify that any DPLL not referenced by a crtc really is powered down. */
static void
verify_disabled_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++)
		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
}

/* State checks that are independent of any particular crtc. */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}

/*
 * Recompute the per-crtc scanline counter offset so that vblank
 * timestamps derived from the hw counter come out correct on every
 * platform generation.
 */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 *
	 * On VLV/CHV DSI the scanline counter would appear to increment
	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
	 * that means we can't tell whether we're in vblank or not while
	 * we're on that particular line. We must still set scanline_offset
	 * to 1 so that the vblank timestamps come out correct when we query
	 * the scanline counter from within the vblank interrupt handler.
	 * However if queried just before the start of vblank we'll get an
	 * answer that's slightly in the future.
	 */
	if (IS_GEN2(dev_priv)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev_priv) &&
		   intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}

/*
 * Drop the shared DPLL reference of every crtc undergoing a modeset; a
 * new pll will be picked during the compute phase.
 */
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	/* Platforms without programmable clocks have nothing to release. */
	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_shared_dpll *old_dpll =
			to_intel_crtc_state(old_crtc_state)->shared_dpll;

		if (!needs_modeset(new_crtc_state))
			continue;

		to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;

		if (!old_dpll)
			continue;

		intel_release_shared_dpll(old_dpll, intel_crtc, state);
	}
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation.
When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled during the modeset */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		/* Note: this pulls every CRTC into the atomic state. */
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

/*
 * Pull every pipe's CRTC state into @state, which takes all the CRTC
 * locks, without forcing a modeset on any of them.
 */
static int intel_lock_all_pipes(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;

	/* Add all pipes to the state */
	for_each_crtc(state->dev, crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	return 0;
}

static int intel_modeset_all_pipes(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;

	/*
	 * Add all pipes to the state, and force
	 * a modeset on all the active ones.
	 */
	for_each_crtc(state->dev, crtc) {
		struct drm_crtc_state *crtc_state;
		int ret;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* already inactive, or already flagged for a modeset */
		if (!crtc_state->active || needs_modeset(crtc_state))
			continue;

		crtc_state->mode_changed = true;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Modeset-time global checks: reject digital port conflicts, recompute
 * the active-CRTC mask and cdclk state, release stale shared DPLLs, and
 * apply the HSW plane-enable workaround.  Called from intel_atomic_check()
 * only when at least one CRTC needs a full modeset.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must be protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical,
					       &intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual,
					       &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
	} else {
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}

/*
 * Handle calculation of various watermark data at the end of the atomic check
 * phase.  The code here should be run after the per-crtc and per-plane 'check'
 * handlers to ensure that all derived state has been updated.
 */
static int calc_watermark_data(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Is there platform-specific watermark information to calculate?
*/
	if (dev_priv->display.compute_global_watermarks)
		return dev_priv->display.compute_global_watermarks(state);

	return 0;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * i915's &drm_mode_config_funcs.atomic_check hook: runs the core modeset
 * check helper, recomputes pipe config for CRTCs that need a modeset
 * (optionally downgrading to a fastset when i915.fastboot finds the new
 * config equivalent), then runs the plane checks, FBC CRTC selection and
 * watermark derivation.  Returns 0 on success or a negative error code.
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *crtc_state;
	int ret, i;
	bool any_ms = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		/* Catch I915_MODE_FLAG_INHERITED */
		if (crtc_state->mode.private_flags != old_crtc_state->mode.private_flags)
			crtc_state->mode_changed = true;

		if (!needs_modeset(crtc_state))
			continue;

		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		/* FIXME: For only active_changed we shouldn't need to do any
		 * state recomputation at all. */

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		/*
		 * Fastboot: if the recomputed config matches the old one,
		 * downgrade the full modeset to a pipe update (fastset).
		 */
		if (i915.fastboot &&
		    intel_pipe_config_compare(dev_priv,
					to_intel_crtc_state(old_crtc_state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			pipe_config->update_pipe = true;
		}

		if (needs_modeset(crtc_state))
			any_ms = true;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else {
		intel_state->cdclk.logical = dev_priv->cdclk.logical;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, state);
	return calc_watermark_data(state);
}

/*
 * Pre-commit preparation: wait for pending page flips (skipped for legacy
 * cursor updates), flush the driver workqueue if unpin work is piling up,
 * then pin/prepare all planes under struct_mutex.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (state->legacy_cursor_update)
			continue;

		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * Return the current hardware vblank counter for @crtc, falling back to
 * the software-accurate count on platforms without a usable hw counter.
 */
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	if (!dev->max_vblank_count)
		return drm_accurate_vblank_count(&crtc->base);

	return dev->driver->get_vblank_counter(dev, crtc->pipe);
}

/*
 * Wait (up to 50ms per pipe) for one vblank on every pipe in @crtc_mask.
 * Vblank counts are sampled for all pipes first, then waited on, so the
 * waits for different pipes overlap.
 */
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
					  struct drm_i915_private *dev_priv,
					  unsigned crtc_mask)
{
	unsigned last_vblank_count[I915_MAX_PIPES];
	enum pipe pipe;
	int ret;

	if (!crtc_mask)
		return;

	for_each_pipe(dev_priv, pipe) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  pipe);

		if (!((1 << pipe) & crtc_mask))
			continue;

		ret = drm_crtc_vblank_get(&crtc->base);
		if (WARN_ON(ret != 0)) {
			/* can't wait on this pipe; drop it from the mask */
			crtc_mask &= ~(1 << pipe);
			continue;
		}

		last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base);
	}

	for_each_pipe(dev_priv, pipe) {
		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
								  pipe);
		long lret;

		if (!((1 << pipe) & crtc_mask))
			continue;

		lret = wait_event_timeout(dev->vblank[pipe].queue,
				last_vblank_count[pipe] !=
					drm_crtc_vblank_count(&crtc->base),
				msecs_to_jiffies(50));

		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));

		drm_crtc_vblank_put(&crtc->base);
	}
}

/* Does this CRTC update require a vblank to pass before cleanup? */
static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
{
	/* fb updated, need to unpin old fb */
	if (crtc_state->fb_changed)
		return true;

	/* wm changes, need vblank before final wm's */
	if (crtc_state->update_wm_post)
		return true;

	if (crtc_state->wm.need_postvbl_update)
		return true;

	return false;
}

/*
 * Commit a single CRTC: enable it (modeset) or run the pre-plane update
 * (fastset), enable FBC if the primary plane is in the state, commit the
 * planes, and record whether a post-commit vblank wait is needed in
 * @crtc_vblank_mask.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      struct drm_crtc_state *new_crtc_state,
			      unsigned int *crtc_vblank_mask)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
	bool modeset = needs_modeset(new_crtc_state);

	if (modeset) {
		update_scanline_offset(intel_crtc);
		dev_priv->display.crtc_enable(pipe_config, state);
	} else {
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       pipe_config);
	}

	if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
		intel_fbc_enable(
		    intel_crtc, pipe_config,
		    to_intel_plane_state(crtc->primary->state));
	}

	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);

	if (needs_vblank_wait(pipe_config))
		*crtc_vblank_mask |= drm_crtc_mask(crtc);
}

/* Default ->update_crtcs hook: commit all active CRTCs in state order. */
static void intel_update_crtcs(struct drm_atomic_state *state,
			       unsigned int *crtc_vblank_mask)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;

		intel_update_crtc(crtc, state, old_crtc_state,
				  new_crtc_state, crtc_vblank_mask);
	}
}

/*
 * SKL+ ->update_crtcs hook: commits CRTCs in an order that keeps DDB
 * allocations from overlapping between updates (see loop comment below).
 */
static void skl_update_crtcs(struct drm_atomic_state *state,
			     unsigned int *crtc_vblank_mask)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned
int updated = 0;
	bool progress;
	enum pipe pipe;
	int i;

	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->active)
			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other in between CRTC updates. Otherwise
	 * we'll cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(crtc->state);
			pipe = intel_crtc->pipe;

			if (updated & cmask || !cstate->base.active)
				continue;

			/* not safe yet: new DDB would overlap a pending one */
			if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i))
				continue;

			updated |= cmask;
			entries[i] = &cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, its DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !new_crtc_state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state, crtc_vblank_mask);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);
}

/* Free all atomic states queued on the helper's lock-free free list. */
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
{
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}

/* Worker wrapper for intel_atomic_helper_free_state(). */
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);

	intel_atomic_helper_free_state(dev_priv);
}

/*
 * The tail of the commit: disables outgoing pipes, programs the new
 * configuration, enables the new pipes/planes, waits for vblanks and
 * performs post-commit cleanup.  Runs either synchronously from
 * intel_atomic_commit() or from the nonblocking commit worker.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc_state *intel_cstate;
	bool hw_check = intel_state->modeset;
	u64 put_domains[I915_MAX_PIPES] = {};
	unsigned crtc_vblank_mask = 0;
	int i;

	drm_atomic_helper_wait_for_dependencies(state);

	if (intel_state->modeset)
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(new_crtc_state) ||
		    to_intel_crtc_state(new_crtc_state)->update_pipe) {
			hw_check = true;

			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(new_crtc_state));
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       to_intel_crtc_state(new_crtc_state));

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!crtc->state->active) {
				/*
				 * Make sure we don't call initial_watermarks
				 * for ILK-style watermark updates.
				 *
				 * No clue what this is supposed to achieve.
				 */
				if (INTEL_GEN(dev_priv) >= 9)
					dev_priv->display.initial_watermarks(intel_state,
									     to_intel_crtc_state(crtc->state));
			}
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state, &crtc_vblank_mask);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchronously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need our special handling any more.
	 */
	if (!state->legacy_cursor_update)
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		intel_cstate = to_intel_crtc_state(new_crtc_state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_state,
							      intel_cstate);
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset)
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	/* As one of the primary mmio accessors, KMS has a high likelihood
	 * of triggering bugs in unclaimed access. After we finish
	 * modesetting, see if an error has been flagged, and if so
	 * enable debugging for the next modeset - and hope we catch
	 * the culprit.
	 *
	 * XXX note that we assume display power is on at this point.
	 * This might hold true now but we need to add pm helper to check
	 * unclaimed only when the hardware is on, as atomic commits
	 * can happen also when the device is completely off.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	intel_atomic_helper_free_state(dev_priv);
}

/* Nonblocking commit worker: just runs the commit tail. */
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);

	intel_atomic_commit_tail(state);
}

/*
 * Notification callback for the commit_ready sw fence.  On completion it
 * queues the (nonblocking) commit work if one was initialized; on free it
 * defers the state's final drm_atomic_state_put() to the free worker,
 * since this callback may run from an unsuitable context.
 */
static int __i915_sw_fence_call
intel_atomic_commit_ready(struct i915_sw_fence *fence,
			  enum i915_sw_fence_notify notify)
{
	struct intel_atomic_state *state =
		container_of(fence, struct intel_atomic_state, commit_ready);

	switch (notify) {
	case FENCE_COMPLETE:
		if (state->base.commit_work.func)
			queue_work(system_unbound_wq, &state->base.commit_work);
		break;

	case FENCE_FREE:
		{
			struct intel_atomic_helper *helper =
				&to_i915(state->base.dev)->atomic_helper;

			if (llist_add(&state->freed, &helper->free_list))
				schedule_work(&helper->free_work);
			break;
		}
	}

	return NOTIFY_DONE;
}

/* Update frontbuffer tracking for every plane's old -> new framebuffer. */
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane *plane;
	int i;

	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
				  intel_fb_obj(new_plane_state->fb),
				  to_intel_plane(plane)->frontbuffer_bit);
}

/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	/* reference held until the commit_ready fence's FENCE_FREE */
	drm_atomic_state_get(state);
	i915_sw_fence_init(&intel_state->commit_ready,
			   intel_atomic_commit_ready);

	ret = intel_atomic_prepare_commit(dev, state);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		i915_sw_fence_commit(&intel_state->commit_ready);
		return ret;
	}

	/*
	 * The intel_legacy_cursor_update() fast path takes care
	 * of avoiding the vblank waits for simple cursor
	 * movement and flips. For cursor on/off and size changes,
	 * we want to perform the vblank waits so that watermark
	 * updates happen during the correct frames. Gen9+ have
	 * double buffered watermarks and so shouldn't need this.
	 *
	 * Do this after drm_atomic_helper_setup_commit() and
	 * intel_atomic_prepare_commit() because we still want
	 * to skip the flip and fb cleanup waits. Although that
	 * does risk yanking the mapping from under the display
	 * engine.
	 *
	 * FIXME doing watermarks and fb cleanup from a vblank worker
	 * (assuming we had any) would solve these problems.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		state->legacy_cursor_update = false;

	drm_atomic_helper_swap_state(state, true);
	dev_priv->wm.distrust_bios_wm = false;
	intel_shared_dpll_swap_state(state);
	intel_atomic_track_fbs(state);

	if (intel_state->modeset) {
		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
		       sizeof(intel_state->min_pixclk));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->cdclk.logical = intel_state->cdclk.logical;
		dev_priv->cdclk.actual = intel_state->cdclk.actual;
	}

	/* reference consumed by intel_atomic_commit_tail() */
	drm_atomic_state_get(state);
	INIT_WORK(&state->commit_work,
		  nonblock ? intel_atomic_commit_work : NULL);

	i915_sw_fence_commit(&intel_state->commit_ready);
	if (!nonblock) {
		i915_sw_fence_wait(&intel_state->commit_ready);
		intel_atomic_commit_tail(state);
	}

	return 0;
}

/*
 * Force a full modeset on @crtc with its current mode, retrying on
 * modeset deadlock.  Used to restore a CRTC's configuration.
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

out:
	drm_atomic_state_put(state);
}

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set =
drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.set_property = drm_atomic_helper_crtc_set_property,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
};

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(new_state->state);
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret;

	if (obj) {
		if (plane->type == DRM_PLANE_TYPE_CURSOR &&
		    INTEL_INFO(dev_priv)->cursor_needs_physical) {
			/* physical cursor: attach at platform alignment */
			const int align = IS_I830(dev_priv) ? 16 * 1024 : 256;

			ret = i915_gem_object_attach_phys(obj, align);
			if (ret) {
				DRM_DEBUG_KMS("failed to attach phys object\n");
				return ret;
			}
		} else {
			struct i915_vma *vma;

			vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
			if (IS_ERR(vma)) {
				DRM_DEBUG_KMS("failed to pin object\n");
				return PTR_ERR(vma);
			}

			to_intel_plane_state(new_state)->vma = vma;
		}
	}

	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state,
							   plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state)) {
			ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
							      old_obj->resv, NULL,
							      false, 0,
							      GFP_KERNEL);
			if (ret < 0)
				return ret;
		}
	}

	if (new_state->fence) { /* explicit fencing */
		ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
						    new_state->fence,
						    I915_FENCE_TIMEOUT,
						    GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	if (!obj)
		return 0;

	if (!new_state->fence) { /* implicit fencing */
		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
						      obj->resv, NULL,
						      false, I915_FENCE_TIMEOUT,
						      GFP_KERNEL);
		if (ret < 0)
			return ret;

		i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
	}

	return 0;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct i915_vma *vma;

	/* Should only be called after a successful intel_prepare_plane_fb()!
	 */
	/* Take ownership of the pinned vma and release it (NULL if none). */
	vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
	if (vma)
		intel_unpin_fb_vma(vma);
}

/*
 * Compute the maximum downscale factor (16.16 fixed point) usable on @crtc
 * for the given crtc state, or DRM_PLANE_HELPER_NO_SCALING when scaling is
 * unavailable (disabled crtc, missing clock, or cdclk below the dotclock).
 */
int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv;
	int max_scale;
	int crtc_clock, max_dotclk;

	if (!intel_crtc || !crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	dev_priv = to_i915(intel_crtc->base.dev);

	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;

	/*
	 * NOTE(review): Geminilake gets twice the dotclock headroom per
	 * cdclk — presumably 2 pixels per cdclk cycle; confirm against the
	 * GLK cdclk documentation.
	 */
	if (IS_GEMINILAKE(dev_priv))
		max_dotclk *= 2;

	if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is lower of:
	 * close to 3 but not 3, -1 is for that purpose
	 * or
	 * cdclk/crtc_clock
	 */
	max_scale = min((1 << 16) * 3 - 1,
			(1 << 8) * ((max_dotclk << 8) / crtc_clock));

	return max_scale;
}

/*
 * Atomic check for the primary plane: validate position/scaling against the
 * crtc, then compute the surface layout and plane control register value.
 */
static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->dev);
	struct drm_crtc *crtc = state->base.crtc;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;
	int ret;

	if (INTEL_GEN(dev_priv) >= 9) {
		/* use scaler when colorkey is not required */
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		/* Gen9+ primary planes may be positioned freely. */
		can_position = true;
	}

	ret = drm_plane_helper_check_state(&state->base,
					   &state->clip,
					   min_scale, max_scale,
					   can_position, true);
	if (ret)
		return ret;

	if
(!state->base.fb)
		return 0;

	/*
	 * Compute the surface layout and the PLANE_CTL/DSPCNTR value with
	 * the generation-appropriate helpers.
	 */
	if (INTEL_GEN(dev_priv) >= 9) {
		ret = skl_check_plane_surface(state);
		if (ret)
			return ret;

		state->ctl = skl_plane_ctl(crtc_state, state);
	} else {
		ret = i9xx_check_plane_surface(state);
		if (ret)
			return ret;

		state->ctl = i9xx_plane_ctl(crtc_state, state);
	}

	return 0;
}

/*
 * Start of the per-crtc atomic commit: load color management state if it
 * changed, then open the vblank-evasion critical section (closed by
 * intel_finish_crtc_commit()) and apply fastset pipe/scaler updates.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *intel_cstate =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	bool modeset = needs_modeset(crtc->state);

	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	if (modeset)
		goto out;

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	/* Watermarks are updated on the full-modeset path as well. */
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}

/*
 * End of the per-crtc atomic commit: close the vblank-evasion section
 * opened in intel_begin_crtc_commit().
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_pipe_update_end(intel_crtc, NULL);
}

/**
 * intel_plane_destroy -
destroy a plane 13392 * @plane: plane to destroy 13393 * 13394 * Common destruction function for all types of planes (primary, cursor, 13395 * sprite). 13396 */ 13397void intel_plane_destroy(struct drm_plane *plane) 13398{ 13399 drm_plane_cleanup(plane); 13400 kfree(to_intel_plane(plane)); 13401} 13402 13403const struct drm_plane_funcs intel_plane_funcs = { 13404 .update_plane = drm_atomic_helper_update_plane, 13405 .disable_plane = drm_atomic_helper_disable_plane, 13406 .destroy = intel_plane_destroy, 13407 .set_property = drm_atomic_helper_plane_set_property, 13408 .atomic_get_property = intel_plane_atomic_get_property, 13409 .atomic_set_property = intel_plane_atomic_set_property, 13410 .atomic_duplicate_state = intel_plane_duplicate_state, 13411 .atomic_destroy_state = intel_plane_destroy_state, 13412}; 13413 13414static int 13415intel_legacy_cursor_update(struct drm_plane *plane, 13416 struct drm_crtc *crtc, 13417 struct drm_framebuffer *fb, 13418 int crtc_x, int crtc_y, 13419 unsigned int crtc_w, unsigned int crtc_h, 13420 uint32_t src_x, uint32_t src_y, 13421 uint32_t src_w, uint32_t src_h, 13422 struct drm_modeset_acquire_ctx *ctx) 13423{ 13424 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 13425 int ret; 13426 struct drm_plane_state *old_plane_state, *new_plane_state; 13427 struct intel_plane *intel_plane = to_intel_plane(plane); 13428 struct drm_framebuffer *old_fb; 13429 struct drm_crtc_state *crtc_state = crtc->state; 13430 struct i915_vma *old_vma; 13431 13432 /* 13433 * When crtc is inactive or there is a modeset pending, 13434 * wait for it to complete in the slowpath 13435 */ 13436 if (!crtc_state->active || needs_modeset(crtc_state) || 13437 to_intel_crtc_state(crtc_state)->update_pipe) 13438 goto slow; 13439 13440 old_plane_state = plane->state; 13441 13442 /* 13443 * If any parameters change that may affect watermarks, 13444 * take the slowpath. Only changing fb or position should be 13445 * in the fastpath. 
13446 */ 13447 if (old_plane_state->crtc != crtc || 13448 old_plane_state->src_w != src_w || 13449 old_plane_state->src_h != src_h || 13450 old_plane_state->crtc_w != crtc_w || 13451 old_plane_state->crtc_h != crtc_h || 13452 !old_plane_state->fb != !fb) 13453 goto slow; 13454 13455 new_plane_state = intel_plane_duplicate_state(plane); 13456 if (!new_plane_state) 13457 return -ENOMEM; 13458 13459 drm_atomic_set_fb_for_plane(new_plane_state, fb); 13460 13461 new_plane_state->src_x = src_x; 13462 new_plane_state->src_y = src_y; 13463 new_plane_state->src_w = src_w; 13464 new_plane_state->src_h = src_h; 13465 new_plane_state->crtc_x = crtc_x; 13466 new_plane_state->crtc_y = crtc_y; 13467 new_plane_state->crtc_w = crtc_w; 13468 new_plane_state->crtc_h = crtc_h; 13469 13470 ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state), 13471 to_intel_plane_state(new_plane_state)); 13472 if (ret) 13473 goto out_free; 13474 13475 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 13476 if (ret) 13477 goto out_free; 13478 13479 if (INTEL_INFO(dev_priv)->cursor_needs_physical) { 13480 int align = IS_I830(dev_priv) ? 
16 * 1024 : 256; 13481 13482 ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align); 13483 if (ret) { 13484 DRM_DEBUG_KMS("failed to attach phys object\n"); 13485 goto out_unlock; 13486 } 13487 } else { 13488 struct i915_vma *vma; 13489 13490 vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation); 13491 if (IS_ERR(vma)) { 13492 DRM_DEBUG_KMS("failed to pin object\n"); 13493 13494 ret = PTR_ERR(vma); 13495 goto out_unlock; 13496 } 13497 13498 to_intel_plane_state(new_plane_state)->vma = vma; 13499 } 13500 13501 old_fb = old_plane_state->fb; 13502 old_vma = to_intel_plane_state(old_plane_state)->vma; 13503 13504 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb), 13505 intel_plane->frontbuffer_bit); 13506 13507 /* Swap plane state */ 13508 new_plane_state->fence = old_plane_state->fence; 13509 *to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state); 13510 new_plane_state->fence = NULL; 13511 new_plane_state->fb = old_fb; 13512 to_intel_plane_state(new_plane_state)->vma = old_vma; 13513 13514 if (plane->state->visible) { 13515 trace_intel_update_plane(plane, to_intel_crtc(crtc)); 13516 intel_plane->update_plane(plane, 13517 to_intel_crtc_state(crtc->state), 13518 to_intel_plane_state(plane->state)); 13519 } else { 13520 trace_intel_disable_plane(plane, to_intel_crtc(crtc)); 13521 intel_plane->disable_plane(plane, crtc); 13522 } 13523 13524 intel_cleanup_plane_fb(plane, new_plane_state); 13525 13526out_unlock: 13527 mutex_unlock(&dev_priv->drm.struct_mutex); 13528out_free: 13529 intel_plane_destroy_state(plane, new_plane_state); 13530 return ret; 13531 13532slow: 13533 return drm_atomic_helper_update_plane(plane, crtc, fb, 13534 crtc_x, crtc_y, crtc_w, crtc_h, 13535 src_x, src_y, src_w, src_h, ctx); 13536} 13537 13538static const struct drm_plane_funcs intel_cursor_plane_funcs = { 13539 .update_plane = intel_legacy_cursor_update, 13540 .disable_plane = drm_atomic_helper_disable_plane, 13541 .destroy = intel_plane_destroy, 
13542 .set_property = drm_atomic_helper_plane_set_property, 13543 .atomic_get_property = intel_plane_atomic_get_property, 13544 .atomic_set_property = intel_plane_atomic_set_property, 13545 .atomic_duplicate_state = intel_plane_duplicate_state, 13546 .atomic_destroy_state = intel_plane_destroy_state, 13547}; 13548 13549static struct intel_plane * 13550intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) 13551{ 13552 struct intel_plane *primary = NULL; 13553 struct intel_plane_state *state = NULL; 13554 const uint32_t *intel_primary_formats; 13555 unsigned int supported_rotations; 13556 unsigned int num_formats; 13557 int ret; 13558 13559 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 13560 if (!primary) { 13561 ret = -ENOMEM; 13562 goto fail; 13563 } 13564 13565 state = intel_create_plane_state(&primary->base); 13566 if (!state) { 13567 ret = -ENOMEM; 13568 goto fail; 13569 } 13570 13571 primary->base.state = &state->base; 13572 13573 primary->can_scale = false; 13574 primary->max_downscale = 1; 13575 if (INTEL_GEN(dev_priv) >= 9) { 13576 primary->can_scale = true; 13577 state->scaler_id = -1; 13578 } 13579 primary->pipe = pipe; 13580 /* 13581 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 13582 * port is hooked to pipe B. Hence we want plane A feeding pipe B. 
13583 */ 13584 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) 13585 primary->plane = (enum plane) !pipe; 13586 else 13587 primary->plane = (enum plane) pipe; 13588 primary->id = PLANE_PRIMARY; 13589 primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); 13590 primary->check_plane = intel_check_primary_plane; 13591 13592 if (INTEL_GEN(dev_priv) >= 9) { 13593 intel_primary_formats = skl_primary_formats; 13594 num_formats = ARRAY_SIZE(skl_primary_formats); 13595 13596 primary->update_plane = skylake_update_primary_plane; 13597 primary->disable_plane = skylake_disable_primary_plane; 13598 } else if (INTEL_GEN(dev_priv) >= 4) { 13599 intel_primary_formats = i965_primary_formats; 13600 num_formats = ARRAY_SIZE(i965_primary_formats); 13601 13602 primary->update_plane = i9xx_update_primary_plane; 13603 primary->disable_plane = i9xx_disable_primary_plane; 13604 } else { 13605 intel_primary_formats = i8xx_primary_formats; 13606 num_formats = ARRAY_SIZE(i8xx_primary_formats); 13607 13608 primary->update_plane = i9xx_update_primary_plane; 13609 primary->disable_plane = i9xx_disable_primary_plane; 13610 } 13611 13612 if (INTEL_GEN(dev_priv) >= 9) 13613 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13614 0, &intel_plane_funcs, 13615 intel_primary_formats, num_formats, 13616 DRM_PLANE_TYPE_PRIMARY, 13617 "plane 1%c", pipe_name(pipe)); 13618 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 13619 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13620 0, &intel_plane_funcs, 13621 intel_primary_formats, num_formats, 13622 DRM_PLANE_TYPE_PRIMARY, 13623 "primary %c", pipe_name(pipe)); 13624 else 13625 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13626 0, &intel_plane_funcs, 13627 intel_primary_formats, num_formats, 13628 DRM_PLANE_TYPE_PRIMARY, 13629 "plane %c", plane_name(primary->plane)); 13630 if (ret) 13631 goto fail; 13632 13633 if (INTEL_GEN(dev_priv) >= 9) { 13634 supported_rotations = 13635 DRM_ROTATE_0 | 
DRM_ROTATE_90 | 13636 DRM_ROTATE_180 | DRM_ROTATE_270; 13637 } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 13638 supported_rotations = 13639 DRM_ROTATE_0 | DRM_ROTATE_180 | 13640 DRM_REFLECT_X; 13641 } else if (INTEL_GEN(dev_priv) >= 4) { 13642 supported_rotations = 13643 DRM_ROTATE_0 | DRM_ROTATE_180; 13644 } else { 13645 supported_rotations = DRM_ROTATE_0; 13646 } 13647 13648 if (INTEL_GEN(dev_priv) >= 4) 13649 drm_plane_create_rotation_property(&primary->base, 13650 DRM_ROTATE_0, 13651 supported_rotations); 13652 13653 drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs); 13654 13655 return primary; 13656 13657fail: 13658 kfree(state); 13659 kfree(primary); 13660 13661 return ERR_PTR(ret); 13662} 13663 13664static int 13665intel_check_cursor_plane(struct drm_plane *plane, 13666 struct intel_crtc_state *crtc_state, 13667 struct intel_plane_state *state) 13668{ 13669 struct drm_i915_private *dev_priv = to_i915(plane->dev); 13670 struct drm_framebuffer *fb = state->base.fb; 13671 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13672 enum pipe pipe = to_intel_plane(plane)->pipe; 13673 unsigned stride; 13674 int ret; 13675 13676 ret = drm_plane_helper_check_state(&state->base, 13677 &state->clip, 13678 DRM_PLANE_HELPER_NO_SCALING, 13679 DRM_PLANE_HELPER_NO_SCALING, 13680 true, true); 13681 if (ret) 13682 return ret; 13683 13684 /* if we want to turn off the cursor ignore width and height */ 13685 if (!obj) 13686 return 0; 13687 13688 /* Check for which cursor types we support */ 13689 if (!cursor_size_ok(dev_priv, state->base.crtc_w, 13690 state->base.crtc_h)) { 13691 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 13692 state->base.crtc_w, state->base.crtc_h); 13693 return -EINVAL; 13694 } 13695 13696 stride = roundup_pow_of_two(state->base.crtc_w) * 4; 13697 if (obj->base.size < stride * state->base.crtc_h) { 13698 DRM_DEBUG_KMS("buffer is too small\n"); 13699 return -ENOMEM; 13700 } 13701 13702 if (fb->modifier != 
DRM_FORMAT_MOD_LINEAR) { 13703 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 13704 return -EINVAL; 13705 } 13706 13707 /* 13708 * There's something wrong with the cursor on CHV pipe C. 13709 * If it straddles the left edge of the screen then 13710 * moving it away from the edge or disabling it often 13711 * results in a pipe underrun, and often that can lead to 13712 * dead pipe (constant underrun reported, and it scans 13713 * out just a solid color). To recover from that, the 13714 * display power well must be turned off and on again. 13715 * Refuse the put the cursor into that compromised position. 13716 */ 13717 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && 13718 state->base.visible && state->base.crtc_x < 0) { 13719 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); 13720 return -EINVAL; 13721 } 13722 13723 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 13724 state->ctl = i845_cursor_ctl(crtc_state, state); 13725 else 13726 state->ctl = i9xx_cursor_ctl(crtc_state, state); 13727 13728 return 0; 13729} 13730 13731static void 13732intel_disable_cursor_plane(struct drm_plane *plane, 13733 struct drm_crtc *crtc) 13734{ 13735 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13736 13737 intel_crtc->cursor_addr = 0; 13738 intel_crtc_update_cursor(crtc, NULL); 13739} 13740 13741static void 13742intel_update_cursor_plane(struct drm_plane *plane, 13743 const struct intel_crtc_state *crtc_state, 13744 const struct intel_plane_state *state) 13745{ 13746 struct drm_crtc *crtc = crtc_state->base.crtc; 13747 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13748 struct drm_i915_private *dev_priv = to_i915(plane->dev); 13749 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); 13750 uint32_t addr; 13751 13752 if (!obj) 13753 addr = 0; 13754 else if (!INTEL_INFO(dev_priv)->cursor_needs_physical) 13755 addr = intel_plane_ggtt_offset(state); 13756 else 13757 addr = obj->phys_handle->busaddr; 13758 13759 intel_crtc->cursor_addr = addr; 
	intel_crtc_update_cursor(crtc, state);
}

/*
 * Allocate and register the cursor plane for @pipe.
 *
 * Returns the new plane or an ERR_PTR(); on failure any partially
 * constructed plane/state is freed here (drm_universal_plane_init()
 * failures included).
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	cursor->base.state = &state->base;

	/* Cursors are never scaled. */
	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	/* The cursor plane id simply mirrors its pipe. */
	cursor->plane = pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->update_plane = intel_update_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/* 180 degree cursor rotation is only exposed on gen4+. */
	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_ROTATE_0,
						   DRM_ROTATE_0 |
						   DRM_ROTATE_180);

	/* Gen9+: no scaler attached to the cursor. */
	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	kfree(state);
	kfree(cursor);

	return ERR_PTR(ret);
}

/* Initialise the shared pipe-scaler bookkeeping in @crtc_state. */
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int i;

crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe]; 13832 if (!crtc->num_scalers) 13833 return; 13834 13835 for (i = 0; i < crtc->num_scalers; i++) { 13836 struct intel_scaler *scaler = &scaler_state->scalers[i]; 13837 13838 scaler->in_use = 0; 13839 scaler->mode = PS_SCALER_MODE_DYN; 13840 } 13841 13842 scaler_state->scaler_id = -1; 13843} 13844 13845static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) 13846{ 13847 struct intel_crtc *intel_crtc; 13848 struct intel_crtc_state *crtc_state = NULL; 13849 struct intel_plane *primary = NULL; 13850 struct intel_plane *cursor = NULL; 13851 int sprite, ret; 13852 13853 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); 13854 if (!intel_crtc) 13855 return -ENOMEM; 13856 13857 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 13858 if (!crtc_state) { 13859 ret = -ENOMEM; 13860 goto fail; 13861 } 13862 intel_crtc->config = crtc_state; 13863 intel_crtc->base.state = &crtc_state->base; 13864 crtc_state->base.crtc = &intel_crtc->base; 13865 13866 primary = intel_primary_plane_create(dev_priv, pipe); 13867 if (IS_ERR(primary)) { 13868 ret = PTR_ERR(primary); 13869 goto fail; 13870 } 13871 intel_crtc->plane_ids_mask |= BIT(primary->id); 13872 13873 for_each_sprite(dev_priv, pipe, sprite) { 13874 struct intel_plane *plane; 13875 13876 plane = intel_sprite_plane_create(dev_priv, pipe, sprite); 13877 if (IS_ERR(plane)) { 13878 ret = PTR_ERR(plane); 13879 goto fail; 13880 } 13881 intel_crtc->plane_ids_mask |= BIT(plane->id); 13882 } 13883 13884 cursor = intel_cursor_plane_create(dev_priv, pipe); 13885 if (IS_ERR(cursor)) { 13886 ret = PTR_ERR(cursor); 13887 goto fail; 13888 } 13889 intel_crtc->plane_ids_mask |= BIT(cursor->id); 13890 13891 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, 13892 &primary->base, &cursor->base, 13893 &intel_crtc_funcs, 13894 "pipe %c", pipe_name(pipe)); 13895 if (ret) 13896 goto fail; 13897 13898 intel_crtc->pipe = pipe; 13899 intel_crtc->plane = 
primary->plane; 13900 13901 intel_crtc->cursor_base = ~0; 13902 intel_crtc->cursor_cntl = ~0; 13903 intel_crtc->cursor_size = ~0; 13904 13905 /* initialize shared scalers */ 13906 intel_crtc_init_scalers(intel_crtc, crtc_state); 13907 13908 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 13909 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 13910 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc; 13911 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc; 13912 13913 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 13914 13915 intel_color_init(&intel_crtc->base); 13916 13917 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 13918 13919 return 0; 13920 13921fail: 13922 /* 13923 * drm_mode_config_cleanup() will free up any 13924 * crtcs/planes already initialized. 13925 */ 13926 kfree(crtc_state); 13927 kfree(intel_crtc); 13928 13929 return ret; 13930} 13931 13932enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) 13933{ 13934 struct drm_device *dev = connector->base.dev; 13935 13936 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 13937 13938 if (!connector->base.state->crtc) 13939 return INVALID_PIPE; 13940 13941 return to_intel_crtc(connector->base.state->crtc)->pipe; 13942} 13943 13944int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 13945 struct drm_file *file) 13946{ 13947 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 13948 struct drm_crtc *drmmode_crtc; 13949 struct intel_crtc *crtc; 13950 13951 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 13952 if (!drmmode_crtc) 13953 return -ENOENT; 13954 13955 crtc = to_intel_crtc(drmmode_crtc); 13956 pipe_from_crtc_id->pipe = crtc->pipe; 13957 13958 return 0; 13959} 13960 13961static int intel_encoder_clones(struct intel_encoder *encoder) 13962{ 13963 struct drm_device *dev = encoder->base.dev; 13964 struct intel_encoder *source_encoder; 13965 int 
index_mask = 0;
	int entry = 0;

	/* One bit per encoder, in for_each_intel_encoder() order. */
	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

/*
 * Is eDP on port A present? Mobile parts only; DP_A must report detected,
 * and on gen5 a fuse strap can disable it outright.
 */
static bool has_edp_a(struct drm_i915_private *dev_priv)
{
	if (!IS_MOBILE(dev_priv))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/*
 * Should a CRT (VGA) connector be registered on this platform?
 * Rules out platforms without CRT support, fused-off parts, and
 * configurations where the pins are claimed by something else.
 */
static bool intel_crt_present(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return false;

	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
		return false;

	if (IS_CHERRYVIEW(dev_priv))
		return false;

	if (HAS_PCH_LPT_H(dev_priv) &&
	    I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	/* The VBT must explicitly declare integrated CRT support. */
	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

/* Workaround: unlock the panel power sequencer (PPS) registers. */
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
{
	int pps_num;
	int pps_idx;

	/* DDI platforms do not need this workaround. */
	if (HAS_DDI(dev_priv))
		return;
	/*
	 * This w/a is needed at least on CPT/PPT, but to be sure apply it
	 * everywhere where registers can be write protected.
14027 */ 14028 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 14029 pps_num = 2; 14030 else 14031 pps_num = 1; 14032 14033 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { 14034 u32 val = I915_READ(PP_CONTROL(pps_idx)); 14035 14036 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; 14037 I915_WRITE(PP_CONTROL(pps_idx), val); 14038 } 14039} 14040 14041static void intel_pps_init(struct drm_i915_private *dev_priv) 14042{ 14043 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv)) 14044 dev_priv->pps_mmio_base = PCH_PPS_BASE; 14045 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 14046 dev_priv->pps_mmio_base = VLV_PPS_BASE; 14047 else 14048 dev_priv->pps_mmio_base = PPS_BASE; 14049 14050 intel_pps_unlock_regs_wa(dev_priv); 14051} 14052 14053static void intel_setup_outputs(struct drm_i915_private *dev_priv) 14054{ 14055 struct intel_encoder *encoder; 14056 bool dpd_is_edp = false; 14057 14058 intel_pps_init(dev_priv); 14059 14060 /* 14061 * intel_edp_init_connector() depends on this completing first, to 14062 * prevent the registeration of both eDP and LVDS and the incorrect 14063 * sharing of the PPS. 14064 */ 14065 intel_lvds_init(dev_priv); 14066 14067 if (intel_crt_present(dev_priv)) 14068 intel_crt_init(dev_priv); 14069 14070 if (IS_GEN9_LP(dev_priv)) { 14071 /* 14072 * FIXME: Broxton doesn't support port detection via the 14073 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 14074 * detect the ports. 14075 */ 14076 intel_ddi_init(dev_priv, PORT_A); 14077 intel_ddi_init(dev_priv, PORT_B); 14078 intel_ddi_init(dev_priv, PORT_C); 14079 14080 intel_dsi_init(dev_priv); 14081 } else if (HAS_DDI(dev_priv)) { 14082 int found; 14083 14084 /* 14085 * Haswell uses DDI functions to detect digital outputs. 14086 * On SKL pre-D0 the strap isn't connected, so we assume 14087 * it's there. 
14088 */ 14089 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 14090 /* WaIgnoreDDIAStrap: skl */ 14091 if (found || IS_GEN9_BC(dev_priv)) 14092 intel_ddi_init(dev_priv, PORT_A); 14093 14094 /* DDI B, C and D detection is indicated by the SFUSE_STRAP 14095 * register */ 14096 found = I915_READ(SFUSE_STRAP); 14097 14098 if (found & SFUSE_STRAP_DDIB_DETECTED) 14099 intel_ddi_init(dev_priv, PORT_B); 14100 if (found & SFUSE_STRAP_DDIC_DETECTED) 14101 intel_ddi_init(dev_priv, PORT_C); 14102 if (found & SFUSE_STRAP_DDID_DETECTED) 14103 intel_ddi_init(dev_priv, PORT_D); 14104 /* 14105 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 14106 */ 14107 if (IS_GEN9_BC(dev_priv) && 14108 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || 14109 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || 14110 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) 14111 intel_ddi_init(dev_priv, PORT_E); 14112 14113 } else if (HAS_PCH_SPLIT(dev_priv)) { 14114 int found; 14115 dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D); 14116 14117 if (has_edp_a(dev_priv)) 14118 intel_dp_init(dev_priv, DP_A, PORT_A); 14119 14120 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 14121 /* PCH SDVOB multiplex with HDMIB */ 14122 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 14123 if (!found) 14124 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 14125 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 14126 intel_dp_init(dev_priv, PCH_DP_B, PORT_B); 14127 } 14128 14129 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 14130 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 14131 14132 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 14133 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 14134 14135 if (I915_READ(PCH_DP_C) & DP_DETECTED) 14136 intel_dp_init(dev_priv, PCH_DP_C, PORT_C); 14137 14138 if (I915_READ(PCH_DP_D) & DP_DETECTED) 14139 intel_dp_init(dev_priv, PCH_DP_D, PORT_D); 14140 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 14141 bool has_edp, has_port; 
14142 14143 /* 14144 * The DP_DETECTED bit is the latched state of the DDC 14145 * SDA pin at boot. However since eDP doesn't require DDC 14146 * (no way to plug in a DP->HDMI dongle) the DDC pins for 14147 * eDP ports may have been muxed to an alternate function. 14148 * Thus we can't rely on the DP_DETECTED bit alone to detect 14149 * eDP ports. Consult the VBT as well as DP_DETECTED to 14150 * detect eDP ports. 14151 * 14152 * Sadly the straps seem to be missing sometimes even for HDMI 14153 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap 14154 * and VBT for the presence of the port. Additionally we can't 14155 * trust the port type the VBT declares as we've seen at least 14156 * HDMI ports that the VBT claim are DP or eDP. 14157 */ 14158 has_edp = intel_dp_is_edp(dev_priv, PORT_B); 14159 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 14160 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) 14161 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); 14162 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 14163 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 14164 14165 has_edp = intel_dp_is_edp(dev_priv, PORT_C); 14166 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 14167 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) 14168 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); 14169 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 14170 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 14171 14172 if (IS_CHERRYVIEW(dev_priv)) { 14173 /* 14174 * eDP not supported on port D, 14175 * so no need to worry about it 14176 */ 14177 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 14178 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) 14179 intel_dp_init(dev_priv, CHV_DP_D, PORT_D); 14180 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) 14181 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 14182 } 14183 14184 intel_dsi_init(dev_priv); 14185 } else if (!IS_GEN2(dev_priv) && 
!IS_PINEVIEW(dev_priv)) { 14186 bool found = false; 14187 14188 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14189 DRM_DEBUG_KMS("probing SDVOB\n"); 14190 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 14191 if (!found && IS_G4X(dev_priv)) { 14192 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 14193 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 14194 } 14195 14196 if (!found && IS_G4X(dev_priv)) 14197 intel_dp_init(dev_priv, DP_B, PORT_B); 14198 } 14199 14200 /* Before G4X SDVOC doesn't have its own detect register */ 14201 14202 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14203 DRM_DEBUG_KMS("probing SDVOC\n"); 14204 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 14205 } 14206 14207 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 14208 14209 if (IS_G4X(dev_priv)) { 14210 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 14211 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 14212 } 14213 if (IS_G4X(dev_priv)) 14214 intel_dp_init(dev_priv, DP_C, PORT_C); 14215 } 14216 14217 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) 14218 intel_dp_init(dev_priv, DP_D, PORT_D); 14219 } else if (IS_GEN2(dev_priv)) 14220 intel_dvo_init(dev_priv); 14221 14222 if (SUPPORTS_TV(dev_priv)) 14223 intel_tv_init(dev_priv); 14224 14225 intel_psr_init(dev_priv); 14226 14227 for_each_intel_encoder(&dev_priv->drm, encoder) { 14228 encoder->base.possible_crtcs = encoder->crtc_mask; 14229 encoder->base.possible_clones = 14230 intel_encoder_clones(encoder); 14231 } 14232 14233 intel_init_pch_refclk(dev_priv); 14234 14235 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 14236} 14237 14238static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 14239{ 14240 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14241 14242 drm_framebuffer_cleanup(fb); 14243 14244 i915_gem_object_lock(intel_fb->obj); 14245 WARN_ON(!intel_fb->obj->framebuffer_references--); 14246 i915_gem_object_unlock(intel_fb->obj); 14247 14248 
	/* Drop the framebuffer's reference on its backing GEM object. */
	i915_gem_object_put(intel_fb->obj);

	kfree(intel_fb);
}

/*
 * .create_handle: hand userspace a GEM handle for the fb's backing object.
 * Userptr-backed objects are refused.
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

/*
 * .dirty: DIRTYFB ioctl — flush CPU frontbuffer rendering on @fb out to
 * the display (clip rectangles are ignored; the whole object is flushed).
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	return 0;
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};

/*
 * Maximum allowed framebuffer pitch, in bytes, for the given tiling
 * modifier and pixel format on this device generation.
 */
static
u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
			 uint64_t fb_modifier, uint32_t pixel_format)
{
	u32 gen = INTEL_GEN(dev_priv);

	if (gen >= 9) {
		int cpp = drm_format_plane_cpp(pixel_format, 0);

		/* "The stride in bytes must not exceed the size of 8K
		 * pixels and 32K bytes."
14299 */ 14300 return min(8192 * cpp, 32768); 14301 } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) { 14302 return 32*1024; 14303 } else if (gen >= 4) { 14304 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 14305 return 16*1024; 14306 else 14307 return 32*1024; 14308 } else if (gen >= 3) { 14309 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 14310 return 8*1024; 14311 else 14312 return 16*1024; 14313 } else { 14314 /* XXX DSPC is limited to 4k tiled */ 14315 return 8*1024; 14316 } 14317} 14318 14319static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, 14320 struct drm_i915_gem_object *obj, 14321 struct drm_mode_fb_cmd2 *mode_cmd) 14322{ 14323 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 14324 struct drm_format_name_buf format_name; 14325 u32 pitch_limit, stride_alignment; 14326 unsigned int tiling, stride; 14327 int ret = -EINVAL; 14328 14329 i915_gem_object_lock(obj); 14330 obj->framebuffer_references++; 14331 tiling = i915_gem_object_get_tiling(obj); 14332 stride = i915_gem_object_get_stride(obj); 14333 i915_gem_object_unlock(obj); 14334 14335 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 14336 /* 14337 * If there's a fence, enforce that 14338 * the fb modifier and tiling mode match. 14339 */ 14340 if (tiling != I915_TILING_NONE && 14341 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 14342 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n"); 14343 goto err; 14344 } 14345 } else { 14346 if (tiling == I915_TILING_X) { 14347 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 14348 } else if (tiling == I915_TILING_Y) { 14349 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n"); 14350 goto err; 14351 } 14352 } 14353 14354 /* Passed in modifier sanity checking. 
	 */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_GEN(dev_priv) < 9) {
			DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
				      mode_cmd->modifier[0]);
			goto err;
		}
		/* fall through - Y/Yf tiling is accepted on gen9+ */
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
			      mode_cmd->modifier[0]);
		goto err;
	}

	/*
	 * gen2/3 display engine uses the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_INFO(dev_priv)->gen < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
		goto err;
	}

	pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
			      mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
			      "tiled" : "linear",
			      mode_cmd->pitches[0], pitch_limit);
		goto err;
	}

	/*
	 * If there's a fence, enforce that
	 * the fb pitch and fence stride match.
	 */
	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
		DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
			      mode_cmd->pitches[0], stride);
		goto err;
	}

	/* Reject formats not supported by any plane early.
	 */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		/* gen2/3 only */
		if (INTEL_GEN(dev_priv) > 3) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    INTEL_GEN(dev_priv) < 9) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_GEN(dev_priv) < 4) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_GEN(dev_priv) < 5) {
			DRM_DEBUG_KMS("unsupported pixel format: %s\n",
				      drm_get_format_name(mode_cmd->pixel_format, &format_name));
			goto err;
		}
		break;
	default:
		DRM_DEBUG_KMS("unsupported pixel format: %s\n",
			      drm_get_format_name(mode_cmd->pixel_format, &format_name));
		goto err;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly.
	 */
	if (mode_cmd->offsets[0] != 0)
		goto err;

	drm_helper_mode_fill_fb_struct(&dev_priv->drm,
				       &intel_fb->base, mode_cmd);

	stride_alignment = intel_fb_stride_alignment(&intel_fb->base, 0);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG_KMS("pitch (%d) must be at least %u byte aligned\n",
			      mode_cmd->pitches[0], stride_alignment);
		goto err;
	}

	intel_fb->obj = obj;

	ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
	if (ret)
		goto err;

	ret = drm_framebuffer_init(obj->base.dev,
				   &intel_fb->base,
				   &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		goto err;
	}

	return 0;

err:
	/* Drop the framebuffer reference taken at the top. */
	i915_gem_object_lock(obj);
	obj->framebuffer_references--;
	i915_gem_object_unlock(obj);
	return ret;
}

/*
 * ADDFB(2) entry point: look up the GEM object for handle[0] and wrap it in
 * an intel framebuffer. On failure the lookup reference is dropped.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	if (IS_ERR(fb))
		i915_gem_object_put(obj);

	return fb;
}

/* Free an intel_atomic_state, including its embedded commit fence. */
static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	drm_atomic_state_default_release(state);

	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Selects the per-platform crtc/plane/clock/FDI/flip vtable entries. The
 * platform checks are ordered from newest to oldest, so the order of the
 * if/else chain matters.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	intel_init_cdclk_hooks(dev_priv);

	if (INTEL_INFO(dev_priv)->gen >= 9) {
		/* SKL+: HSW-style pipes, SKL universal planes */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* HSW/BDW */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* ILK/SNB/IVB */
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		/* gen3/gen4 GMCH platforms not handled above */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2 */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* FDI link training only exists on PCH-split platforms (ILK..BDW). */
	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	if (dev_priv->info.gen >= 9)
		dev_priv->display.update_crtcs = skl_update_crtcs;
	else
		dev_priv->display.update_crtcs = intel_update_crtcs;

	/* CS-based (ring) page flip implementation, per generation. */
	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

/* Same as above, for pipe B (830). */
static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

/* Quirk matched by PCI device and subsystem IDs. */
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};

static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};

/*
 * Apply every quirk whose PCI IDs (device + subsystem, with PCI_ANY_ID
 * wildcards) or DMI match the running system.
 */
static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	/* SR01 bit 5: screen off */
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);
	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;

	intel_init_clock_gating(dev_priv);
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	/* Standard deadlock-avoidance dance: back off and retry on -EDEADLK. */
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

/*
 * Main display-side init: mode config limits, quirks, PM, crtc/encoder
 * setup and initial hardware state readout. Returns 0 or a negative errno
 * from crtc init.
 */
int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	INIT_WORK(&dev_priv->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	intel_init_quirks(dev);

	intel_init_pm(dev_priv);

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ?
"en" : "dis"); 14980 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 14981 } 14982 } 14983 14984 if (IS_GEN2(dev_priv)) { 14985 dev->mode_config.max_width = 2048; 14986 dev->mode_config.max_height = 2048; 14987 } else if (IS_GEN3(dev_priv)) { 14988 dev->mode_config.max_width = 4096; 14989 dev->mode_config.max_height = 4096; 14990 } else { 14991 dev->mode_config.max_width = 8192; 14992 dev->mode_config.max_height = 8192; 14993 } 14994 14995 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 14996 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512; 14997 dev->mode_config.cursor_height = 1023; 14998 } else if (IS_GEN2(dev_priv)) { 14999 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 15000 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 15001 } else { 15002 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH; 15003 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; 15004 } 15005 15006 dev->mode_config.fb_base = ggtt->mappable_base; 15007 15008 DRM_DEBUG_KMS("%d display pipe%s available.\n", 15009 INTEL_INFO(dev_priv)->num_pipes, 15010 INTEL_INFO(dev_priv)->num_pipes > 1 ? 
"s" : ""); 15011 15012 for_each_pipe(dev_priv, pipe) { 15013 int ret; 15014 15015 ret = intel_crtc_init(dev_priv, pipe); 15016 if (ret) { 15017 drm_mode_config_cleanup(dev); 15018 return ret; 15019 } 15020 } 15021 15022 intel_shared_dpll_init(dev); 15023 15024 intel_update_czclk(dev_priv); 15025 intel_modeset_init_hw(dev); 15026 15027 if (dev_priv->max_cdclk_freq == 0) 15028 intel_update_max_cdclk(dev_priv); 15029 15030 /* Just disable it once at startup */ 15031 i915_disable_vga(dev_priv); 15032 intel_setup_outputs(dev_priv); 15033 15034 drm_modeset_lock_all(dev); 15035 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 15036 drm_modeset_unlock_all(dev); 15037 15038 for_each_intel_crtc(dev, crtc) { 15039 struct intel_initial_plane_config plane_config = {}; 15040 15041 if (!crtc->active) 15042 continue; 15043 15044 /* 15045 * Note that reserving the BIOS fb up front prevents us 15046 * from stuffing other stolen allocations like the ring 15047 * on top. This prevents some ugliness at boot time, and 15048 * can even allow for smooth boot transitions if the BIOS 15049 * fb is large enough for the active pipe configuration. 15050 */ 15051 dev_priv->display.get_initial_plane_config(crtc, 15052 &plane_config); 15053 15054 /* 15055 * If the fb is shared between multiple heads, we'll 15056 * just get the first one. 15057 */ 15058 intel_find_initial_plane_obj(crtc, &plane_config); 15059 } 15060 15061 /* 15062 * Make sure hardware watermarks really match the state we read out. 15063 * Note that we need to do this after reconstructing the BIOS fb's 15064 * since the watermark calculation done here will use pstate->fb. 
15065 */ 15066 if (!HAS_GMCH_DISPLAY(dev_priv)) 15067 sanitize_watermarks(dev); 15068 15069 return 0; 15070} 15071 15072static void intel_enable_pipe_a(struct drm_device *dev, 15073 struct drm_modeset_acquire_ctx *ctx) 15074{ 15075 struct intel_connector *connector; 15076 struct drm_connector_list_iter conn_iter; 15077 struct drm_connector *crt = NULL; 15078 struct intel_load_detect_pipe load_detect_temp; 15079 int ret; 15080 15081 /* We can't just switch on the pipe A, we need to set things up with a 15082 * proper mode and output configuration. As a gross hack, enable pipe A 15083 * by enabling the load detect pipe once. */ 15084 drm_connector_list_iter_begin(dev, &conn_iter); 15085 for_each_intel_connector_iter(connector, &conn_iter) { 15086 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { 15087 crt = &connector->base; 15088 break; 15089 } 15090 } 15091 drm_connector_list_iter_end(&conn_iter); 15092 15093 if (!crt) 15094 return; 15095 15096 ret = intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx); 15097 WARN(ret < 0, "All modeset mutexes are locked, but intel_get_load_detect_pipe failed\n"); 15098 15099 if (ret > 0) 15100 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx); 15101} 15102 15103static bool 15104intel_check_plane_mapping(struct intel_crtc *crtc) 15105{ 15106 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 15107 u32 val; 15108 15109 if (INTEL_INFO(dev_priv)->num_pipes == 1) 15110 return true; 15111 15112 val = I915_READ(DSPCNTR(!crtc->plane)); 15113 15114 if ((val & DISPLAY_PLANE_ENABLE) && 15115 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) 15116 return false; 15117 15118 return true; 15119} 15120 15121static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 15122{ 15123 struct drm_device *dev = crtc->base.dev; 15124 struct intel_encoder *encoder; 15125 15126 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 15127 return true; 15128 15129 return false; 15130} 15131 15132static struct intel_connector 
*intel_encoder_find_connector(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	/* Returns the first connector attached to @encoder, or NULL. */
	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return connector;

	return NULL;
}

/* Does a PCH transcoder exist for this pipe? (LPT-H only has transcoder A.) */
static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
			      enum transcoder pch_transcoder)
{
	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A);
}

/*
 * Fix up crtc state inherited from the BIOS so it matches what the driver
 * can represent: clear debug frame-start delays, disable non-primary
 * planes, repair bogus plane->pipe mappings, and reconcile the pipe with
 * its attached encoders.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			trace_intel_disable_plane(&plane->base, crtc);
			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.
	 */
	if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
			      crtc->base.base.id, crtc->base.name);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		crtc->base.primary->state->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base, ctx);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev, ctx);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}

/*
 * Fix up encoder state inherited from the BIOS: an encoder with attached
 * connectors but no active pipe is manually disabled and unlinked.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions.
Or someplace else 15277 * in our code, like the register restore mess on resume. Clamp 15278 * things to off as a safer default. */ 15279 15280 connector->base.dpms = DRM_MODE_DPMS_OFF; 15281 connector->base.encoder = NULL; 15282 } 15283 /* Enabled encoders without active connectors will be fixed in 15284 * the crtc fixup. */ 15285} 15286 15287void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) 15288{ 15289 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); 15290 15291 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 15292 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 15293 i915_disable_vga(dev_priv); 15294 } 15295} 15296 15297void i915_redisable_vga(struct drm_i915_private *dev_priv) 15298{ 15299 /* This function can be called both from intel_modeset_setup_hw_state or 15300 * at a very early point in our resume sequence, where the power well 15301 * structures are not yet restored. Since this function is at a very 15302 * paranoid "someone might have enabled VGA while we were not looking" 15303 * level, just check if the power well is enabled instead of trying to 15304 * follow the "don't touch the power well if we don't need it" policy 15305 * the rest of the driver uses. 
*/ 15306 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA)) 15307 return; 15308 15309 i915_redisable_vga_power_on(dev_priv); 15310 15311 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); 15312} 15313 15314static bool primary_get_hw_state(struct intel_plane *plane) 15315{ 15316 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 15317 15318 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; 15319} 15320 15321/* FIXME read out full plane state for all planes */ 15322static void readout_plane_state(struct intel_crtc *crtc) 15323{ 15324 struct intel_plane *primary = to_intel_plane(crtc->base.primary); 15325 bool visible; 15326 15327 visible = crtc->active && primary_get_hw_state(primary); 15328 15329 intel_set_plane_visible(to_intel_crtc_state(crtc->base.state), 15330 to_intel_plane_state(primary->base.state), 15331 visible); 15332} 15333 15334static void intel_modeset_readout_hw_state(struct drm_device *dev) 15335{ 15336 struct drm_i915_private *dev_priv = to_i915(dev); 15337 enum pipe pipe; 15338 struct intel_crtc *crtc; 15339 struct intel_encoder *encoder; 15340 struct intel_connector *connector; 15341 struct drm_connector_list_iter conn_iter; 15342 int i; 15343 15344 dev_priv->active_crtcs = 0; 15345 15346 for_each_intel_crtc(dev, crtc) { 15347 struct intel_crtc_state *crtc_state = 15348 to_intel_crtc_state(crtc->base.state); 15349 15350 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); 15351 memset(crtc_state, 0, sizeof(*crtc_state)); 15352 crtc_state->base.crtc = &crtc->base; 15353 15354 crtc_state->base.active = crtc_state->base.enable = 15355 dev_priv->display.get_pipe_config(crtc, crtc_state); 15356 15357 crtc->base.enabled = crtc_state->base.enable; 15358 crtc->active = crtc_state->base.active; 15359 15360 if (crtc_state->base.active) 15361 dev_priv->active_crtcs |= 1 << crtc->pipe; 15362 15363 readout_plane_state(crtc); 15364 15365 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", 15366 
crtc->base.base.id, crtc->base.name, 15367 enableddisabled(crtc_state->base.active)); 15368 } 15369 15370 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 15371 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 15372 15373 pll->on = pll->funcs.get_hw_state(dev_priv, pll, 15374 &pll->state.hw_state); 15375 pll->state.crtc_mask = 0; 15376 for_each_intel_crtc(dev, crtc) { 15377 struct intel_crtc_state *crtc_state = 15378 to_intel_crtc_state(crtc->base.state); 15379 15380 if (crtc_state->base.active && 15381 crtc_state->shared_dpll == pll) 15382 pll->state.crtc_mask |= 1 << crtc->pipe; 15383 } 15384 pll->active_mask = pll->state.crtc_mask; 15385 15386 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 15387 pll->name, pll->state.crtc_mask, pll->on); 15388 } 15389 15390 for_each_intel_encoder(dev, encoder) { 15391 pipe = 0; 15392 15393 if (encoder->get_hw_state(encoder, &pipe)) { 15394 struct intel_crtc_state *crtc_state; 15395 15396 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15397 crtc_state = to_intel_crtc_state(crtc->base.state); 15398 15399 encoder->base.crtc = &crtc->base; 15400 crtc_state->output_types |= 1 << encoder->type; 15401 encoder->get_config(encoder, crtc_state); 15402 } else { 15403 encoder->base.crtc = NULL; 15404 } 15405 15406 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 15407 encoder->base.base.id, encoder->base.name, 15408 enableddisabled(encoder->base.crtc), 15409 pipe_name(pipe)); 15410 } 15411 15412 drm_connector_list_iter_begin(dev, &conn_iter); 15413 for_each_intel_connector_iter(connector, &conn_iter) { 15414 if (connector->get_hw_state(connector)) { 15415 connector->base.dpms = DRM_MODE_DPMS_ON; 15416 15417 encoder = connector->encoder; 15418 connector->base.encoder = &encoder->base; 15419 15420 if (encoder->base.crtc && 15421 encoder->base.crtc->state->active) { 15422 /* 15423 * This has to be done during hardware readout 15424 * because anything calling .crtc_disable may 15425 * rely on the 
connector_mask being accurate. 15426 */ 15427 encoder->base.crtc->state->connector_mask |= 15428 1 << drm_connector_index(&connector->base); 15429 encoder->base.crtc->state->encoder_mask |= 15430 1 << drm_encoder_index(&encoder->base); 15431 } 15432 15433 } else { 15434 connector->base.dpms = DRM_MODE_DPMS_OFF; 15435 connector->base.encoder = NULL; 15436 } 15437 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 15438 connector->base.base.id, connector->base.name, 15439 enableddisabled(connector->base.encoder)); 15440 } 15441 drm_connector_list_iter_end(&conn_iter); 15442 15443 for_each_intel_crtc(dev, crtc) { 15444 struct intel_crtc_state *crtc_state = 15445 to_intel_crtc_state(crtc->base.state); 15446 int pixclk = 0; 15447 15448 crtc->base.hwmode = crtc_state->base.adjusted_mode; 15449 15450 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 15451 if (crtc_state->base.active) { 15452 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state); 15453 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state); 15454 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); 15455 15456 /* 15457 * The initial mode needs to be set in order to keep 15458 * the atomic core happy. It wants a valid mode if the 15459 * crtc's enabled, so we do the above call. 15460 * 15461 * But we don't set all the derived state fully, hence 15462 * set a flag to indicate that a full recalculation is 15463 * needed on the next commit. 
15464 */ 15465 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED; 15466 15467 intel_crtc_compute_pixel_rate(crtc_state); 15468 15469 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || 15470 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15471 pixclk = crtc_state->pixel_rate; 15472 else 15473 WARN_ON(dev_priv->display.modeset_calc_cdclk); 15474 15475 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 15476 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) 15477 pixclk = DIV_ROUND_UP(pixclk * 100, 95); 15478 15479 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); 15480 update_scanline_offset(crtc); 15481 } 15482 15483 dev_priv->min_pixclk[crtc->pipe] = pixclk; 15484 15485 intel_pipe_config_sanity_check(dev_priv, crtc_state); 15486 } 15487} 15488 15489static void 15490get_encoder_power_domains(struct drm_i915_private *dev_priv) 15491{ 15492 struct intel_encoder *encoder; 15493 15494 for_each_intel_encoder(&dev_priv->drm, encoder) { 15495 u64 get_domains; 15496 enum intel_display_power_domain domain; 15497 15498 if (!encoder->get_power_domains) 15499 continue; 15500 15501 get_domains = encoder->get_power_domains(encoder); 15502 for_each_power_domain(domain, get_domains) 15503 intel_display_power_get(dev_priv, domain); 15504 } 15505} 15506 15507/* Scan out the current hw modeset state, 15508 * and sanitizes it to the current state 15509 */ 15510static void 15511intel_modeset_setup_hw_state(struct drm_device *dev, 15512 struct drm_modeset_acquire_ctx *ctx) 15513{ 15514 struct drm_i915_private *dev_priv = to_i915(dev); 15515 enum pipe pipe; 15516 struct intel_crtc *crtc; 15517 struct intel_encoder *encoder; 15518 int i; 15519 15520 intel_modeset_readout_hw_state(dev); 15521 15522 /* HW state is read out, now we need to sanitize this mess. 
*/ 15523 get_encoder_power_domains(dev_priv); 15524 15525 for_each_intel_encoder(dev, encoder) { 15526 intel_sanitize_encoder(encoder); 15527 } 15528 15529 for_each_pipe(dev_priv, pipe) { 15530 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15531 15532 intel_sanitize_crtc(crtc, ctx); 15533 intel_dump_pipe_config(crtc, crtc->config, 15534 "[setup_hw_state]"); 15535 } 15536 15537 intel_modeset_update_connector_atomic_state(dev); 15538 15539 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 15540 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 15541 15542 if (!pll->on || pll->active_mask) 15543 continue; 15544 15545 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); 15546 15547 pll->funcs.disable(dev_priv, pll); 15548 pll->on = false; 15549 } 15550 15551 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15552 vlv_wm_get_hw_state(dev); 15553 vlv_wm_sanitize(dev_priv); 15554 } else if (IS_GEN9(dev_priv)) { 15555 skl_wm_get_hw_state(dev); 15556 } else if (HAS_PCH_SPLIT(dev_priv)) { 15557 ilk_wm_get_hw_state(dev); 15558 } 15559 15560 for_each_intel_crtc(dev, crtc) { 15561 u64 put_domains; 15562 15563 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config); 15564 if (WARN_ON(put_domains)) 15565 modeset_put_power_domains(dev_priv, put_domains); 15566 } 15567 intel_display_set_init_power(dev_priv, false); 15568 15569 intel_power_domains_verify_state(dev_priv); 15570 15571 intel_fbc_init_pipe_state(dev_priv); 15572} 15573 15574void intel_display_resume(struct drm_device *dev) 15575{ 15576 struct drm_i915_private *dev_priv = to_i915(dev); 15577 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 15578 struct drm_modeset_acquire_ctx ctx; 15579 int ret; 15580 15581 dev_priv->modeset_restore_state = NULL; 15582 if (state) 15583 state->acquire_ctx = &ctx; 15584 15585 /* 15586 * This is a cludge because with real atomic modeset mode_config.mutex 15587 * won't be taken. 
Unfortunately some probed state like 15588 * audio_codec_enable is still protected by mode_config.mutex, so lock 15589 * it here for now. 15590 */ 15591 mutex_lock(&dev->mode_config.mutex); 15592 drm_modeset_acquire_init(&ctx, 0); 15593 15594 while (1) { 15595 ret = drm_modeset_lock_all_ctx(dev, &ctx); 15596 if (ret != -EDEADLK) 15597 break; 15598 15599 drm_modeset_backoff(&ctx); 15600 } 15601 15602 if (!ret) 15603 ret = __intel_display_resume(dev, state, &ctx); 15604 15605 drm_modeset_drop_locks(&ctx); 15606 drm_modeset_acquire_fini(&ctx); 15607 mutex_unlock(&dev->mode_config.mutex); 15608 15609 if (ret) 15610 DRM_ERROR("Restoring old state failed with %i\n", ret); 15611 if (state) 15612 drm_atomic_state_put(state); 15613} 15614 15615void intel_modeset_gem_init(struct drm_device *dev) 15616{ 15617 struct drm_i915_private *dev_priv = to_i915(dev); 15618 15619 intel_init_gt_powersave(dev_priv); 15620 15621 intel_setup_overlay(dev_priv); 15622} 15623 15624int intel_connector_register(struct drm_connector *connector) 15625{ 15626 struct intel_connector *intel_connector = to_intel_connector(connector); 15627 int ret; 15628 15629 ret = intel_backlight_device_register(intel_connector); 15630 if (ret) 15631 goto err; 15632 15633 return 0; 15634 15635err: 15636 return ret; 15637} 15638 15639void intel_connector_unregister(struct drm_connector *connector) 15640{ 15641 struct intel_connector *intel_connector = to_intel_connector(connector); 15642 15643 intel_backlight_device_unregister(intel_connector); 15644 intel_panel_destroy_backlight(connector); 15645} 15646 15647void intel_modeset_cleanup(struct drm_device *dev) 15648{ 15649 struct drm_i915_private *dev_priv = to_i915(dev); 15650 15651 flush_work(&dev_priv->atomic_helper.free_work); 15652 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); 15653 15654 intel_disable_gt_powersave(dev_priv); 15655 15656 /* 15657 * Interrupts and polling as the first thing to avoid creating havoc. 
15658 * Too much stuff here (turning of connectors, ...) would 15659 * experience fancy races otherwise. 15660 */ 15661 intel_irq_uninstall(dev_priv); 15662 15663 /* 15664 * Due to the hpd irq storm handling the hotplug work can re-arm the 15665 * poll handlers. Hence disable polling after hpd handling is shut down. 15666 */ 15667 drm_kms_helper_poll_fini(dev); 15668 15669 intel_unregister_dsm_handler(); 15670 15671 intel_fbc_global_disable(dev_priv); 15672 15673 /* flush any delayed tasks or pending work */ 15674 flush_scheduled_work(); 15675 15676 drm_mode_config_cleanup(dev); 15677 15678 intel_cleanup_overlay(dev_priv); 15679 15680 intel_cleanup_gt_powersave(dev_priv); 15681 15682 intel_teardown_gmbus(dev_priv); 15683} 15684 15685void intel_connector_attach_encoder(struct intel_connector *connector, 15686 struct intel_encoder *encoder) 15687{ 15688 connector->encoder = encoder; 15689 drm_mode_connector_attach_encoder(&connector->base, 15690 &encoder->base); 15691} 15692 15693/* 15694 * set vga decode state - true == enable VGA decode 15695 */ 15696int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state) 15697{ 15698 unsigned reg = INTEL_GEN(dev_priv) >= 6 ? 
SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 15699 u16 gmch_ctrl; 15700 15701 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { 15702 DRM_ERROR("failed to read control word\n"); 15703 return -EIO; 15704 } 15705 15706 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state) 15707 return 0; 15708 15709 if (state) 15710 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 15711 else 15712 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 15713 15714 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) { 15715 DRM_ERROR("failed to write control word\n"); 15716 return -EIO; 15717 } 15718 15719 return 0; 15720} 15721 15722#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 15723 15724struct intel_display_error_state { 15725 15726 u32 power_well_driver; 15727 15728 int num_transcoders; 15729 15730 struct intel_cursor_error_state { 15731 u32 control; 15732 u32 position; 15733 u32 base; 15734 u32 size; 15735 } cursor[I915_MAX_PIPES]; 15736 15737 struct intel_pipe_error_state { 15738 bool power_domain_on; 15739 u32 source; 15740 u32 stat; 15741 } pipe[I915_MAX_PIPES]; 15742 15743 struct intel_plane_error_state { 15744 u32 control; 15745 u32 stride; 15746 u32 size; 15747 u32 pos; 15748 u32 addr; 15749 u32 surface; 15750 u32 tile_offset; 15751 } plane[I915_MAX_PIPES]; 15752 15753 struct intel_transcoder_error_state { 15754 bool power_domain_on; 15755 enum transcoder cpu_transcoder; 15756 15757 u32 conf; 15758 15759 u32 htotal; 15760 u32 hblank; 15761 u32 hsync; 15762 u32 vtotal; 15763 u32 vblank; 15764 u32 vsync; 15765 } transcoder[4]; 15766}; 15767 15768struct intel_display_error_state * 15769intel_display_capture_error_state(struct drm_i915_private *dev_priv) 15770{ 15771 struct intel_display_error_state *error; 15772 int transcoders[] = { 15773 TRANSCODER_A, 15774 TRANSCODER_B, 15775 TRANSCODER_C, 15776 TRANSCODER_EDP, 15777 }; 15778 int i; 15779 15780 if (INTEL_INFO(dev_priv)->num_pipes == 0) 15781 return NULL; 15782 15783 error = kzalloc(sizeof(*error), GFP_ATOMIC); 15784 if (error == NULL) 
15785 return NULL; 15786 15787 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 15788 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 15789 15790 for_each_pipe(dev_priv, i) { 15791 error->pipe[i].power_domain_on = 15792 __intel_display_power_is_enabled(dev_priv, 15793 POWER_DOMAIN_PIPE(i)); 15794 if (!error->pipe[i].power_domain_on) 15795 continue; 15796 15797 error->cursor[i].control = I915_READ(CURCNTR(i)); 15798 error->cursor[i].position = I915_READ(CURPOS(i)); 15799 error->cursor[i].base = I915_READ(CURBASE(i)); 15800 15801 error->plane[i].control = I915_READ(DSPCNTR(i)); 15802 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 15803 if (INTEL_GEN(dev_priv) <= 3) { 15804 error->plane[i].size = I915_READ(DSPSIZE(i)); 15805 error->plane[i].pos = I915_READ(DSPPOS(i)); 15806 } 15807 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 15808 error->plane[i].addr = I915_READ(DSPADDR(i)); 15809 if (INTEL_GEN(dev_priv) >= 4) { 15810 error->plane[i].surface = I915_READ(DSPSURF(i)); 15811 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 15812 } 15813 15814 error->pipe[i].source = I915_READ(PIPESRC(i)); 15815 15816 if (HAS_GMCH_DISPLAY(dev_priv)) 15817 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 15818 } 15819 15820 /* Note: this does not include DSI transcoders. */ 15821 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes; 15822 if (HAS_DDI(dev_priv)) 15823 error->num_transcoders++; /* Account for eDP. 
*/ 15824 15825 for (i = 0; i < error->num_transcoders; i++) { 15826 enum transcoder cpu_transcoder = transcoders[i]; 15827 15828 error->transcoder[i].power_domain_on = 15829 __intel_display_power_is_enabled(dev_priv, 15830 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 15831 if (!error->transcoder[i].power_domain_on) 15832 continue; 15833 15834 error->transcoder[i].cpu_transcoder = cpu_transcoder; 15835 15836 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 15837 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 15838 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 15839 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 15840 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 15841 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 15842 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 15843 } 15844 15845 return error; 15846} 15847 15848#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 15849 15850void 15851intel_display_print_error_state(struct drm_i915_error_state_buf *m, 15852 struct intel_display_error_state *error) 15853{ 15854 struct drm_i915_private *dev_priv = m->i915; 15855 int i; 15856 15857 if (!error) 15858 return; 15859 15860 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes); 15861 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 15862 err_printf(m, "PWR_WELL_CTL2: %08x\n", 15863 error->power_well_driver); 15864 for_each_pipe(dev_priv, i) { 15865 err_printf(m, "Pipe [%d]:\n", i); 15866 err_printf(m, " Power: %s\n", 15867 onoff(error->pipe[i].power_domain_on)); 15868 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 15869 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 15870 15871 err_printf(m, "Plane [%d]:\n", i); 15872 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 15873 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 15874 if (INTEL_GEN(dev_priv) <= 3) { 15875 err_printf(m, " SIZE: %08x\n", 
error->plane[i].size); 15876 err_printf(m, " POS: %08x\n", error->plane[i].pos); 15877 } 15878 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 15879 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 15880 if (INTEL_GEN(dev_priv) >= 4) { 15881 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 15882 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 15883 } 15884 15885 err_printf(m, "Cursor [%d]:\n", i); 15886 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 15887 err_printf(m, " POS: %08x\n", error->cursor[i].position); 15888 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 15889 } 15890 15891 for (i = 0; i < error->num_transcoders; i++) { 15892 err_printf(m, "CPU transcoder: %s\n", 15893 transcoder_name(error->transcoder[i].cpu_transcoder)); 15894 err_printf(m, " Power: %s\n", 15895 onoff(error->transcoder[i].power_domain_on)); 15896 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 15897 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 15898 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 15899 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 15900 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 15901 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 15902 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 15903 } 15904} 15905 15906#endif