Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2021-01-04' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Display hotplug fix for gen2/gen3 (Chris)
- Remove trailing semicolon (Tom)
- Suppress display warnings for old ifwi present on our CI (Chris)
- OA/Perf related workaround (Lionel)
- Replace I915_READ/WRITE per new uncore and display read/write functions (Jani)
- PSR improvements (Jose)
- HDR and other color changes on LSPCON (Uma, Ville)
- FBC fixes for TGL (Uma)
- Record plane update times for debugging (Chris)
- Refactor panel backlight control functions (Dave)
- Display power improvements (Imre)
- Add VRR register definition (Manasi)
- Atomic modeset improvements for bigjoiner pipes (Ville)
- Switch off the scanout during driver unregister (Chris)
- Clean-up DP's FEC enable (Manasi)
- Fix VDSC slice count (Manasi)
- Fix and clean up around rc_model_size for DSC (Jani)
- Remove Type-C noisy debug warn message (Sean)
- Display HPD code clean-up (Ville)
- Refactor Intel Display (Dave)
- Start adding support for Intel's eDP backlight controls (Lyude)

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210104211018.GA1094707@intel.com

+3184 -3040
+28 -2
drivers/gpu/drm/drm_dsc.c
··· 50 50 EXPORT_SYMBOL(drm_dsc_dp_pps_header_init); 51 51 52 52 /** 53 + * drm_dsc_dp_rc_buffer_size - get rc buffer size in bytes 54 + * @rc_buffer_block_size: block size code, according to DPCD offset 62h 55 + * @rc_buffer_size: number of blocks - 1, according to DPCD offset 63h 56 + * 57 + * return: 58 + * buffer size in bytes, or 0 on invalid input 59 + */ 60 + int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size) 61 + { 62 + int size = 1024 * (rc_buffer_size + 1); 63 + 64 + switch (rc_buffer_block_size) { 65 + case DP_DSC_RC_BUF_BLK_SIZE_1: 66 + return 1 * size; 67 + case DP_DSC_RC_BUF_BLK_SIZE_4: 68 + return 4 * size; 69 + case DP_DSC_RC_BUF_BLK_SIZE_16: 70 + return 16 * size; 71 + case DP_DSC_RC_BUF_BLK_SIZE_64: 72 + return 64 * size; 73 + default: 74 + return 0; 75 + } 76 + } 77 + EXPORT_SYMBOL(drm_dsc_dp_rc_buffer_size); 78 + 79 + /** 53 80 * drm_dsc_pps_payload_pack() - Populates the DSC PPS 54 81 * 55 82 * @pps_payload: ··· 213 186 pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp; 214 187 215 188 /* PPS 38, 39 */ 216 - pps_payload->rc_model_size = 217 - cpu_to_be16(DSC_RC_MODEL_SIZE_CONST); 189 + pps_payload->rc_model_size = cpu_to_be16(dsc_cfg->rc_model_size); 218 190 219 191 /* PPS 40 */ 220 192 pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+3 -1
drivers/gpu/drm/i915/Makefile
··· 197 197 display/intel_combo_phy.o \ 198 198 display/intel_connector.o \ 199 199 display/intel_csr.o \ 200 + display/intel_cursor.o \ 200 201 display/intel_display.o \ 201 202 display/intel_display_power.o \ 202 203 display/intel_dpio_phy.o \ ··· 215 214 display/intel_quirks.o \ 216 215 display/intel_sprite.o \ 217 216 display/intel_tc.o \ 218 - display/intel_vga.o 217 + display/intel_vga.o \ 218 + display/i9xx_plane.o 219 219 i915-$(CONFIG_ACPI) += \ 220 220 display/intel_acpi.o \ 221 221 display/intel_opregion.o
+704
drivers/gpu/drm/i915/display/i9xx_plane.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2020 Intel Corporation 4 + */ 5 + #include <linux/kernel.h> 6 + 7 + #include <drm/drm_atomic_helper.h> 8 + #include <drm/drm_fourcc.h> 9 + #include <drm/drm_plane_helper.h> 10 + 11 + #include "intel_atomic.h" 12 + #include "intel_atomic_plane.h" 13 + #include "intel_display_types.h" 14 + #include "intel_sprite.h" 15 + #include "i9xx_plane.h" 16 + 17 + /* Primary plane formats for gen <= 3 */ 18 + static const u32 i8xx_primary_formats[] = { 19 + DRM_FORMAT_C8, 20 + DRM_FORMAT_XRGB1555, 21 + DRM_FORMAT_RGB565, 22 + DRM_FORMAT_XRGB8888, 23 + }; 24 + 25 + /* Primary plane formats for ivb (no fp16 due to hw issue) */ 26 + static const u32 ivb_primary_formats[] = { 27 + DRM_FORMAT_C8, 28 + DRM_FORMAT_RGB565, 29 + DRM_FORMAT_XRGB8888, 30 + DRM_FORMAT_XBGR8888, 31 + DRM_FORMAT_XRGB2101010, 32 + DRM_FORMAT_XBGR2101010, 33 + }; 34 + 35 + /* Primary plane formats for gen >= 4, except ivb */ 36 + static const u32 i965_primary_formats[] = { 37 + DRM_FORMAT_C8, 38 + DRM_FORMAT_RGB565, 39 + DRM_FORMAT_XRGB8888, 40 + DRM_FORMAT_XBGR8888, 41 + DRM_FORMAT_XRGB2101010, 42 + DRM_FORMAT_XBGR2101010, 43 + DRM_FORMAT_XBGR16161616F, 44 + }; 45 + 46 + /* Primary plane formats for vlv/chv */ 47 + static const u32 vlv_primary_formats[] = { 48 + DRM_FORMAT_C8, 49 + DRM_FORMAT_RGB565, 50 + DRM_FORMAT_XRGB8888, 51 + DRM_FORMAT_XBGR8888, 52 + DRM_FORMAT_ARGB8888, 53 + DRM_FORMAT_ABGR8888, 54 + DRM_FORMAT_XRGB2101010, 55 + DRM_FORMAT_XBGR2101010, 56 + DRM_FORMAT_ARGB2101010, 57 + DRM_FORMAT_ABGR2101010, 58 + DRM_FORMAT_XBGR16161616F, 59 + }; 60 + 61 + static const u64 i9xx_format_modifiers[] = { 62 + I915_FORMAT_MOD_X_TILED, 63 + DRM_FORMAT_MOD_LINEAR, 64 + DRM_FORMAT_MOD_INVALID 65 + }; 66 + 67 + static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, 68 + u32 format, u64 modifier) 69 + { 70 + switch (modifier) { 71 + case DRM_FORMAT_MOD_LINEAR: 72 + case I915_FORMAT_MOD_X_TILED: 73 + break; 74 + default: 75 + return 
false; 76 + } 77 + 78 + switch (format) { 79 + case DRM_FORMAT_C8: 80 + case DRM_FORMAT_RGB565: 81 + case DRM_FORMAT_XRGB1555: 82 + case DRM_FORMAT_XRGB8888: 83 + return modifier == DRM_FORMAT_MOD_LINEAR || 84 + modifier == I915_FORMAT_MOD_X_TILED; 85 + default: 86 + return false; 87 + } 88 + } 89 + 90 + static bool i965_plane_format_mod_supported(struct drm_plane *_plane, 91 + u32 format, u64 modifier) 92 + { 93 + switch (modifier) { 94 + case DRM_FORMAT_MOD_LINEAR: 95 + case I915_FORMAT_MOD_X_TILED: 96 + break; 97 + default: 98 + return false; 99 + } 100 + 101 + switch (format) { 102 + case DRM_FORMAT_C8: 103 + case DRM_FORMAT_RGB565: 104 + case DRM_FORMAT_XRGB8888: 105 + case DRM_FORMAT_XBGR8888: 106 + case DRM_FORMAT_ARGB8888: 107 + case DRM_FORMAT_ABGR8888: 108 + case DRM_FORMAT_XRGB2101010: 109 + case DRM_FORMAT_XBGR2101010: 110 + case DRM_FORMAT_ARGB2101010: 111 + case DRM_FORMAT_ABGR2101010: 112 + case DRM_FORMAT_XBGR16161616F: 113 + return modifier == DRM_FORMAT_MOD_LINEAR || 114 + modifier == I915_FORMAT_MOD_X_TILED; 115 + default: 116 + return false; 117 + } 118 + } 119 + 120 + static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, 121 + enum i9xx_plane_id i9xx_plane) 122 + { 123 + if (!HAS_FBC(dev_priv)) 124 + return false; 125 + 126 + if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 127 + return i9xx_plane == PLANE_A; /* tied to pipe A */ 128 + else if (IS_IVYBRIDGE(dev_priv)) 129 + return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B || 130 + i9xx_plane == PLANE_C; 131 + else if (INTEL_GEN(dev_priv) >= 4) 132 + return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B; 133 + else 134 + return i9xx_plane == PLANE_A; 135 + } 136 + 137 + static bool i9xx_plane_has_windowing(struct intel_plane *plane) 138 + { 139 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 140 + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 141 + 142 + if (IS_CHERRYVIEW(dev_priv)) 143 + return i9xx_plane == PLANE_B; 144 + else if (INTEL_GEN(dev_priv) >= 
5 || IS_G4X(dev_priv)) 145 + return false; 146 + else if (IS_GEN(dev_priv, 4)) 147 + return i9xx_plane == PLANE_C; 148 + else 149 + return i9xx_plane == PLANE_B || 150 + i9xx_plane == PLANE_C; 151 + } 152 + 153 + static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 154 + const struct intel_plane_state *plane_state) 155 + { 156 + struct drm_i915_private *dev_priv = 157 + to_i915(plane_state->uapi.plane->dev); 158 + const struct drm_framebuffer *fb = plane_state->hw.fb; 159 + unsigned int rotation = plane_state->hw.rotation; 160 + u32 dspcntr; 161 + 162 + dspcntr = DISPLAY_PLANE_ENABLE; 163 + 164 + if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) || 165 + IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 166 + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 167 + 168 + switch (fb->format->format) { 169 + case DRM_FORMAT_C8: 170 + dspcntr |= DISPPLANE_8BPP; 171 + break; 172 + case DRM_FORMAT_XRGB1555: 173 + dspcntr |= DISPPLANE_BGRX555; 174 + break; 175 + case DRM_FORMAT_ARGB1555: 176 + dspcntr |= DISPPLANE_BGRA555; 177 + break; 178 + case DRM_FORMAT_RGB565: 179 + dspcntr |= DISPPLANE_BGRX565; 180 + break; 181 + case DRM_FORMAT_XRGB8888: 182 + dspcntr |= DISPPLANE_BGRX888; 183 + break; 184 + case DRM_FORMAT_XBGR8888: 185 + dspcntr |= DISPPLANE_RGBX888; 186 + break; 187 + case DRM_FORMAT_ARGB8888: 188 + dspcntr |= DISPPLANE_BGRA888; 189 + break; 190 + case DRM_FORMAT_ABGR8888: 191 + dspcntr |= DISPPLANE_RGBA888; 192 + break; 193 + case DRM_FORMAT_XRGB2101010: 194 + dspcntr |= DISPPLANE_BGRX101010; 195 + break; 196 + case DRM_FORMAT_XBGR2101010: 197 + dspcntr |= DISPPLANE_RGBX101010; 198 + break; 199 + case DRM_FORMAT_ARGB2101010: 200 + dspcntr |= DISPPLANE_BGRA101010; 201 + break; 202 + case DRM_FORMAT_ABGR2101010: 203 + dspcntr |= DISPPLANE_RGBA101010; 204 + break; 205 + case DRM_FORMAT_XBGR16161616F: 206 + dspcntr |= DISPPLANE_RGBX161616; 207 + break; 208 + default: 209 + MISSING_CASE(fb->format->format); 210 + return 0; 211 + } 212 + 213 + if (INTEL_GEN(dev_priv) 
>= 4 && 214 + fb->modifier == I915_FORMAT_MOD_X_TILED) 215 + dspcntr |= DISPPLANE_TILED; 216 + 217 + if (rotation & DRM_MODE_ROTATE_180) 218 + dspcntr |= DISPPLANE_ROTATE_180; 219 + 220 + if (rotation & DRM_MODE_REFLECT_X) 221 + dspcntr |= DISPPLANE_MIRROR; 222 + 223 + return dspcntr; 224 + } 225 + 226 + int i9xx_check_plane_surface(struct intel_plane_state *plane_state) 227 + { 228 + struct drm_i915_private *dev_priv = 229 + to_i915(plane_state->uapi.plane->dev); 230 + const struct drm_framebuffer *fb = plane_state->hw.fb; 231 + int src_x, src_y, src_w; 232 + u32 offset; 233 + int ret; 234 + 235 + ret = intel_plane_compute_gtt(plane_state); 236 + if (ret) 237 + return ret; 238 + 239 + if (!plane_state->uapi.visible) 240 + return 0; 241 + 242 + src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 243 + src_x = plane_state->uapi.src.x1 >> 16; 244 + src_y = plane_state->uapi.src.y1 >> 16; 245 + 246 + /* Undocumented hardware limit on i965/g4x/vlv/chv */ 247 + if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) 248 + return -EINVAL; 249 + 250 + intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 251 + 252 + if (INTEL_GEN(dev_priv) >= 4) 253 + offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 254 + plane_state, 0); 255 + else 256 + offset = 0; 257 + 258 + /* 259 + * Put the final coordinates back so that the src 260 + * coordinate checks will see the right values. 
261 + */ 262 + drm_rect_translate_to(&plane_state->uapi.src, 263 + src_x << 16, src_y << 16); 264 + 265 + /* HSW/BDW do this automagically in hardware */ 266 + if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 267 + unsigned int rotation = plane_state->hw.rotation; 268 + int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 269 + int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 270 + 271 + if (rotation & DRM_MODE_ROTATE_180) { 272 + src_x += src_w - 1; 273 + src_y += src_h - 1; 274 + } else if (rotation & DRM_MODE_REFLECT_X) { 275 + src_x += src_w - 1; 276 + } 277 + } 278 + 279 + plane_state->color_plane[0].offset = offset; 280 + plane_state->color_plane[0].x = src_x; 281 + plane_state->color_plane[0].y = src_y; 282 + 283 + return 0; 284 + } 285 + 286 + static int 287 + i9xx_plane_check(struct intel_crtc_state *crtc_state, 288 + struct intel_plane_state *plane_state) 289 + { 290 + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 291 + int ret; 292 + 293 + ret = chv_plane_check_rotation(plane_state); 294 + if (ret) 295 + return ret; 296 + 297 + ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, 298 + DRM_PLANE_HELPER_NO_SCALING, 299 + DRM_PLANE_HELPER_NO_SCALING, 300 + i9xx_plane_has_windowing(plane)); 301 + if (ret) 302 + return ret; 303 + 304 + ret = i9xx_check_plane_surface(plane_state); 305 + if (ret) 306 + return ret; 307 + 308 + if (!plane_state->uapi.visible) 309 + return 0; 310 + 311 + ret = intel_plane_check_src_coordinates(plane_state); 312 + if (ret) 313 + return ret; 314 + 315 + plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); 316 + 317 + return 0; 318 + } 319 + 320 + static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 321 + { 322 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 323 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 324 + u32 dspcntr = 0; 325 + 326 + if (crtc_state->gamma_enable) 327 + dspcntr |= DISPPLANE_GAMMA_ENABLE; 328 
+ 329 + if (crtc_state->csc_enable) 330 + dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 331 + 332 + if (INTEL_GEN(dev_priv) < 5) 333 + dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 334 + 335 + return dspcntr; 336 + } 337 + 338 + static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state, 339 + const struct intel_plane_state *plane_state, 340 + unsigned int *num, unsigned int *den) 341 + { 342 + const struct drm_framebuffer *fb = plane_state->hw.fb; 343 + unsigned int cpp = fb->format->cpp[0]; 344 + 345 + /* 346 + * g4x bspec says 64bpp pixel rate can't exceed 80% 347 + * of cdclk when the sprite plane is enabled on the 348 + * same pipe. ilk/snb bspec says 64bpp pixel rate is 349 + * never allowed to exceed 80% of cdclk. Let's just go 350 + * with the ilk/snb limit always. 351 + */ 352 + if (cpp == 8) { 353 + *num = 10; 354 + *den = 8; 355 + } else { 356 + *num = 1; 357 + *den = 1; 358 + } 359 + } 360 + 361 + static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, 362 + const struct intel_plane_state *plane_state) 363 + { 364 + unsigned int pixel_rate; 365 + unsigned int num, den; 366 + 367 + /* 368 + * Note that crtc_state->pixel_rate accounts for both 369 + * horizontal and vertical panel fitter downscaling factors. 370 + * Pre-HSW bspec tells us to only consider the horizontal 371 + * downscaling factor here. We ignore that and just consider 372 + * both for simplicity. 
373 + */ 374 + pixel_rate = crtc_state->pixel_rate; 375 + 376 + i9xx_plane_ratio(crtc_state, plane_state, &num, &den); 377 + 378 + /* two pixels per clock with double wide pipe */ 379 + if (crtc_state->double_wide) 380 + den *= 2; 381 + 382 + return DIV_ROUND_UP(pixel_rate * num, den); 383 + } 384 + 385 + static void i9xx_update_plane(struct intel_plane *plane, 386 + const struct intel_crtc_state *crtc_state, 387 + const struct intel_plane_state *plane_state) 388 + { 389 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 390 + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 391 + u32 linear_offset; 392 + int x = plane_state->color_plane[0].x; 393 + int y = plane_state->color_plane[0].y; 394 + int crtc_x = plane_state->uapi.dst.x1; 395 + int crtc_y = plane_state->uapi.dst.y1; 396 + int crtc_w = drm_rect_width(&plane_state->uapi.dst); 397 + int crtc_h = drm_rect_height(&plane_state->uapi.dst); 398 + unsigned long irqflags; 399 + u32 dspaddr_offset; 400 + u32 dspcntr; 401 + 402 + dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); 403 + 404 + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 405 + 406 + if (INTEL_GEN(dev_priv) >= 4) 407 + dspaddr_offset = plane_state->color_plane[0].offset; 408 + else 409 + dspaddr_offset = linear_offset; 410 + 411 + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 412 + 413 + intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), 414 + plane_state->color_plane[0].stride); 415 + 416 + if (INTEL_GEN(dev_priv) < 4) { 417 + /* 418 + * PLANE_A doesn't actually have a full window 419 + * generator but let's assume we still need to 420 + * program whatever is there. 
421 + */ 422 + intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane), 423 + (crtc_y << 16) | crtc_x); 424 + intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), 425 + ((crtc_h - 1) << 16) | (crtc_w - 1)); 426 + } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { 427 + intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), 428 + (crtc_y << 16) | crtc_x); 429 + intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), 430 + ((crtc_h - 1) << 16) | (crtc_w - 1)); 431 + intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0); 432 + } 433 + 434 + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 435 + intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane), 436 + (y << 16) | x); 437 + } else if (INTEL_GEN(dev_priv) >= 4) { 438 + intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane), 439 + linear_offset); 440 + intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane), 441 + (y << 16) | x); 442 + } 443 + 444 + /* 445 + * The control register self-arms if the plane was previously 446 + * disabled. Try to make the plane enable atomic by writing 447 + * the control register just before the surface register. 
448 + */ 449 + intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); 450 + if (INTEL_GEN(dev_priv) >= 4) 451 + intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 452 + intel_plane_ggtt_offset(plane_state) + dspaddr_offset); 453 + else 454 + intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 455 + intel_plane_ggtt_offset(plane_state) + dspaddr_offset); 456 + 457 + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 458 + } 459 + 460 + static void i9xx_disable_plane(struct intel_plane *plane, 461 + const struct intel_crtc_state *crtc_state) 462 + { 463 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 464 + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 465 + unsigned long irqflags; 466 + u32 dspcntr; 467 + 468 + /* 469 + * DSPCNTR pipe gamma enable on g4x+ and pipe csc 470 + * enable on ilk+ affect the pipe bottom color as 471 + * well, so we must configure them even if the plane 472 + * is disabled. 473 + * 474 + * On pre-g4x there is no way to gamma correct the 475 + * pipe bottom color but we'll keep on doing this 476 + * anyway so that the crtc state readout works correctly. 
477 + */ 478 + dspcntr = i9xx_plane_ctl_crtc(crtc_state); 479 + 480 + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 481 + 482 + intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); 483 + if (INTEL_GEN(dev_priv) >= 4) 484 + intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0); 485 + else 486 + intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0); 487 + 488 + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 489 + } 490 + 491 + static bool i9xx_plane_get_hw_state(struct intel_plane *plane, 492 + enum pipe *pipe) 493 + { 494 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 495 + enum intel_display_power_domain power_domain; 496 + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 497 + intel_wakeref_t wakeref; 498 + bool ret; 499 + u32 val; 500 + 501 + /* 502 + * Not 100% correct for planes that can move between pipes, 503 + * but that's only the case for gen2-4 which don't have any 504 + * display power wells. 505 + */ 506 + power_domain = POWER_DOMAIN_PIPE(plane->pipe); 507 + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 508 + if (!wakeref) 509 + return false; 510 + 511 + val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); 512 + 513 + ret = val & DISPLAY_PLANE_ENABLE; 514 + 515 + if (INTEL_GEN(dev_priv) >= 5) 516 + *pipe = plane->pipe; 517 + else 518 + *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 519 + DISPPLANE_SEL_PIPE_SHIFT; 520 + 521 + intel_display_power_put(dev_priv, power_domain, wakeref); 522 + 523 + return ret; 524 + } 525 + 526 + unsigned int 527 + i9xx_plane_max_stride(struct intel_plane *plane, 528 + u32 pixel_format, u64 modifier, 529 + unsigned int rotation) 530 + { 531 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 532 + 533 + if (!HAS_GMCH(dev_priv)) { 534 + return 32*1024; 535 + } else if (INTEL_GEN(dev_priv) >= 4) { 536 + if (modifier == I915_FORMAT_MOD_X_TILED) 537 + return 16*1024; 538 + else 539 + return 32*1024; 540 + } else if (INTEL_GEN(dev_priv) >= 3) { 541 + if 
(modifier == I915_FORMAT_MOD_X_TILED) 542 + return 8*1024; 543 + else 544 + return 16*1024; 545 + } else { 546 + if (plane->i9xx_plane == PLANE_C) 547 + return 4*1024; 548 + else 549 + return 8*1024; 550 + } 551 + } 552 + 553 + static const struct drm_plane_funcs i965_plane_funcs = { 554 + .update_plane = drm_atomic_helper_update_plane, 555 + .disable_plane = drm_atomic_helper_disable_plane, 556 + .destroy = intel_plane_destroy, 557 + .atomic_duplicate_state = intel_plane_duplicate_state, 558 + .atomic_destroy_state = intel_plane_destroy_state, 559 + .format_mod_supported = i965_plane_format_mod_supported, 560 + }; 561 + 562 + static const struct drm_plane_funcs i8xx_plane_funcs = { 563 + .update_plane = drm_atomic_helper_update_plane, 564 + .disable_plane = drm_atomic_helper_disable_plane, 565 + .destroy = intel_plane_destroy, 566 + .atomic_duplicate_state = intel_plane_duplicate_state, 567 + .atomic_destroy_state = intel_plane_destroy_state, 568 + .format_mod_supported = i8xx_plane_format_mod_supported, 569 + }; 570 + 571 + struct intel_plane * 572 + intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) 573 + { 574 + struct intel_plane *plane; 575 + const struct drm_plane_funcs *plane_funcs; 576 + unsigned int supported_rotations; 577 + const u32 *formats; 578 + int num_formats; 579 + int ret, zpos; 580 + 581 + if (INTEL_GEN(dev_priv) >= 9) 582 + return skl_universal_plane_create(dev_priv, pipe, 583 + PLANE_PRIMARY); 584 + 585 + plane = intel_plane_alloc(); 586 + if (IS_ERR(plane)) 587 + return plane; 588 + 589 + plane->pipe = pipe; 590 + /* 591 + * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 592 + * port is hooked to pipe B. Hence we want plane A feeding pipe B. 
593 + */ 594 + if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 && 595 + INTEL_NUM_PIPES(dev_priv) == 2) 596 + plane->i9xx_plane = (enum i9xx_plane_id) !pipe; 597 + else 598 + plane->i9xx_plane = (enum i9xx_plane_id) pipe; 599 + plane->id = PLANE_PRIMARY; 600 + plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); 601 + 602 + plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); 603 + if (plane->has_fbc) { 604 + struct intel_fbc *fbc = &dev_priv->fbc; 605 + 606 + fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; 607 + } 608 + 609 + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 610 + formats = vlv_primary_formats; 611 + num_formats = ARRAY_SIZE(vlv_primary_formats); 612 + } else if (INTEL_GEN(dev_priv) >= 4) { 613 + /* 614 + * WaFP16GammaEnabling:ivb 615 + * "Workaround : When using the 64-bit format, the plane 616 + * output on each color channel has one quarter amplitude. 617 + * It can be brought up to full amplitude by using pipe 618 + * gamma correction or pipe color space conversion to 619 + * multiply the plane output by four." 620 + * 621 + * There is no dedicated plane gamma for the primary plane, 622 + * and using the pipe gamma/csc could conflict with other 623 + * planes, so we choose not to expose fp16 on IVB primary 624 + * planes. HSW primary planes no longer have this problem. 
625 + */ 626 + if (IS_IVYBRIDGE(dev_priv)) { 627 + formats = ivb_primary_formats; 628 + num_formats = ARRAY_SIZE(ivb_primary_formats); 629 + } else { 630 + formats = i965_primary_formats; 631 + num_formats = ARRAY_SIZE(i965_primary_formats); 632 + } 633 + } else { 634 + formats = i8xx_primary_formats; 635 + num_formats = ARRAY_SIZE(i8xx_primary_formats); 636 + } 637 + 638 + if (INTEL_GEN(dev_priv) >= 4) 639 + plane_funcs = &i965_plane_funcs; 640 + else 641 + plane_funcs = &i8xx_plane_funcs; 642 + 643 + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 644 + plane->min_cdclk = vlv_plane_min_cdclk; 645 + else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 646 + plane->min_cdclk = hsw_plane_min_cdclk; 647 + else if (IS_IVYBRIDGE(dev_priv)) 648 + plane->min_cdclk = ivb_plane_min_cdclk; 649 + else 650 + plane->min_cdclk = i9xx_plane_min_cdclk; 651 + 652 + plane->max_stride = i9xx_plane_max_stride; 653 + plane->update_plane = i9xx_update_plane; 654 + plane->disable_plane = i9xx_disable_plane; 655 + plane->get_hw_state = i9xx_plane_get_hw_state; 656 + plane->check_plane = i9xx_plane_check; 657 + 658 + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 659 + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 660 + 0, plane_funcs, 661 + formats, num_formats, 662 + i9xx_format_modifiers, 663 + DRM_PLANE_TYPE_PRIMARY, 664 + "primary %c", pipe_name(pipe)); 665 + else 666 + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 667 + 0, plane_funcs, 668 + formats, num_formats, 669 + i9xx_format_modifiers, 670 + DRM_PLANE_TYPE_PRIMARY, 671 + "plane %c", 672 + plane_name(plane->i9xx_plane)); 673 + if (ret) 674 + goto fail; 675 + 676 + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 677 + supported_rotations = 678 + DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 679 + DRM_MODE_REFLECT_X; 680 + } else if (INTEL_GEN(dev_priv) >= 4) { 681 + supported_rotations = 682 + DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 683 + } else { 684 + supported_rotations = 
DRM_MODE_ROTATE_0; 685 + } 686 + 687 + if (INTEL_GEN(dev_priv) >= 4) 688 + drm_plane_create_rotation_property(&plane->base, 689 + DRM_MODE_ROTATE_0, 690 + supported_rotations); 691 + 692 + zpos = 0; 693 + drm_plane_create_zpos_immutable_property(&plane->base, zpos); 694 + 695 + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); 696 + 697 + return plane; 698 + 699 + fail: 700 + intel_plane_free(plane); 701 + 702 + return ERR_PTR(ret); 703 + } 704 +
+24
drivers/gpu/drm/i915/display/i9xx_plane.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2020 Intel Corporation 4 + */ 5 + 6 + #ifndef _I9XX_PLANE_H_ 7 + #define _I9XX_PLANE_H_ 8 + 9 + #include <linux/types.h> 10 + 11 + enum pipe; 12 + struct drm_i915_private; 13 + struct intel_plane; 14 + struct intel_plane_state; 15 + 16 + unsigned int i9xx_plane_max_stride(struct intel_plane *plane, 17 + u32 pixel_format, u64 modifier, 18 + unsigned int rotation); 19 + int i9xx_check_plane_surface(struct intel_plane_state *plane_state); 20 + 21 + struct intel_plane * 22 + intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe); 23 + 24 + #endif
+3 -4
drivers/gpu/drm/i915/display/icl_dsi.c
··· 1535 1535 1536 1536 vdsc_cfg->convert_rgb = true; 1537 1537 1538 + /* FIXME: initialize from VBT */ 1539 + vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1540 + 1538 1541 ret = intel_dsc_compute_params(encoder, crtc_state); 1539 1542 if (ret) 1540 1543 return ret; ··· 1619 1616 1620 1617 get_dsi_io_power_domains(i915, 1621 1618 enc_to_intel_dsi(encoder)); 1622 - 1623 - if (crtc_state->dsc.compression_enable) 1624 - intel_display_power_get(i915, 1625 - intel_dsc_power_domain(crtc_state)); 1626 1619 } 1627 1620 1628 1621 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+3
drivers/gpu/drm/i915/display/intel_atomic_plane.c
··· 312 312 int ret; 313 313 314 314 intel_plane_set_invisible(new_crtc_state, new_plane_state); 315 + new_crtc_state->enabled_planes &= ~BIT(plane->id); 315 316 316 317 if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc) 317 318 return 0; 319 + 320 + new_crtc_state->enabled_planes |= BIT(plane->id); 318 321 319 322 ret = plane->check_plane(new_crtc_state, new_plane_state); 320 323 if (ret)
+3 -8
drivers/gpu/drm/i915/display/intel_bios.c
··· 2555 2555 crtc_state->dsc.slice_count); 2556 2556 2557 2557 /* 2558 - * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the 2559 - * implementation specific physical rate buffer size. Currently we use 2560 - * the required rate buffer model size calculated in 2561 - * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E. 2562 - * 2563 2558 * The VBT rc_buffer_block_size and rc_buffer_size definitions 2564 - * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC 2565 - * implementation should also use the DPCD (or perhaps VBT for eDP) 2566 - * provided value for the buffer size. 2559 + * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. 2567 2560 */ 2561 + vdsc_cfg->rc_model_size = drm_dsc_dp_rc_buffer_size(dsc->rc_buffer_block_size, 2562 + dsc->rc_buffer_size); 2568 2563 2569 2564 /* FIXME: DSI spec says bpc + 1 for this one */ 2570 2565 vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth);
+3 -4
drivers/gpu/drm/i915/display/intel_cdclk.c
··· 2415 2415 if (ret) 2416 2416 return ret; 2417 2417 2418 - ret = drm_atomic_add_affected_planes(&state->base, 2419 - &crtc->base); 2418 + ret = intel_atomic_add_affected_planes(state, crtc); 2420 2419 if (ret) 2421 2420 return ret; 2422 2421 ··· 2709 2710 * DG1 always uses a 38.4 MHz rawclk. The bspec tells us 2710 2711 * "Program Numerator=2, Denominator=4, Divider=37 decimal." 2711 2712 */ 2712 - I915_WRITE(PCH_RAWCLK_FREQ, 2713 - CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2)); 2713 + intel_de_write(dev_priv, PCH_RAWCLK_FREQ, 2714 + CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2)); 2714 2715 2715 2716 return 38400; 2716 2717 }
+16 -4
drivers/gpu/drm/i915/display/intel_combo_phy.c
··· 427 427 u32 val; 428 428 429 429 if (phy == PHY_A && 430 - !icl_combo_phy_verify_state(dev_priv, phy)) 431 - drm_warn(&dev_priv->drm, 432 - "Combo PHY %c HW state changed unexpectedly\n", 433 - phy_name(phy)); 430 + !icl_combo_phy_verify_state(dev_priv, phy)) { 431 + if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) { 432 + /* 433 + * A known problem with old ifwi: 434 + * https://gitlab.freedesktop.org/drm/intel/-/issues/2411 435 + * Suppress the warning for CI. Remove ASAP! 436 + */ 437 + drm_dbg_kms(&dev_priv->drm, 438 + "Combo PHY %c HW state changed unexpectedly\n", 439 + phy_name(phy)); 440 + } else { 441 + drm_warn(&dev_priv->drm, 442 + "Combo PHY %c HW state changed unexpectedly\n", 443 + phy_name(phy)); 444 + } 445 + } 434 446 435 447 if (!has_phy_misc(dev_priv, phy)) 436 448 goto skip_phy_misc;
+11 -18
drivers/gpu/drm/i915/display/intel_connector.c
··· 279 279 } 280 280 281 281 void 282 - intel_attach_colorspace_property(struct drm_connector *connector) 282 + intel_attach_hdmi_colorspace_property(struct drm_connector *connector) 283 283 { 284 - switch (connector->connector_type) { 285 - case DRM_MODE_CONNECTOR_HDMIA: 286 - case DRM_MODE_CONNECTOR_HDMIB: 287 - if (drm_mode_create_hdmi_colorspace_property(connector)) 288 - return; 289 - break; 290 - case DRM_MODE_CONNECTOR_DisplayPort: 291 - case DRM_MODE_CONNECTOR_eDP: 292 - if (drm_mode_create_dp_colorspace_property(connector)) 293 - return; 294 - break; 295 - default: 296 - MISSING_CASE(connector->connector_type); 297 - return; 298 - } 284 + if (!drm_mode_create_hdmi_colorspace_property(connector)) 285 + drm_object_attach_property(&connector->base, 286 + connector->colorspace_property, 0); 287 + } 299 288 300 - drm_object_attach_property(&connector->base, 301 - connector->colorspace_property, 0); 289 + void 290 + intel_attach_dp_colorspace_property(struct drm_connector *connector) 291 + { 292 + if (!drm_mode_create_dp_colorspace_property(connector)) 293 + drm_object_attach_property(&connector->base, 294 + connector->colorspace_property, 0); 302 295 }
+2 -1
drivers/gpu/drm/i915/display/intel_connector.h
··· 30 30 void intel_attach_force_audio_property(struct drm_connector *connector); 31 31 void intel_attach_broadcast_rgb_property(struct drm_connector *connector); 32 32 void intel_attach_aspect_ratio_property(struct drm_connector *connector); 33 - void intel_attach_colorspace_property(struct drm_connector *connector); 33 + void intel_attach_hdmi_colorspace_property(struct drm_connector *connector); 34 + void intel_attach_dp_colorspace_property(struct drm_connector *connector); 34 35 35 36 #endif /* __INTEL_CONNECTOR_H__ */
+806
drivers/gpu/drm/i915/display/intel_cursor.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2020 Intel Corporation 4 + */ 5 + #include <linux/kernel.h> 6 + 7 + #include <drm/drm_atomic_helper.h> 8 + #include <drm/drm_atomic_uapi.h> 9 + #include <drm/drm_damage_helper.h> 10 + #include <drm/drm_plane_helper.h> 11 + #include <drm/drm_fourcc.h> 12 + 13 + #include "intel_atomic.h" 14 + #include "intel_atomic_plane.h" 15 + #include "intel_cursor.h" 16 + #include "intel_display_types.h" 17 + #include "intel_display.h" 18 + 19 + #include "intel_frontbuffer.h" 20 + #include "intel_pm.h" 21 + #include "intel_psr.h" 22 + #include "intel_sprite.h" 23 + 24 + /* Cursor formats */ 25 + static const u32 intel_cursor_formats[] = { 26 + DRM_FORMAT_ARGB8888, 27 + }; 28 + 29 + static const u64 cursor_format_modifiers[] = { 30 + DRM_FORMAT_MOD_LINEAR, 31 + DRM_FORMAT_MOD_INVALID 32 + }; 33 + 34 + static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 35 + { 36 + struct drm_i915_private *dev_priv = 37 + to_i915(plane_state->uapi.plane->dev); 38 + const struct drm_framebuffer *fb = plane_state->hw.fb; 39 + const struct drm_i915_gem_object *obj = intel_fb_obj(fb); 40 + u32 base; 41 + 42 + if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) 43 + base = sg_dma_address(obj->mm.pages->sgl); 44 + else 45 + base = intel_plane_ggtt_offset(plane_state); 46 + 47 + return base + plane_state->color_plane[0].offset; 48 + } 49 + 50 + static u32 intel_cursor_position(const struct intel_plane_state *plane_state) 51 + { 52 + int x = plane_state->uapi.dst.x1; 53 + int y = plane_state->uapi.dst.y1; 54 + u32 pos = 0; 55 + 56 + if (x < 0) { 57 + pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 58 + x = -x; 59 + } 60 + pos |= x << CURSOR_X_SHIFT; 61 + 62 + if (y < 0) { 63 + pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 64 + y = -y; 65 + } 66 + pos |= y << CURSOR_Y_SHIFT; 67 + 68 + return pos; 69 + } 70 + 71 + static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) 72 + { 73 + const struct 
drm_mode_config *config = 74 + &plane_state->uapi.plane->dev->mode_config; 75 + int width = drm_rect_width(&plane_state->uapi.dst); 76 + int height = drm_rect_height(&plane_state->uapi.dst); 77 + 78 + return width > 0 && width <= config->cursor_width && 79 + height > 0 && height <= config->cursor_height; 80 + } 81 + 82 + static int intel_cursor_check_surface(struct intel_plane_state *plane_state) 83 + { 84 + struct drm_i915_private *dev_priv = 85 + to_i915(plane_state->uapi.plane->dev); 86 + unsigned int rotation = plane_state->hw.rotation; 87 + int src_x, src_y; 88 + u32 offset; 89 + int ret; 90 + 91 + ret = intel_plane_compute_gtt(plane_state); 92 + if (ret) 93 + return ret; 94 + 95 + if (!plane_state->uapi.visible) 96 + return 0; 97 + 98 + src_x = plane_state->uapi.src.x1 >> 16; 99 + src_y = plane_state->uapi.src.y1 >> 16; 100 + 101 + intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 102 + offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 103 + plane_state, 0); 104 + 105 + if (src_x != 0 || src_y != 0) { 106 + drm_dbg_kms(&dev_priv->drm, 107 + "Arbitrary cursor panning not supported\n"); 108 + return -EINVAL; 109 + } 110 + 111 + /* 112 + * Put the final coordinates back so that the src 113 + * coordinate checks will see the right values. 
114 + */ 115 + drm_rect_translate_to(&plane_state->uapi.src, 116 + src_x << 16, src_y << 16); 117 + 118 + /* ILK+ do this automagically in hardware */ 119 + if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { 120 + const struct drm_framebuffer *fb = plane_state->hw.fb; 121 + int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 122 + int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 123 + 124 + offset += (src_h * src_w - 1) * fb->format->cpp[0]; 125 + } 126 + 127 + plane_state->color_plane[0].offset = offset; 128 + plane_state->color_plane[0].x = src_x; 129 + plane_state->color_plane[0].y = src_y; 130 + 131 + return 0; 132 + } 133 + 134 + static int intel_check_cursor(struct intel_crtc_state *crtc_state, 135 + struct intel_plane_state *plane_state) 136 + { 137 + const struct drm_framebuffer *fb = plane_state->hw.fb; 138 + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 139 + const struct drm_rect src = plane_state->uapi.src; 140 + const struct drm_rect dst = plane_state->uapi.dst; 141 + int ret; 142 + 143 + if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { 144 + drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n"); 145 + return -EINVAL; 146 + } 147 + 148 + ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, 149 + DRM_PLANE_HELPER_NO_SCALING, 150 + DRM_PLANE_HELPER_NO_SCALING, 151 + true); 152 + if (ret) 153 + return ret; 154 + 155 + /* Use the unclipped src/dst rectangles, which we program to hw */ 156 + plane_state->uapi.src = src; 157 + plane_state->uapi.dst = dst; 158 + 159 + ret = intel_cursor_check_surface(plane_state); 160 + if (ret) 161 + return ret; 162 + 163 + if (!plane_state->uapi.visible) 164 + return 0; 165 + 166 + ret = intel_plane_check_src_coordinates(plane_state); 167 + if (ret) 168 + return ret; 169 + 170 + return 0; 171 + } 172 + 173 + static unsigned int 174 + i845_cursor_max_stride(struct intel_plane *plane, 175 + u32 pixel_format, u64 modifier, 176 + unsigned int rotation) 177 + { 178 + 
return 2048; 179 + } 180 + 181 + static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 182 + { 183 + u32 cntl = 0; 184 + 185 + if (crtc_state->gamma_enable) 186 + cntl |= CURSOR_GAMMA_ENABLE; 187 + 188 + return cntl; 189 + } 190 + 191 + static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 192 + const struct intel_plane_state *plane_state) 193 + { 194 + return CURSOR_ENABLE | 195 + CURSOR_FORMAT_ARGB | 196 + CURSOR_STRIDE(plane_state->color_plane[0].stride); 197 + } 198 + 199 + static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 200 + { 201 + int width = drm_rect_width(&plane_state->uapi.dst); 202 + 203 + /* 204 + * 845g/865g are only limited by the width of their cursors, 205 + * the height is arbitrary up to the precision of the register. 206 + */ 207 + return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 208 + } 209 + 210 + static int i845_check_cursor(struct intel_crtc_state *crtc_state, 211 + struct intel_plane_state *plane_state) 212 + { 213 + const struct drm_framebuffer *fb = plane_state->hw.fb; 214 + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 215 + int ret; 216 + 217 + ret = intel_check_cursor(crtc_state, plane_state); 218 + if (ret) 219 + return ret; 220 + 221 + /* if we want to turn off the cursor ignore width and height */ 222 + if (!fb) 223 + return 0; 224 + 225 + /* Check for which cursor types we support */ 226 + if (!i845_cursor_size_ok(plane_state)) { 227 + drm_dbg_kms(&i915->drm, 228 + "Cursor dimension %dx%d not supported\n", 229 + drm_rect_width(&plane_state->uapi.dst), 230 + drm_rect_height(&plane_state->uapi.dst)); 231 + return -EINVAL; 232 + } 233 + 234 + drm_WARN_ON(&i915->drm, plane_state->uapi.visible && 235 + plane_state->color_plane[0].stride != fb->pitches[0]); 236 + 237 + switch (fb->pitches[0]) { 238 + case 256: 239 + case 512: 240 + case 1024: 241 + case 2048: 242 + break; 243 + default: 244 + drm_dbg_kms(&i915->drm, "Invalid 
cursor stride (%u)\n", 245 + fb->pitches[0]); 246 + return -EINVAL; 247 + } 248 + 249 + plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 250 + 251 + return 0; 252 + } 253 + 254 + static void i845_update_cursor(struct intel_plane *plane, 255 + const struct intel_crtc_state *crtc_state, 256 + const struct intel_plane_state *plane_state) 257 + { 258 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 259 + u32 cntl = 0, base = 0, pos = 0, size = 0; 260 + unsigned long irqflags; 261 + 262 + if (plane_state && plane_state->uapi.visible) { 263 + unsigned int width = drm_rect_width(&plane_state->uapi.dst); 264 + unsigned int height = drm_rect_height(&plane_state->uapi.dst); 265 + 266 + cntl = plane_state->ctl | 267 + i845_cursor_ctl_crtc(crtc_state); 268 + 269 + size = (height << 12) | width; 270 + 271 + base = intel_cursor_base(plane_state); 272 + pos = intel_cursor_position(plane_state); 273 + } 274 + 275 + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 276 + 277 + /* On these chipsets we can only modify the base/size/stride 278 + * whilst the cursor is disabled. 
279 + */ 280 + if (plane->cursor.base != base || 281 + plane->cursor.size != size || 282 + plane->cursor.cntl != cntl) { 283 + intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0); 284 + intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base); 285 + intel_de_write_fw(dev_priv, CURSIZE, size); 286 + intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); 287 + intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl); 288 + 289 + plane->cursor.base = base; 290 + plane->cursor.size = size; 291 + plane->cursor.cntl = cntl; 292 + } else { 293 + intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); 294 + } 295 + 296 + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 297 + } 298 + 299 + static void i845_disable_cursor(struct intel_plane *plane, 300 + const struct intel_crtc_state *crtc_state) 301 + { 302 + i845_update_cursor(plane, crtc_state, NULL); 303 + } 304 + 305 + static bool i845_cursor_get_hw_state(struct intel_plane *plane, 306 + enum pipe *pipe) 307 + { 308 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 309 + enum intel_display_power_domain power_domain; 310 + intel_wakeref_t wakeref; 311 + bool ret; 312 + 313 + power_domain = POWER_DOMAIN_PIPE(PIPE_A); 314 + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 315 + if (!wakeref) 316 + return false; 317 + 318 + ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE; 319 + 320 + *pipe = PIPE_A; 321 + 322 + intel_display_power_put(dev_priv, power_domain, wakeref); 323 + 324 + return ret; 325 + } 326 + 327 + static unsigned int 328 + i9xx_cursor_max_stride(struct intel_plane *plane, 329 + u32 pixel_format, u64 modifier, 330 + unsigned int rotation) 331 + { 332 + return plane->base.dev->mode_config.cursor_width * 4; 333 + } 334 + 335 + static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 336 + { 337 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 338 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 339 + u32 cntl = 0; 340 + 341 + if 
(INTEL_GEN(dev_priv) >= 11) 342 + return cntl; 343 + 344 + if (crtc_state->gamma_enable) 345 + cntl = MCURSOR_GAMMA_ENABLE; 346 + 347 + if (crtc_state->csc_enable) 348 + cntl |= MCURSOR_PIPE_CSC_ENABLE; 349 + 350 + if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 351 + cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 352 + 353 + return cntl; 354 + } 355 + 356 + static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 357 + const struct intel_plane_state *plane_state) 358 + { 359 + struct drm_i915_private *dev_priv = 360 + to_i915(plane_state->uapi.plane->dev); 361 + u32 cntl = 0; 362 + 363 + if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 364 + cntl |= MCURSOR_TRICKLE_FEED_DISABLE; 365 + 366 + switch (drm_rect_width(&plane_state->uapi.dst)) { 367 + case 64: 368 + cntl |= MCURSOR_MODE_64_ARGB_AX; 369 + break; 370 + case 128: 371 + cntl |= MCURSOR_MODE_128_ARGB_AX; 372 + break; 373 + case 256: 374 + cntl |= MCURSOR_MODE_256_ARGB_AX; 375 + break; 376 + default: 377 + MISSING_CASE(drm_rect_width(&plane_state->uapi.dst)); 378 + return 0; 379 + } 380 + 381 + if (plane_state->hw.rotation & DRM_MODE_ROTATE_180) 382 + cntl |= MCURSOR_ROTATE_180; 383 + 384 + return cntl; 385 + } 386 + 387 + static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) 388 + { 389 + struct drm_i915_private *dev_priv = 390 + to_i915(plane_state->uapi.plane->dev); 391 + int width = drm_rect_width(&plane_state->uapi.dst); 392 + int height = drm_rect_height(&plane_state->uapi.dst); 393 + 394 + if (!intel_cursor_size_ok(plane_state)) 395 + return false; 396 + 397 + /* Cursor width is limited to a few power-of-two sizes */ 398 + switch (width) { 399 + case 256: 400 + case 128: 401 + case 64: 402 + break; 403 + default: 404 + return false; 405 + } 406 + 407 + /* 408 + * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor 409 + * height from 8 lines up to the cursor width, when the 410 + * cursor is not rotated. Everything else requires square 411 + * cursors. 
412 + */ 413 + if (HAS_CUR_FBC(dev_priv) && 414 + plane_state->hw.rotation & DRM_MODE_ROTATE_0) { 415 + if (height < 8 || height > width) 416 + return false; 417 + } else { 418 + if (height != width) 419 + return false; 420 + } 421 + 422 + return true; 423 + } 424 + 425 + static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, 426 + struct intel_plane_state *plane_state) 427 + { 428 + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 429 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 430 + const struct drm_framebuffer *fb = plane_state->hw.fb; 431 + enum pipe pipe = plane->pipe; 432 + int ret; 433 + 434 + ret = intel_check_cursor(crtc_state, plane_state); 435 + if (ret) 436 + return ret; 437 + 438 + /* if we want to turn off the cursor ignore width and height */ 439 + if (!fb) 440 + return 0; 441 + 442 + /* Check for which cursor types we support */ 443 + if (!i9xx_cursor_size_ok(plane_state)) { 444 + drm_dbg(&dev_priv->drm, 445 + "Cursor dimension %dx%d not supported\n", 446 + drm_rect_width(&plane_state->uapi.dst), 447 + drm_rect_height(&plane_state->uapi.dst)); 448 + return -EINVAL; 449 + } 450 + 451 + drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible && 452 + plane_state->color_plane[0].stride != fb->pitches[0]); 453 + 454 + if (fb->pitches[0] != 455 + drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { 456 + drm_dbg_kms(&dev_priv->drm, 457 + "Invalid cursor stride (%u) (cursor width %d)\n", 458 + fb->pitches[0], 459 + drm_rect_width(&plane_state->uapi.dst)); 460 + return -EINVAL; 461 + } 462 + 463 + /* 464 + * There's something wrong with the cursor on CHV pipe C. 465 + * If it straddles the left edge of the screen then 466 + * moving it away from the edge or disabling it often 467 + * results in a pipe underrun, and often that can lead to 468 + * dead pipe (constant underrun reported, and it scans 469 + * out just a solid color). 
To recover from that, the 470 + * display power well must be turned off and on again. 471 + * Refuse the put the cursor into that compromised position. 472 + */ 473 + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && 474 + plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) { 475 + drm_dbg_kms(&dev_priv->drm, 476 + "CHV cursor C not allowed to straddle the left screen edge\n"); 477 + return -EINVAL; 478 + } 479 + 480 + plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); 481 + 482 + return 0; 483 + } 484 + 485 + static void i9xx_update_cursor(struct intel_plane *plane, 486 + const struct intel_crtc_state *crtc_state, 487 + const struct intel_plane_state *plane_state) 488 + { 489 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 490 + enum pipe pipe = plane->pipe; 491 + u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; 492 + unsigned long irqflags; 493 + 494 + if (plane_state && plane_state->uapi.visible) { 495 + unsigned width = drm_rect_width(&plane_state->uapi.dst); 496 + unsigned height = drm_rect_height(&plane_state->uapi.dst); 497 + 498 + cntl = plane_state->ctl | 499 + i9xx_cursor_ctl_crtc(crtc_state); 500 + 501 + if (width != height) 502 + fbc_ctl = CUR_FBC_CTL_EN | (height - 1); 503 + 504 + base = intel_cursor_base(plane_state); 505 + pos = intel_cursor_position(plane_state); 506 + } 507 + 508 + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 509 + 510 + /* 511 + * On some platforms writing CURCNTR first will also 512 + * cause CURPOS to be armed by the CURBASE write. 513 + * Without the CURCNTR write the CURPOS write would 514 + * arm itself. Thus we always update CURCNTR before 515 + * CURPOS. 516 + * 517 + * On other platforms CURPOS always requires the 518 + * CURBASE write to arm the update. Additonally 519 + * a write to any of the cursor register will cancel 520 + * an already armed cursor update. 
Thus leaving out 521 + * the CURBASE write after CURPOS could lead to a 522 + * cursor that doesn't appear to move, or even change 523 + * shape. Thus we always write CURBASE. 524 + * 525 + * The other registers are armed by by the CURBASE write 526 + * except when the plane is getting enabled at which time 527 + * the CURCNTR write arms the update. 528 + */ 529 + 530 + if (INTEL_GEN(dev_priv) >= 9) 531 + skl_write_cursor_wm(plane, crtc_state); 532 + 533 + if (!intel_crtc_needs_modeset(crtc_state)) 534 + intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0); 535 + 536 + if (plane->cursor.base != base || 537 + plane->cursor.size != fbc_ctl || 538 + plane->cursor.cntl != cntl) { 539 + if (HAS_CUR_FBC(dev_priv)) 540 + intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe), 541 + fbc_ctl); 542 + intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl); 543 + intel_de_write_fw(dev_priv, CURPOS(pipe), pos); 544 + intel_de_write_fw(dev_priv, CURBASE(pipe), base); 545 + 546 + plane->cursor.base = base; 547 + plane->cursor.size = fbc_ctl; 548 + plane->cursor.cntl = cntl; 549 + } else { 550 + intel_de_write_fw(dev_priv, CURPOS(pipe), pos); 551 + intel_de_write_fw(dev_priv, CURBASE(pipe), base); 552 + } 553 + 554 + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 555 + } 556 + 557 + static void i9xx_disable_cursor(struct intel_plane *plane, 558 + const struct intel_crtc_state *crtc_state) 559 + { 560 + i9xx_update_cursor(plane, crtc_state, NULL); 561 + } 562 + 563 + static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, 564 + enum pipe *pipe) 565 + { 566 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 567 + enum intel_display_power_domain power_domain; 568 + intel_wakeref_t wakeref; 569 + bool ret; 570 + u32 val; 571 + 572 + /* 573 + * Not 100% correct for planes that can move between pipes, 574 + * but that's only the case for gen2-3 which don't have any 575 + * display power wells. 
576 + */ 577 + power_domain = POWER_DOMAIN_PIPE(plane->pipe); 578 + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 579 + if (!wakeref) 580 + return false; 581 + 582 + val = intel_de_read(dev_priv, CURCNTR(plane->pipe)); 583 + 584 + ret = val & MCURSOR_MODE; 585 + 586 + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 587 + *pipe = plane->pipe; 588 + else 589 + *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> 590 + MCURSOR_PIPE_SELECT_SHIFT; 591 + 592 + intel_display_power_put(dev_priv, power_domain, wakeref); 593 + 594 + return ret; 595 + } 596 + 597 + static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, 598 + u32 format, u64 modifier) 599 + { 600 + return modifier == DRM_FORMAT_MOD_LINEAR && 601 + format == DRM_FORMAT_ARGB8888; 602 + } 603 + 604 + static int 605 + intel_legacy_cursor_update(struct drm_plane *_plane, 606 + struct drm_crtc *_crtc, 607 + struct drm_framebuffer *fb, 608 + int crtc_x, int crtc_y, 609 + unsigned int crtc_w, unsigned int crtc_h, 610 + u32 src_x, u32 src_y, 611 + u32 src_w, u32 src_h, 612 + struct drm_modeset_acquire_ctx *ctx) 613 + { 614 + struct intel_plane *plane = to_intel_plane(_plane); 615 + struct intel_crtc *crtc = to_intel_crtc(_crtc); 616 + struct intel_plane_state *old_plane_state = 617 + to_intel_plane_state(plane->base.state); 618 + struct intel_plane_state *new_plane_state; 619 + struct intel_crtc_state *crtc_state = 620 + to_intel_crtc_state(crtc->base.state); 621 + struct intel_crtc_state *new_crtc_state; 622 + int ret; 623 + 624 + /* 625 + * When crtc is inactive or there is a modeset pending, 626 + * wait for it to complete in the slowpath 627 + * 628 + * FIXME bigjoiner fastpath would be good 629 + */ 630 + if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) || 631 + crtc_state->update_pipe || crtc_state->bigjoiner) 632 + goto slow; 633 + 634 + /* 635 + * Don't do an async update if there is an outstanding commit modifying 636 + * the plane. 
This prevents our async update's changes from getting 637 + * overridden by a previous synchronous update's state. 638 + */ 639 + if (old_plane_state->uapi.commit && 640 + !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done)) 641 + goto slow; 642 + 643 + /* 644 + * If any parameters change that may affect watermarks, 645 + * take the slowpath. Only changing fb or position should be 646 + * in the fastpath. 647 + */ 648 + if (old_plane_state->uapi.crtc != &crtc->base || 649 + old_plane_state->uapi.src_w != src_w || 650 + old_plane_state->uapi.src_h != src_h || 651 + old_plane_state->uapi.crtc_w != crtc_w || 652 + old_plane_state->uapi.crtc_h != crtc_h || 653 + !old_plane_state->uapi.fb != !fb) 654 + goto slow; 655 + 656 + new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base)); 657 + if (!new_plane_state) 658 + return -ENOMEM; 659 + 660 + new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base)); 661 + if (!new_crtc_state) { 662 + ret = -ENOMEM; 663 + goto out_free; 664 + } 665 + 666 + drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb); 667 + 668 + new_plane_state->uapi.src_x = src_x; 669 + new_plane_state->uapi.src_y = src_y; 670 + new_plane_state->uapi.src_w = src_w; 671 + new_plane_state->uapi.src_h = src_h; 672 + new_plane_state->uapi.crtc_x = crtc_x; 673 + new_plane_state->uapi.crtc_y = crtc_y; 674 + new_plane_state->uapi.crtc_w = crtc_w; 675 + new_plane_state->uapi.crtc_h = crtc_h; 676 + 677 + intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc); 678 + 679 + ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, 680 + old_plane_state, new_plane_state); 681 + if (ret) 682 + goto out_free; 683 + 684 + ret = intel_plane_pin_fb(new_plane_state); 685 + if (ret) 686 + goto out_free; 687 + 688 + intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb), 689 + ORIGIN_FLIP); 690 + intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 691 + 
to_intel_frontbuffer(new_plane_state->hw.fb), 692 + plane->frontbuffer_bit); 693 + 694 + /* Swap plane state */ 695 + plane->base.state = &new_plane_state->uapi; 696 + 697 + /* 698 + * We cannot swap crtc_state as it may be in use by an atomic commit or 699 + * page flip that's running simultaneously. If we swap crtc_state and 700 + * destroy the old state, we will cause a use-after-free there. 701 + * 702 + * Only update active_planes, which is needed for our internal 703 + * bookkeeping. Either value will do the right thing when updating 704 + * planes atomically. If the cursor was part of the atomic update then 705 + * we would have taken the slowpath. 706 + */ 707 + crtc_state->active_planes = new_crtc_state->active_planes; 708 + 709 + if (new_plane_state->uapi.visible) 710 + intel_update_plane(plane, crtc_state, new_plane_state); 711 + else 712 + intel_disable_plane(plane, crtc_state); 713 + 714 + intel_plane_unpin_fb(old_plane_state); 715 + 716 + out_free: 717 + if (new_crtc_state) 718 + intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi); 719 + if (ret) 720 + intel_plane_destroy_state(&plane->base, &new_plane_state->uapi); 721 + else 722 + intel_plane_destroy_state(&plane->base, &old_plane_state->uapi); 723 + return ret; 724 + 725 + slow: 726 + return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb, 727 + crtc_x, crtc_y, crtc_w, crtc_h, 728 + src_x, src_y, src_w, src_h, ctx); 729 + } 730 + 731 + static const struct drm_plane_funcs intel_cursor_plane_funcs = { 732 + .update_plane = intel_legacy_cursor_update, 733 + .disable_plane = drm_atomic_helper_disable_plane, 734 + .destroy = intel_plane_destroy, 735 + .atomic_duplicate_state = intel_plane_duplicate_state, 736 + .atomic_destroy_state = intel_plane_destroy_state, 737 + .format_mod_supported = intel_cursor_format_mod_supported, 738 + }; 739 + 740 + struct intel_plane * 741 + intel_cursor_plane_create(struct drm_i915_private *dev_priv, 742 + enum pipe pipe) 743 + { 744 + struct 
intel_plane *cursor; 745 + int ret, zpos; 746 + 747 + cursor = intel_plane_alloc(); 748 + if (IS_ERR(cursor)) 749 + return cursor; 750 + 751 + cursor->pipe = pipe; 752 + cursor->i9xx_plane = (enum i9xx_plane_id) pipe; 753 + cursor->id = PLANE_CURSOR; 754 + cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); 755 + 756 + if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 757 + cursor->max_stride = i845_cursor_max_stride; 758 + cursor->update_plane = i845_update_cursor; 759 + cursor->disable_plane = i845_disable_cursor; 760 + cursor->get_hw_state = i845_cursor_get_hw_state; 761 + cursor->check_plane = i845_check_cursor; 762 + } else { 763 + cursor->max_stride = i9xx_cursor_max_stride; 764 + cursor->update_plane = i9xx_update_cursor; 765 + cursor->disable_plane = i9xx_disable_cursor; 766 + cursor->get_hw_state = i9xx_cursor_get_hw_state; 767 + cursor->check_plane = i9xx_check_cursor; 768 + } 769 + 770 + cursor->cursor.base = ~0; 771 + cursor->cursor.cntl = ~0; 772 + 773 + if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 774 + cursor->cursor.size = ~0; 775 + 776 + ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 777 + 0, &intel_cursor_plane_funcs, 778 + intel_cursor_formats, 779 + ARRAY_SIZE(intel_cursor_formats), 780 + cursor_format_modifiers, 781 + DRM_PLANE_TYPE_CURSOR, 782 + "cursor %c", pipe_name(pipe)); 783 + if (ret) 784 + goto fail; 785 + 786 + if (INTEL_GEN(dev_priv) >= 4) 787 + drm_plane_create_rotation_property(&cursor->base, 788 + DRM_MODE_ROTATE_0, 789 + DRM_MODE_ROTATE_0 | 790 + DRM_MODE_ROTATE_180); 791 + 792 + zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; 793 + drm_plane_create_zpos_immutable_property(&cursor->base, zpos); 794 + 795 + if (INTEL_GEN(dev_priv) >= 12) 796 + drm_plane_enable_fb_damage_clips(&cursor->base); 797 + 798 + drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 799 + 800 + return cursor; 801 + 802 + fail: 803 + intel_plane_free(cursor); 804 + 805 + return ERR_PTR(ret); 
806 + }
+17
drivers/gpu/drm/i915/display/intel_cursor.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2020 Intel Corporation 4 + */ 5 + 6 + #ifndef _INTEL_CURSOR_H_ 7 + #define _INTEL_CURSOR_H_ 8 + 9 + enum pipe; 10 + struct drm_i915_private; 11 + struct intel_plane; 12 + 13 + struct intel_plane * 14 + intel_cursor_plane_create(struct drm_i915_private *dev_priv, 15 + enum pipe pipe); 16 + 17 + #endif
+51 -32
drivers/gpu/drm/i915/display/intel_ddi.c
··· 2285 2285 dig_port = enc_to_dig_port(encoder); 2286 2286 2287 2287 if (!intel_phy_is_tc(dev_priv, phy) || 2288 - dig_port->tc_mode != TC_PORT_TBT_ALT) 2289 - intel_display_power_get(dev_priv, 2290 - dig_port->ddi_io_power_domain); 2288 + dig_port->tc_mode != TC_PORT_TBT_ALT) { 2289 + drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); 2290 + dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, 2291 + dig_port->ddi_io_power_domain); 2292 + } 2291 2293 2292 2294 /* 2293 2295 * AUX power is only needed for (e)DP mode, and for HDMI mode on TC 2294 2296 * ports. 2295 2297 */ 2296 2298 if (intel_crtc_has_dp_encoder(crtc_state) || 2297 - intel_phy_is_tc(dev_priv, phy)) 2298 - intel_display_power_get(dev_priv, 2299 - intel_ddi_main_link_aux_domain(dig_port)); 2299 + intel_phy_is_tc(dev_priv, phy)) { 2300 + drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref); 2301 + dig_port->aux_wakeref = 2302 + intel_display_power_get(dev_priv, 2303 + intel_ddi_main_link_aux_domain(dig_port)); 2304 + } 2300 2305 } 2301 2306 2302 2307 void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, ··· 3512 3507 val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); 3513 3508 val |= DP_TP_CTL_FEC_ENABLE; 3514 3509 intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); 3515 - 3516 - if (intel_de_wait_for_set(dev_priv, 3517 - dp_tp_status_reg(encoder, crtc_state), 3518 - DP_TP_STATUS_FEC_ENABLE_LIVE, 1)) 3519 - drm_err(&dev_priv->drm, 3520 - "Timed out waiting for FEC Enable Status\n"); 3521 3510 } 3522 3511 3523 3512 static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, ··· 3576 3577 3577 3578 /* 5. 
If IO power is controlled through PWR_WELL_CTL, Enable IO Power */ 3578 3579 if (!intel_phy_is_tc(dev_priv, phy) || 3579 - dig_port->tc_mode != TC_PORT_TBT_ALT) 3580 - intel_display_power_get(dev_priv, 3581 - dig_port->ddi_io_power_domain); 3580 + dig_port->tc_mode != TC_PORT_TBT_ALT) { 3581 + drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); 3582 + dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, 3583 + dig_port->ddi_io_power_domain); 3584 + } 3582 3585 3583 3586 /* 6. Program DP_MODE */ 3584 3587 icl_program_mg_dp_mode(dig_port, crtc_state); ··· 3703 3702 intel_ddi_clk_select(encoder, crtc_state); 3704 3703 3705 3704 if (!intel_phy_is_tc(dev_priv, phy) || 3706 - dig_port->tc_mode != TC_PORT_TBT_ALT) 3707 - intel_display_power_get(dev_priv, 3708 - dig_port->ddi_io_power_domain); 3705 + dig_port->tc_mode != TC_PORT_TBT_ALT) { 3706 + drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); 3707 + dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, 3708 + dig_port->ddi_io_power_domain); 3709 + } 3709 3710 3710 3711 icl_program_mg_dp_mode(dig_port, crtc_state); 3711 3712 ··· 3785 3782 intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); 3786 3783 intel_ddi_clk_select(encoder, crtc_state); 3787 3784 3788 - intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); 3785 + drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); 3786 + dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, 3787 + dig_port->ddi_io_power_domain); 3789 3788 3790 3789 icl_program_mg_dp_mode(dig_port, crtc_state); 3791 3790 ··· 3945 3940 3946 3941 if (!intel_phy_is_tc(dev_priv, phy) || 3947 3942 dig_port->tc_mode != TC_PORT_TBT_ALT) 3948 - intel_display_power_put_unchecked(dev_priv, 3949 - dig_port->ddi_io_power_domain); 3943 + intel_display_power_put(dev_priv, 3944 + dig_port->ddi_io_power_domain, 3945 + fetch_and_zero(&dig_port->ddi_io_wakeref)); 3950 3946 3951 3947 intel_ddi_clk_disable(encoder); 3952 3948 } ··· 3968 3962 3969 3963 
intel_disable_ddi_buf(encoder, old_crtc_state); 3970 3964 3971 - intel_display_power_put_unchecked(dev_priv, 3972 - dig_port->ddi_io_power_domain); 3965 + intel_display_power_put(dev_priv, 3966 + dig_port->ddi_io_power_domain, 3967 + fetch_and_zero(&dig_port->ddi_io_wakeref)); 3973 3968 3974 3969 intel_ddi_clk_disable(encoder); 3975 3970 ··· 4043 4036 icl_unmap_plls_to_ports(encoder); 4044 4037 4045 4038 if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port) 4046 - intel_display_power_put_unchecked(dev_priv, 4047 - intel_ddi_main_link_aux_domain(dig_port)); 4039 + intel_display_power_put(dev_priv, 4040 + intel_ddi_main_link_aux_domain(dig_port), 4041 + fetch_and_zero(&dig_port->aux_wakeref)); 4048 4042 4049 4043 if (is_tc_port) 4050 4044 intel_tc_port_put_link(dig_port); ··· 4130 4122 { 4131 4123 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4132 4124 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 4125 + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4133 4126 enum port port = encoder->port; 4134 4127 4135 4128 if (port == PORT_A && INTEL_GEN(dev_priv) < 9) ··· 4138 4129 4139 4130 intel_edp_backlight_on(crtc_state, conn_state); 4140 4131 intel_psr_enable(intel_dp, crtc_state, conn_state); 4141 - intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); 4132 + 4133 + if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink) 4134 + intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); 4135 + 4142 4136 intel_edp_drrs_enable(intel_dp, crtc_state); 4143 4137 4144 4138 if (crtc_state->has_audio) ··· 4384 4372 if (is_tc_port) 4385 4373 intel_tc_port_get_link(dig_port, crtc_state->lane_count); 4386 4374 4387 - if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) 4388 - intel_display_power_get(dev_priv, 4389 - intel_ddi_main_link_aux_domain(dig_port)); 4375 + if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) { 4376 + drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref); 4377 + dig_port->aux_wakeref = 
4378 + intel_display_power_get(dev_priv, 4379 + intel_ddi_main_link_aux_domain(dig_port)); 4380 + } 4390 4381 4391 4382 if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT) 4392 4383 /* ··· 4602 4587 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4603 4588 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); 4604 4589 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 4590 + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4605 4591 u32 temp, flags = 0; 4606 4592 4607 4593 temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); ··· 4677 4661 pipe_config->fec_enable); 4678 4662 } 4679 4663 4680 - pipe_config->infoframes.enable |= 4681 - intel_hdmi_infoframes_enabled(encoder, pipe_config); 4682 - 4664 + if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink) 4665 + pipe_config->infoframes.enable |= 4666 + intel_lspcon_infoframes_enabled(encoder, pipe_config); 4667 + else 4668 + pipe_config->infoframes.enable |= 4669 + intel_hdmi_infoframes_enabled(encoder, pipe_config); 4683 4670 break; 4684 4671 case TRANS_DDI_MODE_SELECT_DP_MST: 4685 4672 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
+138 -1612
drivers/gpu/drm/i915/display/intel_display.c
··· 45 45 46 46 #include "display/intel_crt.h" 47 47 #include "display/intel_ddi.h" 48 + #include "display/intel_display_debugfs.h" 48 49 #include "display/intel_dp.h" 49 50 #include "display/intel_dp_mst.h" 50 51 #include "display/intel_dpll_mgr.h" ··· 69 68 #include "intel_cdclk.h" 70 69 #include "intel_color.h" 71 70 #include "intel_csr.h" 71 + #include "intel_cursor.h" 72 72 #include "intel_display_types.h" 73 73 #include "intel_dp_link_training.h" 74 74 #include "intel_fbc.h" ··· 87 85 #include "intel_sprite.h" 88 86 #include "intel_tc.h" 89 87 #include "intel_vga.h" 90 - 91 - /* Primary plane formats for gen <= 3 */ 92 - static const u32 i8xx_primary_formats[] = { 93 - DRM_FORMAT_C8, 94 - DRM_FORMAT_XRGB1555, 95 - DRM_FORMAT_RGB565, 96 - DRM_FORMAT_XRGB8888, 97 - }; 98 - 99 - /* Primary plane formats for ivb (no fp16 due to hw issue) */ 100 - static const u32 ivb_primary_formats[] = { 101 - DRM_FORMAT_C8, 102 - DRM_FORMAT_RGB565, 103 - DRM_FORMAT_XRGB8888, 104 - DRM_FORMAT_XBGR8888, 105 - DRM_FORMAT_XRGB2101010, 106 - DRM_FORMAT_XBGR2101010, 107 - }; 108 - 109 - /* Primary plane formats for gen >= 4, except ivb */ 110 - static const u32 i965_primary_formats[] = { 111 - DRM_FORMAT_C8, 112 - DRM_FORMAT_RGB565, 113 - DRM_FORMAT_XRGB8888, 114 - DRM_FORMAT_XBGR8888, 115 - DRM_FORMAT_XRGB2101010, 116 - DRM_FORMAT_XBGR2101010, 117 - DRM_FORMAT_XBGR16161616F, 118 - }; 119 - 120 - /* Primary plane formats for vlv/chv */ 121 - static const u32 vlv_primary_formats[] = { 122 - DRM_FORMAT_C8, 123 - DRM_FORMAT_RGB565, 124 - DRM_FORMAT_XRGB8888, 125 - DRM_FORMAT_XBGR8888, 126 - DRM_FORMAT_ARGB8888, 127 - DRM_FORMAT_ABGR8888, 128 - DRM_FORMAT_XRGB2101010, 129 - DRM_FORMAT_XBGR2101010, 130 - DRM_FORMAT_ARGB2101010, 131 - DRM_FORMAT_ABGR2101010, 132 - DRM_FORMAT_XBGR16161616F, 133 - }; 134 - 135 - static const u64 i9xx_format_modifiers[] = { 136 - I915_FORMAT_MOD_X_TILED, 137 - DRM_FORMAT_MOD_LINEAR, 138 - DRM_FORMAT_MOD_INVALID 139 - }; 140 - 141 - /* Cursor formats */ 142 - 
static const u32 intel_cursor_formats[] = { 143 - DRM_FORMAT_ARGB8888, 144 - }; 145 - 146 - static const u64 cursor_format_modifiers[] = { 147 - DRM_FORMAT_MOD_LINEAR, 148 - DRM_FORMAT_MOD_INVALID 149 - }; 88 + #include "i9xx_plane.h" 150 89 151 90 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 152 91 struct intel_crtc_state *pipe_config); ··· 482 539 else 483 540 intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), 484 541 intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS); 485 - } 486 - 487 - static bool 488 - needs_modeset(const struct intel_crtc_state *state) 489 - { 490 - return drm_atomic_crtc_needs_modeset(&state->uapi); 491 542 } 492 543 493 544 static bool ··· 931 994 * set to 2. If requires to support 200Mhz refclk, we need to 932 995 * revisit this because n may not 1 anymore. 933 996 */ 934 - clock.n = 1, clock.m1 = 2; 997 + clock.n = 1; 998 + clock.m1 = 2; 935 999 target *= 5; /* fast clock */ 936 1000 937 1001 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { ··· 2470 2532 return offset_aligned; 2471 2533 } 2472 2534 2473 - static u32 intel_plane_compute_aligned_offset(int *x, int *y, 2474 - const struct intel_plane_state *state, 2475 - int color_plane) 2535 + u32 intel_plane_compute_aligned_offset(int *x, int *y, 2536 + const struct intel_plane_state *state, 2537 + int color_plane) 2476 2538 { 2477 2539 struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane); 2478 2540 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); ··· 3209 3271 } 3210 3272 } 3211 3273 3212 - static int 3274 + int 3213 3275 intel_plane_compute_gtt(struct intel_plane_state *plane_state) 3214 3276 { 3215 3277 const struct intel_framebuffer *fb = ··· 3489 3551 crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base); 3490 3552 } 3491 3553 3492 - static void fixup_active_planes(struct intel_crtc_state *crtc_state) 3554 + static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state) 3493 3555 { 3494 
3556 struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); 3495 3557 struct drm_plane *plane; ··· 3499 3561 * have been used on the same (or wrong) pipe. plane_mask uses 3500 3562 * unique ids, hence we can use that to reconstruct active_planes. 3501 3563 */ 3564 + crtc_state->enabled_planes = 0; 3502 3565 crtc_state->active_planes = 0; 3503 3566 3504 3567 drm_for_each_plane_mask(plane, &dev_priv->drm, 3505 - crtc_state->uapi.plane_mask) 3568 + crtc_state->uapi.plane_mask) { 3569 + crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id); 3506 3570 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); 3571 + } 3507 3572 } 3508 3573 3509 3574 static void intel_plane_disable_noatomic(struct intel_crtc *crtc, ··· 3524 3583 crtc->base.base.id, crtc->base.name); 3525 3584 3526 3585 intel_set_plane_visible(crtc_state, plane_state, false); 3527 - fixup_active_planes(crtc_state); 3586 + fixup_plane_bitmasks(crtc_state); 3528 3587 crtc_state->data_rate[plane->id] = 0; 3529 3588 crtc_state->min_cdclk[plane->id] = 0; 3530 3589 ··· 3552 3611 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 3553 3612 3554 3613 intel_disable_plane(plane, crtc_state); 3555 - } 3556 - 3557 - static struct intel_frontbuffer * 3558 - to_intel_frontbuffer(struct drm_framebuffer *fb) 3559 - { 3560 - return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL; 3561 3614 } 3562 3615 3563 3616 static void ··· 3996 4061 return ret; 3997 4062 3998 4063 return 0; 3999 - } 4000 - 4001 - static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state, 4002 - const struct intel_plane_state *plane_state, 4003 - unsigned int *num, unsigned int *den) 4004 - { 4005 - const struct drm_framebuffer *fb = plane_state->hw.fb; 4006 - unsigned int cpp = fb->format->cpp[0]; 4007 - 4008 - /* 4009 - * g4x bspec says 64bpp pixel rate can't exceed 80% 4010 - * of cdclk when the sprite plane is enabled on the 4011 - * same pipe. 
ilk/snb bspec says 64bpp pixel rate is 4012 - * never allowed to exceed 80% of cdclk. Let's just go 4013 - * with the ilk/snb limit always. 4014 - */ 4015 - if (cpp == 8) { 4016 - *num = 10; 4017 - *den = 8; 4018 - } else { 4019 - *num = 1; 4020 - *den = 1; 4021 - } 4022 - } 4023 - 4024 - static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, 4025 - const struct intel_plane_state *plane_state) 4026 - { 4027 - unsigned int pixel_rate; 4028 - unsigned int num, den; 4029 - 4030 - /* 4031 - * Note that crtc_state->pixel_rate accounts for both 4032 - * horizontal and vertical panel fitter downscaling factors. 4033 - * Pre-HSW bspec tells us to only consider the horizontal 4034 - * downscaling factor here. We ignore that and just consider 4035 - * both for simplicity. 4036 - */ 4037 - pixel_rate = crtc_state->pixel_rate; 4038 - 4039 - i9xx_plane_ratio(crtc_state, plane_state, &num, &den); 4040 - 4041 - /* two pixels per clock with double wide pipe */ 4042 - if (crtc_state->double_wide) 4043 - den *= 2; 4044 - 4045 - return DIV_ROUND_UP(pixel_rate * num, den); 4046 - } 4047 - 4048 - unsigned int 4049 - i9xx_plane_max_stride(struct intel_plane *plane, 4050 - u32 pixel_format, u64 modifier, 4051 - unsigned int rotation) 4052 - { 4053 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4054 - 4055 - if (!HAS_GMCH(dev_priv)) { 4056 - return 32*1024; 4057 - } else if (INTEL_GEN(dev_priv) >= 4) { 4058 - if (modifier == I915_FORMAT_MOD_X_TILED) 4059 - return 16*1024; 4060 - else 4061 - return 32*1024; 4062 - } else if (INTEL_GEN(dev_priv) >= 3) { 4063 - if (modifier == I915_FORMAT_MOD_X_TILED) 4064 - return 8*1024; 4065 - else 4066 - return 16*1024; 4067 - } else { 4068 - if (plane->i9xx_plane == PLANE_C) 4069 - return 4*1024; 4070 - else 4071 - return 8*1024; 4072 - } 4073 - } 4074 - 4075 - static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) 4076 - { 4077 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 4078 
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4079 - u32 dspcntr = 0; 4080 - 4081 - if (crtc_state->gamma_enable) 4082 - dspcntr |= DISPPLANE_GAMMA_ENABLE; 4083 - 4084 - if (crtc_state->csc_enable) 4085 - dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 4086 - 4087 - if (INTEL_GEN(dev_priv) < 5) 4088 - dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 4089 - 4090 - return dspcntr; 4091 - } 4092 - 4093 - static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 4094 - const struct intel_plane_state *plane_state) 4095 - { 4096 - struct drm_i915_private *dev_priv = 4097 - to_i915(plane_state->uapi.plane->dev); 4098 - const struct drm_framebuffer *fb = plane_state->hw.fb; 4099 - unsigned int rotation = plane_state->hw.rotation; 4100 - u32 dspcntr; 4101 - 4102 - dspcntr = DISPLAY_PLANE_ENABLE; 4103 - 4104 - if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) || 4105 - IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 4106 - dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 4107 - 4108 - switch (fb->format->format) { 4109 - case DRM_FORMAT_C8: 4110 - dspcntr |= DISPPLANE_8BPP; 4111 - break; 4112 - case DRM_FORMAT_XRGB1555: 4113 - dspcntr |= DISPPLANE_BGRX555; 4114 - break; 4115 - case DRM_FORMAT_ARGB1555: 4116 - dspcntr |= DISPPLANE_BGRA555; 4117 - break; 4118 - case DRM_FORMAT_RGB565: 4119 - dspcntr |= DISPPLANE_BGRX565; 4120 - break; 4121 - case DRM_FORMAT_XRGB8888: 4122 - dspcntr |= DISPPLANE_BGRX888; 4123 - break; 4124 - case DRM_FORMAT_XBGR8888: 4125 - dspcntr |= DISPPLANE_RGBX888; 4126 - break; 4127 - case DRM_FORMAT_ARGB8888: 4128 - dspcntr |= DISPPLANE_BGRA888; 4129 - break; 4130 - case DRM_FORMAT_ABGR8888: 4131 - dspcntr |= DISPPLANE_RGBA888; 4132 - break; 4133 - case DRM_FORMAT_XRGB2101010: 4134 - dspcntr |= DISPPLANE_BGRX101010; 4135 - break; 4136 - case DRM_FORMAT_XBGR2101010: 4137 - dspcntr |= DISPPLANE_RGBX101010; 4138 - break; 4139 - case DRM_FORMAT_ARGB2101010: 4140 - dspcntr |= DISPPLANE_BGRA101010; 4141 - break; 4142 - case DRM_FORMAT_ABGR2101010: 4143 - 
dspcntr |= DISPPLANE_RGBA101010; 4144 - break; 4145 - case DRM_FORMAT_XBGR16161616F: 4146 - dspcntr |= DISPPLANE_RGBX161616; 4147 - break; 4148 - default: 4149 - MISSING_CASE(fb->format->format); 4150 - return 0; 4151 - } 4152 - 4153 - if (INTEL_GEN(dev_priv) >= 4 && 4154 - fb->modifier == I915_FORMAT_MOD_X_TILED) 4155 - dspcntr |= DISPPLANE_TILED; 4156 - 4157 - if (rotation & DRM_MODE_ROTATE_180) 4158 - dspcntr |= DISPPLANE_ROTATE_180; 4159 - 4160 - if (rotation & DRM_MODE_REFLECT_X) 4161 - dspcntr |= DISPPLANE_MIRROR; 4162 - 4163 - return dspcntr; 4164 - } 4165 - 4166 - int i9xx_check_plane_surface(struct intel_plane_state *plane_state) 4167 - { 4168 - struct drm_i915_private *dev_priv = 4169 - to_i915(plane_state->uapi.plane->dev); 4170 - const struct drm_framebuffer *fb = plane_state->hw.fb; 4171 - int src_x, src_y, src_w; 4172 - u32 offset; 4173 - int ret; 4174 - 4175 - ret = intel_plane_compute_gtt(plane_state); 4176 - if (ret) 4177 - return ret; 4178 - 4179 - if (!plane_state->uapi.visible) 4180 - return 0; 4181 - 4182 - src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 4183 - src_x = plane_state->uapi.src.x1 >> 16; 4184 - src_y = plane_state->uapi.src.y1 >> 16; 4185 - 4186 - /* Undocumented hardware limit on i965/g4x/vlv/chv */ 4187 - if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) 4188 - return -EINVAL; 4189 - 4190 - intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 4191 - 4192 - if (INTEL_GEN(dev_priv) >= 4) 4193 - offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 4194 - plane_state, 0); 4195 - else 4196 - offset = 0; 4197 - 4198 - /* 4199 - * Put the final coordinates back so that the src 4200 - * coordinate checks will see the right values. 
4201 - */ 4202 - drm_rect_translate_to(&plane_state->uapi.src, 4203 - src_x << 16, src_y << 16); 4204 - 4205 - /* HSW/BDW do this automagically in hardware */ 4206 - if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 4207 - unsigned int rotation = plane_state->hw.rotation; 4208 - int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 4209 - int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 4210 - 4211 - if (rotation & DRM_MODE_ROTATE_180) { 4212 - src_x += src_w - 1; 4213 - src_y += src_h - 1; 4214 - } else if (rotation & DRM_MODE_REFLECT_X) { 4215 - src_x += src_w - 1; 4216 - } 4217 - } 4218 - 4219 - plane_state->color_plane[0].offset = offset; 4220 - plane_state->color_plane[0].x = src_x; 4221 - plane_state->color_plane[0].y = src_y; 4222 - 4223 - return 0; 4224 - } 4225 - 4226 - static bool i9xx_plane_has_windowing(struct intel_plane *plane) 4227 - { 4228 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4229 - enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4230 - 4231 - if (IS_CHERRYVIEW(dev_priv)) 4232 - return i9xx_plane == PLANE_B; 4233 - else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 4234 - return false; 4235 - else if (IS_GEN(dev_priv, 4)) 4236 - return i9xx_plane == PLANE_C; 4237 - else 4238 - return i9xx_plane == PLANE_B || 4239 - i9xx_plane == PLANE_C; 4240 - } 4241 - 4242 - static int 4243 - i9xx_plane_check(struct intel_crtc_state *crtc_state, 4244 - struct intel_plane_state *plane_state) 4245 - { 4246 - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 4247 - int ret; 4248 - 4249 - ret = chv_plane_check_rotation(plane_state); 4250 - if (ret) 4251 - return ret; 4252 - 4253 - ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, 4254 - DRM_PLANE_HELPER_NO_SCALING, 4255 - DRM_PLANE_HELPER_NO_SCALING, 4256 - i9xx_plane_has_windowing(plane)); 4257 - if (ret) 4258 - return ret; 4259 - 4260 - ret = i9xx_check_plane_surface(plane_state); 4261 - if (ret) 4262 - return ret; 4263 - 4264 - 
if (!plane_state->uapi.visible) 4265 - return 0; 4266 - 4267 - ret = intel_plane_check_src_coordinates(plane_state); 4268 - if (ret) 4269 - return ret; 4270 - 4271 - plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); 4272 - 4273 - return 0; 4274 - } 4275 - 4276 - static void i9xx_update_plane(struct intel_plane *plane, 4277 - const struct intel_crtc_state *crtc_state, 4278 - const struct intel_plane_state *plane_state) 4279 - { 4280 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4281 - enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4282 - u32 linear_offset; 4283 - int x = plane_state->color_plane[0].x; 4284 - int y = plane_state->color_plane[0].y; 4285 - int crtc_x = plane_state->uapi.dst.x1; 4286 - int crtc_y = plane_state->uapi.dst.y1; 4287 - int crtc_w = drm_rect_width(&plane_state->uapi.dst); 4288 - int crtc_h = drm_rect_height(&plane_state->uapi.dst); 4289 - unsigned long irqflags; 4290 - u32 dspaddr_offset; 4291 - u32 dspcntr; 4292 - 4293 - dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); 4294 - 4295 - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 4296 - 4297 - if (INTEL_GEN(dev_priv) >= 4) 4298 - dspaddr_offset = plane_state->color_plane[0].offset; 4299 - else 4300 - dspaddr_offset = linear_offset; 4301 - 4302 - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 4303 - 4304 - intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), 4305 - plane_state->color_plane[0].stride); 4306 - 4307 - if (INTEL_GEN(dev_priv) < 4) { 4308 - /* 4309 - * PLANE_A doesn't actually have a full window 4310 - * generator but let's assume we still need to 4311 - * program whatever is there. 
4312 - */ 4313 - intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane), 4314 - (crtc_y << 16) | crtc_x); 4315 - intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), 4316 - ((crtc_h - 1) << 16) | (crtc_w - 1)); 4317 - } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { 4318 - intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), 4319 - (crtc_y << 16) | crtc_x); 4320 - intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), 4321 - ((crtc_h - 1) << 16) | (crtc_w - 1)); 4322 - intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0); 4323 - } 4324 - 4325 - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 4326 - intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane), 4327 - (y << 16) | x); 4328 - } else if (INTEL_GEN(dev_priv) >= 4) { 4329 - intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane), 4330 - linear_offset); 4331 - intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane), 4332 - (y << 16) | x); 4333 - } 4334 - 4335 - /* 4336 - * The control register self-arms if the plane was previously 4337 - * disabled. Try to make the plane enable atomic by writing 4338 - * the control register just before the surface register. 
4339 - */ 4340 - intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); 4341 - if (INTEL_GEN(dev_priv) >= 4) 4342 - intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 4343 - intel_plane_ggtt_offset(plane_state) + dspaddr_offset); 4344 - else 4345 - intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 4346 - intel_plane_ggtt_offset(plane_state) + dspaddr_offset); 4347 - 4348 - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 4349 - } 4350 - 4351 - static void i9xx_disable_plane(struct intel_plane *plane, 4352 - const struct intel_crtc_state *crtc_state) 4353 - { 4354 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4355 - enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4356 - unsigned long irqflags; 4357 - u32 dspcntr; 4358 - 4359 - /* 4360 - * DSPCNTR pipe gamma enable on g4x+ and pipe csc 4361 - * enable on ilk+ affect the pipe bottom color as 4362 - * well, so we must configure them even if the plane 4363 - * is disabled. 4364 - * 4365 - * On pre-g4x there is no way to gamma correct the 4366 - * pipe bottom color but we'll keep on doing this 4367 - * anyway so that the crtc state readout works correctly. 
4368 - */ 4369 - dspcntr = i9xx_plane_ctl_crtc(crtc_state); 4370 - 4371 - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 4372 - 4373 - intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); 4374 - if (INTEL_GEN(dev_priv) >= 4) 4375 - intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0); 4376 - else 4377 - intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0); 4378 - 4379 - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 4380 - } 4381 - 4382 - static bool i9xx_plane_get_hw_state(struct intel_plane *plane, 4383 - enum pipe *pipe) 4384 - { 4385 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 4386 - enum intel_display_power_domain power_domain; 4387 - enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 4388 - intel_wakeref_t wakeref; 4389 - bool ret; 4390 - u32 val; 4391 - 4392 - /* 4393 - * Not 100% correct for planes that can move between pipes, 4394 - * but that's only the case for gen2-4 which don't have any 4395 - * display power wells. 4396 - */ 4397 - power_domain = POWER_DOMAIN_PIPE(plane->pipe); 4398 - wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 4399 - if (!wakeref) 4400 - return false; 4401 - 4402 - val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); 4403 - 4404 - ret = val & DISPLAY_PLANE_ENABLE; 4405 - 4406 - if (INTEL_GEN(dev_priv) >= 5) 4407 - *pipe = plane->pipe; 4408 - else 4409 - *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 4410 - DISPPLANE_SEL_PIPE_SHIFT; 4411 - 4412 - intel_display_power_put(dev_priv, power_domain, wakeref); 4413 - 4414 - return ret; 4415 4064 } 4416 4065 4417 4066 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) ··· 5989 6470 if (!old_crtc_state->ips_enabled) 5990 6471 return false; 5991 6472 5992 - if (needs_modeset(new_crtc_state)) 6473 + if (intel_crtc_needs_modeset(new_crtc_state)) 5993 6474 return true; 5994 6475 5995 6476 /* ··· 6016 6497 if (!new_crtc_state->ips_enabled) 6017 6498 return false; 6018 6499 6019 - if (needs_modeset(new_crtc_state)) 6500 
+ if (intel_crtc_needs_modeset(new_crtc_state)) 6020 6501 return true; 6021 6502 6022 6503 /* ··· 6069 6550 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, 6070 6551 const struct intel_crtc_state *new_crtc_state) 6071 6552 { 6072 - return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) && 6553 + return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) && 6073 6554 new_crtc_state->active_planes; 6074 6555 } 6075 6556 ··· 6077 6558 const struct intel_crtc_state *new_crtc_state) 6078 6559 { 6079 6560 return old_crtc_state->active_planes && 6080 - (!new_crtc_state->active_planes || needs_modeset(new_crtc_state)); 6561 + (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)); 6081 6562 } 6082 6563 6083 6564 static void intel_post_plane_update(struct intel_atomic_state *state, ··· 6200 6681 * If we're doing a modeset we don't need to do any 6201 6682 * pre-vblank watermark programming here. 6202 6683 */ 6203 - if (!needs_modeset(new_crtc_state)) { 6684 + if (!intel_crtc_needs_modeset(new_crtc_state)) { 6204 6685 /* 6205 6686 * For platforms that support atomic watermarks, program the 6206 6687 * 'intermediate' watermarks immediately. 
On pre-gen9 platforms, these ··· 7094 7575 enum intel_display_power_domain domain; 7095 7576 u64 domains, new_domains, old_domains; 7096 7577 7097 - old_domains = crtc->enabled_power_domains; 7098 - crtc->enabled_power_domains = new_domains = 7099 - get_crtc_power_domains(crtc_state); 7578 + domains = get_crtc_power_domains(crtc_state); 7100 7579 7101 - domains = new_domains & ~old_domains; 7580 + new_domains = domains & ~crtc->enabled_power_domains.mask; 7581 + old_domains = crtc->enabled_power_domains.mask & ~domains; 7102 7582 7103 - for_each_power_domain(domain, domains) 7104 - intel_display_power_get(dev_priv, domain); 7583 + for_each_power_domain(domain, new_domains) 7584 + intel_display_power_get_in_set(dev_priv, 7585 + &crtc->enabled_power_domains, 7586 + domain); 7105 7587 7106 - return old_domains & ~new_domains; 7588 + return old_domains; 7107 7589 } 7108 7590 7109 - static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 7110 - u64 domains) 7591 + static void modeset_put_crtc_power_domains(struct intel_crtc *crtc, 7592 + u64 domains) 7111 7593 { 7112 - enum intel_display_power_domain domain; 7113 - 7114 - for_each_power_domain(domain, domains) 7115 - intel_display_power_put_unchecked(dev_priv, domain); 7594 + intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), 7595 + &crtc->enabled_power_domains, 7596 + domains); 7116 7597 } 7117 7598 7118 7599 static void valleyview_crtc_enable(struct intel_atomic_state *state, ··· 7308 7789 to_intel_dbuf_state(dev_priv->dbuf.obj.state); 7309 7790 struct intel_crtc_state *crtc_state = 7310 7791 to_intel_crtc_state(crtc->base.state); 7311 - enum intel_display_power_domain domain; 7312 7792 struct intel_plane *plane; 7313 7793 struct drm_atomic_state *state; 7314 7794 struct intel_crtc_state *temp_crtc_state; 7315 7795 enum pipe pipe = crtc->pipe; 7316 - u64 domains; 7317 7796 int ret; 7318 7797 7319 7798 if (!crtc_state->hw.active) ··· 7367 7850 intel_update_watermarks(crtc); 7368 7851 
intel_disable_shared_dpll(crtc_state); 7369 7852 7370 - domains = crtc->enabled_power_domains; 7371 - for_each_power_domain(domain, domains) 7372 - intel_display_power_put_unchecked(dev_priv, domain); 7373 - crtc->enabled_power_domains = 0; 7853 + intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains); 7374 7854 7375 7855 dev_priv->active_pipes &= ~BIT(pipe); 7376 7856 cdclk_state->min_cdclk[pipe] = 0; ··· 10740 11226 10741 11227 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 10742 11228 struct intel_crtc_state *pipe_config, 10743 - u64 *power_domain_mask, 10744 - intel_wakeref_t *wakerefs) 11229 + struct intel_display_power_domain_set *power_domain_set) 10745 11230 { 10746 11231 struct drm_device *dev = crtc->base.dev; 10747 11232 struct drm_i915_private *dev_priv = to_i915(dev); 10748 - enum intel_display_power_domain power_domain; 10749 11233 unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP); 10750 11234 unsigned long enabled_panel_transcoders = 0; 10751 11235 enum transcoder panel_transcoder; 10752 - intel_wakeref_t wf; 10753 11236 u32 tmp; 10754 11237 10755 11238 if (INTEL_GEN(dev_priv) >= 11) ··· 10817 11306 drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && 10818 11307 enabled_panel_transcoders != BIT(TRANSCODER_EDP)); 10819 11308 10820 - power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 10821 - drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain)); 10822 - 10823 - wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10824 - if (!wf) 11309 + if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 11310 + POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 10825 11311 return false; 10826 - 10827 - wakerefs[power_domain] = wf; 10828 - *power_domain_mask |= BIT_ULL(power_domain); 10829 11312 10830 11313 tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder)); 10831 11314 ··· 10828 11323 10829 11324 static bool 
bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 10830 11325 struct intel_crtc_state *pipe_config, 10831 - u64 *power_domain_mask, 10832 - intel_wakeref_t *wakerefs) 11326 + struct intel_display_power_domain_set *power_domain_set) 10833 11327 { 10834 11328 struct drm_device *dev = crtc->base.dev; 10835 11329 struct drm_i915_private *dev_priv = to_i915(dev); 10836 - enum intel_display_power_domain power_domain; 10837 11330 enum transcoder cpu_transcoder; 10838 - intel_wakeref_t wf; 10839 11331 enum port port; 10840 11332 u32 tmp; 10841 11333 ··· 10842 11340 else 10843 11341 cpu_transcoder = TRANSCODER_DSI_C; 10844 11342 10845 - power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10846 - drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain)); 10847 - 10848 - wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10849 - if (!wf) 11343 + if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, 11344 + POWER_DOMAIN_TRANSCODER(cpu_transcoder))) 10850 11345 continue; 10851 - 10852 - wakerefs[power_domain] = wf; 10853 - *power_domain_mask |= BIT_ULL(power_domain); 10854 11346 10855 11347 /* 10856 11348 * The PLL needs to be enabled with a valid divider ··· 10928 11432 struct intel_crtc_state *pipe_config) 10929 11433 { 10930 11434 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10931 - intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf; 10932 - enum intel_display_power_domain power_domain; 10933 - u64 power_domain_mask; 11435 + struct intel_display_power_domain_set power_domain_set = { }; 10934 11436 bool active; 10935 11437 u32 tmp; 10936 11438 10937 11439 pipe_config->master_transcoder = INVALID_TRANSCODER; 10938 11440 10939 - power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10940 - wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 10941 - if (!wf) 11441 + if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set, 11442 + POWER_DOMAIN_PIPE(crtc->pipe))) 10942 11443 return false; 10943 - 
10944 - wakerefs[power_domain] = wf; 10945 - power_domain_mask = BIT_ULL(power_domain); 10946 11444 10947 11445 pipe_config->shared_dpll = NULL; 10948 11446 10949 - active = hsw_get_transcoder_state(crtc, pipe_config, 10950 - &power_domain_mask, wakerefs); 11447 + active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set); 10951 11448 10952 11449 if (IS_GEN9_LP(dev_priv) && 10953 - bxt_get_dsi_transcoder_state(crtc, pipe_config, 10954 - &power_domain_mask, wakerefs)) { 11450 + bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) { 10955 11451 drm_WARN_ON(&dev_priv->drm, active); 10956 11452 active = true; 10957 11453 } ··· 11007 11519 pipe_config->ips_linetime = 11008 11520 REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); 11009 11521 11010 - power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 11011 - drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain)); 11012 - 11013 - wf = intel_display_power_get_if_enabled(dev_priv, power_domain); 11014 - if (wf) { 11015 - wakerefs[power_domain] = wf; 11016 - power_domain_mask |= BIT_ULL(power_domain); 11017 - 11522 + if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set, 11523 + POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { 11018 11524 if (INTEL_GEN(dev_priv) >= 9) 11019 11525 skl_get_pfit_config(pipe_config); 11020 11526 else ··· 11042 11560 } 11043 11561 11044 11562 out: 11045 - for_each_power_domain(power_domain, power_domain_mask) 11046 - intel_display_power_put(dev_priv, 11047 - power_domain, wakerefs[power_domain]); 11563 + intel_display_power_put_all_in_set(dev_priv, &power_domain_set); 11048 11564 11049 11565 return active; 11050 11566 } ··· 11060 11580 intel_crtc_readout_derived_state(crtc_state); 11061 11581 11062 11582 return true; 11063 - } 11064 - 11065 - static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 11066 - { 11067 - struct drm_i915_private *dev_priv = 11068 - to_i915(plane_state->uapi.plane->dev); 11069 - const struct 
drm_framebuffer *fb = plane_state->hw.fb; 11070 - const struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11071 - u32 base; 11072 - 11073 - if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) 11074 - base = sg_dma_address(obj->mm.pages->sgl); 11075 - else 11076 - base = intel_plane_ggtt_offset(plane_state); 11077 - 11078 - return base + plane_state->color_plane[0].offset; 11079 - } 11080 - 11081 - static u32 intel_cursor_position(const struct intel_plane_state *plane_state) 11082 - { 11083 - int x = plane_state->uapi.dst.x1; 11084 - int y = plane_state->uapi.dst.y1; 11085 - u32 pos = 0; 11086 - 11087 - if (x < 0) { 11088 - pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 11089 - x = -x; 11090 - } 11091 - pos |= x << CURSOR_X_SHIFT; 11092 - 11093 - if (y < 0) { 11094 - pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 11095 - y = -y; 11096 - } 11097 - pos |= y << CURSOR_Y_SHIFT; 11098 - 11099 - return pos; 11100 - } 11101 - 11102 - static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) 11103 - { 11104 - const struct drm_mode_config *config = 11105 - &plane_state->uapi.plane->dev->mode_config; 11106 - int width = drm_rect_width(&plane_state->uapi.dst); 11107 - int height = drm_rect_height(&plane_state->uapi.dst); 11108 - 11109 - return width > 0 && width <= config->cursor_width && 11110 - height > 0 && height <= config->cursor_height; 11111 - } 11112 - 11113 - static int intel_cursor_check_surface(struct intel_plane_state *plane_state) 11114 - { 11115 - struct drm_i915_private *dev_priv = 11116 - to_i915(plane_state->uapi.plane->dev); 11117 - unsigned int rotation = plane_state->hw.rotation; 11118 - int src_x, src_y; 11119 - u32 offset; 11120 - int ret; 11121 - 11122 - ret = intel_plane_compute_gtt(plane_state); 11123 - if (ret) 11124 - return ret; 11125 - 11126 - if (!plane_state->uapi.visible) 11127 - return 0; 11128 - 11129 - src_x = plane_state->uapi.src.x1 >> 16; 11130 - src_y = plane_state->uapi.src.y1 >> 16; 11131 - 11132 - 
intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 11133 - offset = intel_plane_compute_aligned_offset(&src_x, &src_y, 11134 - plane_state, 0); 11135 - 11136 - if (src_x != 0 || src_y != 0) { 11137 - drm_dbg_kms(&dev_priv->drm, 11138 - "Arbitrary cursor panning not supported\n"); 11139 - return -EINVAL; 11140 - } 11141 - 11142 - /* 11143 - * Put the final coordinates back so that the src 11144 - * coordinate checks will see the right values. 11145 - */ 11146 - drm_rect_translate_to(&plane_state->uapi.src, 11147 - src_x << 16, src_y << 16); 11148 - 11149 - /* ILK+ do this automagically in hardware */ 11150 - if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { 11151 - const struct drm_framebuffer *fb = plane_state->hw.fb; 11152 - int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 11153 - int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 11154 - 11155 - offset += (src_h * src_w - 1) * fb->format->cpp[0]; 11156 - } 11157 - 11158 - plane_state->color_plane[0].offset = offset; 11159 - plane_state->color_plane[0].x = src_x; 11160 - plane_state->color_plane[0].y = src_y; 11161 - 11162 - return 0; 11163 - } 11164 - 11165 - static int intel_check_cursor(struct intel_crtc_state *crtc_state, 11166 - struct intel_plane_state *plane_state) 11167 - { 11168 - const struct drm_framebuffer *fb = plane_state->hw.fb; 11169 - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 11170 - const struct drm_rect src = plane_state->uapi.src; 11171 - const struct drm_rect dst = plane_state->uapi.dst; 11172 - int ret; 11173 - 11174 - if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { 11175 - drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n"); 11176 - return -EINVAL; 11177 - } 11178 - 11179 - ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, 11180 - DRM_PLANE_HELPER_NO_SCALING, 11181 - DRM_PLANE_HELPER_NO_SCALING, 11182 - true); 11183 - if (ret) 11184 - return ret; 11185 - 11186 - /* Use the unclipped src/dst rectangles, which we 
program to hw */ 11187 - plane_state->uapi.src = src; 11188 - plane_state->uapi.dst = dst; 11189 - 11190 - ret = intel_cursor_check_surface(plane_state); 11191 - if (ret) 11192 - return ret; 11193 - 11194 - if (!plane_state->uapi.visible) 11195 - return 0; 11196 - 11197 - ret = intel_plane_check_src_coordinates(plane_state); 11198 - if (ret) 11199 - return ret; 11200 - 11201 - return 0; 11202 - } 11203 - 11204 - static unsigned int 11205 - i845_cursor_max_stride(struct intel_plane *plane, 11206 - u32 pixel_format, u64 modifier, 11207 - unsigned int rotation) 11208 - { 11209 - return 2048; 11210 - } 11211 - 11212 - static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11213 - { 11214 - u32 cntl = 0; 11215 - 11216 - if (crtc_state->gamma_enable) 11217 - cntl |= CURSOR_GAMMA_ENABLE; 11218 - 11219 - return cntl; 11220 - } 11221 - 11222 - static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 11223 - const struct intel_plane_state *plane_state) 11224 - { 11225 - return CURSOR_ENABLE | 11226 - CURSOR_FORMAT_ARGB | 11227 - CURSOR_STRIDE(plane_state->color_plane[0].stride); 11228 - } 11229 - 11230 - static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 11231 - { 11232 - int width = drm_rect_width(&plane_state->uapi.dst); 11233 - 11234 - /* 11235 - * 845g/865g are only limited by the width of their cursors, 11236 - * the height is arbitrary up to the precision of the register. 
11237 - */ 11238 - return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 11239 - } 11240 - 11241 - static int i845_check_cursor(struct intel_crtc_state *crtc_state, 11242 - struct intel_plane_state *plane_state) 11243 - { 11244 - const struct drm_framebuffer *fb = plane_state->hw.fb; 11245 - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 11246 - int ret; 11247 - 11248 - ret = intel_check_cursor(crtc_state, plane_state); 11249 - if (ret) 11250 - return ret; 11251 - 11252 - /* if we want to turn off the cursor ignore width and height */ 11253 - if (!fb) 11254 - return 0; 11255 - 11256 - /* Check for which cursor types we support */ 11257 - if (!i845_cursor_size_ok(plane_state)) { 11258 - drm_dbg_kms(&i915->drm, 11259 - "Cursor dimension %dx%d not supported\n", 11260 - drm_rect_width(&plane_state->uapi.dst), 11261 - drm_rect_height(&plane_state->uapi.dst)); 11262 - return -EINVAL; 11263 - } 11264 - 11265 - drm_WARN_ON(&i915->drm, plane_state->uapi.visible && 11266 - plane_state->color_plane[0].stride != fb->pitches[0]); 11267 - 11268 - switch (fb->pitches[0]) { 11269 - case 256: 11270 - case 512: 11271 - case 1024: 11272 - case 2048: 11273 - break; 11274 - default: 11275 - drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n", 11276 - fb->pitches[0]); 11277 - return -EINVAL; 11278 - } 11279 - 11280 - plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 11281 - 11282 - return 0; 11283 - } 11284 - 11285 - static void i845_update_cursor(struct intel_plane *plane, 11286 - const struct intel_crtc_state *crtc_state, 11287 - const struct intel_plane_state *plane_state) 11288 - { 11289 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11290 - u32 cntl = 0, base = 0, pos = 0, size = 0; 11291 - unsigned long irqflags; 11292 - 11293 - if (plane_state && plane_state->uapi.visible) { 11294 - unsigned int width = drm_rect_width(&plane_state->uapi.dst); 11295 - unsigned int height = drm_rect_height(&plane_state->uapi.dst); 
11296 - 11297 - cntl = plane_state->ctl | 11298 - i845_cursor_ctl_crtc(crtc_state); 11299 - 11300 - size = (height << 12) | width; 11301 - 11302 - base = intel_cursor_base(plane_state); 11303 - pos = intel_cursor_position(plane_state); 11304 - } 11305 - 11306 - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11307 - 11308 - /* On these chipsets we can only modify the base/size/stride 11309 - * whilst the cursor is disabled. 11310 - */ 11311 - if (plane->cursor.base != base || 11312 - plane->cursor.size != size || 11313 - plane->cursor.cntl != cntl) { 11314 - intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0); 11315 - intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base); 11316 - intel_de_write_fw(dev_priv, CURSIZE, size); 11317 - intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); 11318 - intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl); 11319 - 11320 - plane->cursor.base = base; 11321 - plane->cursor.size = size; 11322 - plane->cursor.cntl = cntl; 11323 - } else { 11324 - intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); 11325 - } 11326 - 11327 - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11328 - } 11329 - 11330 - static void i845_disable_cursor(struct intel_plane *plane, 11331 - const struct intel_crtc_state *crtc_state) 11332 - { 11333 - i845_update_cursor(plane, crtc_state, NULL); 11334 - } 11335 - 11336 - static bool i845_cursor_get_hw_state(struct intel_plane *plane, 11337 - enum pipe *pipe) 11338 - { 11339 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11340 - enum intel_display_power_domain power_domain; 11341 - intel_wakeref_t wakeref; 11342 - bool ret; 11343 - 11344 - power_domain = POWER_DOMAIN_PIPE(PIPE_A); 11345 - wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11346 - if (!wakeref) 11347 - return false; 11348 - 11349 - ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE; 11350 - 11351 - *pipe = PIPE_A; 11352 - 11353 - intel_display_power_put(dev_priv, power_domain, wakeref); 11354 - 11355 
- return ret; 11356 - } 11357 - 11358 - static unsigned int 11359 - i9xx_cursor_max_stride(struct intel_plane *plane, 11360 - u32 pixel_format, u64 modifier, 11361 - unsigned int rotation) 11362 - { 11363 - return plane->base.dev->mode_config.cursor_width * 4; 11364 - } 11365 - 11366 - static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) 11367 - { 11368 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 11369 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11370 - u32 cntl = 0; 11371 - 11372 - if (INTEL_GEN(dev_priv) >= 11) 11373 - return cntl; 11374 - 11375 - if (crtc_state->gamma_enable) 11376 - cntl = MCURSOR_GAMMA_ENABLE; 11377 - 11378 - if (crtc_state->csc_enable) 11379 - cntl |= MCURSOR_PIPE_CSC_ENABLE; 11380 - 11381 - if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 11382 - cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 11383 - 11384 - return cntl; 11385 - } 11386 - 11387 - static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 11388 - const struct intel_plane_state *plane_state) 11389 - { 11390 - struct drm_i915_private *dev_priv = 11391 - to_i915(plane_state->uapi.plane->dev); 11392 - u32 cntl = 0; 11393 - 11394 - if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) 11395 - cntl |= MCURSOR_TRICKLE_FEED_DISABLE; 11396 - 11397 - switch (drm_rect_width(&plane_state->uapi.dst)) { 11398 - case 64: 11399 - cntl |= MCURSOR_MODE_64_ARGB_AX; 11400 - break; 11401 - case 128: 11402 - cntl |= MCURSOR_MODE_128_ARGB_AX; 11403 - break; 11404 - case 256: 11405 - cntl |= MCURSOR_MODE_256_ARGB_AX; 11406 - break; 11407 - default: 11408 - MISSING_CASE(drm_rect_width(&plane_state->uapi.dst)); 11409 - return 0; 11410 - } 11411 - 11412 - if (plane_state->hw.rotation & DRM_MODE_ROTATE_180) 11413 - cntl |= MCURSOR_ROTATE_180; 11414 - 11415 - return cntl; 11416 - } 11417 - 11418 - static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) 11419 - { 11420 - struct drm_i915_private *dev_priv = 11421 - 
to_i915(plane_state->uapi.plane->dev); 11422 - int width = drm_rect_width(&plane_state->uapi.dst); 11423 - int height = drm_rect_height(&plane_state->uapi.dst); 11424 - 11425 - if (!intel_cursor_size_ok(plane_state)) 11426 - return false; 11427 - 11428 - /* Cursor width is limited to a few power-of-two sizes */ 11429 - switch (width) { 11430 - case 256: 11431 - case 128: 11432 - case 64: 11433 - break; 11434 - default: 11435 - return false; 11436 - } 11437 - 11438 - /* 11439 - * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor 11440 - * height from 8 lines up to the cursor width, when the 11441 - * cursor is not rotated. Everything else requires square 11442 - * cursors. 11443 - */ 11444 - if (HAS_CUR_FBC(dev_priv) && 11445 - plane_state->hw.rotation & DRM_MODE_ROTATE_0) { 11446 - if (height < 8 || height > width) 11447 - return false; 11448 - } else { 11449 - if (height != width) 11450 - return false; 11451 - } 11452 - 11453 - return true; 11454 - } 11455 - 11456 - static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, 11457 - struct intel_plane_state *plane_state) 11458 - { 11459 - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 11460 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11461 - const struct drm_framebuffer *fb = plane_state->hw.fb; 11462 - enum pipe pipe = plane->pipe; 11463 - int ret; 11464 - 11465 - ret = intel_check_cursor(crtc_state, plane_state); 11466 - if (ret) 11467 - return ret; 11468 - 11469 - /* if we want to turn off the cursor ignore width and height */ 11470 - if (!fb) 11471 - return 0; 11472 - 11473 - /* Check for which cursor types we support */ 11474 - if (!i9xx_cursor_size_ok(plane_state)) { 11475 - drm_dbg(&dev_priv->drm, 11476 - "Cursor dimension %dx%d not supported\n", 11477 - drm_rect_width(&plane_state->uapi.dst), 11478 - drm_rect_height(&plane_state->uapi.dst)); 11479 - return -EINVAL; 11480 - } 11481 - 11482 - drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible && 11483 - 
plane_state->color_plane[0].stride != fb->pitches[0]); 11484 - 11485 - if (fb->pitches[0] != 11486 - drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { 11487 - drm_dbg_kms(&dev_priv->drm, 11488 - "Invalid cursor stride (%u) (cursor width %d)\n", 11489 - fb->pitches[0], 11490 - drm_rect_width(&plane_state->uapi.dst)); 11491 - return -EINVAL; 11492 - } 11493 - 11494 - /* 11495 - * There's something wrong with the cursor on CHV pipe C. 11496 - * If it straddles the left edge of the screen then 11497 - * moving it away from the edge or disabling it often 11498 - * results in a pipe underrun, and often that can lead to 11499 - * dead pipe (constant underrun reported, and it scans 11500 - * out just a solid color). To recover from that, the 11501 - * display power well must be turned off and on again. 11502 - * Refuse the put the cursor into that compromised position. 11503 - */ 11504 - if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && 11505 - plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) { 11506 - drm_dbg_kms(&dev_priv->drm, 11507 - "CHV cursor C not allowed to straddle the left screen edge\n"); 11508 - return -EINVAL; 11509 - } 11510 - 11511 - plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); 11512 - 11513 - return 0; 11514 - } 11515 - 11516 - static void i9xx_update_cursor(struct intel_plane *plane, 11517 - const struct intel_crtc_state *crtc_state, 11518 - const struct intel_plane_state *plane_state) 11519 - { 11520 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11521 - enum pipe pipe = plane->pipe; 11522 - u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; 11523 - unsigned long irqflags; 11524 - 11525 - if (plane_state && plane_state->uapi.visible) { 11526 - unsigned width = drm_rect_width(&plane_state->uapi.dst); 11527 - unsigned height = drm_rect_height(&plane_state->uapi.dst); 11528 - 11529 - cntl = plane_state->ctl | 11530 - i9xx_cursor_ctl_crtc(crtc_state); 11531 - 11532 - if (width != height) 11533 - fbc_ctl = 
CUR_FBC_CTL_EN | (height - 1); 11534 - 11535 - base = intel_cursor_base(plane_state); 11536 - pos = intel_cursor_position(plane_state); 11537 - } 11538 - 11539 - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 11540 - 11541 - /* 11542 - * On some platforms writing CURCNTR first will also 11543 - * cause CURPOS to be armed by the CURBASE write. 11544 - * Without the CURCNTR write the CURPOS write would 11545 - * arm itself. Thus we always update CURCNTR before 11546 - * CURPOS. 11547 - * 11548 - * On other platforms CURPOS always requires the 11549 - * CURBASE write to arm the update. Additonally 11550 - * a write to any of the cursor register will cancel 11551 - * an already armed cursor update. Thus leaving out 11552 - * the CURBASE write after CURPOS could lead to a 11553 - * cursor that doesn't appear to move, or even change 11554 - * shape. Thus we always write CURBASE. 11555 - * 11556 - * The other registers are armed by by the CURBASE write 11557 - * except when the plane is getting enabled at which time 11558 - * the CURCNTR write arms the update. 
11559 - */ 11560 - 11561 - if (INTEL_GEN(dev_priv) >= 9) 11562 - skl_write_cursor_wm(plane, crtc_state); 11563 - 11564 - if (!needs_modeset(crtc_state)) 11565 - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0); 11566 - 11567 - if (plane->cursor.base != base || 11568 - plane->cursor.size != fbc_ctl || 11569 - plane->cursor.cntl != cntl) { 11570 - if (HAS_CUR_FBC(dev_priv)) 11571 - intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe), 11572 - fbc_ctl); 11573 - intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl); 11574 - intel_de_write_fw(dev_priv, CURPOS(pipe), pos); 11575 - intel_de_write_fw(dev_priv, CURBASE(pipe), base); 11576 - 11577 - plane->cursor.base = base; 11578 - plane->cursor.size = fbc_ctl; 11579 - plane->cursor.cntl = cntl; 11580 - } else { 11581 - intel_de_write_fw(dev_priv, CURPOS(pipe), pos); 11582 - intel_de_write_fw(dev_priv, CURBASE(pipe), base); 11583 - } 11584 - 11585 - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 11586 - } 11587 - 11588 - static void i9xx_disable_cursor(struct intel_plane *plane, 11589 - const struct intel_crtc_state *crtc_state) 11590 - { 11591 - i9xx_update_cursor(plane, crtc_state, NULL); 11592 - } 11593 - 11594 - static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, 11595 - enum pipe *pipe) 11596 - { 11597 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 11598 - enum intel_display_power_domain power_domain; 11599 - intel_wakeref_t wakeref; 11600 - bool ret; 11601 - u32 val; 11602 - 11603 - /* 11604 - * Not 100% correct for planes that can move between pipes, 11605 - * but that's only the case for gen2-3 which don't have any 11606 - * display power wells. 
11607 - */ 11608 - power_domain = POWER_DOMAIN_PIPE(plane->pipe); 11609 - wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); 11610 - if (!wakeref) 11611 - return false; 11612 - 11613 - val = intel_de_read(dev_priv, CURCNTR(plane->pipe)); 11614 - 11615 - ret = val & MCURSOR_MODE; 11616 - 11617 - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 11618 - *pipe = plane->pipe; 11619 - else 11620 - *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> 11621 - MCURSOR_PIPE_SELECT_SHIFT; 11622 - 11623 - intel_display_power_put(dev_priv, power_domain, wakeref); 11624 - 11625 - return ret; 11626 11583 } 11627 11584 11628 11585 /* VESA 640x480x72Hz mode to set on the pipe */ ··· 11568 12651 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 11569 12652 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 11570 12653 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11571 - bool mode_changed = needs_modeset(crtc_state); 12654 + bool mode_changed = intel_crtc_needs_modeset(crtc_state); 11572 12655 bool was_crtc_enabled = old_crtc_state->hw.active; 11573 12656 bool is_crtc_enabled = crtc_state->hw.active; 11574 12657 bool turn_off, turn_on, visible, was_visible; ··· 11759 12842 11760 12843 plane_state->planar_linked_plane = NULL; 11761 12844 if (plane_state->planar_slave && !plane_state->uapi.visible) { 12845 + crtc_state->enabled_planes &= ~BIT(plane->id); 11762 12846 crtc_state->active_planes &= ~BIT(plane->id); 11763 12847 crtc_state->update_planes |= BIT(plane->id); 11764 12848 } ··· 11803 12885 11804 12886 linked_state->planar_slave = true; 11805 12887 linked_state->planar_linked_plane = plane; 12888 + crtc_state->enabled_planes |= BIT(linked->id); 11806 12889 crtc_state->active_planes |= BIT(linked->id); 11807 12890 crtc_state->update_planes |= BIT(linked->id); 11808 12891 drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", ··· 11932 13013 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11933 13014 
struct intel_crtc_state *crtc_state = 11934 13015 intel_atomic_get_new_crtc_state(state, crtc); 11935 - bool mode_changed = needs_modeset(crtc_state); 13016 + bool mode_changed = intel_crtc_needs_modeset(crtc_state); 11936 13017 int ret; 11937 13018 11938 13019 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && ··· 13764 14845 struct intel_crtc_state *old_crtc_state, 13765 14846 struct intel_crtc_state *new_crtc_state) 13766 14847 { 13767 - if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) 14848 + if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) 13768 14849 return; 13769 14850 13770 14851 verify_wm_state(crtc, new_crtc_state); ··· 13859 14940 return; 13860 14941 13861 14942 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 13862 - if (!needs_modeset(new_crtc_state)) 14943 + if (!intel_crtc_needs_modeset(new_crtc_state)) 13863 14944 continue; 13864 14945 13865 14946 intel_release_shared_dplls(state, crtc); ··· 13884 14965 /* look at all crtc's that are going to be enabled in during modeset */ 13885 14966 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 13886 14967 if (!crtc_state->hw.active || 13887 - !needs_modeset(crtc_state)) 14968 + !intel_crtc_needs_modeset(crtc_state)) 13888 14969 continue; 13889 14970 13890 14971 if (first_crtc_state) { ··· 13909 14990 crtc_state->hsw_workaround_pipe = INVALID_PIPE; 13910 14991 13911 14992 if (!crtc_state->hw.active || 13912 - needs_modeset(crtc_state)) 14993 + intel_crtc_needs_modeset(crtc_state)) 13913 14994 continue; 13914 14995 13915 14996 /* 2 or more enabled crtcs means no need for w/a */ ··· 14019 15100 } 14020 15101 14021 15102 return 0; 15103 + } 15104 + 15105 + int intel_atomic_add_affected_planes(struct intel_atomic_state *state, 15106 + struct intel_crtc *crtc) 15107 + { 15108 + const struct intel_crtc_state *old_crtc_state = 15109 + intel_atomic_get_old_crtc_state(state, crtc); 15110 + const struct intel_crtc_state *new_crtc_state = 
15111 + intel_atomic_get_new_crtc_state(state, crtc); 15112 + 15113 + return intel_crtc_add_planes_to_state(state, crtc, 15114 + old_crtc_state->enabled_planes | 15115 + new_crtc_state->enabled_planes); 14022 15116 } 14023 15117 14024 15118 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) ··· 14228 15296 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14229 15297 if (new_crtc_state->hw.enable && 14230 15298 transcoders & BIT(new_crtc_state->cpu_transcoder) && 14231 - needs_modeset(new_crtc_state)) 15299 + intel_crtc_needs_modeset(new_crtc_state)) 14232 15300 return true; 14233 15301 } 14234 15302 ··· 14249 15317 slave = crtc; 14250 15318 master = old_crtc_state->bigjoiner_linked_crtc; 14251 15319 master_crtc_state = intel_atomic_get_new_crtc_state(state, master); 14252 - if (!master_crtc_state || !needs_modeset(master_crtc_state)) 15320 + if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state)) 14253 15321 goto claimed; 14254 15322 } 14255 15323 ··· 14287 15355 return -EINVAL; 14288 15356 } 14289 15357 14290 - static int kill_bigjoiner_slave(struct intel_atomic_state *state, 14291 - struct intel_crtc_state *master_crtc_state) 15358 + static void kill_bigjoiner_slave(struct intel_atomic_state *state, 15359 + struct intel_crtc_state *master_crtc_state) 14292 15360 { 14293 15361 struct intel_crtc_state *slave_crtc_state = 14294 - intel_atomic_get_crtc_state(&state->base, 14295 - master_crtc_state->bigjoiner_linked_crtc); 14296 - 14297 - if (IS_ERR(slave_crtc_state)) 14298 - return PTR_ERR(slave_crtc_state); 15362 + intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc); 14299 15363 14300 15364 slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false; 14301 15365 slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false; 14302 15366 slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL; 14303 15367 
intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state); 14304 - return 0; 14305 15368 } 14306 15369 14307 15370 /** ··· 14328 15401 14329 15402 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14330 15403 new_crtc_state, i) { 14331 - if (needs_modeset(new_crtc_state)) { 15404 + if (intel_crtc_needs_modeset(new_crtc_state)) { 14332 15405 drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n"); 14333 15406 return -EINVAL; 14334 15407 } ··· 14434 15507 14435 15508 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) 14436 15509 { 14437 - const struct intel_crtc_state *crtc_state; 15510 + struct intel_crtc_state *crtc_state; 14438 15511 struct intel_crtc *crtc; 14439 15512 int i; 14440 15513 14441 15514 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 14442 15515 struct intel_crtc_state *linked_crtc_state; 15516 + struct intel_crtc *linked_crtc; 15517 + int ret; 14443 15518 14444 15519 if (!crtc_state->bigjoiner) 14445 15520 continue; 14446 15521 14447 - linked_crtc_state = intel_atomic_get_crtc_state(&state->base, 14448 - crtc_state->bigjoiner_linked_crtc); 15522 + linked_crtc = crtc_state->bigjoiner_linked_crtc; 15523 + linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc); 14449 15524 if (IS_ERR(linked_crtc_state)) 14450 15525 return PTR_ERR(linked_crtc_state); 15526 + 15527 + if (!intel_crtc_needs_modeset(crtc_state)) 15528 + continue; 15529 + 15530 + linked_crtc_state->uapi.mode_changed = true; 15531 + 15532 + ret = drm_atomic_add_affected_connectors(&state->base, 15533 + &linked_crtc->base); 15534 + if (ret) 15535 + return ret; 15536 + 15537 + ret = intel_atomic_add_affected_planes(state, linked_crtc); 15538 + if (ret) 15539 + return ret; 15540 + } 15541 + 15542 + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 15543 + /* Kill old bigjoiner link, we may re-establish afterwards */ 15544 + if (intel_crtc_needs_modeset(crtc_state) && 15545 + 
crtc_state->bigjoiner && !crtc_state->bigjoiner_slave) 15546 + kill_bigjoiner_slave(state, crtc_state); 14451 15547 } 14452 15548 14453 15549 return 0; ··· 14507 15557 14508 15558 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14509 15559 new_crtc_state, i) { 14510 - if (!needs_modeset(new_crtc_state)) { 15560 + if (!intel_crtc_needs_modeset(new_crtc_state)) { 14511 15561 /* Light copy */ 14512 15562 intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state); 14513 15563 14514 15564 continue; 14515 - } 14516 - 14517 - /* Kill old bigjoiner link, we may re-establish afterwards */ 14518 - if (old_crtc_state->bigjoiner && !old_crtc_state->bigjoiner_slave) { 14519 - ret = kill_bigjoiner_slave(state, new_crtc_state); 14520 - if (ret) 14521 - goto fail; 14522 15565 } 14523 15566 14524 15567 if (!new_crtc_state->uapi.enable) { ··· 14538 15595 14539 15596 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14540 15597 new_crtc_state, i) { 14541 - if (!needs_modeset(new_crtc_state)) 15598 + if (!intel_crtc_needs_modeset(new_crtc_state)) 14542 15599 continue; 14543 15600 14544 15601 ret = intel_modeset_pipe_config_late(new_crtc_state); ··· 14560 15617 * forced a full modeset. 
14561 15618 */ 14562 15619 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 14563 - if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state)) 15620 + if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) 14564 15621 continue; 14565 15622 14566 15623 if (intel_dp_mst_is_slave_trans(new_crtc_state)) { ··· 14583 15640 new_crtc_state->update_pipe = false; 14584 15641 } 14585 15642 } 15643 + 15644 + if (new_crtc_state->bigjoiner) { 15645 + struct intel_crtc_state *linked_crtc_state = 15646 + intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc); 15647 + 15648 + if (intel_crtc_needs_modeset(linked_crtc_state)) { 15649 + new_crtc_state->uapi.mode_changed = true; 15650 + new_crtc_state->update_pipe = false; 15651 + } 15652 + } 14586 15653 } 14587 15654 14588 15655 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14589 15656 new_crtc_state, i) { 14590 - if (needs_modeset(new_crtc_state)) { 15657 + if (intel_crtc_needs_modeset(new_crtc_state)) { 14591 15658 any_ms = true; 14592 15659 continue; 14593 15660 } ··· 14674 15721 goto fail; 14675 15722 } 14676 15723 14677 - if (!needs_modeset(new_crtc_state) && 15724 + if (!intel_crtc_needs_modeset(new_crtc_state) && 14678 15725 !new_crtc_state->update_pipe) 14679 15726 continue; 14680 15727 14681 15728 intel_dump_pipe_config(new_crtc_state, state, 14682 - needs_modeset(new_crtc_state) ? 15729 + intel_crtc_needs_modeset(new_crtc_state) ? 
14683 15730 "[modeset]" : "[fastset]"); 14684 15731 } 14685 15732 ··· 14711 15758 return ret; 14712 15759 14713 15760 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 14714 - bool mode_changed = needs_modeset(crtc_state); 15761 + bool mode_changed = intel_crtc_needs_modeset(crtc_state); 14715 15762 14716 15763 if (mode_changed || crtc_state->update_pipe || 14717 15764 crtc_state->uapi.color_mgmt_changed) { ··· 14802 15849 intel_atomic_get_old_crtc_state(state, crtc); 14803 15850 const struct intel_crtc_state *new_crtc_state = 14804 15851 intel_atomic_get_new_crtc_state(state, crtc); 14805 - bool modeset = needs_modeset(new_crtc_state); 15852 + bool modeset = intel_crtc_needs_modeset(new_crtc_state); 14806 15853 14807 15854 /* 14808 15855 * During modesets pipe configuration was programmed as the ··· 14836 15883 const struct intel_crtc_state *new_crtc_state = 14837 15884 intel_atomic_get_new_crtc_state(state, crtc); 14838 15885 14839 - if (!needs_modeset(new_crtc_state)) 15886 + if (!intel_crtc_needs_modeset(new_crtc_state)) 14840 15887 return; 14841 15888 14842 15889 intel_crtc_update_active_timings(new_crtc_state); ··· 14858 15905 intel_atomic_get_old_crtc_state(state, crtc); 14859 15906 struct intel_crtc_state *new_crtc_state = 14860 15907 intel_atomic_get_new_crtc_state(state, crtc); 14861 - bool modeset = needs_modeset(new_crtc_state); 15908 + bool modeset = intel_crtc_needs_modeset(new_crtc_state); 14862 15909 14863 15910 if (!modeset) { 14864 15911 if (new_crtc_state->preload_luts && ··· 14950 15997 /* Only disable port sync and MST slaves */ 14951 15998 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14952 15999 new_crtc_state, i) { 14953 - if (!needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner) 16000 + if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner) 14954 16001 continue; 14955 16002 14956 16003 if (!old_crtc_state->hw.active) ··· 14974 16021 /* Disable everything else left on */ 14975 16022 
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 14976 16023 new_crtc_state, i) { 14977 - if (!needs_modeset(new_crtc_state) || 16024 + if (!intel_crtc_needs_modeset(new_crtc_state) || 14978 16025 (handled & BIT(crtc->pipe)) || 14979 16026 old_crtc_state->bigjoiner_slave) 14980 16027 continue; ··· 15024 16071 continue; 15025 16072 15026 16073 /* ignore allocations for crtc's that have been turned off. */ 15027 - if (!needs_modeset(new_crtc_state)) { 16074 + if (!intel_crtc_needs_modeset(new_crtc_state)) { 15028 16075 entries[pipe] = old_crtc_state->wm.skl.ddb; 15029 16076 update_pipes |= BIT(pipe); 15030 16077 } else { ··· 15219 16266 15220 16267 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 15221 16268 new_crtc_state, i) { 15222 - if (needs_modeset(new_crtc_state) || 16269 + if (intel_crtc_needs_modeset(new_crtc_state) || 15223 16270 new_crtc_state->update_pipe) { 15224 16271 15225 16272 put_domains[crtc->pipe] = ··· 15245 16292 15246 16293 /* Complete the events for pipes that have now been disabled */ 15247 16294 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 15248 - bool modeset = needs_modeset(new_crtc_state); 16295 + bool modeset = intel_crtc_needs_modeset(new_crtc_state); 15249 16296 15250 16297 /* Complete events for now disable pipes here. 
*/ 15251 16298 if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) { ··· 15293 16340 skl_disable_flip_done(crtc); 15294 16341 15295 16342 if (new_crtc_state->hw.active && 15296 - !needs_modeset(new_crtc_state) && 16343 + !intel_crtc_needs_modeset(new_crtc_state) && 15297 16344 !new_crtc_state->preload_luts && 15298 16345 (new_crtc_state->uapi.color_mgmt_changed || 15299 16346 new_crtc_state->update_pipe)) ··· 15329 16376 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 15330 16377 intel_post_plane_update(state, crtc); 15331 16378 15332 - if (put_domains[i]) 15333 - modeset_put_power_domains(dev_priv, put_domains[i]); 16379 + modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]); 15334 16380 15335 16381 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); 15336 16382 ··· 15572 16620 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 15573 16621 } 15574 16622 15575 - static int intel_plane_pin_fb(struct intel_plane_state *plane_state) 16623 + int intel_plane_pin_fb(struct intel_plane_state *plane_state) 15576 16624 { 15577 16625 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 15578 16626 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); ··· 15602 16650 return 0; 15603 16651 } 15604 16652 15605 - static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 16653 + void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 15606 16654 { 15607 16655 struct i915_vma *vma; 15608 16656 ··· 15664 16712 * This should only fail upon a hung GPU, in which case we 15665 16713 * can safely continue. 
15666 16714 */ 15667 - if (needs_modeset(crtc_state)) { 16715 + if (intel_crtc_needs_modeset(crtc_state)) { 15668 16716 ret = i915_sw_fence_await_reservation(&state->commit_ready, 15669 16717 old_obj->base.resv, NULL, 15670 16718 false, 0, ··· 15785 16833 kfree(to_intel_plane(plane)); 15786 16834 } 15787 16835 15788 - static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, 15789 - u32 format, u64 modifier) 16836 + static int intel_crtc_late_register(struct drm_crtc *crtc) 15790 16837 { 15791 - switch (modifier) { 15792 - case DRM_FORMAT_MOD_LINEAR: 15793 - case I915_FORMAT_MOD_X_TILED: 15794 - break; 15795 - default: 15796 - return false; 15797 - } 15798 - 15799 - switch (format) { 15800 - case DRM_FORMAT_C8: 15801 - case DRM_FORMAT_RGB565: 15802 - case DRM_FORMAT_XRGB1555: 15803 - case DRM_FORMAT_XRGB8888: 15804 - return modifier == DRM_FORMAT_MOD_LINEAR || 15805 - modifier == I915_FORMAT_MOD_X_TILED; 15806 - default: 15807 - return false; 15808 - } 15809 - } 15810 - 15811 - static bool i965_plane_format_mod_supported(struct drm_plane *_plane, 15812 - u32 format, u64 modifier) 15813 - { 15814 - switch (modifier) { 15815 - case DRM_FORMAT_MOD_LINEAR: 15816 - case I915_FORMAT_MOD_X_TILED: 15817 - break; 15818 - default: 15819 - return false; 15820 - } 15821 - 15822 - switch (format) { 15823 - case DRM_FORMAT_C8: 15824 - case DRM_FORMAT_RGB565: 15825 - case DRM_FORMAT_XRGB8888: 15826 - case DRM_FORMAT_XBGR8888: 15827 - case DRM_FORMAT_ARGB8888: 15828 - case DRM_FORMAT_ABGR8888: 15829 - case DRM_FORMAT_XRGB2101010: 15830 - case DRM_FORMAT_XBGR2101010: 15831 - case DRM_FORMAT_ARGB2101010: 15832 - case DRM_FORMAT_ABGR2101010: 15833 - case DRM_FORMAT_XBGR16161616F: 15834 - return modifier == DRM_FORMAT_MOD_LINEAR || 15835 - modifier == I915_FORMAT_MOD_X_TILED; 15836 - default: 15837 - return false; 15838 - } 15839 - } 15840 - 15841 - static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, 15842 - u32 format, u64 modifier) 15843 - { 15844 - 
return modifier == DRM_FORMAT_MOD_LINEAR && 15845 - format == DRM_FORMAT_ARGB8888; 15846 - } 15847 - 15848 - static const struct drm_plane_funcs i965_plane_funcs = { 15849 - .update_plane = drm_atomic_helper_update_plane, 15850 - .disable_plane = drm_atomic_helper_disable_plane, 15851 - .destroy = intel_plane_destroy, 15852 - .atomic_duplicate_state = intel_plane_duplicate_state, 15853 - .atomic_destroy_state = intel_plane_destroy_state, 15854 - .format_mod_supported = i965_plane_format_mod_supported, 15855 - }; 15856 - 15857 - static const struct drm_plane_funcs i8xx_plane_funcs = { 15858 - .update_plane = drm_atomic_helper_update_plane, 15859 - .disable_plane = drm_atomic_helper_disable_plane, 15860 - .destroy = intel_plane_destroy, 15861 - .atomic_duplicate_state = intel_plane_duplicate_state, 15862 - .atomic_destroy_state = intel_plane_destroy_state, 15863 - .format_mod_supported = i8xx_plane_format_mod_supported, 15864 - }; 15865 - 15866 - static int 15867 - intel_legacy_cursor_update(struct drm_plane *_plane, 15868 - struct drm_crtc *_crtc, 15869 - struct drm_framebuffer *fb, 15870 - int crtc_x, int crtc_y, 15871 - unsigned int crtc_w, unsigned int crtc_h, 15872 - u32 src_x, u32 src_y, 15873 - u32 src_w, u32 src_h, 15874 - struct drm_modeset_acquire_ctx *ctx) 15875 - { 15876 - struct intel_plane *plane = to_intel_plane(_plane); 15877 - struct intel_crtc *crtc = to_intel_crtc(_crtc); 15878 - struct intel_plane_state *old_plane_state = 15879 - to_intel_plane_state(plane->base.state); 15880 - struct intel_plane_state *new_plane_state; 15881 - struct intel_crtc_state *crtc_state = 15882 - to_intel_crtc_state(crtc->base.state); 15883 - struct intel_crtc_state *new_crtc_state; 15884 - int ret; 15885 - 15886 - /* 15887 - * When crtc is inactive or there is a modeset pending, 15888 - * wait for it to complete in the slowpath 15889 - * 15890 - * FIXME bigjoiner fastpath would be good 15891 - */ 15892 - if (!crtc_state->hw.active || needs_modeset(crtc_state) || 15893 - 
crtc_state->update_pipe || crtc_state->bigjoiner) 15894 - goto slow; 15895 - 15896 - /* 15897 - * Don't do an async update if there is an outstanding commit modifying 15898 - * the plane. This prevents our async update's changes from getting 15899 - * overridden by a previous synchronous update's state. 15900 - */ 15901 - if (old_plane_state->uapi.commit && 15902 - !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done)) 15903 - goto slow; 15904 - 15905 - /* 15906 - * If any parameters change that may affect watermarks, 15907 - * take the slowpath. Only changing fb or position should be 15908 - * in the fastpath. 15909 - */ 15910 - if (old_plane_state->uapi.crtc != &crtc->base || 15911 - old_plane_state->uapi.src_w != src_w || 15912 - old_plane_state->uapi.src_h != src_h || 15913 - old_plane_state->uapi.crtc_w != crtc_w || 15914 - old_plane_state->uapi.crtc_h != crtc_h || 15915 - !old_plane_state->uapi.fb != !fb) 15916 - goto slow; 15917 - 15918 - new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base)); 15919 - if (!new_plane_state) 15920 - return -ENOMEM; 15921 - 15922 - new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base)); 15923 - if (!new_crtc_state) { 15924 - ret = -ENOMEM; 15925 - goto out_free; 15926 - } 15927 - 15928 - drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb); 15929 - 15930 - new_plane_state->uapi.src_x = src_x; 15931 - new_plane_state->uapi.src_y = src_y; 15932 - new_plane_state->uapi.src_w = src_w; 15933 - new_plane_state->uapi.src_h = src_h; 15934 - new_plane_state->uapi.crtc_x = crtc_x; 15935 - new_plane_state->uapi.crtc_y = crtc_y; 15936 - new_plane_state->uapi.crtc_w = crtc_w; 15937 - new_plane_state->uapi.crtc_h = crtc_h; 15938 - 15939 - intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc); 15940 - 15941 - ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, 15942 - old_plane_state, new_plane_state); 15943 - if (ret) 15944 - goto out_free; 
15945 - 15946 - ret = intel_plane_pin_fb(new_plane_state); 15947 - if (ret) 15948 - goto out_free; 15949 - 15950 - intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb), 15951 - ORIGIN_FLIP); 15952 - intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), 15953 - to_intel_frontbuffer(new_plane_state->hw.fb), 15954 - plane->frontbuffer_bit); 15955 - 15956 - /* Swap plane state */ 15957 - plane->base.state = &new_plane_state->uapi; 15958 - 15959 - /* 15960 - * We cannot swap crtc_state as it may be in use by an atomic commit or 15961 - * page flip that's running simultaneously. If we swap crtc_state and 15962 - * destroy the old state, we will cause a use-after-free there. 15963 - * 15964 - * Only update active_planes, which is needed for our internal 15965 - * bookkeeping. Either value will do the right thing when updating 15966 - * planes atomically. If the cursor was part of the atomic update then 15967 - * we would have taken the slowpath. 15968 - */ 15969 - crtc_state->active_planes = new_crtc_state->active_planes; 15970 - 15971 - if (new_plane_state->uapi.visible) 15972 - intel_update_plane(plane, crtc_state, new_plane_state); 15973 - else 15974 - intel_disable_plane(plane, crtc_state); 15975 - 15976 - intel_plane_unpin_fb(old_plane_state); 15977 - 15978 - out_free: 15979 - if (new_crtc_state) 15980 - intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi); 15981 - if (ret) 15982 - intel_plane_destroy_state(&plane->base, &new_plane_state->uapi); 15983 - else 15984 - intel_plane_destroy_state(&plane->base, &old_plane_state->uapi); 15985 - return ret; 15986 - 15987 - slow: 15988 - return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb, 15989 - crtc_x, crtc_y, crtc_w, crtc_h, 15990 - src_x, src_y, src_w, src_h, ctx); 15991 - } 15992 - 15993 - static const struct drm_plane_funcs intel_cursor_plane_funcs = { 15994 - .update_plane = intel_legacy_cursor_update, 15995 - .disable_plane = drm_atomic_helper_disable_plane, 
15996 - .destroy = intel_plane_destroy, 15997 - .atomic_duplicate_state = intel_plane_duplicate_state, 15998 - .atomic_destroy_state = intel_plane_destroy_state, 15999 - .format_mod_supported = intel_cursor_format_mod_supported, 16000 - }; 16001 - 16002 - static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, 16003 - enum i9xx_plane_id i9xx_plane) 16004 - { 16005 - if (!HAS_FBC(dev_priv)) 16006 - return false; 16007 - 16008 - if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 16009 - return i9xx_plane == PLANE_A; /* tied to pipe A */ 16010 - else if (IS_IVYBRIDGE(dev_priv)) 16011 - return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B || 16012 - i9xx_plane == PLANE_C; 16013 - else if (INTEL_GEN(dev_priv) >= 4) 16014 - return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B; 16015 - else 16016 - return i9xx_plane == PLANE_A; 16017 - } 16018 - 16019 - static struct intel_plane * 16020 - intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) 16021 - { 16022 - struct intel_plane *plane; 16023 - const struct drm_plane_funcs *plane_funcs; 16024 - unsigned int supported_rotations; 16025 - const u32 *formats; 16026 - int num_formats; 16027 - int ret, zpos; 16028 - 16029 - if (INTEL_GEN(dev_priv) >= 9) 16030 - return skl_universal_plane_create(dev_priv, pipe, 16031 - PLANE_PRIMARY); 16032 - 16033 - plane = intel_plane_alloc(); 16034 - if (IS_ERR(plane)) 16035 - return plane; 16036 - 16037 - plane->pipe = pipe; 16038 - /* 16039 - * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 16040 - * port is hooked to pipe B. Hence we want plane A feeding pipe B. 
16041 - */ 16042 - if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 && 16043 - INTEL_NUM_PIPES(dev_priv) == 2) 16044 - plane->i9xx_plane = (enum i9xx_plane_id) !pipe; 16045 - else 16046 - plane->i9xx_plane = (enum i9xx_plane_id) pipe; 16047 - plane->id = PLANE_PRIMARY; 16048 - plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); 16049 - 16050 - plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); 16051 - if (plane->has_fbc) { 16052 - struct intel_fbc *fbc = &dev_priv->fbc; 16053 - 16054 - fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; 16055 - } 16056 - 16057 - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 16058 - formats = vlv_primary_formats; 16059 - num_formats = ARRAY_SIZE(vlv_primary_formats); 16060 - } else if (INTEL_GEN(dev_priv) >= 4) { 16061 - /* 16062 - * WaFP16GammaEnabling:ivb 16063 - * "Workaround : When using the 64-bit format, the plane 16064 - * output on each color channel has one quarter amplitude. 16065 - * It can be brought up to full amplitude by using pipe 16066 - * gamma correction or pipe color space conversion to 16067 - * multiply the plane output by four." 16068 - * 16069 - * There is no dedicated plane gamma for the primary plane, 16070 - * and using the pipe gamma/csc could conflict with other 16071 - * planes, so we choose not to expose fp16 on IVB primary 16072 - * planes. HSW primary planes no longer have this problem. 
16073 - */ 16074 - if (IS_IVYBRIDGE(dev_priv)) { 16075 - formats = ivb_primary_formats; 16076 - num_formats = ARRAY_SIZE(ivb_primary_formats); 16077 - } else { 16078 - formats = i965_primary_formats; 16079 - num_formats = ARRAY_SIZE(i965_primary_formats); 16080 - } 16081 - } else { 16082 - formats = i8xx_primary_formats; 16083 - num_formats = ARRAY_SIZE(i8xx_primary_formats); 16084 - } 16085 - 16086 - if (INTEL_GEN(dev_priv) >= 4) 16087 - plane_funcs = &i965_plane_funcs; 16088 - else 16089 - plane_funcs = &i8xx_plane_funcs; 16090 - 16091 - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 16092 - plane->min_cdclk = vlv_plane_min_cdclk; 16093 - else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 16094 - plane->min_cdclk = hsw_plane_min_cdclk; 16095 - else if (IS_IVYBRIDGE(dev_priv)) 16096 - plane->min_cdclk = ivb_plane_min_cdclk; 16097 - else 16098 - plane->min_cdclk = i9xx_plane_min_cdclk; 16099 - 16100 - plane->max_stride = i9xx_plane_max_stride; 16101 - plane->update_plane = i9xx_update_plane; 16102 - plane->disable_plane = i9xx_disable_plane; 16103 - plane->get_hw_state = i9xx_plane_get_hw_state; 16104 - plane->check_plane = i9xx_plane_check; 16105 - 16106 - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 16107 - ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 16108 - 0, plane_funcs, 16109 - formats, num_formats, 16110 - i9xx_format_modifiers, 16111 - DRM_PLANE_TYPE_PRIMARY, 16112 - "primary %c", pipe_name(pipe)); 16113 - else 16114 - ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 16115 - 0, plane_funcs, 16116 - formats, num_formats, 16117 - i9xx_format_modifiers, 16118 - DRM_PLANE_TYPE_PRIMARY, 16119 - "plane %c", 16120 - plane_name(plane->i9xx_plane)); 16121 - if (ret) 16122 - goto fail; 16123 - 16124 - if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 16125 - supported_rotations = 16126 - DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 16127 - DRM_MODE_REFLECT_X; 16128 - } else if (INTEL_GEN(dev_priv) >= 4) { 16129 - 
supported_rotations = 16130 - DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 16131 - } else { 16132 - supported_rotations = DRM_MODE_ROTATE_0; 16133 - } 16134 - 16135 - if (INTEL_GEN(dev_priv) >= 4) 16136 - drm_plane_create_rotation_property(&plane->base, 16137 - DRM_MODE_ROTATE_0, 16138 - supported_rotations); 16139 - 16140 - zpos = 0; 16141 - drm_plane_create_zpos_immutable_property(&plane->base, zpos); 16142 - 16143 - drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); 16144 - 16145 - return plane; 16146 - 16147 - fail: 16148 - intel_plane_free(plane); 16149 - 16150 - return ERR_PTR(ret); 16151 - } 16152 - 16153 - static struct intel_plane * 16154 - intel_cursor_plane_create(struct drm_i915_private *dev_priv, 16155 - enum pipe pipe) 16156 - { 16157 - struct intel_plane *cursor; 16158 - int ret, zpos; 16159 - 16160 - cursor = intel_plane_alloc(); 16161 - if (IS_ERR(cursor)) 16162 - return cursor; 16163 - 16164 - cursor->pipe = pipe; 16165 - cursor->i9xx_plane = (enum i9xx_plane_id) pipe; 16166 - cursor->id = PLANE_CURSOR; 16167 - cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); 16168 - 16169 - if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 16170 - cursor->max_stride = i845_cursor_max_stride; 16171 - cursor->update_plane = i845_update_cursor; 16172 - cursor->disable_plane = i845_disable_cursor; 16173 - cursor->get_hw_state = i845_cursor_get_hw_state; 16174 - cursor->check_plane = i845_check_cursor; 16175 - } else { 16176 - cursor->max_stride = i9xx_cursor_max_stride; 16177 - cursor->update_plane = i9xx_update_cursor; 16178 - cursor->disable_plane = i9xx_disable_cursor; 16179 - cursor->get_hw_state = i9xx_cursor_get_hw_state; 16180 - cursor->check_plane = i9xx_check_cursor; 16181 - } 16182 - 16183 - cursor->cursor.base = ~0; 16184 - cursor->cursor.cntl = ~0; 16185 - 16186 - if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 16187 - cursor->cursor.size = ~0; 16188 - 16189 - ret = drm_universal_plane_init(&dev_priv->drm, 
&cursor->base, 16190 - 0, &intel_cursor_plane_funcs, 16191 - intel_cursor_formats, 16192 - ARRAY_SIZE(intel_cursor_formats), 16193 - cursor_format_modifiers, 16194 - DRM_PLANE_TYPE_CURSOR, 16195 - "cursor %c", pipe_name(pipe)); 16196 - if (ret) 16197 - goto fail; 16198 - 16199 - if (INTEL_GEN(dev_priv) >= 4) 16200 - drm_plane_create_rotation_property(&cursor->base, 16201 - DRM_MODE_ROTATE_0, 16202 - DRM_MODE_ROTATE_0 | 16203 - DRM_MODE_ROTATE_180); 16204 - 16205 - zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; 16206 - drm_plane_create_zpos_immutable_property(&cursor->base, zpos); 16207 - 16208 - if (INTEL_GEN(dev_priv) >= 12) 16209 - drm_plane_enable_fb_damage_clips(&cursor->base); 16210 - 16211 - drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 16212 - 16213 - return cursor; 16214 - 16215 - fail: 16216 - intel_plane_free(cursor); 16217 - 16218 - return ERR_PTR(ret); 16838 + intel_crtc_debugfs_add(crtc); 16839 + return 0; 16219 16840 } 16220 16841 16221 16842 #define INTEL_CRTC_FUNCS \ ··· 15799 17274 .atomic_destroy_state = intel_crtc_destroy_state, \ 15800 17275 .set_crc_source = intel_crtc_set_crc_source, \ 15801 17276 .verify_crc_source = intel_crtc_verify_crc_source, \ 15802 - .get_crc_sources = intel_crtc_get_crc_sources 17277 + .get_crc_sources = intel_crtc_get_crc_sources, \ 17278 + .late_register = intel_crtc_late_register 15803 17279 15804 17280 static const struct drm_crtc_funcs bdw_crtc_funcs = { 15805 17281 INTEL_CRTC_FUNCS, ··· 17690 19164 struct intel_crtc_state *crtc_state = 17691 19165 to_intel_crtc_state(crtc->base.state); 17692 19166 17693 - fixup_active_planes(crtc_state); 19167 + fixup_plane_bitmasks(crtc_state); 17694 19168 } 17695 19169 } 17696 19170 ··· 18113 19587 18114 19588 put_domains = modeset_get_crtc_power_domains(crtc_state); 18115 19589 if (drm_WARN_ON(dev, put_domains)) 18116 - modeset_put_power_domains(dev_priv, put_domains); 19590 + modeset_put_crtc_power_domains(crtc, put_domains); 18117 19591 } 18118 19592 
18119 19593 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+9 -4
drivers/gpu/drm/i915/display/intel_display.h
··· 499 499 ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \ 500 500 (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1)) 501 501 502 + int intel_atomic_add_affected_planes(struct intel_atomic_state *state, 503 + struct intel_crtc *crtc); 502 504 u8 intel_calc_active_pipes(struct intel_atomic_state *state, 503 505 u8 active_pipes); 504 506 void intel_link_compute_m_n(u16 bpp, int nlanes, ··· 630 628 u32 skl_plane_stride(const struct intel_plane_state *plane_state, 631 629 int plane); 632 630 int skl_check_plane_surface(struct intel_plane_state *plane_state); 633 - int i9xx_check_plane_surface(struct intel_plane_state *plane_state); 634 631 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha); 635 - unsigned int i9xx_plane_max_stride(struct intel_plane *plane, 636 - u32 pixel_format, u64 modifier, 637 - unsigned int rotation); 638 632 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); 639 633 unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state); 640 634 ··· 642 644 bool 643 645 intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, 644 646 uint64_t modifier); 647 + 648 + int intel_plane_compute_gtt(struct intel_plane_state *plane_state); 649 + u32 intel_plane_compute_aligned_offset(int *x, int *y, 650 + const struct intel_plane_state *state, 651 + int color_plane); 652 + int intel_plane_pin_fb(struct intel_plane_state *plane_state); 653 + void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state); 645 654 646 655 /* modesetting */ 647 656 void intel_modeset_init_hw(struct drm_i915_private *i915);
+124
drivers/gpu/drm/i915/display/intel_display_debugfs.c
··· 18 18 #include "intel_pm.h" 19 19 #include "intel_psr.h" 20 20 #include "intel_sideband.h" 21 + #include "intel_sprite.h" 21 22 22 23 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) 23 24 { ··· 866 865 } 867 866 } 868 867 868 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) 869 + static void crtc_updates_info(struct seq_file *m, 870 + struct intel_crtc *crtc, 871 + const char *hdr) 872 + { 873 + u64 count; 874 + int row; 875 + 876 + count = 0; 877 + for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) 878 + count += crtc->debug.vbl.times[row]; 879 + seq_printf(m, "%sUpdates: %llu\n", hdr, count); 880 + if (!count) 881 + return; 882 + 883 + for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) { 884 + char columns[80] = " |"; 885 + unsigned int x; 886 + 887 + if (row & 1) { 888 + const char *units; 889 + 890 + if (row > 10) { 891 + x = 1000000; 892 + units = "ms"; 893 + } else { 894 + x = 1000; 895 + units = "us"; 896 + } 897 + 898 + snprintf(columns, sizeof(columns), "%4ld%s |", 899 + DIV_ROUND_CLOSEST(BIT(row + 9), x), units); 900 + } 901 + 902 + if (crtc->debug.vbl.times[row]) { 903 + x = ilog2(crtc->debug.vbl.times[row]); 904 + memset(columns + 8, '*', x); 905 + columns[8 + x] = '\0'; 906 + } 907 + 908 + seq_printf(m, "%s%s\n", hdr, columns); 909 + } 910 + 911 + seq_printf(m, "%sMin update: %lluns\n", 912 + hdr, crtc->debug.vbl.min); 913 + seq_printf(m, "%sMax update: %lluns\n", 914 + hdr, crtc->debug.vbl.max); 915 + seq_printf(m, "%sAverage update: %lluns\n", 916 + hdr, div64_u64(crtc->debug.vbl.sum, count)); 917 + seq_printf(m, "%sOverruns > %uus: %u\n", 918 + hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over); 919 + } 920 + 921 + static int crtc_updates_show(struct seq_file *m, void *data) 922 + { 923 + crtc_updates_info(m, m->private, ""); 924 + return 0; 925 + } 926 + 927 + static int crtc_updates_open(struct inode *inode, struct file *file) 928 + { 929 + return single_open(file, crtc_updates_show, 
inode->i_private); 930 + } 931 + 932 + static ssize_t crtc_updates_write(struct file *file, 933 + const char __user *ubuf, 934 + size_t len, loff_t *offp) 935 + { 936 + struct seq_file *m = file->private_data; 937 + struct intel_crtc *crtc = m->private; 938 + 939 + /* May race with an update. Meh. */ 940 + memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl)); 941 + 942 + return len; 943 + } 944 + 945 + static const struct file_operations crtc_updates_fops = { 946 + .owner = THIS_MODULE, 947 + .open = crtc_updates_open, 948 + .read = seq_read, 949 + .llseek = seq_lseek, 950 + .release = single_release, 951 + .write = crtc_updates_write 952 + }; 953 + 954 + static void crtc_updates_add(struct drm_crtc *crtc) 955 + { 956 + debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry, 957 + to_intel_crtc(crtc), &crtc_updates_fops); 958 + } 959 + 960 + #else 961 + static void crtc_updates_info(struct seq_file *m, 962 + struct intel_crtc *crtc, 963 + const char *hdr) 964 + { 965 + } 966 + 967 + static void crtc_updates_add(struct drm_crtc *crtc) 968 + { 969 + } 970 + #endif 971 + 869 972 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) 870 973 { 871 974 struct drm_i915_private *dev_priv = node_to_i915(m->private); ··· 1012 907 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n", 1013 908 yesno(!crtc->cpu_fifo_underrun_disabled), 1014 909 yesno(!crtc->pch_fifo_underrun_disabled)); 910 + 911 + crtc_updates_info(m, crtc, "\t"); 1015 912 } 1016 913 1017 914 static int i915_display_info(struct seq_file *m, void *unused) ··· 2383 2276 debugfs_create_file("i915_lpsp_capability", 0444, root, 2384 2277 connector, &i915_lpsp_capability_fops); 2385 2278 2279 + return 0; 2280 + } 2281 + 2282 + /** 2283 + * intel_crtc_debugfs_add - add i915 specific crtc debugfs files 2284 + * @crtc: pointer to a drm_crtc 2285 + * 2286 + * Returns 0 on success, negative error codes on error. 2287 + * 2288 + * Failure to add debugfs entries should generally be ignored. 
2289 + */ 2290 + int intel_crtc_debugfs_add(struct drm_crtc *crtc) 2291 + { 2292 + if (!crtc->debugfs_entry) 2293 + return -ENODEV; 2294 + 2295 + crtc_updates_add(crtc); 2386 2296 return 0; 2387 2297 }
+3
drivers/gpu/drm/i915/display/intel_display_debugfs.h
··· 7 7 #define __INTEL_DISPLAY_DEBUGFS_H__ 8 8 9 9 struct drm_connector; 10 + struct drm_crtc; 10 11 struct drm_i915_private; 11 12 12 13 #ifdef CONFIG_DEBUG_FS 13 14 void intel_display_debugfs_register(struct drm_i915_private *i915); 14 15 int intel_connector_debugfs_add(struct drm_connector *connector); 16 + int intel_crtc_debugfs_add(struct drm_crtc *crtc); 15 17 #else 16 18 static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {} 17 19 static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; } 20 + static inline int intel_crtc_debugfs_add(struct drm_crtc *crtc) { return 0; } 18 21 #endif 19 22 20 23 #endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
+95 -32
drivers/gpu/drm/i915/display/intel_display_power.c
··· 2184 2184 mutex_unlock(&power_domains->lock); 2185 2185 } 2186 2186 2187 - /** 2188 - * intel_display_power_put_unchecked - release an unchecked power domain reference 2189 - * @dev_priv: i915 device instance 2190 - * @domain: power domain to reference 2191 - * 2192 - * This function drops the power domain reference obtained by 2193 - * intel_display_power_get() and might power down the corresponding hardware 2194 - * block right away if this is the last reference. 2195 - * 2196 - * This function exists only for historical reasons and should be avoided in 2197 - * new code, as the correctness of its use cannot be checked. Always use 2198 - * intel_display_power_put() instead. 2199 - */ 2200 - void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 2201 - enum intel_display_power_domain domain) 2202 - { 2203 - __intel_display_power_put(dev_priv, domain); 2204 - intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 2205 - } 2206 - 2207 2187 static void 2208 2188 queue_async_put_domains_work(struct i915_power_domains *power_domains, 2209 2189 intel_wakeref_t wakeref) ··· 2390 2410 __intel_display_power_put(dev_priv, domain); 2391 2411 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2392 2412 } 2413 + #else 2414 + /** 2415 + * intel_display_power_put_unchecked - release an unchecked power domain reference 2416 + * @dev_priv: i915 device instance 2417 + * @domain: power domain to reference 2418 + * 2419 + * This function drops the power domain reference obtained by 2420 + * intel_display_power_get() and might power down the corresponding hardware 2421 + * block right away if this is the last reference. 2422 + * 2423 + * This function is only for the power domain code's internal use to suppress wakeref 2424 + * tracking when the correspondig debug kconfig option is disabled, should not 2425 + * be used otherwise. 
2426 + */ 2427 + void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 2428 + enum intel_display_power_domain domain) 2429 + { 2430 + __intel_display_power_put(dev_priv, domain); 2431 + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 2432 + } 2393 2433 #endif 2434 + 2435 + void 2436 + intel_display_power_get_in_set(struct drm_i915_private *i915, 2437 + struct intel_display_power_domain_set *power_domain_set, 2438 + enum intel_display_power_domain domain) 2439 + { 2440 + intel_wakeref_t __maybe_unused wf; 2441 + 2442 + drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2443 + 2444 + wf = intel_display_power_get(i915, domain); 2445 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2446 + power_domain_set->wakerefs[domain] = wf; 2447 + #endif 2448 + power_domain_set->mask |= BIT_ULL(domain); 2449 + } 2450 + 2451 + bool 2452 + intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, 2453 + struct intel_display_power_domain_set *power_domain_set, 2454 + enum intel_display_power_domain domain) 2455 + { 2456 + intel_wakeref_t wf; 2457 + 2458 + drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2459 + 2460 + wf = intel_display_power_get_if_enabled(i915, domain); 2461 + if (!wf) 2462 + return false; 2463 + 2464 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2465 + power_domain_set->wakerefs[domain] = wf; 2466 + #endif 2467 + power_domain_set->mask |= BIT_ULL(domain); 2468 + 2469 + return true; 2470 + } 2471 + 2472 + void 2473 + intel_display_power_put_mask_in_set(struct drm_i915_private *i915, 2474 + struct intel_display_power_domain_set *power_domain_set, 2475 + u64 mask) 2476 + { 2477 + enum intel_display_power_domain domain; 2478 + 2479 + drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask); 2480 + 2481 + for_each_power_domain(domain, mask) { 2482 + intel_wakeref_t __maybe_unused wf = -1; 2483 + 2484 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2485 + wf = 
fetch_and_zero(&power_domain_set->wakerefs[domain]); 2486 + #endif 2487 + intel_display_power_put(i915, domain, wf); 2488 + power_domain_set->mask &= ~BIT_ULL(domain); 2489 + } 2490 + } 2394 2491 2395 2492 #define I830_PIPES_POWER_DOMAINS ( \ 2396 2493 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ ··· 5658 5601 * resources powered until display HW readout is complete. We drop 5659 5602 * this reference in intel_power_domains_enable(). 5660 5603 */ 5661 - power_domains->wakeref = 5604 + drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 5605 + power_domains->init_wakeref = 5662 5606 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5663 5607 5664 5608 /* Disable power support if the user asked so. */ 5665 - if (!i915->params.disable_power_well) 5666 - intel_display_power_get(i915, POWER_DOMAIN_INIT); 5609 + if (!i915->params.disable_power_well) { 5610 + drm_WARN_ON(&i915->drm, power_domains->disable_wakeref); 5611 + i915->power_domains.disable_wakeref = intel_display_power_get(i915, 5612 + POWER_DOMAIN_INIT); 5613 + } 5667 5614 intel_power_domains_sync_hw(i915); 5668 5615 5669 5616 power_domains->initializing = false; ··· 5687 5626 void intel_power_domains_driver_remove(struct drm_i915_private *i915) 5688 5627 { 5689 5628 intel_wakeref_t wakeref __maybe_unused = 5690 - fetch_and_zero(&i915->power_domains.wakeref); 5629 + fetch_and_zero(&i915->power_domains.init_wakeref); 5691 5630 5692 5631 /* Remove the refcount we took to keep power well support disabled. 
*/ 5693 5632 if (!i915->params.disable_power_well) 5694 - intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); 5633 + intel_display_power_put(i915, POWER_DOMAIN_INIT, 5634 + fetch_and_zero(&i915->power_domains.disable_wakeref)); 5695 5635 5696 5636 intel_display_power_flush_work_sync(i915); 5697 5637 ··· 5717 5655 void intel_power_domains_enable(struct drm_i915_private *i915) 5718 5656 { 5719 5657 intel_wakeref_t wakeref __maybe_unused = 5720 - fetch_and_zero(&i915->power_domains.wakeref); 5658 + fetch_and_zero(&i915->power_domains.init_wakeref); 5721 5659 5722 5660 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 5723 5661 intel_power_domains_verify_state(i915); ··· 5734 5672 { 5735 5673 struct i915_power_domains *power_domains = &i915->power_domains; 5736 5674 5737 - drm_WARN_ON(&i915->drm, power_domains->wakeref); 5738 - power_domains->wakeref = 5675 + drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 5676 + power_domains->init_wakeref = 5739 5677 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5740 5678 5741 5679 intel_power_domains_verify_state(i915); ··· 5757 5695 { 5758 5696 struct i915_power_domains *power_domains = &i915->power_domains; 5759 5697 intel_wakeref_t wakeref __maybe_unused = 5760 - fetch_and_zero(&power_domains->wakeref); 5698 + fetch_and_zero(&power_domains->init_wakeref); 5761 5699 5762 5700 intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); 5763 5701 ··· 5781 5719 * power wells if power domains must be deinitialized for suspend. 
5782 5720 */ 5783 5721 if (!i915->params.disable_power_well) 5784 - intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); 5722 + intel_display_power_put(i915, POWER_DOMAIN_INIT, 5723 + fetch_and_zero(&i915->power_domains.disable_wakeref)); 5785 5724 5786 5725 intel_display_power_flush_work(i915); 5787 5726 intel_power_domains_verify_state(i915); ··· 5817 5754 intel_power_domains_init_hw(i915, true); 5818 5755 power_domains->display_core_suspended = false; 5819 5756 } else { 5820 - drm_WARN_ON(&i915->drm, power_domains->wakeref); 5821 - power_domains->wakeref = 5757 + drm_WARN_ON(&i915->drm, power_domains->init_wakeref); 5758 + power_domains->init_wakeref = 5822 5759 intel_display_power_get(i915, POWER_DOMAIN_INIT); 5823 5760 } 5824 5761
+34 -3
drivers/gpu/drm/i915/display/intel_display_power.h
··· 212 212 bool display_core_suspended; 213 213 int power_well_count; 214 214 215 - intel_wakeref_t wakeref; 215 + intel_wakeref_t init_wakeref; 216 + intel_wakeref_t disable_wakeref; 216 217 217 218 struct mutex lock; 218 219 int domain_use_count[POWER_DOMAIN_NUM]; ··· 223 222 u64 async_put_domains[2]; 224 223 225 224 struct i915_power_well *power_wells; 225 + }; 226 + 227 + struct intel_display_power_domain_set { 228 + u64 mask; 229 + #ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM 230 + intel_wakeref_t wakerefs[POWER_DOMAIN_NUM]; 231 + #endif 226 232 }; 227 233 228 234 #define for_each_power_domain(domain, mask) \ ··· 287 279 intel_wakeref_t 288 280 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, 289 281 enum intel_display_power_domain domain); 290 - void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 291 - enum intel_display_power_domain domain); 292 282 void __intel_display_power_put_async(struct drm_i915_private *i915, 293 283 enum intel_display_power_domain domain, 294 284 intel_wakeref_t wakeref); ··· 303 297 __intel_display_power_put_async(i915, domain, wakeref); 304 298 } 305 299 #else 300 + void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 301 + enum intel_display_power_domain domain); 302 + 306 303 static inline void 307 304 intel_display_power_put(struct drm_i915_private *i915, 308 305 enum intel_display_power_domain domain, ··· 322 313 __intel_display_power_put_async(i915, domain, -1); 323 314 } 324 315 #endif 316 + 317 + void 318 + intel_display_power_get_in_set(struct drm_i915_private *i915, 319 + struct intel_display_power_domain_set *power_domain_set, 320 + enum intel_display_power_domain domain); 321 + 322 + bool 323 + intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, 324 + struct intel_display_power_domain_set *power_domain_set, 325 + enum intel_display_power_domain domain); 326 + 327 + void 328 + intel_display_power_put_mask_in_set(struct drm_i915_private *i915, 
329 + struct intel_display_power_domain_set *power_domain_set, 330 + u64 mask); 331 + 332 + static inline void 333 + intel_display_power_put_all_in_set(struct drm_i915_private *i915, 334 + struct intel_display_power_domain_set *power_domain_set) 335 + { 336 + intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask); 337 + } 325 338 326 339 enum dbuf_slice { 327 340 DBUF_S1,
+47 -10
drivers/gpu/drm/i915/display/intel_display_types.h
··· 225 225 const struct drm_connector *audio_connector; 226 226 }; 227 227 228 + struct intel_panel_bl_funcs { 229 + /* Connector and platform specific backlight functions */ 230 + int (*setup)(struct intel_connector *connector, enum pipe pipe); 231 + u32 (*get)(struct intel_connector *connector); 232 + void (*set)(const struct drm_connector_state *conn_state, u32 level); 233 + void (*disable)(const struct drm_connector_state *conn_state, u32 level); 234 + void (*enable)(const struct intel_crtc_state *crtc_state, 235 + const struct drm_connector_state *conn_state, u32 level); 236 + u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz); 237 + }; 238 + 228 239 struct intel_panel { 229 240 struct drm_display_mode *fixed_mode; 230 241 struct drm_display_mode *downclock_mode; ··· 262 251 263 252 struct backlight_device *device; 264 253 265 - /* Connector and platform specific backlight functions */ 266 - int (*setup)(struct intel_connector *connector, enum pipe pipe); 267 - u32 (*get)(struct intel_connector *connector); 268 - void (*set)(const struct drm_connector_state *conn_state, u32 level); 269 - void (*disable)(const struct drm_connector_state *conn_state); 270 - void (*enable)(const struct intel_crtc_state *crtc_state, 271 - const struct drm_connector_state *conn_state); 272 - u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz); 254 + const struct intel_panel_bl_funcs *funcs; 273 255 void (*power)(struct intel_connector *, bool enable); 274 256 } backlight; 275 257 }; ··· 608 604 u32 planar_slave; 609 605 610 606 struct drm_intel_sprite_colorkey ckey; 607 + 608 + struct drm_rect psr2_sel_fetch_area; 611 609 }; 612 610 613 611 struct intel_initial_plane_config { ··· 1053 1047 u32 cgm_mode; 1054 1048 }; 1055 1049 1056 - /* bitmask of visible planes (enum plane_id) */ 1050 + /* bitmask of logically enabled planes (enum plane_id) */ 1051 + u8 enabled_planes; 1052 + 1053 + /* bitmask of actually visible planes (enum plane_id) */ 1057 1054 u8 
active_planes; 1058 1055 u8 nv12_planes; 1059 1056 u8 c8_planes; ··· 1169 1160 /* I915_MODE_FLAG_* */ 1170 1161 u8 mode_flags; 1171 1162 1172 - unsigned long long enabled_power_domains; 1163 + struct intel_display_power_domain_set enabled_power_domains; 1173 1164 struct intel_overlay *overlay; 1174 1165 1175 1166 struct intel_crtc_state *config; ··· 1195 1186 ktime_t start_vbl_time; 1196 1187 int min_vbl, max_vbl; 1197 1188 int scanline_start; 1189 + #ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE 1190 + struct { 1191 + u64 min; 1192 + u64 max; 1193 + u64 sum; 1194 + unsigned int over; 1195 + unsigned int times[17]; /* [1us, 16ms] */ 1196 + } vbl; 1197 + #endif 1198 1198 } debug; 1199 1199 1200 1200 /* scalers available on this crtc */ ··· 1393 1375 unsigned long last_power_on; 1394 1376 unsigned long last_backlight_off; 1395 1377 ktime_t panel_power_off_time; 1378 + intel_wakeref_t vdd_wakeref; 1396 1379 1397 1380 /* 1398 1381 * Pipe whose power sequencer is currently locked into ··· 1463 1444 bool rgb_to_ycbcr; 1464 1445 } dfp; 1465 1446 1447 + /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 1448 + struct pm_qos_request pm_qos; 1449 + 1466 1450 /* Display stream compression testing */ 1467 1451 bool force_dsc_en; 1468 1452 ··· 1482 1460 1483 1461 struct intel_lspcon { 1484 1462 bool active; 1463 + bool hdr_supported; 1485 1464 enum drm_lspcon_mode mode; 1486 1465 enum lspcon_vendor vendor; 1487 1466 }; ··· 1499 1476 /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. 
*/ 1500 1477 enum aux_ch aux_ch; 1501 1478 enum intel_display_power_domain ddi_io_power_domain; 1479 + intel_wakeref_t ddi_io_wakeref; 1480 + intel_wakeref_t aux_wakeref; 1502 1481 struct mutex tc_lock; /* protects the TypeC port mode */ 1503 1482 intel_wakeref_t tc_lock_wakeref; 1504 1483 int tc_link_refcount; ··· 1790 1765 (1 << INTEL_OUTPUT_EDP)); 1791 1766 } 1792 1767 1768 + static inline bool 1769 + intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state) 1770 + { 1771 + return drm_atomic_crtc_needs_modeset(&crtc_state->uapi); 1772 + } 1773 + 1793 1774 static inline void 1794 1775 intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) 1795 1776 { ··· 1816 1785 static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state) 1817 1786 { 1818 1787 return i915_ggtt_offset(state->vma); 1788 + } 1789 + 1790 + static inline struct intel_frontbuffer * 1791 + to_intel_frontbuffer(struct drm_framebuffer *fb) 1792 + { 1793 + return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL; 1819 1794 } 1820 1795 1821 1796 #endif /* __INTEL_DISPLAY_TYPES_H__ */
+84 -9
drivers/gpu/drm/i915/display/intel_dp.c
··· 1512 1512 * lowest possible wakeup latency and so prevent the cpu from going into 1513 1513 * deep sleep states. 1514 1514 */ 1515 - cpu_latency_qos_update_request(&i915->pm_qos, 0); 1515 + cpu_latency_qos_update_request(&intel_dp->pm_qos, 0); 1516 1516 1517 1517 intel_dp_check_edp(intel_dp); 1518 1518 ··· 1645 1645 1646 1646 ret = recv_bytes; 1647 1647 out: 1648 - cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE); 1648 + cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE); 1649 1649 1650 1650 if (vdd) 1651 1651 edp_panel_vdd_off(intel_dp, false); ··· 1921 1921 static void 1922 1922 intel_dp_aux_fini(struct intel_dp *intel_dp) 1923 1923 { 1924 + if (cpu_latency_qos_request_active(&intel_dp->pm_qos)) 1925 + cpu_latency_qos_remove_request(&intel_dp->pm_qos); 1926 + 1924 1927 kfree(intel_dp->aux.name); 1925 1928 } 1926 1929 ··· 1976 1973 encoder->base.name); 1977 1974 1978 1975 intel_dp->aux.transfer = intel_dp_aux_transfer; 1976 + cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE); 1979 1977 } 1980 1978 1981 1979 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp) ··· 2315 2311 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 2316 2312 u8 line_buf_depth; 2317 2313 int ret; 2314 + 2315 + /* 2316 + * RC_MODEL_SIZE is currently a constant across all configurations. 2317 + * 2318 + * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and 2319 + * DP_DSC_RC_BUF_SIZE for this. 
2320 + */ 2321 + vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 2318 2322 2319 2323 ret = intel_dsc_compute_params(encoder, crtc_state); 2320 2324 if (ret) ··· 3129 3117 if (edp_have_panel_vdd(intel_dp)) 3130 3118 return need_to_disable; 3131 3119 3132 - intel_display_power_get(dev_priv, 3133 - intel_aux_power_domain(dig_port)); 3120 + drm_WARN_ON(&dev_priv->drm, intel_dp->vdd_wakeref); 3121 + intel_dp->vdd_wakeref = intel_display_power_get(dev_priv, 3122 + intel_aux_power_domain(dig_port)); 3134 3123 3135 3124 drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", 3136 3125 dig_port->base.base.base.id, ··· 3224 3211 if ((pp & PANEL_POWER_ON) == 0) 3225 3212 intel_dp->panel_power_off_time = ktime_get_boottime(); 3226 3213 3227 - intel_display_power_put_unchecked(dev_priv, 3228 - intel_aux_power_domain(dig_port)); 3214 + intel_display_power_put(dev_priv, 3215 + intel_aux_power_domain(dig_port), 3216 + fetch_and_zero(&intel_dp->vdd_wakeref)); 3229 3217 } 3230 3218 3231 3219 static void edp_panel_vdd_work(struct work_struct *__work) ··· 3378 3364 intel_dp->panel_power_off_time = ktime_get_boottime(); 3379 3365 3380 3366 /* We got a reference when we enabled the VDD. */ 3381 - intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); 3367 + intel_display_power_put(dev_priv, 3368 + intel_aux_power_domain(dig_port), 3369 + fetch_and_zero(&intel_dp->vdd_wakeref)); 3382 3370 } 3383 3371 3384 3372 void intel_edp_panel_off(struct intel_dp *intel_dp) ··· 3618 3602 enable ? 
"enable" : "disable"); 3619 3603 } 3620 3604 3605 + static void 3606 + intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) 3607 + { 3608 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3609 + u8 oui[] = { 0x00, 0xaa, 0x01 }; 3610 + u8 buf[3] = { 0 }; 3611 + 3612 + /* 3613 + * During driver init, we want to be careful and avoid changing the source OUI if it's 3614 + * already set to what we want, so as to avoid clearing any state by accident 3615 + */ 3616 + if (careful) { 3617 + if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) 3618 + drm_err(&i915->drm, "Failed to read source OUI\n"); 3619 + 3620 + if (memcmp(oui, buf, sizeof(oui)) == 0) 3621 + return; 3622 + } 3623 + 3624 + if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) 3625 + drm_err(&i915->drm, "Failed to write source OUI\n"); 3626 + } 3627 + 3621 3628 /* If the device supports it, try to set the power state appropriately */ 3622 3629 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) 3623 3630 { ··· 3661 3622 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 3662 3623 3663 3624 lspcon_resume(dp_to_dig_port(intel_dp)); 3625 + 3626 + /* Write the source OUI as early as possible */ 3627 + if (intel_dp_is_edp(intel_dp)) 3628 + intel_edp_init_source_oui(intel_dp, false); 3664 3629 3665 3630 /* 3666 3631 * When turning on, we need to retry for 1ms to give the sink ··· 5239 5196 /* Read the eDP DSC DPCD registers */ 5240 5197 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) 5241 5198 intel_dp_get_dsc_sink_cap(intel_dp); 5199 + 5200 + /* 5201 + * If needed, program our source OUI so we can make various Intel-specific AUX services 5202 + * available (such as HDR backlight controls) 5203 + */ 5204 + intel_edp_init_source_oui(intel_dp, true); 5242 5205 5243 5206 return true; 5244 5207 } ··· 7226 7177 { 7227 7178 struct drm_i915_private *i915 = to_i915(connector->dev); 7228 7179 struct intel_dp *intel_dp = 
intel_attached_dp(to_intel_connector(connector)); 7180 + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 7181 + struct intel_lspcon *lspcon = &dig_port->lspcon; 7229 7182 int ret; 7230 7183 7231 7184 ret = intel_connector_register(connector); ··· 7241 7190 ret = drm_dp_aux_register(&intel_dp->aux); 7242 7191 if (!ret) 7243 7192 drm_dp_cec_register_connector(&intel_dp->aux, connector); 7193 + 7194 + if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) 7195 + return ret; 7196 + 7197 + /* 7198 + * ToDo: Clean this up to handle lspcon init and resume more 7199 + * efficiently and streamlined. 7200 + */ 7201 + if (lspcon_init(dig_port)) { 7202 + lspcon_detect_hdr_capability(lspcon); 7203 + if (lspcon->hdr_supported) 7204 + drm_object_attach_property(&connector->base, 7205 + connector->dev->mode_config.hdr_output_metadata_property, 7206 + 0); 7207 + } 7208 + 7244 7209 return ret; 7245 7210 } 7246 7211 ··· 7346 7279 */ 7347 7280 drm_dbg_kms(&dev_priv->drm, 7348 7281 "VDD left on by BIOS, adjusting state tracking\n"); 7349 - intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); 7282 + drm_WARN_ON(&dev_priv->drm, intel_dp->vdd_wakeref); 7283 + intel_dp->vdd_wakeref = intel_display_power_get(dev_priv, 7284 + intel_aux_power_domain(dig_port)); 7350 7285 7351 7286 edp_panel_vdd_schedule_off(intel_dp); 7352 7287 } ··· 7647 7578 else if (INTEL_GEN(dev_priv) >= 5) 7648 7579 drm_connector_attach_max_bpc_property(connector, 6, 12); 7649 7580 7650 - intel_attach_colorspace_property(connector); 7581 + /* Register HDMI colorspace for case of lspcon */ 7582 + if (intel_bios_is_lspcon_present(dev_priv, port)) { 7583 + drm_connector_attach_content_type_property(connector); 7584 + intel_attach_hdmi_colorspace_property(connector); 7585 + } else { 7586 + intel_attach_dp_colorspace_property(connector); 7587 + } 7651 7588 7652 7589 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) 7653 7590 drm_object_attach_property(&connector->base,
+87 -28
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
··· 25 25 #include "intel_display_types.h" 26 26 #include "intel_dp_aux_backlight.h" 27 27 28 - static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) 28 + /* 29 + * DP AUX registers for Intel's proprietary HDR backlight interface. We define 30 + * them here since we'll likely be the only driver to ever use these. 31 + */ 32 + #define INTEL_EDP_HDR_TCON_CAP0 0x340 33 + 34 + #define INTEL_EDP_HDR_TCON_CAP1 0x341 35 + # define INTEL_EDP_HDR_TCON_2084_DECODE_CAP BIT(0) 36 + # define INTEL_EDP_HDR_TCON_2020_GAMUT_CAP BIT(1) 37 + # define INTEL_EDP_HDR_TCON_TONE_MAPPING_CAP BIT(2) 38 + # define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_CAP BIT(3) 39 + # define INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP BIT(4) 40 + # define INTEL_EDP_HDR_TCON_OPTIMIZATION_CAP BIT(5) 41 + # define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_CAP BIT(6) 42 + # define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_CONVERSION_CAP BIT(7) 43 + 44 + #define INTEL_EDP_HDR_TCON_CAP2 0x342 45 + # define INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP BIT(0) 46 + 47 + #define INTEL_EDP_HDR_TCON_CAP3 0x343 48 + 49 + #define INTEL_EDP_HDR_GETSET_CTRL_PARAMS 0x344 50 + # define INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE BIT(0) 51 + # define INTEL_EDP_HDR_TCON_2020_GAMUT_ENABLE BIT(1) 52 + # define INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE BIT(2) /* Pre-TGL+ */ 53 + # define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_ENABLE BIT(3) 54 + # define INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE BIT(4) 55 + # define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_ENABLE BIT(5) 56 + /* Bit 6 is reserved */ 57 + # define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_ENABLE BIT(7) 58 + 59 + #define INTEL_EDP_HDR_CONTENT_LUMINANCE 0x346 /* Pre-TGL+ */ 60 + #define INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE 0x34A 61 + #define INTEL_EDP_SDR_LUMINANCE_LEVEL 0x352 62 + #define INTEL_EDP_BRIGHTNESS_NITS_LSB 0x354 63 + #define INTEL_EDP_BRIGHTNESS_NITS_MSB 0x355 64 + #define INTEL_EDP_BRIGHTNESS_DELAY_FRAMES 0x356 65 + #define INTEL_EDP_BRIGHTNESS_PER_FRAME_STEPS 0x357 
66 + 67 + #define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_0 0x358 68 + # define INTEL_EDP_TCON_USAGE_MASK GENMASK(0, 3) 69 + # define INTEL_EDP_TCON_USAGE_UNKNOWN 0x0 70 + # define INTEL_EDP_TCON_USAGE_DESKTOP 0x1 71 + # define INTEL_EDP_TCON_USAGE_FULL_SCREEN_MEDIA 0x2 72 + # define INTEL_EDP_TCON_USAGE_FULL_SCREEN_GAMING 0x3 73 + # define INTEL_EDP_TCON_POWER_MASK BIT(4) 74 + # define INTEL_EDP_TCON_POWER_DC (0 << 4) 75 + # define INTEL_EDP_TCON_POWER_AC (1 << 4) 76 + # define INTEL_EDP_TCON_OPTIMIZATION_STRENGTH_MASK GENMASK(5, 7) 77 + 78 + #define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359 79 + 80 + /* VESA backlight callbacks */ 81 + static void set_vesa_backlight_enable(struct intel_dp *intel_dp, bool enable) 29 82 { 30 83 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 31 84 u8 reg_val = 0; ··· 105 52 } 106 53 } 107 54 108 - static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector) 55 + static bool intel_dp_aux_vesa_backlight_dpcd_mode(struct intel_connector *connector) 109 56 { 110 57 struct intel_dp *intel_dp = intel_attached_dp(connector); 111 58 struct drm_i915_private *i915 = dp_to_i915(intel_dp); ··· 128 75 * Read the current backlight value from DPCD register(s) based 129 76 * on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported 130 77 */ 131 - static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) 78 + static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector) 132 79 { 133 80 struct intel_dp *intel_dp = intel_attached_dp(connector); 134 81 struct drm_i915_private *i915 = dp_to_i915(intel_dp); ··· 139 86 * If we're not in DPCD control mode yet, the programmed brightness 140 87 * value is meaningless and we should assume max brightness 141 88 */ 142 - if (!intel_dp_aux_backlight_dpcd_mode(connector)) 89 + if (!intel_dp_aux_vesa_backlight_dpcd_mode(connector)) 143 90 return connector->panel.backlight.max; 144 91 145 92 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, 
··· 160 107 * 8-bit or 16 bit value (MSB and LSB) 161 108 */ 162 109 static void 163 - intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level) 110 + intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, 111 + u32 level) 164 112 { 165 113 struct intel_connector *connector = to_intel_connector(conn_state->connector); 166 114 struct intel_dp *intel_dp = intel_attached_dp(connector); ··· 191 137 * - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the 192 138 * EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h) 193 139 */ 194 - static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector) 140 + static bool intel_dp_aux_vesa_set_pwm_freq(struct intel_connector *connector) 195 141 { 196 142 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 197 143 struct intel_dp *intel_dp = intel_attached_dp(connector); ··· 227 173 return true; 228 174 } 229 175 230 - static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state, 231 - const struct drm_connector_state *conn_state) 176 + static void 177 + intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state, 178 + const struct drm_connector_state *conn_state, u32 level) 232 179 { 233 180 struct intel_connector *connector = to_intel_connector(conn_state->connector); 234 181 struct intel_dp *intel_dp = intel_attached_dp(connector); ··· 269 214 } 270 215 271 216 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP) 272 - if (intel_dp_aux_set_pwm_freq(connector)) 217 + if (intel_dp_aux_vesa_set_pwm_freq(connector)) 273 218 new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; 274 219 275 220 if (new_dpcd_buf != dpcd_buf) { ··· 280 225 } 281 226 } 282 227 283 - intel_dp_aux_set_backlight(conn_state, 284 - connector->panel.backlight.level); 285 - set_aux_backlight_enable(intel_dp, true); 228 + intel_dp_aux_vesa_set_backlight(conn_state, level); 229 + set_vesa_backlight_enable(intel_dp, true); 
286 230 } 287 231 288 - static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state) 232 + static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state *old_conn_state, 233 + u32 level) 289 234 { 290 - set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)), 291 - false); 235 + set_vesa_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)), 236 + false); 292 237 } 293 238 294 - static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector) 239 + static u32 intel_dp_aux_vesa_calc_max_backlight(struct intel_connector *connector) 295 240 { 296 241 struct drm_i915_private *i915 = to_i915(connector->base.dev); 297 242 struct intel_dp *intel_dp = intel_attached_dp(connector); ··· 371 316 return max_backlight; 372 317 } 373 318 374 - static int intel_dp_aux_setup_backlight(struct intel_connector *connector, 375 - enum pipe pipe) 319 + static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, 320 + enum pipe pipe) 376 321 { 377 322 struct intel_panel *panel = &connector->panel; 378 323 379 - panel->backlight.max = intel_dp_aux_calc_max_backlight(connector); 324 + panel->backlight.max = intel_dp_aux_vesa_calc_max_backlight(connector); 380 325 if (!panel->backlight.max) 381 326 return -ENODEV; 382 327 383 328 panel->backlight.min = 0; 384 - panel->backlight.level = intel_dp_aux_get_backlight(connector); 385 - panel->backlight.enabled = intel_dp_aux_backlight_dpcd_mode(connector) && 329 + panel->backlight.level = intel_dp_aux_vesa_get_backlight(connector); 330 + panel->backlight.enabled = intel_dp_aux_vesa_backlight_dpcd_mode(connector) && 386 331 panel->backlight.level != 0; 387 332 388 333 return 0; 389 334 } 390 335 391 336 static bool 392 - intel_dp_aux_display_control_capable(struct intel_connector *connector) 337 + intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector) 393 338 { 394 339 struct intel_dp 
*intel_dp = intel_attached_dp(connector); 395 340 struct drm_i915_private *i915 = dp_to_i915(intel_dp); ··· 405 350 return false; 406 351 } 407 352 353 + static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = { 354 + .setup = intel_dp_aux_vesa_setup_backlight, 355 + .enable = intel_dp_aux_vesa_enable_backlight, 356 + .disable = intel_dp_aux_vesa_disable_backlight, 357 + .set = intel_dp_aux_vesa_set_backlight, 358 + .get = intel_dp_aux_vesa_get_backlight, 359 + }; 360 + 408 361 int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) 409 362 { 410 363 struct intel_panel *panel = &intel_connector->panel; ··· 420 357 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 421 358 422 359 if (i915->params.enable_dpcd_backlight == 0 || 423 - !intel_dp_aux_display_control_capable(intel_connector)) 360 + !intel_dp_aux_supports_vesa_backlight(intel_connector)) 424 361 return -ENODEV; 425 362 426 363 /* ··· 442 379 return -ENODEV; 443 380 } 444 381 445 - panel->backlight.setup = intel_dp_aux_setup_backlight; 446 - panel->backlight.enable = intel_dp_aux_enable_backlight; 447 - panel->backlight.disable = intel_dp_aux_disable_backlight; 448 - panel->backlight.set = intel_dp_aux_set_backlight; 449 - panel->backlight.get = intel_dp_aux_get_backlight; 382 + panel->backlight.funcs = &intel_dp_vesa_bl_funcs; 450 383 451 384 return 0; 452 385 }
+12 -9
drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
··· 77 77 } 78 78 } 79 79 80 - static void dcs_disable_backlight(const struct drm_connector_state *conn_state) 80 + static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level) 81 81 { 82 82 struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); 83 83 struct mipi_dsi_device *dsi_device; ··· 111 111 } 112 112 113 113 static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, 114 - const struct drm_connector_state *conn_state) 114 + const struct drm_connector_state *conn_state, u32 level) 115 115 { 116 116 struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); 117 - struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; 118 117 struct mipi_dsi_device *dsi_device; 119 118 enum port port; 120 119 ··· 141 142 &cabc, sizeof(cabc)); 142 143 } 143 144 144 - dcs_set_backlight(conn_state, panel->backlight.level); 145 + dcs_set_backlight(conn_state, level); 145 146 } 146 147 147 148 static int dcs_setup_backlight(struct intel_connector *connector, ··· 154 155 155 156 return 0; 156 157 } 158 + 159 + static const struct intel_panel_bl_funcs dcs_bl_funcs = { 160 + .setup = dcs_setup_backlight, 161 + .enable = dcs_enable_backlight, 162 + .disable = dcs_disable_backlight, 163 + .set = dcs_set_backlight, 164 + .get = dcs_get_backlight, 165 + }; 157 166 158 167 int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector) 159 168 { ··· 176 169 if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI)) 177 170 return -EINVAL; 178 171 179 - panel->backlight.setup = dcs_setup_backlight; 180 - panel->backlight.enable = dcs_enable_backlight; 181 - panel->backlight.disable = dcs_disable_backlight; 182 - panel->backlight.set = dcs_set_backlight; 183 - panel->backlight.get = dcs_get_backlight; 172 + panel->backlight.funcs = &dcs_bl_funcs; 184 173 185 174 return 0; 186 175 }
-4
drivers/gpu/drm/i915/display/intel_dvo.c
··· 301 301 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 302 302 dvo_val |= DVO_VSYNC_ACTIVE_HIGH; 303 303 304 - /*I915_WRITE(DVOB_SRCDIM, 305 - (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | 306 - (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ 307 304 intel_de_write(dev_priv, dvo_srcdim_reg, 308 305 (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT)); 309 - /*I915_WRITE(DVOB, dvo_val);*/ 310 306 intel_de_write(dev_priv, dvo_reg, dvo_val); 311 307 } 312 308
+12 -7
drivers/gpu/drm/i915/display/intel_fbc.c
··· 742 742 cache->fence_id = plane_state->vma->fence->id; 743 743 else 744 744 cache->fence_id = -1; 745 + 746 + cache->psr2_active = crtc_state->has_psr2; 745 747 } 746 748 747 749 static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) ··· 913 911 if (INTEL_GEN(dev_priv) >= 11 && 914 912 (cache->plane.src_h + cache->plane.adjusted_y) % 4) { 915 913 fbc->no_fbc_reason = "plane height + offset is non-modulo of 4"; 914 + return false; 915 + } 916 + 917 + /* 918 + * Tigerlake is not supporting FBC with PSR2. 919 + * Recommendation is to keep this combination disabled 920 + * Bspec: 50422 HSD: 14010260002 921 + */ 922 + if (fbc->state_cache.psr2_active && IS_TIGERLAKE(dev_priv)) { 923 + fbc->no_fbc_reason = "not supported with PSR2"; 916 924 return false; 917 925 } 918 926 ··· 1443 1431 return !!dev_priv->params.enable_fbc; 1444 1432 1445 1433 if (!HAS_FBC(dev_priv)) 1446 - return 0; 1447 - 1448 - /* 1449 - * Fbc is causing random underruns in CI execution on TGL platforms. 1450 - * Disabling the same while the problem is being debugged and analyzed. 1451 - */ 1452 - if (IS_TIGERLAKE(dev_priv)) 1453 1434 return 0; 1454 1435 1455 1436 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
+8 -18
drivers/gpu/drm/i915/display/intel_hdmi.c
··· 518 518 VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); 519 519 } 520 520 521 - static void hsw_write_infoframe(struct intel_encoder *encoder, 522 - const struct intel_crtc_state *crtc_state, 523 - unsigned int type, 524 - const void *frame, ssize_t len) 521 + void hsw_write_infoframe(struct intel_encoder *encoder, 522 + const struct intel_crtc_state *crtc_state, 523 + unsigned int type, 524 + const void *frame, ssize_t len) 525 525 { 526 526 const u32 *data = frame; 527 527 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); ··· 555 555 intel_de_posting_read(dev_priv, ctl_reg); 556 556 } 557 557 558 - static void hsw_read_infoframe(struct intel_encoder *encoder, 559 - const struct intel_crtc_state *crtc_state, 560 - unsigned int type, 561 - void *frame, ssize_t len) 558 + void hsw_read_infoframe(struct intel_encoder *encoder, 559 + const struct intel_crtc_state *crtc_state, 560 + unsigned int type, void *frame, ssize_t len) 562 561 { 563 562 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 564 563 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; ··· 2949 2950 intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) 2950 2951 { 2951 2952 struct drm_i915_private *dev_priv = to_i915(connector->dev); 2952 - struct intel_digital_port *dig_port = 2953 - hdmi_to_dig_port(intel_hdmi); 2954 2953 2955 2954 intel_attach_force_audio_property(connector); 2956 2955 intel_attach_broadcast_rgb_property(connector); 2957 2956 intel_attach_aspect_ratio_property(connector); 2958 2957 2959 - /* 2960 - * Attach Colorspace property for Non LSPCON based device 2961 - * ToDo: This needs to be extended for LSPCON implementation 2962 - * as well. Will be implemented separately. 
2963 - */ 2964 - if (!dig_port->lspcon.active) 2965 - intel_attach_colorspace_property(connector); 2966 - 2958 + intel_attach_hdmi_colorspace_property(connector); 2967 2959 drm_connector_attach_content_type_property(connector); 2968 2960 2969 2961 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+141 -25
drivers/gpu/drm/i915/display/intel_lspcon.c
··· 30 30 #include "intel_display_types.h" 31 31 #include "intel_dp.h" 32 32 #include "intel_lspcon.h" 33 + #include "intel_hdmi.h" 33 34 34 35 /* LSPCON OUI Vendor ID(signatures) */ 35 36 #define LSPCON_VENDOR_PARADE_OUI 0x001CF8 36 37 #define LSPCON_VENDOR_MCA_OUI 0x0060AD 38 + 39 + #define DPCD_MCA_LSPCON_HDR_STATUS 0x70003 40 + #define DPCD_PARADE_LSPCON_HDR_STATUS 0x00511 37 41 38 42 /* AUX addresses to write MCA AVI IF */ 39 43 #define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0 ··· 106 102 } 107 103 108 104 return true; 105 + } 106 + 107 + static u32 get_hdr_status_reg(struct intel_lspcon *lspcon) 108 + { 109 + if (lspcon->vendor == LSPCON_VENDOR_MCA) 110 + return DPCD_MCA_LSPCON_HDR_STATUS; 111 + else 112 + return DPCD_PARADE_LSPCON_HDR_STATUS; 113 + } 114 + 115 + void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon) 116 + { 117 + struct intel_digital_port *dig_port = 118 + container_of(lspcon, struct intel_digital_port, lspcon); 119 + struct drm_device *dev = dig_port->base.base.dev; 120 + struct intel_dp *dp = lspcon_to_intel_dp(lspcon); 121 + u8 hdr_caps; 122 + int ret; 123 + 124 + ret = drm_dp_dpcd_read(&dp->aux, get_hdr_status_reg(lspcon), 125 + &hdr_caps, 1); 126 + 127 + if (ret < 0) { 128 + drm_dbg_kms(dev, "HDR capability detection failed\n"); 129 + lspcon->hdr_supported = false; 130 + } else if (hdr_caps & 0x1) { 131 + drm_dbg_kms(dev, "LSPCON capable of HDR\n"); 132 + lspcon->hdr_supported = true; 133 + } 109 134 } 110 135 111 136 static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) ··· 451 418 unsigned int type, 452 419 const void *frame, ssize_t len) 453 420 { 454 - bool ret; 421 + bool ret = true; 455 422 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 456 423 struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); 457 424 458 - /* LSPCON only needs AVI IF */ 459 - if (type != HDMI_INFOFRAME_TYPE_AVI) 460 - return; 461 - 462 - if (lspcon->vendor == LSPCON_VENDOR_MCA) 463 - ret = 
_lspcon_write_avi_infoframe_mca(&intel_dp->aux, 464 - frame, len); 465 - else 466 - ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux, 467 - frame, len); 468 - 469 - if (!ret) { 470 - DRM_ERROR("Failed to write AVI infoframes\n"); 425 + switch (type) { 426 + case HDMI_INFOFRAME_TYPE_AVI: 427 + if (lspcon->vendor == LSPCON_VENDOR_MCA) 428 + ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux, 429 + frame, len); 430 + else 431 + ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux, 432 + frame, len); 433 + break; 434 + case HDMI_PACKET_TYPE_GAMUT_METADATA: 435 + drm_dbg_kms(encoder->base.dev, "Update HDR metadata for lspcon\n"); 436 + /* It uses the legacy hsw implementation for the same */ 437 + hsw_write_infoframe(encoder, crtc_state, type, frame, len); 438 + break; 439 + default: 471 440 return; 472 441 } 473 442 474 - DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n"); 443 + if (!ret) { 444 + DRM_ERROR("Failed to write infoframes\n"); 445 + return; 446 + } 475 447 } 476 448 477 449 void lspcon_read_infoframe(struct intel_encoder *encoder, ··· 484 446 unsigned int type, 485 447 void *frame, ssize_t len) 486 448 { 487 - /* FIXME implement this */ 449 + /* FIXME implement for AVI Infoframe as well */ 450 + if (type == HDMI_PACKET_TYPE_GAMUT_METADATA) 451 + hsw_read_infoframe(encoder, crtc_state, type, 452 + frame, len); 488 453 } 489 454 490 455 void lspcon_set_infoframes(struct intel_encoder *encoder, ··· 532 491 else 533 492 frame.avi.colorspace = HDMI_COLORSPACE_RGB; 534 493 535 - drm_hdmi_avi_infoframe_quant_range(&frame.avi, 536 - conn_state->connector, 537 - adjusted_mode, 538 - crtc_state->limited_color_range ? 
539 - HDMI_QUANTIZATION_RANGE_LIMITED : 540 - HDMI_QUANTIZATION_RANGE_FULL); 494 + /* Set the Colorspace as per the HDMI spec */ 495 + drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state); 496 + 497 + /* nonsense combination */ 498 + drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range && 499 + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); 500 + 501 + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) { 502 + drm_hdmi_avi_infoframe_quant_range(&frame.avi, 503 + conn_state->connector, 504 + adjusted_mode, 505 + crtc_state->limited_color_range ? 506 + HDMI_QUANTIZATION_RANGE_LIMITED : 507 + HDMI_QUANTIZATION_RANGE_FULL); 508 + } else { 509 + frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; 510 + frame.avi.ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED; 511 + } 512 + 513 + drm_hdmi_avi_infoframe_content_type(&frame.avi, conn_state); 541 514 542 515 ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf)); 543 516 if (ret < 0) { ··· 563 508 buf, ret); 564 509 } 565 510 511 + static bool _lspcon_read_avi_infoframe_enabled_mca(struct drm_dp_aux *aux) 512 + { 513 + int ret; 514 + u32 val = 0; 515 + u16 reg = LSPCON_MCA_AVI_IF_CTRL; 516 + 517 + ret = drm_dp_dpcd_read(aux, reg, &val, 1); 518 + if (ret < 0) { 519 + DRM_ERROR("DPCD read failed, address 0x%x\n", reg); 520 + return false; 521 + } 522 + 523 + return val & LSPCON_MCA_AVI_IF_KICKOFF; 524 + } 525 + 526 + static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux) 527 + { 528 + int ret; 529 + u32 val = 0; 530 + u16 reg = LSPCON_PARADE_AVI_IF_CTRL; 531 + 532 + ret = drm_dp_dpcd_read(aux, reg, &val, 1); 533 + if (ret < 0) { 534 + DRM_ERROR("DPCD read failed, address 0x%x\n", reg); 535 + return false; 536 + } 537 + 538 + return val & LSPCON_PARADE_AVI_IF_KICKOFF; 539 + } 540 + 566 541 u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, 567 542 const struct intel_crtc_state *pipe_config) 568 543 { 569 - /* FIXME actually read this from the 
hw */ 570 - return 0; 544 + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 545 + struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); 546 + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 547 + bool infoframes_enabled; 548 + u32 val = 0; 549 + u32 mask, tmp; 550 + 551 + if (lspcon->vendor == LSPCON_VENDOR_MCA) 552 + infoframes_enabled = _lspcon_read_avi_infoframe_enabled_mca(&intel_dp->aux); 553 + else 554 + infoframes_enabled = _lspcon_read_avi_infoframe_enabled_parade(&intel_dp->aux); 555 + 556 + if (infoframes_enabled) 557 + val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); 558 + 559 + if (lspcon->hdr_supported) { 560 + tmp = intel_de_read(dev_priv, 561 + HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); 562 + mask = VIDEO_DIP_ENABLE_GMP_HSW; 563 + 564 + if (tmp & mask) 565 + val |= intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 566 + } 567 + 568 + return val; 571 569 } 572 570 573 571 void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon) ··· 628 520 lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON); 629 521 } 630 522 631 - static bool lspcon_init(struct intel_digital_port *dig_port) 523 + bool lspcon_init(struct intel_digital_port *dig_port) 632 524 { 633 525 struct intel_dp *dp = &dig_port->dp; 634 526 struct intel_lspcon *lspcon = &dig_port->lspcon; ··· 656 548 lspcon->active = true; 657 549 DRM_DEBUG_KMS("Success: LSPCON init\n"); 658 550 return true; 551 + } 552 + 553 + u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder, 554 + const struct intel_crtc_state *pipe_config) 555 + { 556 + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 557 + 558 + return dig_port->infoframes_enabled(encoder, pipe_config); 659 559 } 660 560 661 561 void lspcon_resume(struct intel_digital_port *dig_port)
+12
drivers/gpu/drm/i915/display/intel_lspcon.h
··· 15 15 struct intel_encoder; 16 16 struct intel_lspcon; 17 17 18 + bool lspcon_init(struct intel_digital_port *dig_port); 19 + void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon); 18 20 void lspcon_resume(struct intel_digital_port *dig_port); 19 21 void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); 20 22 void lspcon_write_infoframe(struct intel_encoder *encoder, ··· 33 31 const struct drm_connector_state *conn_state); 34 32 u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, 35 33 const struct intel_crtc_state *pipe_config); 34 + u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder, 35 + const struct intel_crtc_state *pipe_config); 36 + void hsw_write_infoframe(struct intel_encoder *encoder, 37 + const struct intel_crtc_state *crtc_state, 38 + unsigned int type, 39 + const void *frame, ssize_t len); 40 + void hsw_read_infoframe(struct intel_encoder *encoder, 41 + const struct intel_crtc_state *crtc_state, 42 + unsigned int type, 43 + void *frame, ssize_t len); 36 44 37 45 #endif /* __INTEL_LSPCON_H__ */
+133 -93
drivers/gpu/drm/i915/display/intel_panel.c
··· 589 589 BXT_BLC_PWM_DUTY(panel->backlight.controller)); 590 590 } 591 591 592 - static u32 pwm_get_backlight(struct intel_connector *connector) 592 + static u32 ext_pwm_get_backlight(struct intel_connector *connector) 593 593 { 594 594 struct intel_panel *panel = &connector->panel; 595 595 struct pwm_state state; ··· 666 666 BXT_BLC_PWM_DUTY(panel->backlight.controller), level); 667 667 } 668 668 669 - static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) 669 + static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) 670 670 { 671 671 struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; 672 672 ··· 684 684 drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level); 685 685 686 686 level = intel_panel_compute_brightness(connector, level); 687 - panel->backlight.set(conn_state, level); 687 + panel->backlight.funcs->set(conn_state, level); 688 688 } 689 689 690 690 /* set backlight brightness to level in range [0..max], assuming hw min is ··· 726 726 mutex_unlock(&dev_priv->backlight_lock); 727 727 } 728 728 729 - static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state) 729 + static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) 730 730 { 731 731 struct intel_connector *connector = to_intel_connector(old_conn_state->connector); 732 732 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 733 733 u32 tmp; 734 734 735 - intel_panel_actually_set_backlight(old_conn_state, 0); 735 + intel_panel_actually_set_backlight(old_conn_state, level); 736 736 737 737 /* 738 738 * Although we don't support or enable CPU PWM with LPT/SPT based ··· 754 754 intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); 755 755 } 756 756 757 - static void pch_disable_backlight(const struct drm_connector_state *old_conn_state) 757 + static void pch_disable_backlight(const struct drm_connector_state 
*old_conn_state, u32 val) 758 758 { 759 759 struct intel_connector *connector = to_intel_connector(old_conn_state->connector); 760 760 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 761 761 u32 tmp; 762 762 763 - intel_panel_actually_set_backlight(old_conn_state, 0); 763 + intel_panel_actually_set_backlight(old_conn_state, val); 764 764 765 765 tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); 766 766 intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); ··· 769 769 intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); 770 770 } 771 771 772 - static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state) 772 + static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) 773 773 { 774 - intel_panel_actually_set_backlight(old_conn_state, 0); 774 + intel_panel_actually_set_backlight(old_conn_state, val); 775 775 } 776 776 777 - static void i965_disable_backlight(const struct drm_connector_state *old_conn_state) 777 + static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) 778 778 { 779 779 struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev); 780 780 u32 tmp; 781 781 782 - intel_panel_actually_set_backlight(old_conn_state, 0); 782 + intel_panel_actually_set_backlight(old_conn_state, val); 783 783 784 784 tmp = intel_de_read(dev_priv, BLC_PWM_CTL2); 785 785 intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE); 786 786 } 787 787 788 - static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state) 788 + static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) 789 789 { 790 790 struct intel_connector *connector = to_intel_connector(old_conn_state->connector); 791 791 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 792 792 enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe; 793 793 u32 tmp; 794 794 795 - 
intel_panel_actually_set_backlight(old_conn_state, 0); 795 + intel_panel_actually_set_backlight(old_conn_state, val); 796 796 797 797 tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); 798 798 intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), 799 799 tmp & ~BLM_PWM_ENABLE); 800 800 } 801 801 802 - static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state) 802 + static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) 803 803 { 804 804 struct intel_connector *connector = to_intel_connector(old_conn_state->connector); 805 805 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 806 806 struct intel_panel *panel = &connector->panel; 807 - u32 tmp, val; 807 + u32 tmp; 808 808 809 - intel_panel_actually_set_backlight(old_conn_state, 0); 809 + intel_panel_actually_set_backlight(old_conn_state, val); 810 810 811 811 tmp = intel_de_read(dev_priv, 812 812 BXT_BLC_PWM_CTL(panel->backlight.controller)); ··· 820 820 } 821 821 } 822 822 823 - static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state) 823 + static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) 824 824 { 825 825 struct intel_connector *connector = to_intel_connector(old_conn_state->connector); 826 826 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 827 827 struct intel_panel *panel = &connector->panel; 828 828 u32 tmp; 829 829 830 - intel_panel_actually_set_backlight(old_conn_state, 0); 830 + intel_panel_actually_set_backlight(old_conn_state, val); 831 831 832 832 tmp = intel_de_read(dev_priv, 833 833 BXT_BLC_PWM_CTL(panel->backlight.controller)); ··· 835 835 tmp & ~BXT_BLC_PWM_ENABLE); 836 836 } 837 837 838 - static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state) 838 + static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) 839 839 { 840 840 struct intel_connector *connector = 
to_intel_connector(old_conn_state->connector); 841 841 struct intel_panel *panel = &connector->panel; ··· 870 870 if (panel->backlight.device) 871 871 panel->backlight.device->props.power = FB_BLANK_POWERDOWN; 872 872 panel->backlight.enabled = false; 873 - panel->backlight.disable(old_conn_state); 873 + panel->backlight.funcs->disable(old_conn_state, 0); 874 874 875 875 mutex_unlock(&dev_priv->backlight_lock); 876 876 } 877 877 878 878 static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state, 879 - const struct drm_connector_state *conn_state) 879 + const struct drm_connector_state *conn_state, u32 level) 880 880 { 881 881 struct intel_connector *connector = to_intel_connector(conn_state->connector); 882 882 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); ··· 923 923 pch_ctl1 | BLM_PCH_PWM_ENABLE); 924 924 925 925 /* This won't stick until the above enable. */ 926 - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); 926 + intel_panel_actually_set_backlight(conn_state, level); 927 927 } 928 928 929 929 static void pch_enable_backlight(const struct intel_crtc_state *crtc_state, 930 - const struct drm_connector_state *conn_state) 930 + const struct drm_connector_state *conn_state, u32 level) 931 931 { 932 932 struct intel_connector *connector = to_intel_connector(conn_state->connector); 933 933 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); ··· 958 958 intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE); 959 959 960 960 /* This won't stick until the above enable. 
*/ 961 - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); 961 + intel_panel_actually_set_backlight(conn_state, level); 962 962 963 963 pch_ctl2 = panel->backlight.max << 16; 964 964 intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2); ··· 974 974 } 975 975 976 976 static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, 977 - const struct drm_connector_state *conn_state) 977 + const struct drm_connector_state *conn_state, u32 level) 978 978 { 979 979 struct intel_connector *connector = to_intel_connector(conn_state->connector); 980 980 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); ··· 1001 1001 intel_de_posting_read(dev_priv, BLC_PWM_CTL); 1002 1002 1003 1003 /* XXX: combine this into above write? */ 1004 - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); 1004 + intel_panel_actually_set_backlight(conn_state, level); 1005 1005 1006 1006 /* 1007 1007 * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is ··· 1013 1013 } 1014 1014 1015 1015 static void i965_enable_backlight(const struct intel_crtc_state *crtc_state, 1016 - const struct drm_connector_state *conn_state) 1016 + const struct drm_connector_state *conn_state, u32 level) 1017 1017 { 1018 1018 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1019 1019 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); ··· 1044 1044 intel_de_posting_read(dev_priv, BLC_PWM_CTL2); 1045 1045 intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE); 1046 1046 1047 - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); 1047 + intel_panel_actually_set_backlight(conn_state, level); 1048 1048 } 1049 1049 1050 1050 static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, 1051 - const struct drm_connector_state *conn_state) 1051 + const struct drm_connector_state *conn_state, u32 level) 1052 1052 { 1053 1053 struct intel_connector *connector = 
to_intel_connector(conn_state->connector); 1054 1054 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); ··· 1067 1067 intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl); 1068 1068 1069 1069 /* XXX: combine this into above write? */ 1070 - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); 1070 + intel_panel_actually_set_backlight(conn_state, level); 1071 1071 1072 1072 ctl2 = 0; 1073 1073 if (panel->backlight.active_low_pwm) ··· 1079 1079 } 1080 1080 1081 1081 static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, 1082 - const struct drm_connector_state *conn_state) 1082 + const struct drm_connector_state *conn_state, u32 level) 1083 1083 { 1084 1084 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1085 1085 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); ··· 1118 1118 BXT_BLC_PWM_FREQ(panel->backlight.controller), 1119 1119 panel->backlight.max); 1120 1120 1121 - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); 1121 + intel_panel_actually_set_backlight(conn_state, level); 1122 1122 1123 1123 pwm_ctl = 0; 1124 1124 if (panel->backlight.active_low_pwm) ··· 1133 1133 } 1134 1134 1135 1135 static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state, 1136 - const struct drm_connector_state *conn_state) 1136 + const struct drm_connector_state *conn_state, u32 level) 1137 1137 { 1138 1138 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1139 1139 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); ··· 1154 1154 BXT_BLC_PWM_FREQ(panel->backlight.controller), 1155 1155 panel->backlight.max); 1156 1156 1157 - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); 1157 + intel_panel_actually_set_backlight(conn_state, level); 1158 1158 1159 1159 pwm_ctl = 0; 1160 1160 if (panel->backlight.active_low_pwm) ··· 1168 1168 pwm_ctl | BXT_BLC_PWM_ENABLE); 1169 1169 } 1170 1170 
1171 - static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state, 1172 - const struct drm_connector_state *conn_state) 1171 + static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state, 1172 + const struct drm_connector_state *conn_state, u32 level) 1173 1173 { 1174 1174 struct intel_connector *connector = to_intel_connector(conn_state->connector); 1175 1175 struct intel_panel *panel = &connector->panel; 1176 - int level = panel->backlight.level; 1177 1176 1178 1177 level = intel_panel_compute_brightness(connector, level); 1179 1178 pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); ··· 1197 1198 panel->backlight.device->props.max_brightness); 1198 1199 } 1199 1200 1200 - panel->backlight.enable(crtc_state, conn_state); 1201 + panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level); 1201 1202 panel->backlight.enabled = true; 1202 1203 if (panel->backlight.device) 1203 1204 panel->backlight.device->props.power = FB_BLANK_UNBLANK; ··· 1233 1234 mutex_lock(&dev_priv->backlight_lock); 1234 1235 1235 1236 if (panel->backlight.enabled) { 1236 - val = panel->backlight.get(connector); 1237 + val = panel->backlight.funcs->get(connector); 1237 1238 val = intel_panel_compute_brightness(connector, val); 1238 1239 } 1239 1240 ··· 1566 1567 u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv); 1567 1568 u32 pwm; 1568 1569 1569 - if (!panel->backlight.hz_to_pwm) { 1570 + if (!panel->backlight.funcs->hz_to_pwm) { 1570 1571 drm_dbg_kms(&dev_priv->drm, 1571 1572 "backlight frequency conversion not supported\n"); 1572 1573 return 0; 1573 1574 } 1574 1575 1575 - pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz); 1576 + pwm = panel->backlight.funcs->hz_to_pwm(connector, pwm_freq_hz); 1576 1577 if (!pwm) { 1577 1578 drm_dbg_kms(&dev_priv->drm, 1578 1579 "backlight frequency conversion failed\n"); ··· 1889 1890 return 0; 1890 1891 } 1891 1892 1892 - static int pwm_setup_backlight(struct intel_connector 
*connector, 1893 - enum pipe pipe) 1893 + static int ext_pwm_setup_backlight(struct intel_connector *connector, 1894 + enum pipe pipe) 1894 1895 { 1895 1896 struct drm_device *dev = connector->base.dev; 1896 1897 struct drm_i915_private *dev_priv = to_i915(dev); ··· 1980 1981 } 1981 1982 1982 1983 /* ensure intel_panel has been initialized first */ 1983 - if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup)) 1984 + if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs)) 1984 1985 return -ENODEV; 1985 1986 1986 1987 /* set level and max in panel struct */ 1987 1988 mutex_lock(&dev_priv->backlight_lock); 1988 - ret = panel->backlight.setup(intel_connector, pipe); 1989 + ret = panel->backlight.funcs->setup(intel_connector, pipe); 1989 1990 mutex_unlock(&dev_priv->backlight_lock); 1990 1991 1991 1992 if (ret) { ··· 2015 2016 panel->backlight.present = false; 2016 2017 } 2017 2018 2019 + static const struct intel_panel_bl_funcs bxt_funcs = { 2020 + .setup = bxt_setup_backlight, 2021 + .enable = bxt_enable_backlight, 2022 + .disable = bxt_disable_backlight, 2023 + .set = bxt_set_backlight, 2024 + .get = bxt_get_backlight, 2025 + .hz_to_pwm = bxt_hz_to_pwm, 2026 + }; 2027 + 2028 + static const struct intel_panel_bl_funcs cnp_funcs = { 2029 + .setup = cnp_setup_backlight, 2030 + .enable = cnp_enable_backlight, 2031 + .disable = cnp_disable_backlight, 2032 + .set = bxt_set_backlight, 2033 + .get = bxt_get_backlight, 2034 + .hz_to_pwm = cnp_hz_to_pwm, 2035 + }; 2036 + 2037 + static const struct intel_panel_bl_funcs lpt_funcs = { 2038 + .setup = lpt_setup_backlight, 2039 + .enable = lpt_enable_backlight, 2040 + .disable = lpt_disable_backlight, 2041 + .set = lpt_set_backlight, 2042 + .get = lpt_get_backlight, 2043 + .hz_to_pwm = lpt_hz_to_pwm, 2044 + }; 2045 + 2046 + static const struct intel_panel_bl_funcs spt_funcs = { 2047 + .setup = lpt_setup_backlight, 2048 + .enable = lpt_enable_backlight, 2049 + .disable = lpt_disable_backlight, 2050 + .set = lpt_set_backlight, 
2051 + .get = lpt_get_backlight, 2052 + .hz_to_pwm = spt_hz_to_pwm, 2053 + }; 2054 + 2055 + static const struct intel_panel_bl_funcs pch_funcs = { 2056 + .setup = pch_setup_backlight, 2057 + .enable = pch_enable_backlight, 2058 + .disable = pch_disable_backlight, 2059 + .set = pch_set_backlight, 2060 + .get = pch_get_backlight, 2061 + .hz_to_pwm = pch_hz_to_pwm, 2062 + }; 2063 + 2064 + static const struct intel_panel_bl_funcs ext_pwm_funcs = { 2065 + .setup = ext_pwm_setup_backlight, 2066 + .enable = ext_pwm_enable_backlight, 2067 + .disable = ext_pwm_disable_backlight, 2068 + .set = ext_pwm_set_backlight, 2069 + .get = ext_pwm_get_backlight, 2070 + }; 2071 + 2072 + static const struct intel_panel_bl_funcs vlv_funcs = { 2073 + .setup = vlv_setup_backlight, 2074 + .enable = vlv_enable_backlight, 2075 + .disable = vlv_disable_backlight, 2076 + .set = vlv_set_backlight, 2077 + .get = vlv_get_backlight, 2078 + .hz_to_pwm = vlv_hz_to_pwm, 2079 + }; 2080 + 2081 + static const struct intel_panel_bl_funcs i965_funcs = { 2082 + .setup = i965_setup_backlight, 2083 + .enable = i965_enable_backlight, 2084 + .disable = i965_disable_backlight, 2085 + .set = i9xx_set_backlight, 2086 + .get = i9xx_get_backlight, 2087 + .hz_to_pwm = i965_hz_to_pwm, 2088 + }; 2089 + 2090 + static const struct intel_panel_bl_funcs i9xx_funcs = { 2091 + .setup = i9xx_setup_backlight, 2092 + .enable = i9xx_enable_backlight, 2093 + .disable = i9xx_disable_backlight, 2094 + .set = i9xx_set_backlight, 2095 + .get = i9xx_get_backlight, 2096 + .hz_to_pwm = i9xx_hz_to_pwm, 2097 + }; 2098 + 2018 2099 /* Set up chip specific backlight functions */ 2019 2100 static void 2020 2101 intel_panel_init_backlight_funcs(struct intel_panel *panel) ··· 2112 2033 return; 2113 2034 2114 2035 if (IS_GEN9_LP(dev_priv)) { 2115 - panel->backlight.setup = bxt_setup_backlight; 2116 - panel->backlight.enable = bxt_enable_backlight; 2117 - panel->backlight.disable = bxt_disable_backlight; 2118 - panel->backlight.set = 
bxt_set_backlight; 2119 - panel->backlight.get = bxt_get_backlight; 2120 - panel->backlight.hz_to_pwm = bxt_hz_to_pwm; 2036 + panel->backlight.funcs = &bxt_funcs; 2121 2037 } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) { 2122 - panel->backlight.setup = cnp_setup_backlight; 2123 - panel->backlight.enable = cnp_enable_backlight; 2124 - panel->backlight.disable = cnp_disable_backlight; 2125 - panel->backlight.set = bxt_set_backlight; 2126 - panel->backlight.get = bxt_get_backlight; 2127 - panel->backlight.hz_to_pwm = cnp_hz_to_pwm; 2038 + panel->backlight.funcs = &cnp_funcs; 2128 2039 } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) { 2129 - panel->backlight.setup = lpt_setup_backlight; 2130 - panel->backlight.enable = lpt_enable_backlight; 2131 - panel->backlight.disable = lpt_disable_backlight; 2132 - panel->backlight.set = lpt_set_backlight; 2133 - panel->backlight.get = lpt_get_backlight; 2134 2040 if (HAS_PCH_LPT(dev_priv)) 2135 - panel->backlight.hz_to_pwm = lpt_hz_to_pwm; 2041 + panel->backlight.funcs = &lpt_funcs; 2136 2042 else 2137 - panel->backlight.hz_to_pwm = spt_hz_to_pwm; 2043 + panel->backlight.funcs = &spt_funcs; 2138 2044 } else if (HAS_PCH_SPLIT(dev_priv)) { 2139 - panel->backlight.setup = pch_setup_backlight; 2140 - panel->backlight.enable = pch_enable_backlight; 2141 - panel->backlight.disable = pch_disable_backlight; 2142 - panel->backlight.set = pch_set_backlight; 2143 - panel->backlight.get = pch_get_backlight; 2144 - panel->backlight.hz_to_pwm = pch_hz_to_pwm; 2045 + panel->backlight.funcs = &pch_funcs; 2145 2046 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 2146 2047 if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) { 2147 - panel->backlight.setup = pwm_setup_backlight; 2148 - panel->backlight.enable = pwm_enable_backlight; 2149 - panel->backlight.disable = pwm_disable_backlight; 2150 - panel->backlight.set = pwm_set_backlight; 2151 - panel->backlight.get = pwm_get_backlight; 2048 + panel->backlight.funcs = 
&ext_pwm_funcs; 2152 2049 } else { 2153 - panel->backlight.setup = vlv_setup_backlight; 2154 - panel->backlight.enable = vlv_enable_backlight; 2155 - panel->backlight.disable = vlv_disable_backlight; 2156 - panel->backlight.set = vlv_set_backlight; 2157 - panel->backlight.get = vlv_get_backlight; 2158 - panel->backlight.hz_to_pwm = vlv_hz_to_pwm; 2050 + panel->backlight.funcs = &vlv_funcs; 2159 2051 } 2160 2052 } else if (IS_GEN(dev_priv, 4)) { 2161 - panel->backlight.setup = i965_setup_backlight; 2162 - panel->backlight.enable = i965_enable_backlight; 2163 - panel->backlight.disable = i965_disable_backlight; 2164 - panel->backlight.set = i9xx_set_backlight; 2165 - panel->backlight.get = i9xx_get_backlight; 2166 - panel->backlight.hz_to_pwm = i965_hz_to_pwm; 2053 + panel->backlight.funcs = &i965_funcs; 2167 2054 } else { 2168 - panel->backlight.setup = i9xx_setup_backlight; 2169 - panel->backlight.enable = i9xx_enable_backlight; 2170 - panel->backlight.disable = i9xx_disable_backlight; 2171 - panel->backlight.set = i9xx_set_backlight; 2172 - panel->backlight.get = i9xx_get_backlight; 2173 - panel->backlight.hz_to_pwm = i9xx_hz_to_pwm; 2055 + panel->backlight.funcs = &i9xx_funcs; 2174 2056 } 2175 2057 } 2176 2058
+16 -6
drivers/gpu/drm/i915/display/intel_psr.c
··· 1185 1185 { 1186 1186 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1187 1187 enum pipe pipe = plane->pipe; 1188 + const struct drm_rect *clip; 1188 1189 u32 val; 1189 1190 1190 1191 if (!crtc_state->enable_psr2_sel_fetch) ··· 1197 1196 if (!val || plane->id == PLANE_CURSOR) 1198 1197 return; 1199 1198 1200 - val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1; 1199 + clip = &plane_state->psr2_sel_fetch_area; 1200 + 1201 + val = (clip->y1 + plane_state->uapi.dst.y1) << 16; 1202 + val |= plane_state->uapi.dst.x1; 1201 1203 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); 1202 1204 1203 - val = plane_state->color_plane[color_plane].y << 16; 1205 + /* TODO: consider tiling and auxiliary surfaces */ 1206 + val = (clip->y1 + plane_state->color_plane[color_plane].y) << 16; 1204 1207 val |= plane_state->color_plane[color_plane].x; 1205 1208 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), 1206 1209 val); 1207 1210 1208 1211 /* Sizes are 0 based */ 1209 - val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16; 1212 + val = (drm_rect_height(clip) - 1) << 16; 1210 1213 val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; 1211 1214 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); 1212 1215 } ··· 1284 1279 1285 1280 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 1286 1281 new_plane_state, i) { 1287 - struct drm_rect temp; 1282 + struct drm_rect *sel_fetch_area, temp; 1288 1283 1289 1284 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc) 1290 1285 continue; ··· 1307 1302 * For now doing a selective fetch in the whole plane area, 1308 1303 * optimizations will come in the future. 
1309 1304 */ 1310 - temp.y1 = new_plane_state->uapi.dst.y1; 1311 - temp.y2 = new_plane_state->uapi.dst.y2; 1305 + sel_fetch_area = &new_plane_state->psr2_sel_fetch_area; 1306 + sel_fetch_area->y1 = new_plane_state->uapi.src.y1 >> 16; 1307 + sel_fetch_area->y2 = new_plane_state->uapi.src.y2 >> 16; 1308 + 1309 + temp = *sel_fetch_area; 1310 + temp.y1 += new_plane_state->uapi.dst.y1; 1311 + temp.y2 += new_plane_state->uapi.dst.y2; 1312 1312 clip_area_update(&pipe_clip, &temp); 1313 1313 } 1314 1314
+33 -17
drivers/gpu/drm/i915/display/intel_sprite.c
··· 49 49 #include "intel_psr.h" 50 50 #include "intel_dsi.h" 51 51 #include "intel_sprite.h" 52 + #include "i9xx_plane.h" 52 53 53 54 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, 54 55 int usecs) ··· 61 60 return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock, 62 61 1000 * adjusted_mode->crtc_htotal); 63 62 } 64 - 65 - /* FIXME: We should instead only take spinlocks once for the entire update 66 - * instead of once per mmio. */ 67 - #if IS_ENABLED(CONFIG_PROVE_LOCKING) 68 - #define VBLANK_EVASION_TIME_US 250 69 - #else 70 - #define VBLANK_EVASION_TIME_US 100 71 - #endif 72 63 73 64 /** 74 65 * intel_pipe_update_start() - start update of a set of display registers ··· 180 187 local_irq_disable(); 181 188 } 182 189 190 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) 191 + static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) 192 + { 193 + u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time)); 194 + unsigned int h; 195 + 196 + h = ilog2(delta >> 9); 197 + if (h >= ARRAY_SIZE(crtc->debug.vbl.times)) 198 + h = ARRAY_SIZE(crtc->debug.vbl.times) - 1; 199 + crtc->debug.vbl.times[h]++; 200 + 201 + crtc->debug.vbl.sum += delta; 202 + if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min) 203 + crtc->debug.vbl.min = delta; 204 + if (delta > crtc->debug.vbl.max) 205 + crtc->debug.vbl.max = delta; 206 + 207 + if (delta > 1000 * VBLANK_EVASION_TIME_US) { 208 + drm_dbg_kms(crtc->base.dev, 209 + "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n", 210 + pipe_name(crtc->pipe), 211 + div_u64(delta, 1000), 212 + VBLANK_EVASION_TIME_US); 213 + crtc->debug.vbl.over++; 214 + } 215 + } 216 + #else 217 + static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {} 218 + #endif 219 + 183 220 /** 184 221 * intel_pipe_update_end() - end update of a set of display registers 185 222 * @new_crtc_state: the new crtc state ··· 272 249 crtc->debug.min_vbl, crtc->debug.max_vbl, 273 250 
crtc->debug.scanline_start, scanline_end); 274 251 } 275 - #ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE 276 - else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) > 277 - VBLANK_EVASION_TIME_US) 278 - drm_warn(&dev_priv->drm, 279 - "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n", 280 - pipe_name(pipe), 281 - ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time), 282 - VBLANK_EVASION_TIME_US); 283 - #endif 252 + 253 + dbg_vblank_evade(crtc, end_vbl_time); 284 254 } 285 255 286 256 int intel_plane_check_stride(const struct intel_plane_state *plane_state)
+10
drivers/gpu/drm/i915/display/intel_sprite.h
··· 17 17 struct intel_crtc_state; 18 18 struct intel_plane_state; 19 19 20 + /* 21 + * FIXME: We should instead only take spinlocks once for the entire update 22 + * instead of once per mmio. 23 + */ 24 + #if IS_ENABLED(CONFIG_PROVE_LOCKING) 25 + #define VBLANK_EVASION_TIME_US 250 26 + #else 27 + #define VBLANK_EVASION_TIME_US 100 28 + #endif 29 + 20 30 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, 21 31 int usecs); 22 32 struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+1 -1
drivers/gpu/drm/i915/display/intel_tc.c
··· 262 262 mask |= BIT(TC_PORT_LEGACY); 263 263 264 264 /* The sink can be connected only in a single mode. */ 265 - if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1)) 265 + if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1)) 266 266 tc_port_fixup_legacy_flag(dig_port, mask); 267 267 268 268 return mask;
+1 -3
drivers/gpu/drm/i915/display/intel_vdsc.c
··· 454 454 else if (vdsc_cfg->bits_per_component == 12) 455 455 vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC; 456 456 457 - /* RC_MODEL_SIZE is a constant across all configurations */ 458 - vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 459 457 /* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */ 460 458 vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) / 461 459 (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset); ··· 739 741 740 742 /* Populate PICTURE_PARAMETER_SET_9 registers */ 741 743 pps_val = 0; 742 - pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) | 744 + pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) | 743 745 DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); 744 746 drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); 745 747 if (!is_pipe_dsc(crtc_state)) {
+1 -1
drivers/gpu/drm/i915/gvt/handlers.c
··· 3693 3693 struct drm_i915_private *dev_priv = gvt->gt->i915; 3694 3694 3695 3695 if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE) 3696 - I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); 3696 + intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset)); 3697 3697 3698 3698 return 0; 3699 3699 }
+29 -551
drivers/gpu/drm/i915/i915_debugfs.c
··· 378 378 return 0; 379 379 } 380 380 381 - static void gen8_display_interrupt_info(struct seq_file *m) 382 - { 383 - struct drm_i915_private *dev_priv = node_to_i915(m->private); 384 - enum pipe pipe; 385 - 386 - for_each_pipe(dev_priv, pipe) { 387 - enum intel_display_power_domain power_domain; 388 - intel_wakeref_t wakeref; 389 - 390 - power_domain = POWER_DOMAIN_PIPE(pipe); 391 - wakeref = intel_display_power_get_if_enabled(dev_priv, 392 - power_domain); 393 - if (!wakeref) { 394 - seq_printf(m, "Pipe %c power disabled\n", 395 - pipe_name(pipe)); 396 - continue; 397 - } 398 - seq_printf(m, "Pipe %c IMR:\t%08x\n", 399 - pipe_name(pipe), 400 - I915_READ(GEN8_DE_PIPE_IMR(pipe))); 401 - seq_printf(m, "Pipe %c IIR:\t%08x\n", 402 - pipe_name(pipe), 403 - I915_READ(GEN8_DE_PIPE_IIR(pipe))); 404 - seq_printf(m, "Pipe %c IER:\t%08x\n", 405 - pipe_name(pipe), 406 - I915_READ(GEN8_DE_PIPE_IER(pipe))); 407 - 408 - intel_display_power_put(dev_priv, power_domain, wakeref); 409 - } 410 - 411 - seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 412 - I915_READ(GEN8_DE_PORT_IMR)); 413 - seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 414 - I915_READ(GEN8_DE_PORT_IIR)); 415 - seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 416 - I915_READ(GEN8_DE_PORT_IER)); 417 - 418 - seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 419 - I915_READ(GEN8_DE_MISC_IMR)); 420 - seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 421 - I915_READ(GEN8_DE_MISC_IIR)); 422 - seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 423 - I915_READ(GEN8_DE_MISC_IER)); 424 - 425 - seq_printf(m, "PCU interrupt mask:\t%08x\n", 426 - I915_READ(GEN8_PCU_IMR)); 427 - seq_printf(m, "PCU interrupt identity:\t%08x\n", 428 - I915_READ(GEN8_PCU_IIR)); 429 - seq_printf(m, "PCU interrupt enable:\t%08x\n", 430 - I915_READ(GEN8_PCU_IER)); 431 - } 432 - 433 - static int i915_interrupt_info(struct seq_file *m, void *data) 434 - { 435 - struct 
drm_i915_private *dev_priv = node_to_i915(m->private); 436 - struct intel_engine_cs *engine; 437 - intel_wakeref_t wakeref; 438 - int i, pipe; 439 - 440 - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 441 - 442 - if (IS_CHERRYVIEW(dev_priv)) { 443 - intel_wakeref_t pref; 444 - 445 - seq_printf(m, "Master Interrupt Control:\t%08x\n", 446 - I915_READ(GEN8_MASTER_IRQ)); 447 - 448 - seq_printf(m, "Display IER:\t%08x\n", 449 - I915_READ(VLV_IER)); 450 - seq_printf(m, "Display IIR:\t%08x\n", 451 - I915_READ(VLV_IIR)); 452 - seq_printf(m, "Display IIR_RW:\t%08x\n", 453 - I915_READ(VLV_IIR_RW)); 454 - seq_printf(m, "Display IMR:\t%08x\n", 455 - I915_READ(VLV_IMR)); 456 - for_each_pipe(dev_priv, pipe) { 457 - enum intel_display_power_domain power_domain; 458 - 459 - power_domain = POWER_DOMAIN_PIPE(pipe); 460 - pref = intel_display_power_get_if_enabled(dev_priv, 461 - power_domain); 462 - if (!pref) { 463 - seq_printf(m, "Pipe %c power disabled\n", 464 - pipe_name(pipe)); 465 - continue; 466 - } 467 - 468 - seq_printf(m, "Pipe %c stat:\t%08x\n", 469 - pipe_name(pipe), 470 - I915_READ(PIPESTAT(pipe))); 471 - 472 - intel_display_power_put(dev_priv, power_domain, pref); 473 - } 474 - 475 - pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 476 - seq_printf(m, "Port hotplug:\t%08x\n", 477 - I915_READ(PORT_HOTPLUG_EN)); 478 - seq_printf(m, "DPFLIPSTAT:\t%08x\n", 479 - I915_READ(VLV_DPFLIPSTAT)); 480 - seq_printf(m, "DPINVGTT:\t%08x\n", 481 - I915_READ(DPINVGTT)); 482 - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref); 483 - 484 - for (i = 0; i < 4; i++) { 485 - seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 486 - i, I915_READ(GEN8_GT_IMR(i))); 487 - seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 488 - i, I915_READ(GEN8_GT_IIR(i))); 489 - seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 490 - i, I915_READ(GEN8_GT_IER(i))); 491 - } 492 - 493 - seq_printf(m, "PCU interrupt mask:\t%08x\n", 494 - I915_READ(GEN8_PCU_IMR)); 495 - seq_printf(m, "PCU interrupt 
identity:\t%08x\n", 496 - I915_READ(GEN8_PCU_IIR)); 497 - seq_printf(m, "PCU interrupt enable:\t%08x\n", 498 - I915_READ(GEN8_PCU_IER)); 499 - } else if (INTEL_GEN(dev_priv) >= 11) { 500 - if (HAS_MASTER_UNIT_IRQ(dev_priv)) 501 - seq_printf(m, "Master Unit Interrupt Control: %08x\n", 502 - I915_READ(DG1_MSTR_UNIT_INTR)); 503 - 504 - seq_printf(m, "Master Interrupt Control: %08x\n", 505 - I915_READ(GEN11_GFX_MSTR_IRQ)); 506 - 507 - seq_printf(m, "Render/Copy Intr Enable: %08x\n", 508 - I915_READ(GEN11_RENDER_COPY_INTR_ENABLE)); 509 - seq_printf(m, "VCS/VECS Intr Enable: %08x\n", 510 - I915_READ(GEN11_VCS_VECS_INTR_ENABLE)); 511 - seq_printf(m, "GUC/SG Intr Enable:\t %08x\n", 512 - I915_READ(GEN11_GUC_SG_INTR_ENABLE)); 513 - seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n", 514 - I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE)); 515 - seq_printf(m, "Crypto Intr Enable:\t %08x\n", 516 - I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE)); 517 - seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n", 518 - I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE)); 519 - 520 - seq_printf(m, "Display Interrupt Control:\t%08x\n", 521 - I915_READ(GEN11_DISPLAY_INT_CTL)); 522 - 523 - gen8_display_interrupt_info(m); 524 - } else if (INTEL_GEN(dev_priv) >= 8) { 525 - seq_printf(m, "Master Interrupt Control:\t%08x\n", 526 - I915_READ(GEN8_MASTER_IRQ)); 527 - 528 - for (i = 0; i < 4; i++) { 529 - seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 530 - i, I915_READ(GEN8_GT_IMR(i))); 531 - seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 532 - i, I915_READ(GEN8_GT_IIR(i))); 533 - seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 534 - i, I915_READ(GEN8_GT_IER(i))); 535 - } 536 - 537 - gen8_display_interrupt_info(m); 538 - } else if (IS_VALLEYVIEW(dev_priv)) { 539 - intel_wakeref_t pref; 540 - 541 - seq_printf(m, "Display IER:\t%08x\n", 542 - I915_READ(VLV_IER)); 543 - seq_printf(m, "Display IIR:\t%08x\n", 544 - I915_READ(VLV_IIR)); 545 - seq_printf(m, "Display IIR_RW:\t%08x\n", 546 - I915_READ(VLV_IIR_RW)); 547 - seq_printf(m, 
"Display IMR:\t%08x\n", 548 - I915_READ(VLV_IMR)); 549 - for_each_pipe(dev_priv, pipe) { 550 - enum intel_display_power_domain power_domain; 551 - 552 - power_domain = POWER_DOMAIN_PIPE(pipe); 553 - pref = intel_display_power_get_if_enabled(dev_priv, 554 - power_domain); 555 - if (!pref) { 556 - seq_printf(m, "Pipe %c power disabled\n", 557 - pipe_name(pipe)); 558 - continue; 559 - } 560 - 561 - seq_printf(m, "Pipe %c stat:\t%08x\n", 562 - pipe_name(pipe), 563 - I915_READ(PIPESTAT(pipe))); 564 - intel_display_power_put(dev_priv, power_domain, pref); 565 - } 566 - 567 - seq_printf(m, "Master IER:\t%08x\n", 568 - I915_READ(VLV_MASTER_IER)); 569 - 570 - seq_printf(m, "Render IER:\t%08x\n", 571 - I915_READ(GTIER)); 572 - seq_printf(m, "Render IIR:\t%08x\n", 573 - I915_READ(GTIIR)); 574 - seq_printf(m, "Render IMR:\t%08x\n", 575 - I915_READ(GTIMR)); 576 - 577 - seq_printf(m, "PM IER:\t\t%08x\n", 578 - I915_READ(GEN6_PMIER)); 579 - seq_printf(m, "PM IIR:\t\t%08x\n", 580 - I915_READ(GEN6_PMIIR)); 581 - seq_printf(m, "PM IMR:\t\t%08x\n", 582 - I915_READ(GEN6_PMIMR)); 583 - 584 - pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 585 - seq_printf(m, "Port hotplug:\t%08x\n", 586 - I915_READ(PORT_HOTPLUG_EN)); 587 - seq_printf(m, "DPFLIPSTAT:\t%08x\n", 588 - I915_READ(VLV_DPFLIPSTAT)); 589 - seq_printf(m, "DPINVGTT:\t%08x\n", 590 - I915_READ(DPINVGTT)); 591 - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref); 592 - 593 - } else if (!HAS_PCH_SPLIT(dev_priv)) { 594 - seq_printf(m, "Interrupt enable: %08x\n", 595 - I915_READ(GEN2_IER)); 596 - seq_printf(m, "Interrupt identity: %08x\n", 597 - I915_READ(GEN2_IIR)); 598 - seq_printf(m, "Interrupt mask: %08x\n", 599 - I915_READ(GEN2_IMR)); 600 - for_each_pipe(dev_priv, pipe) 601 - seq_printf(m, "Pipe %c stat: %08x\n", 602 - pipe_name(pipe), 603 - I915_READ(PIPESTAT(pipe))); 604 - } else { 605 - seq_printf(m, "North Display Interrupt enable: %08x\n", 606 - I915_READ(DEIER)); 607 - seq_printf(m, "North Display 
Interrupt identity: %08x\n", 608 - I915_READ(DEIIR)); 609 - seq_printf(m, "North Display Interrupt mask: %08x\n", 610 - I915_READ(DEIMR)); 611 - seq_printf(m, "South Display Interrupt enable: %08x\n", 612 - I915_READ(SDEIER)); 613 - seq_printf(m, "South Display Interrupt identity: %08x\n", 614 - I915_READ(SDEIIR)); 615 - seq_printf(m, "South Display Interrupt mask: %08x\n", 616 - I915_READ(SDEIMR)); 617 - seq_printf(m, "Graphics Interrupt enable: %08x\n", 618 - I915_READ(GTIER)); 619 - seq_printf(m, "Graphics Interrupt identity: %08x\n", 620 - I915_READ(GTIIR)); 621 - seq_printf(m, "Graphics Interrupt mask: %08x\n", 622 - I915_READ(GTIMR)); 623 - } 624 - 625 - if (INTEL_GEN(dev_priv) >= 11) { 626 - seq_printf(m, "RCS Intr Mask:\t %08x\n", 627 - I915_READ(GEN11_RCS0_RSVD_INTR_MASK)); 628 - seq_printf(m, "BCS Intr Mask:\t %08x\n", 629 - I915_READ(GEN11_BCS_RSVD_INTR_MASK)); 630 - seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n", 631 - I915_READ(GEN11_VCS0_VCS1_INTR_MASK)); 632 - seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n", 633 - I915_READ(GEN11_VCS2_VCS3_INTR_MASK)); 634 - seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n", 635 - I915_READ(GEN11_VECS0_VECS1_INTR_MASK)); 636 - seq_printf(m, "GUC/SG Intr Mask:\t %08x\n", 637 - I915_READ(GEN11_GUC_SG_INTR_MASK)); 638 - seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n", 639 - I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK)); 640 - seq_printf(m, "Crypto Intr Mask:\t %08x\n", 641 - I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK)); 642 - seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n", 643 - I915_READ(GEN11_GUNIT_CSME_INTR_MASK)); 644 - 645 - } else if (INTEL_GEN(dev_priv) >= 6) { 646 - for_each_uabi_engine(engine, dev_priv) { 647 - seq_printf(m, 648 - "Graphics Interrupt mask (%s): %08x\n", 649 - engine->name, ENGINE_READ(engine, RING_IMR)); 650 - } 651 - } 652 - 653 - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 654 - 655 - return 0; 656 - } 657 - 658 - static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 659 - { 660 - 
struct drm_i915_private *i915 = node_to_i915(m->private); 661 - unsigned int i; 662 - 663 - seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences); 664 - 665 - rcu_read_lock(); 666 - for (i = 0; i < i915->ggtt.num_fences; i++) { 667 - struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i]; 668 - struct i915_vma *vma = reg->vma; 669 - 670 - seq_printf(m, "Fence %d, pin count = %d, object = ", 671 - i, atomic_read(&reg->pin_count)); 672 - if (!vma) 673 - seq_puts(m, "unused"); 674 - else 675 - i915_debugfs_describe_obj(m, vma->obj); 676 - seq_putc(m, '\n'); 677 - } 678 - rcu_read_unlock(); 679 - 680 - return 0; 681 - } 682 - 683 381 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 684 382 static ssize_t gpu_state_read(struct file *file, char __user *ubuf, 685 383 size_t count, loff_t *pos) ··· 500 802 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 501 803 u32 rpmodectl, freq_sts; 502 804 503 - rpmodectl = I915_READ(GEN6_RP_CONTROL); 805 + rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL); 504 806 seq_printf(m, "Video Turbo Mode: %s\n", 505 807 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); 506 808 seq_printf(m, "HW control enabled: %s\n", ··· 545 847 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 546 848 int max_freq; 547 849 548 - rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 850 + rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS); 549 851 if (IS_GEN9_LP(dev_priv)) { 550 - rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 551 - gt_perf_status = I915_READ(BXT_GT_PERF_STATUS); 852 + rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP); 853 + gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS); 552 854 } else { 553 - rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 554 - gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 855 + rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP); 856 + gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS); 555 
857 } 556 858 557 859 /* RPSTAT1 is in the GT power well */ 558 860 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 559 861 560 - reqf = I915_READ(GEN6_RPNSWREQ); 862 + reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ); 561 863 if (INTEL_GEN(dev_priv) >= 9) 562 864 reqf >>= 23; 563 865 else { ··· 569 871 } 570 872 reqf = intel_gpu_freq(rps, reqf); 571 873 572 - rpmodectl = I915_READ(GEN6_RP_CONTROL); 573 - rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); 574 - rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); 874 + rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL); 875 + rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD); 876 + rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD); 575 877 576 - rpstat = I915_READ(GEN6_RPSTAT1); 577 - rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; 578 - rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; 579 - rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; 580 - rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; 581 - rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; 582 - rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; 878 + rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1); 879 + rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; 880 + rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; 881 + rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; 882 + rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; 883 + rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; 884 + rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; 583 885 cagf = intel_rps_read_actual_frequency(rps); 584 886 585 887 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 586 
888 587 889 if (INTEL_GEN(dev_priv) >= 11) { 588 - pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE); 589 - pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK); 890 + pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE); 891 + pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK); 590 892 /* 591 893 * The equivalent to the PM ISR & IIR cannot be read 592 894 * without affecting the current state of the system ··· 594 896 pm_isr = 0; 595 897 pm_iir = 0; 596 898 } else if (INTEL_GEN(dev_priv) >= 8) { 597 - pm_ier = I915_READ(GEN8_GT_IER(2)); 598 - pm_imr = I915_READ(GEN8_GT_IMR(2)); 599 - pm_isr = I915_READ(GEN8_GT_ISR(2)); 600 - pm_iir = I915_READ(GEN8_GT_IIR(2)); 899 + pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2)); 900 + pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2)); 901 + pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2)); 902 + pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2)); 601 903 } else { 602 - pm_ier = I915_READ(GEN6_PMIER); 603 - pm_imr = I915_READ(GEN6_PMIMR); 604 - pm_isr = I915_READ(GEN6_PMISR); 605 - pm_iir = I915_READ(GEN6_PMIIR); 904 + pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER); 905 + pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR); 906 + pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR); 907 + pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR); 606 908 } 607 - pm_mask = I915_READ(GEN6_PMINTRMSK); 909 + pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK); 608 910 609 911 seq_printf(m, "Video Turbo Mode: %s\n", 610 912 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); ··· 709 1011 return 0; 710 1012 } 711 1013 712 - static int i915_ring_freq_table(struct seq_file *m, void *unused) 713 - { 714 - struct drm_i915_private *dev_priv = node_to_i915(m->private); 715 - struct intel_rps *rps = &dev_priv->gt.rps; 716 - unsigned int max_gpu_freq, min_gpu_freq; 717 - intel_wakeref_t wakeref; 718 - int gpu_freq, 
ia_freq; 719 - 720 - if (!HAS_LLC(dev_priv)) 721 - return -ENODEV; 722 - 723 - min_gpu_freq = rps->min_freq; 724 - max_gpu_freq = rps->max_freq; 725 - if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) { 726 - /* Convert GT frequency to 50 HZ units */ 727 - min_gpu_freq /= GEN9_FREQ_SCALER; 728 - max_gpu_freq /= GEN9_FREQ_SCALER; 729 - } 730 - 731 - seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 732 - 733 - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 734 - for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 735 - ia_freq = gpu_freq; 736 - sandybridge_pcode_read(dev_priv, 737 - GEN6_PCODE_READ_MIN_FREQ_TABLE, 738 - &ia_freq, NULL); 739 - seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 740 - intel_gpu_freq(rps, 741 - (gpu_freq * 742 - (IS_GEN9_BC(dev_priv) || 743 - INTEL_GEN(dev_priv) >= 10 ? 744 - GEN9_FREQ_SCALER : 1))), 745 - ((ia_freq >> 0) & 0xff) * 100, 746 - ((ia_freq >> 8) & 0xff) * 100); 747 - } 748 - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 749 - 750 - return 0; 751 - } 752 - 753 - static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) 754 - { 755 - seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)", 756 - ring->space, ring->head, ring->tail, ring->emit); 757 - } 758 - 759 - static int i915_context_status(struct seq_file *m, void *unused) 760 - { 761 - struct drm_i915_private *i915 = node_to_i915(m->private); 762 - struct i915_gem_context *ctx, *cn; 763 - 764 - spin_lock(&i915->gem.contexts.lock); 765 - list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { 766 - struct i915_gem_engines_iter it; 767 - struct intel_context *ce; 768 - 769 - if (!kref_get_unless_zero(&ctx->ref)) 770 - continue; 771 - 772 - spin_unlock(&i915->gem.contexts.lock); 773 - 774 - seq_puts(m, "HW context "); 775 - if (ctx->pid) { 776 - struct task_struct *task; 777 - 778 - task = get_pid_task(ctx->pid, PIDTYPE_PID); 779 - if (task) { 780 - seq_printf(m, "(%s 
[%d]) ", 781 - task->comm, task->pid); 782 - put_task_struct(task); 783 - } 784 - } else if (IS_ERR(ctx->file_priv)) { 785 - seq_puts(m, "(deleted) "); 786 - } else { 787 - seq_puts(m, "(kernel) "); 788 - } 789 - 790 - seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 791 - seq_putc(m, '\n'); 792 - 793 - for_each_gem_engine(ce, 794 - i915_gem_context_lock_engines(ctx), it) { 795 - if (intel_context_pin_if_active(ce)) { 796 - seq_printf(m, "%s: ", ce->engine->name); 797 - if (ce->state) 798 - i915_debugfs_describe_obj(m, ce->state->obj); 799 - describe_ctx_ring(m, ce->ring); 800 - seq_putc(m, '\n'); 801 - intel_context_unpin(ce); 802 - } 803 - } 804 - i915_gem_context_unlock_engines(ctx); 805 - 806 - seq_putc(m, '\n'); 807 - 808 - spin_lock(&i915->gem.contexts.lock); 809 - list_safe_reset_next(ctx, cn, link); 810 - i915_gem_context_put(ctx); 811 - } 812 - spin_unlock(&i915->gem.contexts.lock); 813 - 814 - return 0; 815 - } 816 - 817 1014 static const char *swizzle_string(unsigned swizzle) 818 1015 { 819 1016 switch (swizzle) { ··· 786 1193 return 0; 787 1194 } 788 1195 789 - static const char *rps_power_to_str(unsigned int power) 790 - { 791 - static const char * const strings[] = { 792 - [LOW_POWER] = "low power", 793 - [BETWEEN] = "mixed", 794 - [HIGH_POWER] = "high power", 795 - }; 796 - 797 - if (power >= ARRAY_SIZE(strings) || !strings[power]) 798 - return "unknown"; 799 - 800 - return strings[power]; 801 - } 802 - 803 1196 static int i915_rps_boost_info(struct seq_file *m, void *data) 804 1197 { 805 1198 struct drm_i915_private *dev_priv = node_to_i915(m->private); ··· 812 1233 813 1234 seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); 814 1235 815 - if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) { 816 - u32 rpup, rpupei; 817 - u32 rpdown, rpdownei; 818 - 819 - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 820 - rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; 821 - rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & 
GEN6_RP_EI_MASK; 822 - rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; 823 - rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; 824 - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 825 - 826 - seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", 827 - rps_power_to_str(rps->power.mode)); 828 - seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", 829 - rpup && rpupei ? 100 * rpup / rpupei : 0, 830 - rps->power.up_threshold); 831 - seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", 832 - rpdown && rpdownei ? 100 * rpdown / rpdownei : 0, 833 - rps->power.down_threshold); 834 - } else { 835 - seq_puts(m, "\nRPS Autotuning inactive\n"); 836 - } 837 - 838 - return 0; 839 - } 840 - 841 - static int i915_llc(struct seq_file *m, void *data) 842 - { 843 - struct drm_i915_private *dev_priv = node_to_i915(m->private); 844 - const bool edram = INTEL_GEN(dev_priv) > 8; 845 - 846 - seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv))); 847 - seq_printf(m, "%s: %uMB\n", edram ? 
"eDRAM" : "eLLC", 848 - dev_priv->edram_size_mb); 849 - 850 1236 return 0; 851 1237 } 852 1238 ··· 824 1280 seq_puts(m, "Runtime power management not supported\n"); 825 1281 826 1282 seq_printf(m, "Runtime power status: %s\n", 827 - enableddisabled(!dev_priv->power_domains.wakeref)); 1283 + enableddisabled(!dev_priv->power_domains.init_wakeref)); 828 1284 829 1285 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); 830 1286 seq_printf(m, "IRQs disabled: %s\n", ··· 868 1324 intel_engine_dump(engine, &p, "%s\n", engine->name); 869 1325 870 1326 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 871 - 872 - return 0; 873 - } 874 - 875 - static int i915_shrinker_info(struct seq_file *m, void *unused) 876 - { 877 - struct drm_i915_private *i915 = node_to_i915(m->private); 878 - 879 - seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks); 880 - seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch); 881 1327 882 1328 return 0; 883 1329 } ··· 1063 1529 i915_drop_caches_get, i915_drop_caches_set, 1064 1530 "0x%08llx\n"); 1065 1531 1066 - static int 1067 - i915_cache_sharing_get(void *data, u64 *val) 1068 - { 1069 - struct drm_i915_private *dev_priv = data; 1070 - intel_wakeref_t wakeref; 1071 - u32 snpcr = 0; 1072 - 1073 - if (!(IS_GEN_RANGE(dev_priv, 6, 7))) 1074 - return -ENODEV; 1075 - 1076 - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) 1077 - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1078 - 1079 - *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 1080 - 1081 - return 0; 1082 - } 1083 - 1084 - static int 1085 - i915_cache_sharing_set(void *data, u64 val) 1086 - { 1087 - struct drm_i915_private *dev_priv = data; 1088 - intel_wakeref_t wakeref; 1089 - 1090 - if (!(IS_GEN_RANGE(dev_priv, 6, 7))) 1091 - return -ENODEV; 1092 - 1093 - if (val > 3) 1094 - return -EINVAL; 1095 - 1096 - drm_dbg(&dev_priv->drm, 1097 - "Manually setting uncore sharing to %llu\n", val); 1098 - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { 1099 - u32 snpcr; 
1100 - 1101 - /* Update the cache sharing policy here as well */ 1102 - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1103 - snpcr &= ~GEN6_MBC_SNPCR_MASK; 1104 - snpcr |= val << GEN6_MBC_SNPCR_SHIFT; 1105 - I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1106 - } 1107 - 1108 - return 0; 1109 - } 1110 - 1111 - DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 1112 - i915_cache_sharing_get, i915_cache_sharing_set, 1113 - "%llu\n"); 1114 - 1115 1532 static int i915_sseu_status(struct seq_file *m, void *unused) 1116 1533 { 1117 1534 struct drm_i915_private *i915 = node_to_i915(m->private); ··· 1106 1621 static const struct drm_info_list i915_debugfs_list[] = { 1107 1622 {"i915_capabilities", i915_capabilities, 0}, 1108 1623 {"i915_gem_objects", i915_gem_object_info, 0}, 1109 - {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 1110 - {"i915_gem_interrupt", i915_interrupt_info, 0}, 1111 1624 {"i915_frequency_info", i915_frequency_info, 0}, 1112 - {"i915_ring_freq_table", i915_ring_freq_table, 0}, 1113 - {"i915_context_status", i915_context_status, 0}, 1114 1625 {"i915_swizzle_info", i915_swizzle_info, 0}, 1115 - {"i915_llc", i915_llc, 0}, 1116 1626 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 1117 1627 {"i915_engine_info", i915_engine_info, 0}, 1118 - {"i915_shrinker_info", i915_shrinker_info, 0}, 1119 1628 {"i915_wa_registers", i915_wa_registers, 0}, 1120 1629 {"i915_sseu_status", i915_sseu_status, 0}, 1121 1630 {"i915_rps_boost_info", i915_rps_boost_info, 0}, ··· 1122 1643 } i915_debugfs_files[] = { 1123 1644 {"i915_perf_noa_delay", &i915_perf_noa_delay_fops}, 1124 1645 {"i915_wedged", &i915_wedged_fops}, 1125 - {"i915_cache_sharing", &i915_cache_sharing_fops}, 1126 1646 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 1127 1647 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 1128 1648 {"i915_error_state", &i915_error_state_fops},
+1 -7
drivers/gpu/drm/i915/i915_drv.c
··· 578 578 579 579 pci_set_master(pdev); 580 580 581 - cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); 582 - 583 581 intel_gt_init_workarounds(dev_priv); 584 582 585 583 /* On the 945G/GM, the chipset reports the MSI capability on the ··· 624 626 err_msi: 625 627 if (pdev->msi_enabled) 626 628 pci_disable_msi(pdev); 627 - cpu_latency_qos_remove_request(&dev_priv->pm_qos); 628 629 err_mem_regions: 629 630 intel_memory_regions_driver_release(dev_priv); 630 631 err_ggtt: ··· 645 648 646 649 if (pdev->msi_enabled) 647 650 pci_disable_msi(pdev); 648 - 649 - cpu_latency_qos_remove_request(&dev_priv->pm_qos); 650 651 } 651 652 652 653 /** ··· 733 738 * events. 734 739 */ 735 740 drm_kms_helper_poll_fini(&dev_priv->drm); 741 + drm_atomic_helper_shutdown(&dev_priv->drm); 736 742 737 743 intel_gt_driver_unregister(&dev_priv->gt); 738 744 acpi_video_unregister(); ··· 935 939 synchronize_rcu(); 936 940 937 941 i915_gem_suspend(i915); 938 - 939 - drm_atomic_helper_shutdown(&i915->drm); 940 942 941 943 intel_gvt_driver_remove(i915); 942 944
+1 -40
drivers/gpu/drm/i915/i915_drv.h
··· 416 416 u16 gen9_wa_cfb_stride; 417 417 u16 interval; 418 418 s8 fence_id; 419 + bool psr2_active; 419 420 } state_cache; 420 421 421 422 /* ··· 891 890 spinlock_t irq_lock; 892 891 893 892 bool display_irqs_enabled; 894 - 895 - /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 896 - struct pm_qos_request pm_qos; 897 893 898 894 /* Sideband mailbox protection */ 899 895 struct mutex sb_lock; ··· 1967 1969 1968 1970 int i915_reg_read_ioctl(struct drm_device *dev, void *data, 1969 1971 struct drm_file *file); 1970 - 1971 - #define __I915_REG_OP(op__, dev_priv__, ...) \ 1972 - intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__) 1973 - 1974 - #define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__)) 1975 - #define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__)) 1976 - 1977 - #define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__)) 1978 - 1979 - /* These are untraced mmio-accessors that are only valid to be used inside 1980 - * critical sections, such as inside IRQ handlers, where forcewake is explicitly 1981 - * controlled. 1982 - * 1983 - * Think twice, and think again, before using these. 1984 - * 1985 - * As an example, these accessors can possibly be used between: 1986 - * 1987 - * spin_lock_irq(&dev_priv->uncore.lock); 1988 - * intel_uncore_forcewake_get__locked(); 1989 - * 1990 - * and 1991 - * 1992 - * intel_uncore_forcewake_put__locked(); 1993 - * spin_unlock_irq(&dev_priv->uncore.lock); 1994 - * 1995 - * 1996 - * Note: some registers may not need forcewake held, so 1997 - * intel_uncore_forcewake_{get,put} can be omitted, see 1998 - * intel_uncore_forcewake_for_reg(). 1999 - * 2000 - * Certain architectures will die if the same cacheline is concurrently accessed 2001 - * by different clients (e.g. on Ivybridge). 
Access to registers should 2002 - * therefore generally be serialised, by either the dev_priv->uncore.lock or 2003 - * a more localised lock guarding all access to that bank of registers. 2004 - */ 2005 - #define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__)) 2006 - #define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__)) 2007 1972 2008 1973 /* i915_mm.c */ 2009 1974 int remap_io_mapping(struct vm_area_struct *vma,
+171 -184
drivers/gpu/drm/i915/i915_irq.c
··· 327 327 lockdep_assert_held(&dev_priv->irq_lock); 328 328 drm_WARN_ON(&dev_priv->drm, bits & ~mask); 329 329 330 - val = I915_READ(PORT_HOTPLUG_EN); 330 + val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN); 331 331 val &= ~mask; 332 332 val |= bits; 333 - I915_WRITE(PORT_HOTPLUG_EN, val); 333 + intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val); 334 334 } 335 335 336 336 /** ··· 376 376 if (new_val != dev_priv->irq_mask && 377 377 !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) { 378 378 dev_priv->irq_mask = new_val; 379 - I915_WRITE(DEIMR, dev_priv->irq_mask); 380 - POSTING_READ(DEIMR); 379 + intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask); 380 + intel_uncore_posting_read(&dev_priv->uncore, DEIMR); 381 381 } 382 382 } 383 383 ··· 401 401 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 402 402 return; 403 403 404 - old_val = I915_READ(GEN8_DE_PORT_IMR); 404 + old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); 405 405 406 406 new_val = old_val; 407 407 new_val &= ~interrupt_mask; 408 408 new_val |= (~enabled_irq_mask & interrupt_mask); 409 409 410 410 if (new_val != old_val) { 411 - I915_WRITE(GEN8_DE_PORT_IMR, new_val); 412 - POSTING_READ(GEN8_DE_PORT_IMR); 411 + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val); 412 + intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); 413 413 } 414 414 } 415 415 ··· 440 440 441 441 if (new_val != dev_priv->de_irq_mask[pipe]) { 442 442 dev_priv->de_irq_mask[pipe] = new_val; 443 - I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 444 - POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 443 + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 444 + intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe)); 445 445 } 446 446 } 447 447 ··· 455 455 u32 interrupt_mask, 456 456 u32 enabled_irq_mask) 457 457 { 458 - u32 sdeimr = I915_READ(SDEIMR); 458 + u32 sdeimr = 
intel_uncore_read(&dev_priv->uncore, SDEIMR); 459 459 sdeimr &= ~interrupt_mask; 460 460 sdeimr |= (~enabled_irq_mask & interrupt_mask); 461 461 ··· 466 466 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 467 467 return; 468 468 469 - I915_WRITE(SDEIMR, sdeimr); 470 - POSTING_READ(SDEIMR); 469 + intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr); 470 + intel_uncore_posting_read(&dev_priv->uncore, SDEIMR); 471 471 } 472 472 473 473 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, ··· 533 533 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 534 534 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 535 535 536 - I915_WRITE(reg, enable_mask | status_mask); 537 - POSTING_READ(reg); 536 + intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); 537 + intel_uncore_posting_read(&dev_priv->uncore, reg); 538 538 } 539 539 540 540 void i915_disable_pipestat(struct drm_i915_private *dev_priv, ··· 556 556 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 557 557 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 558 558 559 - I915_WRITE(reg, enable_mask | status_mask); 560 - POSTING_READ(reg); 559 + intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); 560 + intel_uncore_posting_read(&dev_priv->uncore, reg); 561 561 } 562 562 563 563 static bool i915_has_asle(struct drm_i915_private *dev_priv) ··· 715 715 if (!vblank->max_vblank_count) 716 716 return 0; 717 717 718 - return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 718 + return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe)); 719 719 } 720 720 721 721 /* ··· 1004 1004 if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice)) 1005 1005 goto out; 1006 1006 1007 - misccpctl = I915_READ(GEN7_MISCCPCTL); 1008 - I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1009 - POSTING_READ(GEN7_MISCCPCTL); 1007 + misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL); 1008 + intel_uncore_write(&dev_priv->uncore, 
GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1009 + intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL); 1010 1010 1011 1011 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1012 1012 i915_reg_t reg; ··· 1020 1020 1021 1021 reg = GEN7_L3CDERRST1(slice); 1022 1022 1023 - error_status = I915_READ(reg); 1023 + error_status = intel_uncore_read(&dev_priv->uncore, reg); 1024 1024 row = GEN7_PARITY_ERROR_ROW(error_status); 1025 1025 bank = GEN7_PARITY_ERROR_BANK(error_status); 1026 1026 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1027 1027 1028 - I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1029 - POSTING_READ(reg); 1028 + intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1029 + intel_uncore_posting_read(&dev_priv->uncore, reg); 1030 1030 1031 1031 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1032 1032 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); ··· 1047 1047 kfree(parity_event[1]); 1048 1048 } 1049 1049 1050 - I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1050 + intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl); 1051 1051 1052 1052 out: 1053 1053 drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice); ··· 1062 1062 { 1063 1063 switch (pin) { 1064 1064 case HPD_PORT_TC1: 1065 - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC1); 1066 1065 case HPD_PORT_TC2: 1067 - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2); 1068 1066 case HPD_PORT_TC3: 1069 - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC3); 1070 1067 case HPD_PORT_TC4: 1071 - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC4); 1072 1068 case HPD_PORT_TC5: 1073 - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC5); 1074 1069 case HPD_PORT_TC6: 1075 - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC6); 1070 + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin); 1076 1071 default: 1077 1072 return false; 1078 1073 } ··· 1091 1096 { 1092 1097 switch 
(pin) { 1093 1098 case HPD_PORT_A: 1094 - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_A); 1095 1099 case HPD_PORT_B: 1096 - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_B); 1097 1100 case HPD_PORT_C: 1098 - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_C); 1099 1101 case HPD_PORT_D: 1100 - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_D); 1102 + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin); 1101 1103 default: 1102 1104 return false; 1103 1105 } ··· 1104 1112 { 1105 1113 switch (pin) { 1106 1114 case HPD_PORT_TC1: 1107 - return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC1); 1108 1115 case HPD_PORT_TC2: 1109 - return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC2); 1110 1116 case HPD_PORT_TC3: 1111 - return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC3); 1112 1117 case HPD_PORT_TC4: 1113 - return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC4); 1114 1118 case HPD_PORT_TC5: 1115 - return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC5); 1116 1119 case HPD_PORT_TC6: 1117 - return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC6); 1120 + return val & ICP_TC_HPD_LONG_DETECT(pin); 1118 1121 default: 1119 1122 return false; 1120 1123 } ··· 1324 1337 enum pipe pipe) 1325 1338 { 1326 1339 display_pipe_crc_irq_handler(dev_priv, pipe, 1327 - I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1340 + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), 1328 1341 0, 0, 0, 0); 1329 1342 } 1330 1343 ··· 1332 1345 enum pipe pipe) 1333 1346 { 1334 1347 display_pipe_crc_irq_handler(dev_priv, pipe, 1335 - I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1336 - I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1337 - I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1338 - I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1339 - I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1348 + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), 1349 + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)), 1350 + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)), 1351 + intel_uncore_read(&dev_priv->uncore, 
PIPE_CRC_RES_4_IVB(pipe)), 1352 + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe))); 1340 1353 } 1341 1354 1342 1355 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, ··· 1345 1358 u32 res1, res2; 1346 1359 1347 1360 if (INTEL_GEN(dev_priv) >= 3) 1348 - res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1361 + res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe)); 1349 1362 else 1350 1363 res1 = 0; 1351 1364 1352 1365 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1353 - res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1366 + res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe)); 1354 1367 else 1355 1368 res2 = 0; 1356 1369 1357 1370 display_pipe_crc_irq_handler(dev_priv, pipe, 1358 - I915_READ(PIPE_CRC_RES_RED(pipe)), 1359 - I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1360 - I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1371 + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)), 1372 + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)), 1373 + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)), 1361 1374 res1, res2); 1362 1375 } 1363 1376 ··· 1366 1379 enum pipe pipe; 1367 1380 1368 1381 for_each_pipe(dev_priv, pipe) { 1369 - I915_WRITE(PIPESTAT(pipe), 1382 + intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe), 1370 1383 PIPESTAT_INT_STATUS_MASK | 1371 1384 PIPE_FIFO_UNDERRUN_STATUS); 1372 1385 ··· 1420 1433 continue; 1421 1434 1422 1435 reg = PIPESTAT(pipe); 1423 - pipe_stats[pipe] = I915_READ(reg) & status_mask; 1436 + pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask; 1424 1437 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 1425 1438 1426 1439 /* ··· 1433 1446 * an interrupt is still pending. 
1434 1447 */ 1435 1448 if (pipe_stats[pipe]) { 1436 - I915_WRITE(reg, pipe_stats[pipe]); 1437 - I915_WRITE(reg, enable_mask); 1449 + intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]); 1450 + intel_uncore_write(&dev_priv->uncore, reg, enable_mask); 1438 1451 } 1439 1452 } 1440 1453 spin_unlock(&dev_priv->irq_lock); ··· 1550 1563 * bits can itself generate a new hotplug interrupt :( 1551 1564 */ 1552 1565 for (i = 0; i < 10; i++) { 1553 - u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 1566 + u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask; 1554 1567 1555 1568 if (tmp == 0) 1556 1569 return hotplug_status; 1557 1570 1558 1571 hotplug_status |= tmp; 1559 - I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1572 + intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status); 1560 1573 } 1561 1574 1562 1575 drm_WARN_ONCE(&dev_priv->drm, 1, 1563 1576 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 1564 - I915_READ(PORT_HOTPLUG_STAT)); 1577 + intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); 1565 1578 1566 1579 return hotplug_status; 1567 1580 } ··· 1610 1623 u32 hotplug_status = 0; 1611 1624 u32 ier = 0; 1612 1625 1613 - gt_iir = I915_READ(GTIIR); 1614 - pm_iir = I915_READ(GEN6_PMIIR); 1615 - iir = I915_READ(VLV_IIR); 1626 + gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR); 1627 + pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR); 1628 + iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR); 1616 1629 1617 1630 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1618 1631 break; ··· 1632 1645 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1633 1646 * bits this time around. 
1634 1647 */ 1635 - I915_WRITE(VLV_MASTER_IER, 0); 1636 - ier = I915_READ(VLV_IER); 1637 - I915_WRITE(VLV_IER, 0); 1648 + intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); 1649 + ier = intel_uncore_read(&dev_priv->uncore, VLV_IER); 1650 + intel_uncore_write(&dev_priv->uncore, VLV_IER, 0); 1638 1651 1639 1652 if (gt_iir) 1640 - I915_WRITE(GTIIR, gt_iir); 1653 + intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir); 1641 1654 if (pm_iir) 1642 - I915_WRITE(GEN6_PMIIR, pm_iir); 1655 + intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir); 1643 1656 1644 1657 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1645 1658 hotplug_status = i9xx_hpd_irq_ack(dev_priv); ··· 1657 1670 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1658 1671 */ 1659 1672 if (iir) 1660 - I915_WRITE(VLV_IIR, iir); 1673 + intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir); 1661 1674 1662 - I915_WRITE(VLV_IER, ier); 1663 - I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1675 + intel_uncore_write(&dev_priv->uncore, VLV_IER, ier); 1676 + intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1664 1677 1665 1678 if (gt_iir) 1666 1679 gen6_gt_irq_handler(&dev_priv->gt, gt_iir); ··· 1697 1710 u32 hotplug_status = 0; 1698 1711 u32 ier = 0; 1699 1712 1700 - master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1701 - iir = I915_READ(VLV_IIR); 1713 + master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1714 + iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR); 1702 1715 1703 1716 if (master_ctl == 0 && iir == 0) 1704 1717 break; ··· 1718 1731 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1719 1732 * bits this time around. 
1720 1733 */ 1721 - I915_WRITE(GEN8_MASTER_IRQ, 0); 1722 - ier = I915_READ(VLV_IER); 1723 - I915_WRITE(VLV_IER, 0); 1734 + intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0); 1735 + ier = intel_uncore_read(&dev_priv->uncore, VLV_IER); 1736 + intel_uncore_write(&dev_priv->uncore, VLV_IER, 0); 1724 1737 1725 1738 gen8_gt_irq_handler(&dev_priv->gt, master_ctl); 1726 1739 ··· 1741 1754 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1742 1755 */ 1743 1756 if (iir) 1744 - I915_WRITE(VLV_IIR, iir); 1757 + intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir); 1745 1758 1746 - I915_WRITE(VLV_IER, ier); 1747 - I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1759 + intel_uncore_write(&dev_priv->uncore, VLV_IER, ier); 1760 + intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1748 1761 1749 1762 if (hotplug_status) 1750 1763 i9xx_hpd_irq_handler(dev_priv, hotplug_status); ··· 1770 1783 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 1771 1784 * errors. 
1772 1785 */ 1773 - dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1786 + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 1774 1787 if (!hotplug_trigger) { 1775 1788 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 1776 1789 PORTD_HOTPLUG_STATUS_MASK | ··· 1779 1792 dig_hotplug_reg &= ~mask; 1780 1793 } 1781 1794 1782 - I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1795 + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); 1783 1796 if (!hotplug_trigger) 1784 1797 return; 1785 1798 ··· 1824 1837 for_each_pipe(dev_priv, pipe) 1825 1838 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 1826 1839 pipe_name(pipe), 1827 - I915_READ(FDI_RX_IIR(pipe))); 1840 + intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 1828 1841 } 1829 1842 1830 1843 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) ··· 1843 1856 1844 1857 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 1845 1858 { 1846 - u32 err_int = I915_READ(GEN7_ERR_INT); 1859 + u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); 1847 1860 enum pipe pipe; 1848 1861 1849 1862 if (err_int & ERR_INT_POISON) ··· 1861 1874 } 1862 1875 } 1863 1876 1864 - I915_WRITE(GEN7_ERR_INT, err_int); 1877 + intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); 1865 1878 } 1866 1879 1867 1880 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 1868 1881 { 1869 - u32 serr_int = I915_READ(SERR_INT); 1882 + u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); 1870 1883 enum pipe pipe; 1871 1884 1872 1885 if (serr_int & SERR_INT_POISON) ··· 1876 1889 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 1877 1890 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 1878 1891 1879 - I915_WRITE(SERR_INT, serr_int); 1892 + intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); 1880 1893 } 1881 1894 1882 1895 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) ··· 1909 1922 for_each_pipe(dev_priv, pipe) 
1910 1923 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 1911 1924 pipe_name(pipe), 1912 - I915_READ(FDI_RX_IIR(pipe))); 1925 + intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 1913 1926 } 1914 1927 1915 1928 if (pch_iir & SDE_ERROR_CPT) ··· 1925 1938 if (ddi_hotplug_trigger) { 1926 1939 u32 dig_hotplug_reg; 1927 1940 1928 - dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 1929 - I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 1941 + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI); 1942 + intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg); 1930 1943 1931 1944 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1932 1945 ddi_hotplug_trigger, dig_hotplug_reg, ··· 1937 1950 if (tc_hotplug_trigger) { 1938 1951 u32 dig_hotplug_reg; 1939 1952 1940 - dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 1941 - I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 1953 + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC); 1954 + intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg); 1942 1955 1943 1956 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1944 1957 tc_hotplug_trigger, dig_hotplug_reg, ··· 1963 1976 if (hotplug_trigger) { 1964 1977 u32 dig_hotplug_reg; 1965 1978 1966 - dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1967 - I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1979 + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 1980 + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); 1968 1981 1969 1982 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1970 1983 hotplug_trigger, dig_hotplug_reg, ··· 1975 1988 if (hotplug2_trigger) { 1976 1989 u32 dig_hotplug_reg; 1977 1990 1978 - dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 1979 - I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 1991 + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2); 1992 + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 
dig_hotplug_reg); 1980 1993 1981 1994 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 1982 1995 hotplug2_trigger, dig_hotplug_reg, ··· 1996 2009 { 1997 2010 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1998 2011 1999 - dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2000 - I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2012 + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL); 2013 + intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2001 2014 2002 2015 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2003 2016 hotplug_trigger, dig_hotplug_reg, ··· 2038 2051 2039 2052 /* check event from PCH */ 2040 2053 if (de_iir & DE_PCH_EVENT) { 2041 - u32 pch_iir = I915_READ(SDEIIR); 2054 + u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2042 2055 2043 2056 if (HAS_PCH_CPT(dev_priv)) 2044 2057 cpt_irq_handler(dev_priv, pch_iir); ··· 2046 2059 ibx_irq_handler(dev_priv, pch_iir); 2047 2060 2048 2061 /* should clear PCH hotplug event before clear CPU irq */ 2049 - I915_WRITE(SDEIIR, pch_iir); 2062 + intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); 2050 2063 } 2051 2064 2052 2065 if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) ··· 2066 2079 ivb_err_int_handler(dev_priv); 2067 2080 2068 2081 if (de_iir & DE_EDP_PSR_INT_HSW) { 2069 - u32 psr_iir = I915_READ(EDP_PSR_IIR); 2082 + u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR); 2070 2083 2071 2084 intel_psr_irq_handler(dev_priv, psr_iir); 2072 - I915_WRITE(EDP_PSR_IIR, psr_iir); 2085 + intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir); 2073 2086 } 2074 2087 2075 2088 if (de_iir & DE_AUX_CHANNEL_A_IVB) ··· 2085 2098 2086 2099 /* check event from PCH */ 2087 2100 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2088 - u32 pch_iir = I915_READ(SDEIIR); 2101 + u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2089 2102 2090 2103 cpt_irq_handler(dev_priv, pch_iir); 2091 2104 
2092 2105 /* clear PCH hotplug event before clear CPU irq */ 2093 - I915_WRITE(SDEIIR, pch_iir); 2106 + intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); 2094 2107 } 2095 2108 } 2096 2109 ··· 2177 2190 { 2178 2191 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2179 2192 2180 - dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2181 - I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2193 + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 2194 + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); 2182 2195 2183 2196 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2184 2197 hotplug_trigger, dig_hotplug_reg, ··· 2197 2210 if (trigger_tc) { 2198 2211 u32 dig_hotplug_reg; 2199 2212 2200 - dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2201 - I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2213 + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL); 2214 + intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2202 2215 2203 2216 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2204 2217 trigger_tc, dig_hotplug_reg, ··· 2209 2222 if (trigger_tbt) { 2210 2223 u32 dig_hotplug_reg; 2211 2224 2212 - dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2213 - I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2225 + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL); 2226 + intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2214 2227 2215 2228 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2216 2229 trigger_tbt, dig_hotplug_reg, ··· 2287 2300 else 2288 2301 iir_reg = EDP_PSR_IIR; 2289 2302 2290 - psr_iir = I915_READ(iir_reg); 2291 - I915_WRITE(iir_reg, psr_iir); 2303 + psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg); 2304 + intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir); 2292 2305 2293 2306 if (psr_iir) 2294 2307 found = true; ··· 2312 2325 * Incase of dual link, TE comes from DSI_1 2313 2326 
* this is to check if dual link is enabled 2314 2327 */ 2315 - val = I915_READ(TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0)); 2328 + val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0)); 2316 2329 val &= PORT_SYNC_MODE_ENABLE; 2317 2330 2318 2331 /* ··· 2324 2337 dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1; 2325 2338 2326 2339 /* Check if DSI configured in command mode */ 2327 - val = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)); 2340 + val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans)); 2328 2341 val = val & OP_MODE_MASK; 2329 2342 2330 2343 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { ··· 2333 2346 } 2334 2347 2335 2348 /* Get PIPE for handling VBLANK event */ 2336 - val = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); 2349 + val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans)); 2337 2350 switch (val & TRANS_DDI_EDP_INPUT_MASK) { 2338 2351 case TRANS_DDI_EDP_INPUT_A_ON: 2339 2352 pipe = PIPE_A; ··· 2353 2366 2354 2367 /* clear TE in dsi IIR */ 2355 2368 port = (te_trigger & DSI1_TE) ? 
PORT_B : PORT_A; 2356 - tmp = I915_READ(DSI_INTR_IDENT_REG(port)); 2357 - I915_WRITE(DSI_INTR_IDENT_REG(port), tmp); 2369 + tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port)); 2370 + intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp); 2358 2371 } 2359 2372 2360 2373 static irqreturn_t ··· 2365 2378 enum pipe pipe; 2366 2379 2367 2380 if (master_ctl & GEN8_DE_MISC_IRQ) { 2368 - iir = I915_READ(GEN8_DE_MISC_IIR); 2381 + iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR); 2369 2382 if (iir) { 2370 - I915_WRITE(GEN8_DE_MISC_IIR, iir); 2383 + intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir); 2371 2384 ret = IRQ_HANDLED; 2372 2385 gen8_de_misc_irq_handler(dev_priv, iir); 2373 2386 } else { ··· 2377 2390 } 2378 2391 2379 2392 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2380 - iir = I915_READ(GEN11_DE_HPD_IIR); 2393 + iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR); 2381 2394 if (iir) { 2382 - I915_WRITE(GEN11_DE_HPD_IIR, iir); 2395 + intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir); 2383 2396 ret = IRQ_HANDLED; 2384 2397 gen11_hpd_irq_handler(dev_priv, iir); 2385 2398 } else { ··· 2389 2402 } 2390 2403 2391 2404 if (master_ctl & GEN8_DE_PORT_IRQ) { 2392 - iir = I915_READ(GEN8_DE_PORT_IIR); 2405 + iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR); 2393 2406 if (iir) { 2394 2407 bool found = false; 2395 2408 2396 - I915_WRITE(GEN8_DE_PORT_IIR, iir); 2409 + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir); 2397 2410 ret = IRQ_HANDLED; 2398 2411 2399 2412 if (iir & gen8_de_port_aux_mask(dev_priv)) { ··· 2446 2459 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2447 2460 continue; 2448 2461 2449 - iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2462 + iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe)); 2450 2463 if (!iir) { 2451 2464 drm_err(&dev_priv->drm, 2452 2465 "The master control interrupt lied (DE PIPE)!\n"); ··· 2454 2467 } 2455 2468 
2456 2469 ret = IRQ_HANDLED; 2457 - I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2470 + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir); 2458 2471 2459 2472 if (iir & GEN8_PIPE_VBLANK) 2460 2473 intel_handle_vblank(dev_priv, pipe); ··· 2483 2496 * scheme also closed the SDE interrupt handling race we've seen 2484 2497 * on older pch-split platforms. But this needs testing. 2485 2498 */ 2486 - iir = I915_READ(SDEIIR); 2499 + iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 2487 2500 if (iir) { 2488 - I915_WRITE(SDEIIR, iir); 2501 + intel_uncore_write(&dev_priv->uncore, SDEIIR, iir); 2489 2502 ret = IRQ_HANDLED; 2490 2503 2491 2504 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) ··· 2728 2741 * only when vblank interrupts are actually enabled. 2729 2742 */ 2730 2743 if (dev_priv->vblank_enabled++ == 0) 2731 - I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2744 + intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2732 2745 2733 2746 return i8xx_enable_vblank(crtc); 2734 2747 } ··· 2785 2798 else 2786 2799 port = PORT_A; 2787 2800 2788 - tmp = I915_READ(DSI_INTR_MASK_REG(port)); 2801 + tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port)); 2789 2802 if (enable) 2790 2803 tmp &= ~DSI_TE_EVENT; 2791 2804 else 2792 2805 tmp |= DSI_TE_EVENT; 2793 2806 2794 - I915_WRITE(DSI_INTR_MASK_REG(port), tmp); 2807 + intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp); 2795 2808 2796 - tmp = I915_READ(DSI_INTR_IDENT_REG(port)); 2797 - I915_WRITE(DSI_INTR_IDENT_REG(port), tmp); 2809 + tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port)); 2810 + intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp); 2798 2811 2799 2812 return true; 2800 2813 } ··· 2856 2869 i8xx_disable_vblank(crtc); 2857 2870 2858 2871 if (--dev_priv->vblank_enabled == 0) 2859 - I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2872 + 
intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); 2860 2873 } 2861 2874 2862 2875 void i965_disable_vblank(struct drm_crtc *crtc) ··· 2922 2935 GEN3_IRQ_RESET(uncore, SDE); 2923 2936 2924 2937 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 2925 - I915_WRITE(SERR_INT, 0xffffffff); 2938 + intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff); 2926 2939 } 2927 2940 2928 2941 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) ··· 2935 2948 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); 2936 2949 2937 2950 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 2938 - intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2951 + intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); 2939 2952 2940 2953 i9xx_pipestat_irq_reset(dev_priv); 2941 2954 ··· 2998 3011 2999 3012 static void valleyview_irq_reset(struct drm_i915_private *dev_priv) 3000 3013 { 3001 - I915_WRITE(VLV_MASTER_IER, 0); 3002 - POSTING_READ(VLV_MASTER_IER); 3014 + intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); 3015 + intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); 3003 3016 3004 3017 gen5_gt_irq_reset(&dev_priv->gt); 3005 3018 ··· 3152 3165 { 3153 3166 struct intel_uncore *uncore = &dev_priv->uncore; 3154 3167 3155 - I915_WRITE(GEN8_MASTER_IRQ, 0); 3156 - POSTING_READ(GEN8_MASTER_IRQ); 3168 + intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0); 3169 + intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); 3157 3170 3158 3171 gen8_gt_irq_reset(&dev_priv->gt); 3159 3172 ··· 3199 3212 * duration to 2ms (which is the minimum in the Display Port spec). 3200 3213 * The pulse duration bits are reserved on LPT+. 
3201 3214 */ 3202 - hotplug = I915_READ(PCH_PORT_HOTPLUG); 3215 + hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 3203 3216 hotplug &= ~(PORTA_HOTPLUG_ENABLE | 3204 3217 PORTB_HOTPLUG_ENABLE | 3205 3218 PORTC_HOTPLUG_ENABLE | ··· 3208 3221 PORTC_PULSE_DURATION_MASK | 3209 3222 PORTD_PULSE_DURATION_MASK); 3210 3223 hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables); 3211 - I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3224 + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); 3212 3225 } 3213 3226 3214 3227 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) ··· 3257 3270 { 3258 3271 u32 hotplug; 3259 3272 3260 - hotplug = I915_READ(SHOTPLUG_CTL_DDI); 3273 + hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI); 3261 3274 hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) | 3262 3275 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) | 3263 3276 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) | 3264 3277 SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D)); 3265 3278 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables); 3266 - I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 3279 + intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug); 3267 3280 } 3268 3281 3269 3282 static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) 3270 3283 { 3271 3284 u32 hotplug; 3272 3285 3273 - hotplug = I915_READ(SHOTPLUG_CTL_TC); 3286 + hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC); 3274 3287 hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) | 3275 3288 ICP_TC_HPD_ENABLE(HPD_PORT_TC2) | 3276 3289 ICP_TC_HPD_ENABLE(HPD_PORT_TC3) | ··· 3278 3291 ICP_TC_HPD_ENABLE(HPD_PORT_TC5) | 3279 3292 ICP_TC_HPD_ENABLE(HPD_PORT_TC6)); 3280 3293 hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables); 3281 - I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 3294 + intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug); 3282 3295 } 3283 3296 3284 3297 static void icp_hpd_irq_setup(struct drm_i915_private 
*dev_priv) ··· 3289 3302 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3290 3303 3291 3304 if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) 3292 - I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3305 + intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3293 3306 3294 3307 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3295 3308 ··· 3317 3330 { 3318 3331 u32 val; 3319 3332 3320 - val = I915_READ(SOUTH_CHICKEN1); 3333 + val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1); 3321 3334 val |= (INVERT_DDIA_HPD | 3322 3335 INVERT_DDIB_HPD | 3323 3336 INVERT_DDIC_HPD | 3324 3337 INVERT_DDID_HPD); 3325 - I915_WRITE(SOUTH_CHICKEN1, val); 3338 + intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val); 3326 3339 3327 3340 icp_hpd_irq_setup(dev_priv); 3328 3341 } ··· 3331 3344 { 3332 3345 u32 hotplug; 3333 3346 3334 - hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3347 + hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL); 3335 3348 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 3336 3349 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 3337 3350 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | ··· 3339 3352 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 3340 3353 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); 3341 3354 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); 3342 - I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3355 + intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug); 3343 3356 } 3344 3357 3345 3358 static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3346 3359 { 3347 3360 u32 hotplug; 3348 3361 3349 - hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3362 + hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL); 3350 3363 hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | 3351 3364 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | 3352 3365 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | ··· 3354 3367 
GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | 3355 3368 GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); 3356 3369 hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); 3357 - I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3370 + intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug); 3358 3371 } 3359 3372 3360 3373 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) ··· 3365 3378 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); 3366 3379 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); 3367 3380 3368 - val = I915_READ(GEN11_DE_HPD_IMR); 3381 + val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); 3369 3382 val &= ~hotplug_irqs; 3370 3383 val |= ~enabled_irqs & hotplug_irqs; 3371 - I915_WRITE(GEN11_DE_HPD_IMR, val); 3372 - POSTING_READ(GEN11_DE_HPD_IMR); 3384 + intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val); 3385 + intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); 3373 3386 3374 3387 gen11_tc_hpd_detection_setup(dev_priv); 3375 3388 gen11_tbt_hpd_detection_setup(dev_priv); ··· 3412 3425 3413 3426 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3414 3427 if (HAS_PCH_CNP(dev_priv)) { 3415 - val = I915_READ(SOUTH_CHICKEN1); 3428 + val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1); 3416 3429 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3417 3430 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3418 - I915_WRITE(SOUTH_CHICKEN1, val); 3431 + intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val); 3419 3432 } 3420 3433 3421 3434 /* Enable digital hotplug on the PCH */ 3422 - hotplug = I915_READ(PCH_PORT_HOTPLUG); 3435 + hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 3423 3436 hotplug &= ~(PORTA_HOTPLUG_ENABLE | 3424 3437 PORTB_HOTPLUG_ENABLE | 3425 3438 PORTC_HOTPLUG_ENABLE | 3426 3439 PORTD_HOTPLUG_ENABLE); 3427 3440 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables); 3428 - I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3441 + 
intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); 3429 3442 3430 - hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3443 + hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2); 3431 3444 hotplug &= ~PORTE_HOTPLUG_ENABLE; 3432 3445 hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables); 3433 - I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3446 + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug); 3434 3447 } 3435 3448 3436 3449 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) ··· 3438 3451 u32 hotplug_irqs, enabled_irqs; 3439 3452 3440 3453 if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) 3441 - I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3454 + intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); 3442 3455 3443 3456 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); 3444 3457 hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); ··· 3469 3482 * duration to 2ms (which is the minimum in the Display Port spec) 3470 3483 * The pulse duration bits are reserved on HSW+. 
3471 3484 */ 3472 - hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3485 + hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL); 3473 3486 hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE | 3474 3487 DIGITAL_PORTA_PULSE_DURATION_MASK); 3475 3488 hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables); 3476 - I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3489 + intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3477 3490 } 3478 3491 3479 3492 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) ··· 3523 3536 { 3524 3537 u32 hotplug; 3525 3538 3526 - hotplug = I915_READ(PCH_PORT_HOTPLUG); 3539 + hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); 3527 3540 hotplug &= ~(PORTA_HOTPLUG_ENABLE | 3528 3541 PORTB_HOTPLUG_ENABLE | 3529 3542 PORTC_HOTPLUG_ENABLE | ··· 3531 3544 BXT_DDIB_HPD_INVERT | 3532 3545 BXT_DDIC_HPD_INVERT); 3533 3546 hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables); 3534 - I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3547 + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); 3535 3548 } 3536 3549 3537 3550 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) ··· 3651 3664 vlv_display_irq_postinstall(dev_priv); 3652 3665 spin_unlock_irq(&dev_priv->irq_lock); 3653 3666 3654 - I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3655 - POSTING_READ(VLV_MASTER_IER); 3667 + intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3668 + intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); 3656 3669 } 3657 3670 3658 3671 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) ··· 3765 3778 3766 3779 GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 3767 3780 3768 - I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 3781 + intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 3769 3782 3770 3783 if 
(HAS_MASTER_UNIT_IRQ(dev_priv)) { 3771 3784 dg1_master_intr_enable(uncore->regs); 3772 - POSTING_READ(DG1_MSTR_UNIT_INTR); 3785 + intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR); 3773 3786 } else { 3774 3787 gen11_master_intr_enable(uncore->regs); 3775 - POSTING_READ(GEN11_GFX_MSTR_IRQ); 3788 + intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ); 3776 3789 } 3777 3790 } 3778 3791 ··· 3785 3798 vlv_display_irq_postinstall(dev_priv); 3786 3799 spin_unlock_irq(&dev_priv->irq_lock); 3787 3800 3788 - I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3789 - POSTING_READ(GEN8_MASTER_IRQ); 3801 + intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3802 + intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); 3790 3803 } 3791 3804 3792 3805 static void i8xx_irq_reset(struct drm_i915_private *dev_priv) ··· 3876 3889 { 3877 3890 u32 emr; 3878 3891 3879 - *eir = I915_READ(EIR); 3892 + *eir = intel_uncore_read(&dev_priv->uncore, EIR); 3880 3893 3881 - I915_WRITE(EIR, *eir); 3894 + intel_uncore_write(&dev_priv->uncore, EIR, *eir); 3882 3895 3883 - *eir_stuck = I915_READ(EIR); 3896 + *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR); 3884 3897 if (*eir_stuck == 0) 3885 3898 return; 3886 3899 ··· 3894 3907 * (or by a GPU reset) so we mask any bit that 3895 3908 * remains set. 
3896 3909 */ 3897 - emr = I915_READ(EMR); 3898 - I915_WRITE(EMR, 0xffffffff); 3899 - I915_WRITE(EMR, emr | *eir_stuck); 3910 + emr = intel_uncore_read(&dev_priv->uncore, EMR); 3911 + intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff); 3912 + intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck); 3900 3913 } 3901 3914 3902 3915 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, ··· 3962 3975 3963 3976 if (I915_HAS_HOTPLUG(dev_priv)) { 3964 3977 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3965 - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3978 + intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); 3966 3979 } 3967 3980 3968 3981 i9xx_pipestat_irq_reset(dev_priv); ··· 3976 3989 struct intel_uncore *uncore = &dev_priv->uncore; 3977 3990 u32 enable_mask; 3978 3991 3979 - I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 3992 + intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE | 3980 3993 I915_ERROR_MEMORY_REFRESH)); 3981 3994 3982 3995 /* Unmask the interrupts that we always want on. 
*/ ··· 4029 4042 u32 hotplug_status = 0; 4030 4043 u32 iir; 4031 4044 4032 - iir = I915_READ(GEN2_IIR); 4045 + iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR); 4033 4046 if (iir == 0) 4034 4047 break; 4035 4048 ··· 4046 4059 if (iir & I915_MASTER_ERROR_INTERRUPT) 4047 4060 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4048 4061 4049 - I915_WRITE(GEN2_IIR, iir); 4062 + intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir); 4050 4063 4051 4064 if (iir & I915_USER_INTERRUPT) 4052 4065 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); ··· 4072 4085 struct intel_uncore *uncore = &dev_priv->uncore; 4073 4086 4074 4087 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4075 - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4088 + intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); 4076 4089 4077 4090 i9xx_pipestat_irq_reset(dev_priv); 4078 4091 ··· 4099 4112 error_mask = ~(I915_ERROR_PAGE_TABLE | 4100 4113 I915_ERROR_MEMORY_REFRESH); 4101 4114 } 4102 - I915_WRITE(EMR, error_mask); 4115 + intel_uncore_write(&dev_priv->uncore, EMR, error_mask); 4103 4116 4104 4117 /* Unmask the interrupts that we always want on. */ 4105 4118 dev_priv->irq_mask = ··· 4175 4188 u32 hotplug_status = 0; 4176 4189 u32 iir; 4177 4190 4178 - iir = I915_READ(GEN2_IIR); 4191 + iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR); 4179 4192 if (iir == 0) 4180 4193 break; 4181 4194 ··· 4191 4204 if (iir & I915_MASTER_ERROR_INTERRUPT) 4192 4205 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4193 4206 4194 - I915_WRITE(GEN2_IIR, iir); 4207 + intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir); 4195 4208 4196 4209 if (iir & I915_USER_INTERRUPT) 4197 4210 intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]);
+5 -2
drivers/gpu/drm/i915/i915_reg.h
··· 4352 4352 #define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30) 4353 4353 #define VRR_CTL_FLIP_LINE_EN REG_BIT(29) 4354 4354 #define VRR_CTL_LINE_COUNT_MASK REG_GENMASK(10, 3) 4355 + #define VRR_CTL_LINE_COUNT(x) REG_FIELD_PREP(VRR_CTL_LINE_COUNT_MASK, (x)) 4355 4356 #define VRR_CTL_SW_FULLLINE_COUNT REG_BIT(0) 4356 4357 4357 4358 #define _TRANS_VRR_VMAX_A 0x60424 ··· 10852 10851 #define CNL_DRAM_RANK_3 (0x2 << 9) 10853 10852 #define CNL_DRAM_RANK_4 (0x3 << 9) 10854 10853 10855 - /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, 10856 - * since on HSW we can't write to it using I915_WRITE. */ 10854 + /* 10855 + * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, 10856 + * since on HSW we can't write to it using intel_uncore_write. 10857 + */ 10857 10858 #define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C) 10858 10859 #define D_COMP_BDW _MMIO(0x138144) 10859 10860 #define D_COMP_RCOMP_IN_PROGRESS (1 << 9)
+17 -16
drivers/gpu/drm/i915/i915_suspend.c
··· 24 24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 25 */ 26 26 27 + #include "display/intel_de.h" 27 28 #include "display/intel_fbc.h" 28 29 #include "display/intel_gmbus.h" 29 30 #include "display/intel_vga.h" ··· 40 39 /* Scratch space */ 41 40 if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) { 42 41 for (i = 0; i < 7; i++) { 43 - dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i)); 44 - dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); 42 + dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i)); 43 + dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i)); 45 44 } 46 45 for (i = 0; i < 3; i++) 47 - dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); 46 + dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i)); 48 47 } else if (IS_GEN(dev_priv, 2)) { 49 48 for (i = 0; i < 7; i++) 50 - dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); 49 + dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i)); 51 50 } else if (HAS_GMCH(dev_priv)) { 52 51 for (i = 0; i < 16; i++) { 53 - dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i)); 54 - dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); 52 + dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i)); 53 + dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i)); 55 54 } 56 55 for (i = 0; i < 3; i++) 57 - dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); 56 + dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i)); 58 57 } 59 58 } 60 59 ··· 65 64 /* Scratch space */ 66 65 if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) { 67 66 for (i = 0; i < 7; i++) { 68 - I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]); 69 - I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); 67 + intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]); 68 + intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]); 70 69 } 71 70 for (i = 0; i < 3; i++) 72 - I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); 71 + intel_de_write(dev_priv, SWF3(i), 
dev_priv->regfile.saveSWF3[i]); 73 72 } else if (IS_GEN(dev_priv, 2)) { 74 73 for (i = 0; i < 7; i++) 75 - I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); 74 + intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]); 76 75 } else if (HAS_GMCH(dev_priv)) { 77 76 for (i = 0; i < 16; i++) { 78 - I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]); 79 - I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); 77 + intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]); 78 + intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]); 80 79 } 81 80 for (i = 0; i < 3; i++) 82 - I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); 81 + intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]); 83 82 } 84 83 } 85 84 ··· 89 88 90 89 /* Display arbitration control */ 91 90 if (INTEL_GEN(dev_priv) <= 4) 92 - dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); 91 + dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv, DSPARB); 93 92 94 93 if (IS_GEN(dev_priv, 4)) 95 94 pci_read_config_word(pdev, GCDGMBUS, ··· 110 109 111 110 /* Display arbitration */ 112 111 if (INTEL_GEN(dev_priv) <= 4) 113 - I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); 112 + intel_de_write(dev_priv, DSPARB, dev_priv->regfile.saveDSPARB); 114 113 115 114 /* only restore FBC info on the platform that supports FBC*/ 116 115 intel_fbc_global_disable(dev_priv);
+1 -1
drivers/gpu/drm/i915/intel_device_info.c
··· 104 104 drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type); 105 105 drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size); 106 106 107 - #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name)); 107 + #define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name)) 108 108 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); 109 109 #undef PRINT_FLAG 110 110
+276 -276
drivers/gpu/drm/i915/intel_pm.c
··· 82 82 * Must match Sampler, Pixel Back End, and Media. See 83 83 * WaCompressedResourceSamplerPbeMediaNewHashMode. 84 84 */ 85 - I915_WRITE(CHICKEN_PAR1_1, 86 - I915_READ(CHICKEN_PAR1_1) | 85 + intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, 86 + intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | 87 87 SKL_DE_COMPRESSED_HASH_MODE); 88 88 } 89 89 90 90 /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ 91 - I915_WRITE(CHICKEN_PAR1_1, 92 - I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); 91 + intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, 92 + intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); 93 93 94 94 /* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */ 95 - I915_WRITE(GEN8_CHICKEN_DCPR_1, 96 - I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); 95 + intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, 96 + intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); 97 97 98 98 /* 99 99 * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl 100 100 * Display WA #0859: skl,bxt,kbl,glk,cfl 101 101 */ 102 - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 102 + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | 103 103 DISP_FBC_MEMORY_WAKE); 104 104 } 105 105 ··· 108 108 gen9_init_clock_gating(dev_priv); 109 109 110 110 /* WaDisableSDEUnitClockGating:bxt */ 111 - I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 111 + intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | 112 112 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 113 113 114 114 /* 115 115 * FIXME: 116 116 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only. 
117 117 */ 118 - I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 118 + intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | 119 119 GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); 120 120 121 121 /* 122 122 * Wa: Backlight PWM may stop in the asserted state, causing backlight 123 123 * to stay fully on. 124 124 */ 125 - I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | 125 + intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) | 126 126 PWM1_GATING_DIS | PWM2_GATING_DIS); 127 127 128 128 /* ··· 131 131 * is off and a MMIO access is attempted by any privilege 132 132 * application, using batch buffers or any other means. 133 133 */ 134 - I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950)); 134 + intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950)); 135 135 136 136 /* 137 137 * WaFbcTurnOffFbcWatermark:bxt 138 138 * Display WA #0562: bxt 139 139 */ 140 - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 140 + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | 141 141 DISP_FBC_WM_DIS); 142 142 143 143 /* 144 144 * WaFbcHighMemBwCorruptionAvoidance:bxt 145 145 * Display WA #0883: bxt 146 146 */ 147 - I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 147 + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | 148 148 ILK_DPFC_DISABLE_DUMMY0); 149 149 } 150 150 ··· 157 157 * Backlight PWM may stop in the asserted state, causing backlight 158 158 * to stay fully on. 
159 159 */ 160 - I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | 160 + intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) | 161 161 PWM1_GATING_DIS | PWM2_GATING_DIS); 162 162 } 163 163 ··· 165 165 { 166 166 u32 tmp; 167 167 168 - tmp = I915_READ(CLKCFG); 168 + tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG); 169 169 170 170 switch (tmp & CLKCFG_FSB_MASK) { 171 171 case CLKCFG_FSB_533: ··· 195 195 } 196 196 197 197 /* detect pineview DDR3 setting */ 198 - tmp = I915_READ(CSHRDDR3CTL); 198 + tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL); 199 199 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; 200 200 } 201 201 ··· 366 366 u32 val; 367 367 368 368 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 369 - was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 370 - I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); 371 - POSTING_READ(FW_BLC_SELF_VLV); 369 + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 370 + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); 371 + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV); 372 372 } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { 373 - was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 374 - I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); 375 - POSTING_READ(FW_BLC_SELF); 373 + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; 374 + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? 
FW_BLC_SELF_EN : 0); 375 + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); 376 376 } else if (IS_PINEVIEW(dev_priv)) { 377 - val = I915_READ(DSPFW3); 377 + val = intel_uncore_read(&dev_priv->uncore, DSPFW3); 378 378 was_enabled = val & PINEVIEW_SELF_REFRESH_EN; 379 379 if (enable) 380 380 val |= PINEVIEW_SELF_REFRESH_EN; 381 381 else 382 382 val &= ~PINEVIEW_SELF_REFRESH_EN; 383 - I915_WRITE(DSPFW3, val); 384 - POSTING_READ(DSPFW3); 383 + intel_uncore_write(&dev_priv->uncore, DSPFW3, val); 384 + intel_uncore_posting_read(&dev_priv->uncore, DSPFW3); 385 385 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { 386 - was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 386 + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; 387 387 val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : 388 388 _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); 389 - I915_WRITE(FW_BLC_SELF, val); 390 - POSTING_READ(FW_BLC_SELF); 389 + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val); 390 + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); 391 391 } else if (IS_I915GM(dev_priv)) { 392 392 /* 393 393 * FIXME can't find a bit like this for 915G, and 394 394 * and yet it does have the related watermark in 395 395 * FW_BLC_SELF. What's going on? 396 396 */ 397 - was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 397 + was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN; 398 398 val = enable ? 
_MASKED_BIT_ENABLE(INSTPM_SELF_EN) : 399 399 _MASKED_BIT_DISABLE(INSTPM_SELF_EN); 400 - I915_WRITE(INSTPM, val); 401 - POSTING_READ(INSTPM); 400 + intel_uncore_write(&dev_priv->uncore, INSTPM, val); 401 + intel_uncore_posting_read(&dev_priv->uncore, INSTPM); 402 402 } else { 403 403 return false; 404 404 } ··· 494 494 495 495 switch (pipe) { 496 496 case PIPE_A: 497 - dsparb = I915_READ(DSPARB); 498 - dsparb2 = I915_READ(DSPARB2); 497 + dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 498 + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); 499 499 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); 500 500 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); 501 501 break; 502 502 case PIPE_B: 503 - dsparb = I915_READ(DSPARB); 504 - dsparb2 = I915_READ(DSPARB2); 503 + dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 504 + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); 505 505 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); 506 506 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); 507 507 break; 508 508 case PIPE_C: 509 - dsparb2 = I915_READ(DSPARB2); 510 - dsparb3 = I915_READ(DSPARB3); 509 + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); 510 + dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3); 511 511 sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); 512 512 sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); 513 513 break; ··· 525 525 static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, 526 526 enum i9xx_plane_id i9xx_plane) 527 527 { 528 - u32 dsparb = I915_READ(DSPARB); 528 + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 529 529 int size; 530 530 531 531 size = dsparb & 0x7f; ··· 541 541 static int i830_get_fifo_size(struct drm_i915_private *dev_priv, 542 542 enum i9xx_plane_id i9xx_plane) 543 543 { 544 - u32 dsparb = I915_READ(DSPARB); 544 + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 545 545 int size; 546 546 547 547 size = dsparb & 0x1ff; ··· 
558 558 static int i845_get_fifo_size(struct drm_i915_private *dev_priv, 559 559 enum i9xx_plane_id i9xx_plane) 560 560 { 561 - u32 dsparb = I915_READ(DSPARB); 561 + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); 562 562 int size; 563 563 564 564 size = dsparb & 0x7f; ··· 911 911 wm = intel_calculate_wm(clock, &pnv_display_wm, 912 912 pnv_display_wm.fifo_size, 913 913 cpp, latency->display_sr); 914 - reg = I915_READ(DSPFW1); 914 + reg = intel_uncore_read(&dev_priv->uncore, DSPFW1); 915 915 reg &= ~DSPFW_SR_MASK; 916 916 reg |= FW_WM(wm, SR); 917 - I915_WRITE(DSPFW1, reg); 917 + intel_uncore_write(&dev_priv->uncore, DSPFW1, reg); 918 918 drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); 919 919 920 920 /* cursor SR */ 921 921 wm = intel_calculate_wm(clock, &pnv_cursor_wm, 922 922 pnv_display_wm.fifo_size, 923 923 4, latency->cursor_sr); 924 - reg = I915_READ(DSPFW3); 924 + reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); 925 925 reg &= ~DSPFW_CURSOR_SR_MASK; 926 926 reg |= FW_WM(wm, CURSOR_SR); 927 - I915_WRITE(DSPFW3, reg); 927 + intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); 928 928 929 929 /* Display HPLL off SR */ 930 930 wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm, 931 931 pnv_display_hplloff_wm.fifo_size, 932 932 cpp, latency->display_hpll_disable); 933 - reg = I915_READ(DSPFW3); 933 + reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); 934 934 reg &= ~DSPFW_HPLL_SR_MASK; 935 935 reg |= FW_WM(wm, HPLL_SR); 936 - I915_WRITE(DSPFW3, reg); 936 + intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); 937 937 938 938 /* cursor HPLL off SR */ 939 939 wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm, 940 940 pnv_display_hplloff_wm.fifo_size, 941 941 4, latency->cursor_hpll_disable); 942 - reg = I915_READ(DSPFW3); 942 + reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); 943 943 reg &= ~DSPFW_HPLL_CURSOR_MASK; 944 944 reg |= FW_WM(wm, HPLL_CURSOR); 945 - I915_WRITE(DSPFW3, reg); 945 + intel_uncore_write(&dev_priv->uncore, 
DSPFW3, reg); 946 946 drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg); 947 947 948 948 intel_set_memory_cxsr(dev_priv, true); ··· 976 976 for_each_pipe(dev_priv, pipe) 977 977 trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); 978 978 979 - I915_WRITE(DSPFW1, 979 + intel_uncore_write(&dev_priv->uncore, DSPFW1, 980 980 FW_WM(wm->sr.plane, SR) | 981 981 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | 982 982 FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | 983 983 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); 984 - I915_WRITE(DSPFW2, 984 + intel_uncore_write(&dev_priv->uncore, DSPFW2, 985 985 (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) | 986 986 FW_WM(wm->sr.fbc, FBC_SR) | 987 987 FW_WM(wm->hpll.fbc, FBC_HPLL_SR) | 988 988 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) | 989 989 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | 990 990 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); 991 - I915_WRITE(DSPFW3, 991 + intel_uncore_write(&dev_priv->uncore, DSPFW3, 992 992 (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) | 993 993 FW_WM(wm->sr.cursor, CURSOR_SR) | 994 994 FW_WM(wm->hpll.cursor, HPLL_CURSOR) | 995 995 FW_WM(wm->hpll.plane, HPLL_SR)); 996 996 997 - POSTING_READ(DSPFW1); 997 + intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); 998 998 } 999 999 1000 1000 #define FW_WM_VLV(value, plane) \ ··· 1008 1008 for_each_pipe(dev_priv, pipe) { 1009 1009 trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); 1010 1010 1011 - I915_WRITE(VLV_DDL(pipe), 1011 + intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), 1012 1012 (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | 1013 1013 (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | 1014 1014 (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | ··· 1020 1020 * high order bits so that there are no out of bounds values 1021 1021 * present in the registers during the reprogramming. 
1022 1022 */ 1023 - I915_WRITE(DSPHOWM, 0); 1024 - I915_WRITE(DSPHOWM1, 0); 1025 - I915_WRITE(DSPFW4, 0); 1026 - I915_WRITE(DSPFW5, 0); 1027 - I915_WRITE(DSPFW6, 0); 1023 + intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0); 1024 + intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0); 1025 + intel_uncore_write(&dev_priv->uncore, DSPFW4, 0); 1026 + intel_uncore_write(&dev_priv->uncore, DSPFW5, 0); 1027 + intel_uncore_write(&dev_priv->uncore, DSPFW6, 0); 1028 1028 1029 - I915_WRITE(DSPFW1, 1029 + intel_uncore_write(&dev_priv->uncore, DSPFW1, 1030 1030 FW_WM(wm->sr.plane, SR) | 1031 1031 FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | 1032 1032 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | 1033 1033 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); 1034 - I915_WRITE(DSPFW2, 1034 + intel_uncore_write(&dev_priv->uncore, DSPFW2, 1035 1035 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | 1036 1036 FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | 1037 1037 FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); 1038 - I915_WRITE(DSPFW3, 1038 + intel_uncore_write(&dev_priv->uncore, DSPFW3, 1039 1039 FW_WM(wm->sr.cursor, CURSOR_SR)); 1040 1040 1041 1041 if (IS_CHERRYVIEW(dev_priv)) { 1042 - I915_WRITE(DSPFW7_CHV, 1042 + intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV, 1043 1043 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | 1044 1044 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); 1045 - I915_WRITE(DSPFW8_CHV, 1045 + intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV, 1046 1046 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | 1047 1047 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); 1048 - I915_WRITE(DSPFW9_CHV, 1048 + intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV, 1049 1049 FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | 1050 1050 FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); 1051 - I915_WRITE(DSPHOWM, 1051 + intel_uncore_write(&dev_priv->uncore, DSPHOWM, 
1052 1052 FW_WM(wm->sr.plane >> 9, SR_HI) | 1053 1053 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | 1054 1054 FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | ··· 1060 1060 FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | 1061 1061 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); 1062 1062 } else { 1063 - I915_WRITE(DSPFW7, 1063 + intel_uncore_write(&dev_priv->uncore, DSPFW7, 1064 1064 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | 1065 1065 FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); 1066 - I915_WRITE(DSPHOWM, 1066 + intel_uncore_write(&dev_priv->uncore, DSPHOWM, 1067 1067 FW_WM(wm->sr.plane >> 9, SR_HI) | 1068 1068 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | 1069 1069 FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | ··· 1073 1073 FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); 1074 1074 } 1075 1075 1076 - POSTING_READ(DSPFW1); 1076 + intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); 1077 1077 } 1078 1078 1079 1079 #undef FW_WM_VLV ··· 2310 2310 srwm); 2311 2311 2312 2312 /* 965 has limitations... 
*/ 2313 - I915_WRITE(DSPFW1, FW_WM(srwm, SR) | 2313 + intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) | 2314 2314 FW_WM(8, CURSORB) | 2315 2315 FW_WM(8, PLANEB) | 2316 2316 FW_WM(8, PLANEA)); 2317 - I915_WRITE(DSPFW2, FW_WM(8, CURSORA) | 2317 + intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) | 2318 2318 FW_WM(8, PLANEC_OLD)); 2319 2319 /* update cursor SR watermark */ 2320 - I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); 2320 + intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); 2321 2321 2322 2322 if (cxsr_enabled) 2323 2323 intel_set_memory_cxsr(dev_priv, true); ··· 2447 2447 srwm = 1; 2448 2448 2449 2449 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) 2450 - I915_WRITE(FW_BLC_SELF, 2450 + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, 2451 2451 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); 2452 2452 else 2453 - I915_WRITE(FW_BLC_SELF, srwm & 0x3f); 2453 + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f); 2454 2454 } 2455 2455 2456 2456 drm_dbg_kms(&dev_priv->drm, ··· 2464 2464 fwater_lo = fwater_lo | (1 << 24) | (1 << 8); 2465 2465 fwater_hi = fwater_hi | (1 << 8); 2466 2466 2467 - I915_WRITE(FW_BLC, fwater_lo); 2468 - I915_WRITE(FW_BLC2, fwater_hi); 2467 + intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); 2468 + intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi); 2469 2469 2470 2470 if (enabled) 2471 2471 intel_set_memory_cxsr(dev_priv, true); ··· 2488 2488 &i845_wm_info, 2489 2489 dev_priv->display.get_fifo_size(dev_priv, PLANE_A), 2490 2490 4, pessimal_latency_ns); 2491 - fwater_lo = I915_READ(FW_BLC) & ~0xfff; 2491 + fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff; 2492 2492 fwater_lo |= (3<<8) | planea_wm; 2493 2493 2494 2494 drm_dbg_kms(&dev_priv->drm, 2495 2495 "Setting FIFO watermarks - A: %d\n", planea_wm); 2496 2496 2497 - I915_WRITE(FW_BLC, fwater_lo); 2497 + intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); 2498 2498 } 2499 2499 2500 2500 /* 
latency must be in 0.1us units. */ ··· 3534 3534 3535 3535 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) { 3536 3536 previous->wm_lp[2] &= ~WM1_LP_SR_EN; 3537 - I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]); 3537 + intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]); 3538 3538 changed = true; 3539 3539 } 3540 3540 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) { 3541 3541 previous->wm_lp[1] &= ~WM1_LP_SR_EN; 3542 - I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]); 3542 + intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]); 3543 3543 changed = true; 3544 3544 } 3545 3545 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) { 3546 3546 previous->wm_lp[0] &= ~WM1_LP_SR_EN; 3547 - I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]); 3547 + intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]); 3548 3548 changed = true; 3549 3549 } 3550 3550 ··· 3574 3574 _ilk_disable_lp_wm(dev_priv, dirty); 3575 3575 3576 3576 if (dirty & WM_DIRTY_PIPE(PIPE_A)) 3577 - I915_WRITE(WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]); 3577 + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]); 3578 3578 if (dirty & WM_DIRTY_PIPE(PIPE_B)) 3579 - I915_WRITE(WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]); 3579 + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]); 3580 3580 if (dirty & WM_DIRTY_PIPE(PIPE_C)) 3581 - I915_WRITE(WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]); 3581 + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]); 3582 3582 3583 3583 if (dirty & WM_DIRTY_DDB) { 3584 3584 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3585 - val = I915_READ(WM_MISC); 3585 + val = intel_uncore_read(&dev_priv->uncore, WM_MISC); 3586 3586 if (results->partitioning == INTEL_DDB_PART_1_2) 3587 3587 val &= ~WM_MISC_DATA_PARTITION_5_6; 3588 3588 else 3589 3589 val |= WM_MISC_DATA_PARTITION_5_6; 3590 - I915_WRITE(WM_MISC, val); 3590 + 
intel_uncore_write(&dev_priv->uncore, WM_MISC, val); 3591 3591 } else { 3592 - val = I915_READ(DISP_ARB_CTL2); 3592 + val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2); 3593 3593 if (results->partitioning == INTEL_DDB_PART_1_2) 3594 3594 val &= ~DISP_DATA_PARTITION_5_6; 3595 3595 else 3596 3596 val |= DISP_DATA_PARTITION_5_6; 3597 - I915_WRITE(DISP_ARB_CTL2, val); 3597 + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val); 3598 3598 } 3599 3599 } 3600 3600 3601 3601 if (dirty & WM_DIRTY_FBC) { 3602 - val = I915_READ(DISP_ARB_CTL); 3602 + val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL); 3603 3603 if (results->enable_fbc_wm) 3604 3604 val &= ~DISP_FBC_WM_DIS; 3605 3605 else 3606 3606 val |= DISP_FBC_WM_DIS; 3607 - I915_WRITE(DISP_ARB_CTL, val); 3607 + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val); 3608 3608 } 3609 3609 3610 3610 if (dirty & WM_DIRTY_LP(1) && 3611 3611 previous->wm_lp_spr[0] != results->wm_lp_spr[0]) 3612 - I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); 3612 + intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]); 3613 3613 3614 3614 if (INTEL_GEN(dev_priv) >= 7) { 3615 3615 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) 3616 - I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); 3616 + intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]); 3617 3617 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) 3618 - I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); 3618 + intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]); 3619 3619 } 3620 3620 3621 3621 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) 3622 - I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); 3622 + intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]); 3623 3623 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) 3624 - I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); 3624 + 
intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]); 3625 3625 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) 3626 - I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); 3626 + intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]); 3627 3627 3628 3628 dev_priv->wm.hw = *results; 3629 3629 } ··· 3640 3640 u8 enabled_slices_mask = 0; 3641 3641 3642 3642 for (i = 0; i < max_slices; i++) { 3643 - if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE) 3643 + if (intel_uncore_read(&dev_priv->uncore, DBUF_CTL_S(i)) & DBUF_POWER_STATE) 3644 3644 enabled_slices_mask |= BIT(i); 3645 3645 } 3646 3646 ··· 4300 4300 4301 4301 /* Cursor doesn't support NV12/planar, so no extra calculation needed */ 4302 4302 if (plane_id == PLANE_CURSOR) { 4303 - val = I915_READ(CUR_BUF_CFG(pipe)); 4303 + val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe)); 4304 4304 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); 4305 4305 return; 4306 4306 } 4307 4307 4308 - val = I915_READ(PLANE_CTL(pipe, plane_id)); 4308 + val = intel_uncore_read(&dev_priv->uncore, PLANE_CTL(pipe, plane_id)); 4309 4309 4310 4310 /* No DDB allocated for disabled planes */ 4311 4311 if (val & PLANE_CTL_ENABLE) ··· 4314 4314 val & PLANE_CTL_ALPHA_MASK); 4315 4315 4316 4316 if (INTEL_GEN(dev_priv) >= 11) { 4317 - val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); 4317 + val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id)); 4318 4318 skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); 4319 4319 } else { 4320 - val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); 4321 - val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); 4320 + val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id)); 4321 + val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id)); 4322 4322 4323 4323 if (fourcc && 4324 4324 drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc))) ··· 6231 6231 6232 6232 for (level = 0; level <= max_level; level++) { 6233 
6233 if (plane_id != PLANE_CURSOR) 6234 - val = I915_READ(PLANE_WM(pipe, plane_id, level)); 6234 + val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level)); 6235 6235 else 6236 - val = I915_READ(CUR_WM(pipe, level)); 6236 + val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level)); 6237 6237 6238 6238 skl_wm_level_from_reg_val(val, &wm->wm[level]); 6239 6239 } ··· 6242 6242 wm->sagv_wm0 = wm->wm[0]; 6243 6243 6244 6244 if (plane_id != PLANE_CURSOR) 6245 - val = I915_READ(PLANE_WM_TRANS(pipe, plane_id)); 6245 + val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id)); 6246 6246 else 6247 - val = I915_READ(CUR_WM_TRANS(pipe)); 6247 + val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe)); 6248 6248 6249 6249 skl_wm_level_from_reg_val(val, &wm->trans_wm); 6250 6250 } ··· 6280 6280 struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; 6281 6281 enum pipe pipe = crtc->pipe; 6282 6282 6283 - hw->wm_pipe[pipe] = I915_READ(WM0_PIPE_ILK(pipe)); 6283 + hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe)); 6284 6284 6285 6285 memset(active, 0, sizeof(*active)); 6286 6286 ··· 6324 6324 { 6325 6325 u32 tmp; 6326 6326 6327 - tmp = I915_READ(DSPFW1); 6327 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); 6328 6328 wm->sr.plane = _FW_WM(tmp, SR); 6329 6329 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); 6330 6330 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB); 6331 6331 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA); 6332 6332 6333 - tmp = I915_READ(DSPFW2); 6333 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); 6334 6334 wm->fbc_en = tmp & DSPFW_FBC_SR_EN; 6335 6335 wm->sr.fbc = _FW_WM(tmp, FBC_SR); 6336 6336 wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR); ··· 6338 6338 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); 6339 6339 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA); 6340 6340 6341 - tmp = I915_READ(DSPFW3); 6341 + tmp = 
intel_uncore_read(&dev_priv->uncore, DSPFW3); 6342 6342 wm->hpll_en = tmp & DSPFW_HPLL_SR_EN; 6343 6343 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); 6344 6344 wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR); ··· 6352 6352 u32 tmp; 6353 6353 6354 6354 for_each_pipe(dev_priv, pipe) { 6355 - tmp = I915_READ(VLV_DDL(pipe)); 6355 + tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe)); 6356 6356 6357 6357 wm->ddl[pipe].plane[PLANE_PRIMARY] = 6358 6358 (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); ··· 6364 6364 (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); 6365 6365 } 6366 6366 6367 - tmp = I915_READ(DSPFW1); 6367 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); 6368 6368 wm->sr.plane = _FW_WM(tmp, SR); 6369 6369 wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); 6370 6370 wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); 6371 6371 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); 6372 6372 6373 - tmp = I915_READ(DSPFW2); 6373 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); 6374 6374 wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); 6375 6375 wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); 6376 6376 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); 6377 6377 6378 - tmp = I915_READ(DSPFW3); 6378 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); 6379 6379 wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); 6380 6380 6381 6381 if (IS_CHERRYVIEW(dev_priv)) { 6382 - tmp = I915_READ(DSPFW7_CHV); 6382 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV); 6383 6383 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); 6384 6384 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); 6385 6385 6386 - tmp = I915_READ(DSPFW8_CHV); 6386 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV); 6387 6387 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); 6388 6388 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = 
_FW_WM_VLV(tmp, SPRITEE); 6389 6389 6390 - tmp = I915_READ(DSPFW9_CHV); 6390 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV); 6391 6391 wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); 6392 6392 wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); 6393 6393 6394 - tmp = I915_READ(DSPHOWM); 6394 + tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); 6395 6395 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 6396 6396 wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; 6397 6397 wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; ··· 6403 6403 wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; 6404 6404 wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; 6405 6405 } else { 6406 - tmp = I915_READ(DSPFW7); 6406 + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7); 6407 6407 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); 6408 6408 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); 6409 6409 6410 - tmp = I915_READ(DSPHOWM); 6410 + tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); 6411 6411 wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; 6412 6412 wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; 6413 6413 wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; ··· 6428 6428 6429 6429 g4x_read_wm_values(dev_priv, wm); 6430 6430 6431 - wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 6431 + wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; 6432 6432 6433 6433 for_each_intel_crtc(&dev_priv->drm, crtc) { 6434 6434 struct intel_crtc_state *crtc_state = ··· 6572 6572 6573 6573 vlv_read_wm_values(dev_priv, wm); 6574 6574 6575 - wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 6575 + wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 6576 6576 wm->level = VLV_WM_LEVEL_PM2; 6577 6577 6578 6578 if (IS_CHERRYVIEW(dev_priv)) { ··· 6719 6719 */ 6720 
6720 static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) 6721 6721 { 6722 - I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); 6723 - I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); 6724 - I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); 6722 + intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN); 6723 + intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN); 6724 + intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN); 6725 6725 6726 6726 /* 6727 6727 * Don't touch WM1S_LP_EN here. ··· 6739 6739 for_each_intel_crtc(&dev_priv->drm, crtc) 6740 6740 ilk_pipe_wm_get_hw_state(crtc); 6741 6741 6742 - hw->wm_lp[0] = I915_READ(WM1_LP_ILK); 6743 - hw->wm_lp[1] = I915_READ(WM2_LP_ILK); 6744 - hw->wm_lp[2] = I915_READ(WM3_LP_ILK); 6742 + hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK); 6743 + hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK); 6744 + hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK); 6745 6745 6746 - hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); 6746 + hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK); 6747 6747 if (INTEL_GEN(dev_priv) >= 7) { 6748 - hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); 6749 - hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); 6748 + hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB); 6749 + hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB); 6750 6750 } 6751 6751 6752 6752 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 6753 - hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 6753 + hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 
6754 6754 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 6755 6755 else if (IS_IVYBRIDGE(dev_priv)) 6756 - hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? 6756 + hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? 6757 6757 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 6758 6758 6759 6759 hw->enable_fbc_wm = 6760 - !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); 6760 + !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); 6761 6761 } 6762 6762 6763 6763 /** ··· 6808 6808 if (!HAS_IPC(dev_priv)) 6809 6809 return; 6810 6810 6811 - val = I915_READ(DISP_ARB_CTL2); 6811 + val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2); 6812 6812 6813 6813 if (dev_priv->ipc_enabled) 6814 6814 val |= DISP_IPC_ENABLE; 6815 6815 else 6816 6816 val &= ~DISP_IPC_ENABLE; 6817 6817 6818 - I915_WRITE(DISP_ARB_CTL2, val); 6818 + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val); 6819 6819 } 6820 6820 6821 6821 static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv) ··· 6850 6850 * gating for the panel power sequencer or it will fail to 6851 6851 * start up when no ports are active. 
6852 6852 */ 6853 - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 6853 + intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 6854 6854 } 6855 6855 6856 6856 static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) ··· 6858 6858 enum pipe pipe; 6859 6859 6860 6860 for_each_pipe(dev_priv, pipe) { 6861 - I915_WRITE(DSPCNTR(pipe), 6862 - I915_READ(DSPCNTR(pipe)) | 6861 + intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe), 6862 + intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) | 6863 6863 DISPPLANE_TRICKLE_FEED_DISABLE); 6864 6864 6865 - I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe))); 6866 - POSTING_READ(DSPSURF(pipe)); 6865 + intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe))); 6866 + intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe)); 6867 6867 } 6868 6868 } 6869 6869 ··· 6879 6879 ILK_DPFCUNIT_CLOCK_GATE_DISABLE | 6880 6880 ILK_DPFDUNIT_CLOCK_GATE_ENABLE; 6881 6881 6882 - I915_WRITE(PCH_3DCGDIS0, 6882 + intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0, 6883 6883 MARIUNIT_CLOCK_GATE_DISABLE | 6884 6884 SVSMUNIT_CLOCK_GATE_DISABLE); 6885 - I915_WRITE(PCH_3DCGDIS1, 6885 + intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1, 6886 6886 VFMUNIT_CLOCK_GATE_DISABLE); 6887 6887 6888 6888 /* ··· 6892 6892 * The bit 5 of 0x42020 6893 6893 * The bit 15 of 0x45000 6894 6894 */ 6895 - I915_WRITE(ILK_DISPLAY_CHICKEN2, 6896 - (I915_READ(ILK_DISPLAY_CHICKEN2) | 6895 + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 6896 + (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | 6897 6897 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 6898 6898 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; 6899 - I915_WRITE(DISP_ARB_CTL, 6900 - (I915_READ(DISP_ARB_CTL) | 6899 + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, 6900 + (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | 6901 6901 DISP_FBC_WM_DIS)); 6902 6902 6903 6903 /* ··· 
6909 6909 */ 6910 6910 if (IS_IRONLAKE_M(dev_priv)) { 6911 6911 /* WaFbcAsynchFlipDisableFbcQueue:ilk */ 6912 - I915_WRITE(ILK_DISPLAY_CHICKEN1, 6913 - I915_READ(ILK_DISPLAY_CHICKEN1) | 6912 + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1, 6913 + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) | 6914 6914 ILK_FBCQ_DIS); 6915 - I915_WRITE(ILK_DISPLAY_CHICKEN2, 6916 - I915_READ(ILK_DISPLAY_CHICKEN2) | 6915 + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 6916 + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | 6917 6917 ILK_DPARB_GATE); 6918 6918 } 6919 6919 6920 - I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 6920 + intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate); 6921 6921 6922 - I915_WRITE(ILK_DISPLAY_CHICKEN2, 6923 - I915_READ(ILK_DISPLAY_CHICKEN2) | 6922 + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 6923 + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | 6924 6924 ILK_ELPIN_409_SELECT); 6925 6925 6926 6926 g4x_disable_trickle_feed(dev_priv); ··· 6938 6938 * gating for the panel power sequencer or it will fail to 6939 6939 * start up when no ports are active. 6940 6940 */ 6941 - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | 6941 + intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | 6942 6942 PCH_DPLUNIT_CLOCK_GATE_DISABLE | 6943 6943 PCH_CPUNIT_CLOCK_GATE_DISABLE); 6944 - I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 6944 + intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2, intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) | 6945 6945 DPLS_EDP_PPS_FIX_DIS); 6946 6946 /* The below fixes the weird display corruption, a few pixels shifted 6947 6947 * downward, on (only) LVDS of some HP laptops with IVY. 
6948 6948 */ 6949 6949 for_each_pipe(dev_priv, pipe) { 6950 - val = I915_READ(TRANS_CHICKEN2(pipe)); 6950 + val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe)); 6951 6951 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 6952 6952 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 6953 6953 if (dev_priv->vbt.fdi_rx_polarity_inverted) 6954 6954 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 6955 6955 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; 6956 6956 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; 6957 - I915_WRITE(TRANS_CHICKEN2(pipe), val); 6957 + intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val); 6958 6958 } 6959 6959 /* WADP0ClockGatingDisable */ 6960 6960 for_each_pipe(dev_priv, pipe) { 6961 - I915_WRITE(TRANS_CHICKEN1(pipe), 6961 + intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe), 6962 6962 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 6963 6963 } 6964 6964 } ··· 6967 6967 { 6968 6968 u32 tmp; 6969 6969 6970 - tmp = I915_READ(MCH_SSKPD); 6970 + tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD); 6971 6971 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) 6972 6972 drm_dbg_kms(&dev_priv->drm, 6973 6973 "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", ··· 6978 6978 { 6979 6979 u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 6980 6980 6981 - I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 6981 + intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate); 6982 6982 6983 - I915_WRITE(ILK_DISPLAY_CHICKEN2, 6984 - I915_READ(ILK_DISPLAY_CHICKEN2) | 6983 + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 6984 + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | 6985 6985 ILK_ELPIN_409_SELECT); 6986 6986 6987 - I915_WRITE(GEN6_UCGCTL1, 6988 - I915_READ(GEN6_UCGCTL1) | 6987 + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, 6988 + intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | 6989 6989 GEN6_BLBUNIT_CLOCK_GATE_DISABLE | 6990 6990 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 6991 6991 ··· 7002 7002 * 
WaDisableRCCUnitClockGating:snb 7003 7003 * WaDisableRCPBUnitClockGating:snb 7004 7004 */ 7005 - I915_WRITE(GEN6_UCGCTL2, 7005 + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2, 7006 7006 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 7007 7007 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 7008 7008 ··· 7017 7017 * 7018 7018 * WaFbcAsynchFlipDisableFbcQueue:snb 7019 7019 */ 7020 - I915_WRITE(ILK_DISPLAY_CHICKEN1, 7021 - I915_READ(ILK_DISPLAY_CHICKEN1) | 7020 + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1, 7021 + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) | 7022 7022 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 7023 - I915_WRITE(ILK_DISPLAY_CHICKEN2, 7024 - I915_READ(ILK_DISPLAY_CHICKEN2) | 7023 + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 7024 + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | 7025 7025 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 7026 - I915_WRITE(ILK_DSPCLK_GATE_D, 7027 - I915_READ(ILK_DSPCLK_GATE_D) | 7026 + intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, 7027 + intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) | 7028 7028 ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 7029 7029 ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 7030 7030 ··· 7042 7042 * disabled when not needed anymore in order to save power. 
7043 7043 */ 7044 7044 if (HAS_PCH_LPT_LP(dev_priv)) 7045 - I915_WRITE(SOUTH_DSPCLK_GATE_D, 7046 - I915_READ(SOUTH_DSPCLK_GATE_D) | 7045 + intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, 7046 + intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) | 7047 7047 PCH_LP_PARTITION_LEVEL_DISABLE); 7048 7048 7049 7049 /* WADPOClockGatingDisable:hsw */ 7050 - I915_WRITE(TRANS_CHICKEN1(PIPE_A), 7051 - I915_READ(TRANS_CHICKEN1(PIPE_A)) | 7050 + intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A), 7051 + intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) | 7052 7052 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 7053 7053 } 7054 7054 7055 7055 static void lpt_suspend_hw(struct drm_i915_private *dev_priv) 7056 7056 { 7057 7057 if (HAS_PCH_LPT_LP(dev_priv)) { 7058 - u32 val = I915_READ(SOUTH_DSPCLK_GATE_D); 7058 + u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D); 7059 7059 7060 7060 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 7061 - I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 7061 + intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val); 7062 7062 } 7063 7063 } 7064 7064 ··· 7070 7070 u32 val; 7071 7071 7072 7072 /* WaTempDisableDOPClkGating:bdw */ 7073 - misccpctl = I915_READ(GEN7_MISCCPCTL); 7074 - I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 7073 + misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL); 7074 + intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 7075 7075 7076 - val = I915_READ(GEN8_L3SQCREG1); 7076 + val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1); 7077 7077 val &= ~L3_PRIO_CREDITS_MASK; 7078 7078 val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits); 7079 7079 val |= L3_HIGH_PRIO_CREDITS(high_prio_credits); 7080 - I915_WRITE(GEN8_L3SQCREG1, val); 7080 + intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val); 7081 7081 7082 7082 /* 7083 7083 * Wait at least 100 clocks before re-enabling clock gating. 
7084 7084 * See the definition of L3SQCREG1 in BSpec. 7085 7085 */ 7086 - POSTING_READ(GEN8_L3SQCREG1); 7086 + intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1); 7087 7087 udelay(1); 7088 - I915_WRITE(GEN7_MISCCPCTL, misccpctl); 7088 + intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl); 7089 7089 } 7090 7090 7091 7091 static void icl_init_clock_gating(struct drm_i915_private *dev_priv) 7092 7092 { 7093 7093 /* Wa_1409120013:icl,ehl */ 7094 - I915_WRITE(ILK_DPFC_CHICKEN, 7094 + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, 7095 7095 ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); 7096 7096 7097 7097 /* This is not an Wa. Enable to reduce Sampler power */ 7098 - I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN, 7099 - I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); 7098 + intel_uncore_write(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN, 7099 + intel_uncore_read(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); 7100 7100 7101 7101 /*Wa_14010594013:icl, ehl */ 7102 7102 intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, ··· 7106 7106 static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) 7107 7107 { 7108 7108 /* Wa_1409120013:tgl */ 7109 - I915_WRITE(ILK_DPFC_CHICKEN, 7109 + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, 7110 7110 ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); 7111 7111 7112 7112 /* Wa_1409825376:tgl (pre-prod)*/ 7113 7113 if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1)) 7114 - I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | 7114 + intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) | 7115 7115 TGL_VRH_GATING_DIS); 7116 7116 7117 7117 /* Wa_14011059788:tgl */ ··· 7123 7123 { 7124 7124 /* Wa_1409836686:dg1[a0] */ 7125 7125 if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0)) 7126 - I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | 7126 + intel_uncore_write(&dev_priv->uncore, 
GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) | 7127 7127 DPT_GATING_DIS); 7128 7128 } 7129 7129 ··· 7133 7133 return; 7134 7134 7135 7135 /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */ 7136 - I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) | 7136 + intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) | 7137 7137 CNP_PWM_CGE_GATING_DISABLE); 7138 7138 } 7139 7139 ··· 7143 7143 cnp_init_clock_gating(dev_priv); 7144 7144 7145 7145 /* This is not an Wa. Enable for better image quality */ 7146 - I915_WRITE(_3D_CHICKEN3, 7146 + intel_uncore_write(&dev_priv->uncore, _3D_CHICKEN3, 7147 7147 _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); 7148 7148 7149 7149 /* WaEnableChickenDCPR:cnl */ 7150 - I915_WRITE(GEN8_CHICKEN_DCPR_1, 7151 - I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); 7150 + intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, 7151 + intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); 7152 7152 7153 7153 /* 7154 7154 * WaFbcWakeMemOn:cnl 7155 7155 * Display WA #0859: cnl 7156 7156 */ 7157 - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 7157 + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | 7158 7158 DISP_FBC_MEMORY_WAKE); 7159 7159 7160 - val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE); 7160 + val = intel_uncore_read(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE); 7161 7161 /* ReadHitWriteOnlyDisable:cnl */ 7162 7162 val |= RCCUNIT_CLKGATE_DIS; 7163 - I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val); 7163 + intel_uncore_write(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE, val); 7164 7164 7165 7165 /* Wa_2201832410:cnl */ 7166 - val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE); 7166 + val = intel_uncore_read(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE); 7167 7167 val |= GWUNIT_CLKGATE_DIS; 7168 - I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val); 7168 + 
intel_uncore_write(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE, val); 7169 7169 7170 7170 /* WaDisableVFclkgate:cnl */ 7171 7171 /* WaVFUnitClockGatingDisable:cnl */ 7172 - val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE); 7172 + val = intel_uncore_read(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE); 7173 7173 val |= VFUNIT_CLKGATE_DIS; 7174 - I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val); 7174 + intel_uncore_write(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, val); 7175 7175 } 7176 7176 7177 7177 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) ··· 7180 7180 gen9_init_clock_gating(dev_priv); 7181 7181 7182 7182 /* WAC6entrylatency:cfl */ 7183 - I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | 7183 + intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) | 7184 7184 FBC_LLC_FULLY_OPEN); 7185 7185 7186 7186 /* 7187 7187 * WaFbcTurnOffFbcWatermark:cfl 7188 7188 * Display WA #0562: cfl 7189 7189 */ 7190 - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 7190 + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | 7191 7191 DISP_FBC_WM_DIS); 7192 7192 7193 7193 /* 7194 7194 * WaFbcNukeOnHostModify:cfl 7195 7195 * Display WA #0873: cfl 7196 7196 */ 7197 - I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 7197 + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | 7198 7198 ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 7199 7199 } 7200 7200 ··· 7203 7203 gen9_init_clock_gating(dev_priv); 7204 7204 7205 7205 /* WAC6entrylatency:kbl */ 7206 - I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | 7206 + intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) | 7207 7207 FBC_LLC_FULLY_OPEN); 7208 7208 7209 7209 /* WaDisableSDEUnitClockGating:kbl */ 7210 7210 if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0)) 7211 - 
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 7211 + intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | 7212 7212 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7213 7213 7214 7214 /* WaDisableGamClockGating:kbl */ 7215 7215 if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0)) 7216 - I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | 7216 + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | 7217 7217 GEN6_GAMUNIT_CLOCK_GATE_DISABLE); 7218 7218 7219 7219 /* 7220 7220 * WaFbcTurnOffFbcWatermark:kbl 7221 7221 * Display WA #0562: kbl 7222 7222 */ 7223 - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 7223 + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | 7224 7224 DISP_FBC_WM_DIS); 7225 7225 7226 7226 /* 7227 7227 * WaFbcNukeOnHostModify:kbl 7228 7228 * Display WA #0873: kbl 7229 7229 */ 7230 - I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 7230 + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | 7231 7231 ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 7232 7232 } 7233 7233 ··· 7236 7236 gen9_init_clock_gating(dev_priv); 7237 7237 7238 7238 /* WaDisableDopClockGating:skl */ 7239 - I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) & 7239 + intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) & 7240 7240 ~GEN7_DOP_CLOCK_GATE_ENABLE); 7241 7241 7242 7242 /* WAC6entrylatency:skl */ 7243 - I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | 7243 + intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) | 7244 7244 FBC_LLC_FULLY_OPEN); 7245 7245 7246 7246 /* 7247 7247 * WaFbcTurnOffFbcWatermark:skl 7248 7248 * Display WA #0562: skl 7249 7249 */ 7250 - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 7250 + 
intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | 7251 7251 DISP_FBC_WM_DIS); 7252 7252 7253 7253 /* 7254 7254 * WaFbcNukeOnHostModify:skl 7255 7255 * Display WA #0873: skl 7256 7256 */ 7257 - I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 7257 + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | 7258 7258 ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 7259 7259 7260 7260 /* 7261 7261 * WaFbcHighMemBwCorruptionAvoidance:skl 7262 7262 * Display WA #0883: skl 7263 7263 */ 7264 - I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 7264 + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | 7265 7265 ILK_DPFC_DISABLE_DUMMY0); 7266 7266 } 7267 7267 ··· 7270 7270 enum pipe pipe; 7271 7271 7272 7272 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ 7273 - I915_WRITE(CHICKEN_PIPESL_1(PIPE_A), 7274 - I915_READ(CHICKEN_PIPESL_1(PIPE_A)) | 7273 + intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), 7274 + intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) | 7275 7275 HSW_FBCQ_DIS); 7276 7276 7277 7277 /* WaSwitchSolVfFArbitrationPriority:bdw */ 7278 - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 7278 + intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 7279 7279 7280 7280 /* WaPsrDPAMaskVBlankInSRD:bdw */ 7281 - I915_WRITE(CHICKEN_PAR1_1, 7282 - I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 7281 + intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, 7282 + intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 7283 7283 7284 7284 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 7285 7285 for_each_pipe(dev_priv, pipe) { 7286 - I915_WRITE(CHICKEN_PIPESL_1(pipe), 7287 - I915_READ(CHICKEN_PIPESL_1(pipe)) | 7286 + intel_uncore_write(&dev_priv->uncore, 
CHICKEN_PIPESL_1(pipe), 7287 + intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) | 7288 7288 BDW_DPRS_MASK_VBLANK_SRD); 7289 7289 } 7290 7290 7291 7291 /* WaVSRefCountFullforceMissDisable:bdw */ 7292 7292 /* WaDSRefCountFullforceMissDisable:bdw */ 7293 - I915_WRITE(GEN7_FF_THREAD_MODE, 7294 - I915_READ(GEN7_FF_THREAD_MODE) & 7293 + intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE, 7294 + intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) & 7295 7295 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 7296 7296 7297 - I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 7297 + intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL, 7298 7298 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 7299 7299 7300 7300 /* WaDisableSDEUnitClockGating:bdw */ 7301 - I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 7301 + intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | 7302 7302 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7303 7303 7304 7304 /* WaProgramL3SqcReg1Default:bdw */ 7305 7305 gen8_set_l3sqc_credits(dev_priv, 30, 2); 7306 7306 7307 7307 /* WaKVMNotificationOnConfigChange:bdw */ 7308 - I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1) 7308 + intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1) 7309 7309 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); 7310 7310 7311 7311 lpt_init_clock_gating(dev_priv); ··· 7315 7315 * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP 7316 7316 * clock gating. 
7317 7317 */ 7318 - I915_WRITE(GEN6_UCGCTL1, 7319 - I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); 7318 + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, 7319 + intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); 7320 7320 } 7321 7321 7322 7322 static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) 7323 7323 { 7324 7324 /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ 7325 - I915_WRITE(CHICKEN_PIPESL_1(PIPE_A), 7326 - I915_READ(CHICKEN_PIPESL_1(PIPE_A)) | 7325 + intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), 7326 + intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) | 7327 7327 HSW_FBCQ_DIS); 7328 7328 7329 7329 /* This is required by WaCatErrorRejectionIssue:hsw */ 7330 - I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 7331 - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 7330 + intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 7331 + intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 7332 7332 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 7333 7333 7334 7334 /* WaSwitchSolVfFArbitrationPriority:hsw */ 7335 - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 7335 + intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 7336 7336 7337 7337 lpt_init_clock_gating(dev_priv); 7338 7338 } ··· 7341 7341 { 7342 7342 u32 snpcr; 7343 7343 7344 - I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 7344 + intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 7345 7345 7346 7346 /* WaFbcAsynchFlipDisableFbcQueue:ivb */ 7347 - I915_WRITE(ILK_DISPLAY_CHICKEN1, 7348 - I915_READ(ILK_DISPLAY_CHICKEN1) | 7347 + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1, 7348 + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) | 7349 7349 ILK_FBCQ_DIS); 7350 7350 7351 7351 /* WaDisableBackToBackFlipFix:ivb */ 7352 - 
I915_WRITE(IVB_CHICKEN3, 7352 + intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3, 7353 7353 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 7354 7354 CHICKEN3_DGMG_DONE_FIX_DISABLE); 7355 7355 7356 7356 if (IS_IVB_GT1(dev_priv)) 7357 - I915_WRITE(GEN7_ROW_CHICKEN2, 7357 + intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2, 7358 7358 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 7359 7359 else { 7360 7360 /* must write both registers */ 7361 - I915_WRITE(GEN7_ROW_CHICKEN2, 7361 + intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2, 7362 7362 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 7363 - I915_WRITE(GEN7_ROW_CHICKEN2_GT2, 7363 + intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2, 7364 7364 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 7365 7365 } 7366 7366 ··· 7368 7368 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 7369 7369 * This implements the WaDisableRCZUnitClockGating:ivb workaround. 7370 7370 */ 7371 - I915_WRITE(GEN6_UCGCTL2, 7371 + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2, 7372 7372 GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 7373 7373 7374 7374 /* This is required by WaCatErrorRejectionIssue:ivb */ 7375 - I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 7376 - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 7375 + intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 7376 + intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 7377 7377 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 7378 7378 7379 7379 g4x_disable_trickle_feed(dev_priv); 7380 7380 7381 - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 7381 + snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR); 7382 7382 snpcr &= ~GEN6_MBC_SNPCR_MASK; 7383 7383 snpcr |= GEN6_MBC_SNPCR_MED; 7384 - I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 7384 + intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr); 7385 7385 7386 7386 if (!HAS_PCH_NOP(dev_priv)) 7387 7387 cpt_init_clock_gating(dev_priv); ··· 7392 7392 static void vlv_init_clock_gating(struct 
drm_i915_private *dev_priv) 7393 7393 { 7394 7394 /* WaDisableBackToBackFlipFix:vlv */ 7395 - I915_WRITE(IVB_CHICKEN3, 7395 + intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3, 7396 7396 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 7397 7397 CHICKEN3_DGMG_DONE_FIX_DISABLE); 7398 7398 7399 7399 /* WaDisableDopClockGating:vlv */ 7400 - I915_WRITE(GEN7_ROW_CHICKEN2, 7400 + intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2, 7401 7401 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 7402 7402 7403 7403 /* This is required by WaCatErrorRejectionIssue:vlv */ 7404 - I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 7405 - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 7404 + intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 7405 + intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 7406 7406 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 7407 7407 7408 7408 /* 7409 7409 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 7410 7410 * This implements the WaDisableRCZUnitClockGating:vlv workaround. 7411 7411 */ 7412 - I915_WRITE(GEN6_UCGCTL2, 7412 + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2, 7413 7413 GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 7414 7414 7415 7415 /* WaDisableL3Bank2xClockGate:vlv 7416 7416 * Disabling L3 clock gating- MMIO 940c[25] = 1 7417 7417 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */ 7418 - I915_WRITE(GEN7_UCGCTL4, 7419 - I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 7418 + intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4, 7419 + intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 7420 7420 7421 7421 /* 7422 7422 * WaDisableVLVClockGating_VBIIssue:vlv 7423 7423 * Disable clock gating on th GCFG unit to prevent a delay 7424 7424 * in the reporting of vblank events. 
7425 7425 */ 7426 - I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); 7426 + intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS); 7427 7427 } 7428 7428 7429 7429 static void chv_init_clock_gating(struct drm_i915_private *dev_priv) 7430 7430 { 7431 7431 /* WaVSRefCountFullforceMissDisable:chv */ 7432 7432 /* WaDSRefCountFullforceMissDisable:chv */ 7433 - I915_WRITE(GEN7_FF_THREAD_MODE, 7434 - I915_READ(GEN7_FF_THREAD_MODE) & 7433 + intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE, 7434 + intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) & 7435 7435 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 7436 7436 7437 7437 /* WaDisableSemaphoreAndSyncFlipWait:chv */ 7438 - I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 7438 + intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL, 7439 7439 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 7440 7440 7441 7441 /* WaDisableCSUnitClockGating:chv */ 7442 - I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | 7442 + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | 7443 7443 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 7444 7444 7445 7445 /* WaDisableSDEUnitClockGating:chv */ 7446 - I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 7446 + intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | 7447 7447 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7448 7448 7449 7449 /* ··· 7458 7458 { 7459 7459 u32 dspclk_gate; 7460 7460 7461 - I915_WRITE(RENCLK_GATE_D1, 0); 7462 - I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 7461 + intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0); 7462 + intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 7463 7463 GS_UNIT_CLOCK_GATE_DISABLE | 7464 7464 CL_UNIT_CLOCK_GATE_DISABLE); 7465 - I915_WRITE(RAMCLK_GATE_D, 0); 7465 + intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0); 7466 7466 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | 7467 7467 
OVRUNIT_CLOCK_GATE_DISABLE | 7468 7468 OVCUNIT_CLOCK_GATE_DISABLE; 7469 7469 if (IS_GM45(dev_priv)) 7470 7470 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 7471 - I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 7471 + intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate); 7472 7472 7473 7473 g4x_disable_trickle_feed(dev_priv); 7474 7474 } ··· 7489 7489 7490 7490 static void i965g_init_clock_gating(struct drm_i915_private *dev_priv) 7491 7491 { 7492 - I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 7492 + intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 7493 7493 I965_RCC_CLOCK_GATE_DISABLE | 7494 7494 I965_RCPB_CLOCK_GATE_DISABLE | 7495 7495 I965_ISC_CLOCK_GATE_DISABLE | 7496 7496 I965_FBC_CLOCK_GATE_DISABLE); 7497 - I915_WRITE(RENCLK_GATE_D2, 0); 7498 - I915_WRITE(MI_ARB_STATE, 7497 + intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0); 7498 + intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, 7499 7499 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 7500 7500 } 7501 7501 7502 7502 static void gen3_init_clock_gating(struct drm_i915_private *dev_priv) 7503 7503 { 7504 - u32 dstate = I915_READ(D_STATE); 7504 + u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE); 7505 7505 7506 7506 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 7507 7507 DSTATE_DOT_CLOCK_GATING; 7508 - I915_WRITE(D_STATE, dstate); 7508 + intel_uncore_write(&dev_priv->uncore, D_STATE, dstate); 7509 7509 7510 7510 if (IS_PINEVIEW(dev_priv)) 7511 - I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); 7511 + intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); 7512 7512 7513 7513 /* IIR "flip pending" means done if this bit is set */ 7514 - I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); 7514 + intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); 7515 7515 7516 7516 /* interrupts should cause a wake up from C3 */ 7517 - I915_WRITE(INSTPM, 
_MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); 7517 + intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); 7518 7518 7519 7519 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 7520 - I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); 7520 + intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); 7521 7521 7522 - I915_WRITE(MI_ARB_STATE, 7522 + intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, 7523 7523 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 7524 7524 } 7525 7525 7526 7526 static void i85x_init_clock_gating(struct drm_i915_private *dev_priv) 7527 7527 { 7528 - I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 7528 + intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 7529 7529 7530 7530 /* interrupts should cause a wake up from C3 */ 7531 - I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | 7531 + intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | 7532 7532 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); 7533 7533 7534 - I915_WRITE(MEM_MODE, 7534 + intel_uncore_write(&dev_priv->uncore, MEM_MODE, 7535 7535 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE)); 7536 7536 7537 7537 /* ··· 7541 7541 * abosultely nothing) would not allow FBC to recompress 7542 7542 * until a 2D blit occurs. 7543 7543 */ 7544 - I915_WRITE(SCPD0, 7544 + intel_uncore_write(&dev_priv->uncore, SCPD0, 7545 7545 _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D)); 7546 7546 } 7547 7547 7548 7548 static void i830_init_clock_gating(struct drm_i915_private *dev_priv) 7549 7549 { 7550 - I915_WRITE(MEM_MODE, 7550 + intel_uncore_write(&dev_priv->uncore, MEM_MODE, 7551 7551 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | 7552 7552 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); 7553 7553 }
+2 -2
drivers/gpu/drm/i915/intel_sideband.c
··· 404 404 lockdep_assert_held(&i915->sb_lock); 405 405 406 406 /* 407 - * GEN6_PCODE_* are outside of the forcewake domain, we can 408 - * use te fw I915_READ variants to reduce the amount of work 407 + * GEN6_PCODE_* are outside of the forcewake domain, we can use 408 + * intel_uncore_read/write_fw variants to reduce the amount of work 409 409 * required when reading/writing. 410 410 */ 411 411
+2 -2
drivers/gpu/drm/i915/intel_uncore.c
··· 2070 2070 * This routine waits until the target register @reg contains the expected 2071 2071 * @value after applying the @mask, i.e. it waits until :: 2072 2072 * 2073 - * (I915_READ_FW(reg) & mask) == value 2073 + * (intel_uncore_read_fw(uncore, reg) & mask) == value 2074 2074 * 2075 2075 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds. 2076 2076 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us ··· 2126 2126 * This routine waits until the target register @reg contains the expected 2127 2127 * @value after applying the @mask, i.e. it waits until :: 2128 2128 * 2129 - * (I915_READ(reg) & mask) == value 2129 + * (intel_uncore_read(uncore, reg) & mask) == value 2130 2130 * 2131 2131 * Otherwise, the wait will timeout after @timeout_ms milliseconds. 2132 2132 *
+3 -3
drivers/gpu/drm/i915/intel_uncore.h
··· 216 216 217 217 /* 218 218 * Like above but the caller must manage the uncore.lock itself. 219 - * Must be used with I915_READ_FW and friends. 219 + * Must be used with intel_uncore_read_fw() and friends. 220 220 */ 221 221 void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, 222 222 enum forcewake_domains domains); ··· 318 318 * will be implemented using 2 32-bit writes in an arbitrary order with 319 319 * an arbitrary delay between them. This can cause the hardware to 320 320 * act upon the intermediate value, possibly leading to corruption and 321 - * machine death. For this reason we do not support I915_WRITE64, or 322 - * uncore->funcs.mmio_writeq. 321 + * machine death. For this reason we do not support intel_uncore_write64, 322 + * or uncore->funcs.mmio_writeq. 323 323 * 324 324 * When reading a 64-bit value as two 32-bit values, the delay may cause 325 325 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
+1
include/drm/drm_dsc.h
··· 603 603 } __packed; 604 604 605 605 void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header); 606 + int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size); 606 607 void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp, 607 608 const struct drm_dsc_config *dsc_cfg); 608 609 int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);