Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2021-11-30' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

drm/i915 feature pull for v5.17:

Features and functionality:
- Implement per-lane DP drive settings for ICL+ (Ville)
- Enable runtime pm autosuspend by default (Tilak Tangudu)
- ADL-P DSI support (Vandita)
- Add support for pipe C and D DMC firmware (Anusha)
- Implement (near)atomic gamma LUT updates via vblank workers (Ville)
- Split plane updates to noarm+arm phases (Ville)
- Remove the CCS FB stride restrictions on ADL-P (Imre)
- Add PSR selective fetch support for biplanar formats (Jouni)
- Add support for display audio codec keepalive (Kai)
- VRR platform support for display 11 (Manasi)

Refactoring and cleanups:
- FBC refactoring and cleanups preparing for multiple FBC instances (Ville)
- PCH modeset refactoring, move to its own file (Ville)
- Refactor and simplify handling of modifiers (Imre)
- PXP cleanups (Ville)
- Display header and include refactoring (Jani)
- Some register macro cleanups (Ville)
- Refactor DP HDMI DFP limit code (Ville)

Fixes:
- Disable DSB usage for now due to incorrect gamma LUT updates (Ville)
- Check async flip state of every crtc and plane only once (José)
- Fix DPT FB suspend/resume (Imre)
- Fix black screen on reboot due to disabled DP++ TMDS output buffers (Ville)
- Don't request GMBUS to generate irqs when called while irqs are off (Ville)
- Fix type1 DVI DP dual mode adapter heuristics for modern platforms (Ville)
- Fix integer overflow in 128b/132b data rate calculation (Jani)
- Fix bigjoiner state readout (Ville)
- Build fix for non-x86 (Siva)
- PSR fixes (José, Jouni, Ville)
- Disable ADL-P underrun recovery (José)
- Fix DP link parameter usage before valid DPCD (Imre)
- VRR vblank and frame counter fixes (Ville)
- Fix fastsets on TypeC ports following a non-blocking modeset (Imre)
- Compiler warning fixes (Nathan Chancellor)
- Fix DSI HS mode commands (William Tseng)
- Error return fixes (Dan Carpenter)
- Update memory bandwidth calculations (Radhakrishna)
- Implement WM0 cursor WA for DG2 (Stan)
- Fix DSI Double pixelclock on read-back for dual-link panels (Hans de Goede)
- HDMI 2.1 PCON FRL configuration fixes (Ankit)

Merges:
- DP link training delay helpers, via topic branch (Jani)
- Backmerge drm-next (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87v909it0t.fsf@intel.com

+5390 -3885
+1
drivers/char/agp/intel-gtt.c
··· 20 20 #include <linux/kernel.h> 21 21 #include <linux/pagemap.h> 22 22 #include <linux/agp_backend.h> 23 + #include <linux/intel-iommu.h> 23 24 #include <linux/delay.h> 24 25 #include <asm/smp.h> 25 26 #include "agp.h"
+1 -1
drivers/gpu/drm/i915/Kconfig
··· 21 21 select ACPI_VIDEO if ACPI 22 22 select ACPI_BUTTON if ACPI 23 23 select SYNC_FILE 24 - select IOSF_MBI 24 + select IOSF_MBI if X86 25 25 select CRC32 26 26 select SND_HDA_I915 if SND_HDA_CORE 27 27 select CEC_CORE if CEC_NOTIFIER
+3 -1
drivers/gpu/drm/i915/Makefile
··· 30 30 # Please keep these build lists sorted! 31 31 32 32 # core driver code 33 - i915-y += i915_drv.o \ 33 + i915-y += i915_driver.o \ 34 34 i915_config.o \ 35 35 i915_irq.o \ 36 36 i915_getparam.o \ ··· 226 226 display/intel_hotplug.o \ 227 227 display/intel_lpe_audio.o \ 228 228 display/intel_overlay.o \ 229 + display/intel_pch_display.o \ 230 + display/intel_pch_refclk.o \ 229 231 display/intel_plane_initial.o \ 230 232 display/intel_psr.o \ 231 233 display/intel_quirks.o \
+80 -54
drivers/gpu/drm/i915/display/i9xx_plane.c
··· 60 60 DRM_FORMAT_XBGR16161616F, 61 61 }; 62 62 63 - static const u64 i9xx_format_modifiers[] = { 64 - I915_FORMAT_MOD_X_TILED, 65 - DRM_FORMAT_MOD_LINEAR, 66 - DRM_FORMAT_MOD_INVALID 67 - }; 68 - 69 63 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, 70 64 u32 format, u64 modifier) 71 65 { 72 - switch (modifier) { 73 - case DRM_FORMAT_MOD_LINEAR: 74 - case I915_FORMAT_MOD_X_TILED: 75 - break; 76 - default: 66 + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) 77 67 return false; 78 - } 79 68 80 69 switch (format) { 81 70 case DRM_FORMAT_C8: ··· 81 92 static bool i965_plane_format_mod_supported(struct drm_plane *_plane, 82 93 u32 format, u64 modifier) 83 94 { 84 - switch (modifier) { 85 - case DRM_FORMAT_MOD_LINEAR: 86 - case I915_FORMAT_MOD_X_TILED: 87 - break; 88 - default: 95 + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) 89 96 return false; 90 - } 91 97 92 98 switch (format) { 93 99 case DRM_FORMAT_C8: ··· 256 272 u32 alignment = intel_surf_alignment(fb, 0); 257 273 int cpp = fb->format->cpp[0]; 258 274 259 - while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].stride) { 275 + while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].mapping_stride) { 260 276 if (offset == 0) { 261 277 drm_dbg_kms(&dev_priv->drm, 262 278 "Unable to find suitable display surface offset due to X-tiling\n"); ··· 402 418 return DIV_ROUND_UP(pixel_rate * num, den); 403 419 } 404 420 405 - static void i9xx_update_plane(struct intel_plane *plane, 406 - const struct intel_crtc_state *crtc_state, 407 - const struct intel_plane_state *plane_state) 421 + static void i9xx_plane_update_noarm(struct intel_plane *plane, 422 + const struct intel_crtc_state *crtc_state, 423 + const struct intel_plane_state *plane_state) 408 424 { 409 425 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 410 426 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 411 - u32 linear_offset; 427 + unsigned long 
irqflags; 428 + 429 + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 430 + 431 + intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), 432 + plane_state->view.color_plane[0].mapping_stride); 433 + 434 + if (DISPLAY_VER(dev_priv) < 4) { 435 + int crtc_x = plane_state->uapi.dst.x1; 436 + int crtc_y = plane_state->uapi.dst.y1; 437 + int crtc_w = drm_rect_width(&plane_state->uapi.dst); 438 + int crtc_h = drm_rect_height(&plane_state->uapi.dst); 439 + 440 + /* 441 + * PLANE_A doesn't actually have a full window 442 + * generator but let's assume we still need to 443 + * program whatever is there. 444 + */ 445 + intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane), 446 + (crtc_y << 16) | crtc_x); 447 + intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), 448 + ((crtc_h - 1) << 16) | (crtc_w - 1)); 449 + } 450 + 451 + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 452 + } 453 + 454 + static void i9xx_plane_update_arm(struct intel_plane *plane, 455 + const struct intel_crtc_state *crtc_state, 456 + const struct intel_plane_state *plane_state) 457 + { 458 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 459 + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; 412 460 int x = plane_state->view.color_plane[0].x; 413 461 int y = plane_state->view.color_plane[0].y; 414 - int crtc_x = plane_state->uapi.dst.x1; 415 - int crtc_y = plane_state->uapi.dst.y1; 416 - int crtc_w = drm_rect_width(&plane_state->uapi.dst); 417 - int crtc_h = drm_rect_height(&plane_state->uapi.dst); 462 + u32 dspcntr, dspaddr_offset, linear_offset; 418 463 unsigned long irqflags; 419 - u32 dspaddr_offset; 420 - u32 dspcntr; 421 464 422 465 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); 423 466 ··· 457 446 458 447 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 459 448 460 - intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), 461 - plane_state->view.color_plane[0].stride); 449 + if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { 450 + int crtc_x = 
plane_state->uapi.dst.x1; 451 + int crtc_y = plane_state->uapi.dst.y1; 452 + int crtc_w = drm_rect_width(&plane_state->uapi.dst); 453 + int crtc_h = drm_rect_height(&plane_state->uapi.dst); 462 454 463 - if (DISPLAY_VER(dev_priv) < 4) { 464 - /* 465 - * PLANE_A doesn't actually have a full window 466 - * generator but let's assume we still need to 467 - * program whatever is there. 468 - */ 469 - intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane), 470 - (crtc_y << 16) | crtc_x); 471 - intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), 472 - ((crtc_h - 1) << 16) | (crtc_w - 1)); 473 - } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { 474 455 intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), 475 456 (crtc_y << 16) | crtc_x); 476 457 intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), ··· 496 493 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 497 494 } 498 495 499 - static void i9xx_disable_plane(struct intel_plane *plane, 500 - const struct intel_crtc_state *crtc_state) 496 + static void i830_plane_update_arm(struct intel_plane *plane, 497 + const struct intel_crtc_state *crtc_state, 498 + const struct intel_plane_state *plane_state) 499 + { 500 + /* 501 + * On i830/i845 all registers are self-arming [ALM040]. 502 + * 503 + * Additional breakage on i830 causes register reads to return 504 + * the last latched value instead of the last written value [ALM026]. 
505 + */ 506 + i9xx_plane_update_noarm(plane, crtc_state, plane_state); 507 + i9xx_plane_update_arm(plane, crtc_state, plane_state); 508 + } 509 + 510 + static void i9xx_plane_disable_arm(struct intel_plane *plane, 511 + const struct intel_crtc_state *crtc_state) 501 512 { 502 513 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 503 514 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; ··· 785 768 struct intel_plane *plane; 786 769 const struct drm_plane_funcs *plane_funcs; 787 770 unsigned int supported_rotations; 771 + const u64 *modifiers; 788 772 const u32 *formats; 789 773 int num_formats; 790 774 int ret, zpos; ··· 807 789 plane->id = PLANE_PRIMARY; 808 790 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); 809 791 810 - plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); 811 - if (plane->has_fbc) { 812 - struct intel_fbc *fbc = &dev_priv->fbc; 813 - 814 - fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; 815 - } 792 + if (i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane)) 793 + plane->fbc = &dev_priv->fbc; 794 + if (plane->fbc) 795 + plane->fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; 816 796 817 797 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 818 798 formats = vlv_primary_formats; ··· 867 851 plane->max_stride = ilk_primary_max_stride; 868 852 } 869 853 870 - plane->update_plane = i9xx_update_plane; 871 - plane->disable_plane = i9xx_disable_plane; 854 + if (IS_I830(dev_priv) || IS_I845G(dev_priv)) { 855 + plane->update_arm = i830_plane_update_arm; 856 + } else { 857 + plane->update_noarm = i9xx_plane_update_noarm; 858 + plane->update_arm = i9xx_plane_update_arm; 859 + } 860 + plane->disable_arm = i9xx_plane_disable_arm; 872 861 plane->get_hw_state = i9xx_plane_get_hw_state; 873 862 plane->check_plane = i9xx_plane_check; 874 863 ··· 896 875 plane->disable_flip_done = ilk_primary_disable_flip_done; 897 876 } 898 877 878 + modifiers = intel_fb_plane_get_modifiers(dev_priv, 
INTEL_PLANE_CAP_TILING_X); 879 + 899 880 if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) 900 881 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 901 882 0, plane_funcs, 902 883 formats, num_formats, 903 - i9xx_format_modifiers, 884 + modifiers, 904 885 DRM_PLANE_TYPE_PRIMARY, 905 886 "primary %c", pipe_name(pipe)); 906 887 else 907 888 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 908 889 0, plane_funcs, 909 890 formats, num_formats, 910 - i9xx_format_modifiers, 891 + modifiers, 911 892 DRM_PLANE_TYPE_PRIMARY, 912 893 "plane %c", 913 894 plane_name(plane->i9xx_plane)); 895 + 896 + kfree(modifiers); 897 + 914 898 if (ret) 915 899 goto fail; 916 900
+8 -2
drivers/gpu/drm/i915/display/icl_dsi.c
··· 28 28 #include <drm/drm_atomic_helper.h> 29 29 #include <drm/drm_mipi_dsi.h> 30 30 31 + #include "icl_dsi.h" 31 32 #include "intel_atomic.h" 32 33 #include "intel_backlight.h" 33 34 #include "intel_combo_phy.h" ··· 37 36 #include "intel_ddi.h" 38 37 #include "intel_de.h" 39 38 #include "intel_dsi.h" 39 + #include "intel_dsi_vbt.h" 40 40 #include "intel_panel.h" 41 41 #include "intel_vdsc.h" 42 42 #include "skl_scaler.h" ··· 185 183 186 184 if (enable_lpdt) 187 185 tmp |= LP_DATA_TRANSFER; 186 + else 187 + tmp &= ~LP_DATA_TRANSFER; 188 188 189 189 tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK); 190 190 tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT); ··· 1230 1226 /* step5: program and powerup panel */ 1231 1227 gen11_dsi_powerup_panel(encoder); 1232 1228 1233 - intel_dsc_enable(encoder, pipe_config); 1229 + intel_dsc_dsi_pps_write(encoder, pipe_config); 1230 + 1231 + intel_dsc_enable(pipe_config); 1234 1232 1235 1233 /* step6c: configure transcoder timings */ 1236 1234 gen11_dsi_set_transcoder_timings(encoder, pipe_config); ··· 1629 1623 /* FIXME: initialize from VBT */ 1630 1624 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1631 1625 1632 - ret = intel_dsc_compute_params(encoder, crtc_state); 1626 + ret = intel_dsc_compute_params(crtc_state); 1633 1627 if (ret) 1634 1628 return ret; 1635 1629
+15
drivers/gpu/drm/i915/display/icl_dsi.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #ifndef __ICL_DSI_H__ 7 + #define __ICL_DSI_H__ 8 + 9 + struct drm_i915_private; 10 + struct intel_crtc_state; 11 + 12 + void icl_dsi_init(struct drm_i915_private *i915); 13 + void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); 14 + 15 + #endif /* __ICL_DSI_H__ */
+69 -20
drivers/gpu/drm/i915/display/intel_atomic_plane.c
··· 39 39 #include "intel_atomic_plane.h" 40 40 #include "intel_cdclk.h" 41 41 #include "intel_display_types.h" 42 + #include "intel_fb.h" 42 43 #include "intel_fb_pin.h" 43 44 #include "intel_pm.h" 44 45 #include "intel_sprite.h" ··· 470 469 return NULL; 471 470 } 472 471 473 - void intel_update_plane(struct intel_plane *plane, 474 - const struct intel_crtc_state *crtc_state, 475 - const struct intel_plane_state *plane_state) 472 + void intel_plane_update_noarm(struct intel_plane *plane, 473 + const struct intel_crtc_state *crtc_state, 474 + const struct intel_plane_state *plane_state) 476 475 { 477 476 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 478 477 479 - trace_intel_update_plane(&plane->base, crtc); 478 + trace_intel_plane_update_noarm(&plane->base, crtc); 479 + 480 + if (plane->update_noarm) 481 + plane->update_noarm(plane, crtc_state, plane_state); 482 + } 483 + 484 + void intel_plane_update_arm(struct intel_plane *plane, 485 + const struct intel_crtc_state *crtc_state, 486 + const struct intel_plane_state *plane_state) 487 + { 488 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 489 + 490 + trace_intel_plane_update_arm(&plane->base, crtc); 480 491 481 492 if (crtc_state->uapi.async_flip && plane->async_flip) 482 493 plane->async_flip(plane, crtc_state, plane_state, true); 483 494 else 484 - plane->update_plane(plane, crtc_state, plane_state); 495 + plane->update_arm(plane, crtc_state, plane_state); 485 496 } 486 497 487 - void intel_disable_plane(struct intel_plane *plane, 488 - const struct intel_crtc_state *crtc_state) 498 + void intel_plane_disable_arm(struct intel_plane *plane, 499 + const struct intel_crtc_state *crtc_state) 489 500 { 490 501 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 491 502 492 - trace_intel_disable_plane(&plane->base, crtc); 493 - plane->disable_plane(plane, crtc_state); 503 + trace_intel_plane_disable_arm(&plane->base, crtc); 504 + plane->disable_arm(plane, crtc_state); 
494 505 } 495 506 496 - void skl_update_planes_on_crtc(struct intel_atomic_state *state, 497 - struct intel_crtc *crtc) 507 + void intel_update_planes_on_crtc(struct intel_atomic_state *state, 508 + struct intel_crtc *crtc) 509 + { 510 + struct intel_crtc_state *new_crtc_state = 511 + intel_atomic_get_new_crtc_state(state, crtc); 512 + u32 update_mask = new_crtc_state->update_planes; 513 + struct intel_plane_state *new_plane_state; 514 + struct intel_plane *plane; 515 + int i; 516 + 517 + if (new_crtc_state->uapi.async_flip) 518 + return; 519 + 520 + /* 521 + * Since we only write non-arming registers here, 522 + * the order does not matter even for skl+. 523 + */ 524 + for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) { 525 + if (crtc->pipe != plane->pipe || 526 + !(update_mask & BIT(plane->id))) 527 + continue; 528 + 529 + /* TODO: for mailbox updates this should be skipped */ 530 + if (new_plane_state->uapi.visible || 531 + new_plane_state->planar_slave) 532 + intel_plane_update_noarm(plane, new_crtc_state, new_plane_state); 533 + } 534 + } 535 + 536 + void skl_arm_planes_on_crtc(struct intel_atomic_state *state, 537 + struct intel_crtc *crtc) 498 538 { 499 539 struct intel_crtc_state *old_crtc_state = 500 540 intel_atomic_get_old_crtc_state(state, crtc); ··· 557 515 struct intel_plane_state *new_plane_state = 558 516 intel_atomic_get_new_plane_state(state, plane); 559 517 518 + /* 519 + * TODO: for mailbox updates intel_plane_update_noarm() 520 + * would have to be called here as well. 
521 + */ 560 522 if (new_plane_state->uapi.visible || 561 - new_plane_state->planar_slave) { 562 - intel_update_plane(plane, new_crtc_state, new_plane_state); 563 - } else { 564 - intel_disable_plane(plane, new_crtc_state); 565 - } 523 + new_plane_state->planar_slave) 524 + intel_plane_update_arm(plane, new_crtc_state, new_plane_state); 525 + else 526 + intel_plane_disable_arm(plane, new_crtc_state); 566 527 } 567 528 } 568 529 569 - void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, 570 - struct intel_crtc *crtc) 530 + void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state, 531 + struct intel_crtc *crtc) 571 532 { 572 533 struct intel_crtc_state *new_crtc_state = 573 534 intel_atomic_get_new_crtc_state(state, crtc); ··· 584 539 !(update_mask & BIT(plane->id))) 585 540 continue; 586 541 542 + /* 543 + * TODO: for mailbox updates intel_plane_update_noarm() 544 + * would have to be called here as well. 545 + */ 587 546 if (new_plane_state->uapi.visible) 588 - intel_update_plane(plane, new_crtc_state, new_plane_state); 547 + intel_plane_update_arm(plane, new_crtc_state, new_plane_state); 589 548 else 590 - intel_disable_plane(plane, new_crtc_state); 549 + intel_plane_disable_arm(plane, new_crtc_state); 591 550 } 592 551 } 593 552
+14 -9
drivers/gpu/drm/i915/display/intel_atomic_plane.h
··· 30 30 struct intel_crtc *crtc); 31 31 void intel_plane_copy_hw_state(struct intel_plane_state *plane_state, 32 32 const struct intel_plane_state *from_plane_state); 33 - void intel_update_plane(struct intel_plane *plane, 34 - const struct intel_crtc_state *crtc_state, 35 - const struct intel_plane_state *plane_state); 36 - void intel_disable_plane(struct intel_plane *plane, 37 - const struct intel_crtc_state *crtc_state); 33 + void intel_plane_update_noarm(struct intel_plane *plane, 34 + const struct intel_crtc_state *crtc_state, 35 + const struct intel_plane_state *plane_state); 36 + void intel_plane_update_arm(struct intel_plane *plane, 37 + const struct intel_crtc_state *crtc_state, 38 + const struct intel_plane_state *plane_state); 39 + void intel_plane_disable_arm(struct intel_plane *plane, 40 + const struct intel_crtc_state *crtc_state); 38 41 struct intel_plane *intel_plane_alloc(void); 39 42 void intel_plane_free(struct intel_plane *plane); 40 43 struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); 41 44 void intel_plane_destroy_state(struct drm_plane *plane, 42 45 struct drm_plane_state *state); 43 - void skl_update_planes_on_crtc(struct intel_atomic_state *state, 44 - struct intel_crtc *crtc); 45 - void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, 46 - struct intel_crtc *crtc); 46 + void intel_update_planes_on_crtc(struct intel_atomic_state *state, 47 + struct intel_crtc *crtc); 48 + void skl_arm_planes_on_crtc(struct intel_atomic_state *state, 49 + struct intel_crtc *crtc); 50 + void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state, 51 + struct intel_crtc *crtc); 47 52 int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, 48 53 struct intel_crtc_state *crtc_state, 49 54 const struct intel_plane_state *old_plane_state,
+93 -47
drivers/gpu/drm/i915/display/intel_audio.c
··· 62 62 * struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver. 63 63 */ 64 64 65 + struct intel_audio_funcs { 66 + void (*audio_codec_enable)(struct intel_encoder *encoder, 67 + const struct intel_crtc_state *crtc_state, 68 + const struct drm_connector_state *conn_state); 69 + void (*audio_codec_disable)(struct intel_encoder *encoder, 70 + const struct intel_crtc_state *old_crtc_state, 71 + const struct drm_connector_state *old_conn_state); 72 + }; 73 + 65 74 /* DP N/M table */ 66 75 #define LC_810M 810000 67 76 #define LC_540M 540000 ··· 397 388 const struct intel_crtc_state *crtc_state) 398 389 { 399 390 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 400 - struct i915_audio_component *acomp = dev_priv->audio_component; 391 + struct i915_audio_component *acomp = dev_priv->audio.component; 401 392 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 402 393 enum port port = encoder->port; 403 394 const struct dp_aud_n_m *nm; ··· 445 436 const struct intel_crtc_state *crtc_state) 446 437 { 447 438 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 448 - struct i915_audio_component *acomp = dev_priv->audio_component; 439 + struct i915_audio_component *acomp = dev_priv->audio.component; 449 440 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 450 441 enum port port = encoder->port; 451 442 int n, rate; ··· 503 494 drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n", 504 495 transcoder_name(cpu_transcoder)); 505 496 506 - mutex_lock(&dev_priv->av_mutex); 497 + mutex_lock(&dev_priv->audio.mutex); 507 498 508 499 /* Disable timestamps */ 509 500 tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder)); ··· 521 512 tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder); 522 513 intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp); 523 514 524 - mutex_unlock(&dev_priv->av_mutex); 515 + mutex_unlock(&dev_priv->audio.mutex); 525 516 } 526 517 527 518 static unsigned int 
calc_hblank_early_prog(struct intel_encoder *encoder, ··· 650 641 "Enable audio codec on transcoder %s, %u bytes ELD\n", 651 642 transcoder_name(cpu_transcoder), drm_eld_size(eld)); 652 643 653 - mutex_lock(&dev_priv->av_mutex); 644 + mutex_lock(&dev_priv->audio.mutex); 654 645 655 646 /* Enable Audio WA for 4k DSC usecases */ 656 647 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP)) ··· 688 679 /* Enable timestamps */ 689 680 hsw_audio_config_update(encoder, crtc_state); 690 681 691 - mutex_unlock(&dev_priv->av_mutex); 682 + mutex_unlock(&dev_priv->audio.mutex); 692 683 } 693 684 694 685 static void ilk_audio_codec_disable(struct intel_encoder *encoder, ··· 835 826 const struct drm_connector_state *conn_state) 836 827 { 837 828 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 838 - struct i915_audio_component *acomp = dev_priv->audio_component; 829 + struct i915_audio_component *acomp = dev_priv->audio.component; 839 830 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 840 831 struct drm_connector *connector = conn_state->connector; 841 832 const struct drm_display_mode *adjusted_mode = ··· 857 848 858 849 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; 859 850 860 - if (dev_priv->audio_funcs) 861 - dev_priv->audio_funcs->audio_codec_enable(encoder, 851 + if (dev_priv->audio.funcs) 852 + dev_priv->audio.funcs->audio_codec_enable(encoder, 862 853 crtc_state, 863 854 conn_state); 864 855 865 - mutex_lock(&dev_priv->av_mutex); 856 + mutex_lock(&dev_priv->audio.mutex); 866 857 encoder->audio_connector = connector; 867 858 868 859 /* referred in audio callbacks */ 869 - dev_priv->av_enc_map[pipe] = encoder; 870 - mutex_unlock(&dev_priv->av_mutex); 860 + dev_priv->audio.encoder_map[pipe] = encoder; 861 + mutex_unlock(&dev_priv->audio.mutex); 871 862 872 863 if (acomp && acomp->base.audio_ops && 873 864 acomp->base.audio_ops->pin_eld_notify) { ··· 897 888 const struct drm_connector_state *old_conn_state) 898 889 { 
899 890 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 900 - struct i915_audio_component *acomp = dev_priv->audio_component; 891 + struct i915_audio_component *acomp = dev_priv->audio.component; 901 892 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 902 893 enum port port = encoder->port; 903 894 enum pipe pipe = crtc->pipe; 904 895 905 - if (dev_priv->audio_funcs) 906 - dev_priv->audio_funcs->audio_codec_disable(encoder, 896 + if (dev_priv->audio.funcs) 897 + dev_priv->audio.funcs->audio_codec_disable(encoder, 907 898 old_crtc_state, 908 899 old_conn_state); 909 900 910 - mutex_lock(&dev_priv->av_mutex); 901 + mutex_lock(&dev_priv->audio.mutex); 911 902 encoder->audio_connector = NULL; 912 - dev_priv->av_enc_map[pipe] = NULL; 913 - mutex_unlock(&dev_priv->av_mutex); 903 + dev_priv->audio.encoder_map[pipe] = NULL; 904 + mutex_unlock(&dev_priv->audio.mutex); 914 905 915 906 if (acomp && acomp->base.audio_ops && 916 907 acomp->base.audio_ops->pin_eld_notify) { ··· 940 931 }; 941 932 942 933 /** 943 - * intel_init_audio_hooks - Set up chip specific audio hooks 934 + * intel_audio_hooks_init - Set up chip specific audio hooks 944 935 * @dev_priv: device private 945 936 */ 946 - void intel_init_audio_hooks(struct drm_i915_private *dev_priv) 937 + void intel_audio_hooks_init(struct drm_i915_private *dev_priv) 947 938 { 948 939 if (IS_G4X(dev_priv)) { 949 - dev_priv->audio_funcs = &g4x_audio_funcs; 940 + dev_priv->audio.funcs = &g4x_audio_funcs; 950 941 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 951 - dev_priv->audio_funcs = &ilk_audio_funcs; 942 + dev_priv->audio.funcs = &ilk_audio_funcs; 952 943 } else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) { 953 - dev_priv->audio_funcs = &hsw_audio_funcs; 944 + dev_priv->audio.funcs = &hsw_audio_funcs; 954 945 } else if (HAS_PCH_SPLIT(dev_priv)) { 955 - dev_priv->audio_funcs = &ilk_audio_funcs; 946 + dev_priv->audio.funcs = &ilk_audio_funcs; 947 + } 948 + } 
949 + 950 + struct aud_ts_cdclk_m_n { 951 + u8 m; 952 + u16 n; 953 + }; 954 + 955 + void intel_audio_cdclk_change_pre(struct drm_i915_private *i915) 956 + { 957 + if (DISPLAY_VER(i915) >= 13) 958 + intel_de_rmw(i915, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0); 959 + } 960 + 961 + static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts) 962 + { 963 + if (refclk == 24000) 964 + aud_ts->m = 12; 965 + else 966 + aud_ts->m = 15; 967 + 968 + aud_ts->n = cdclk * aud_ts->m / 24000; 969 + } 970 + 971 + void intel_audio_cdclk_change_post(struct drm_i915_private *i915) 972 + { 973 + struct aud_ts_cdclk_m_n aud_ts; 974 + 975 + if (DISPLAY_VER(i915) >= 13) { 976 + get_aud_ts_cdclk_m_n(i915->cdclk.hw.ref, i915->cdclk.hw.cdclk, &aud_ts); 977 + 978 + intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n); 979 + intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN); 980 + drm_dbg_kms(&i915->drm, "aud_ts_cdclk set to M=%u, N=%u\n", aud_ts.m, aud_ts.n); 956 981 } 957 982 } 958 983 ··· 1057 1014 1058 1015 ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK); 1059 1016 1060 - if (dev_priv->audio_power_refcount++ == 0) { 1017 + if (dev_priv->audio.power_refcount++ == 0) { 1061 1018 if (DISPLAY_VER(dev_priv) >= 9) { 1062 1019 intel_de_write(dev_priv, AUD_FREQ_CNTRL, 1063 - dev_priv->audio_freq_cntrl); 1020 + dev_priv->audio.freq_cntrl); 1064 1021 drm_dbg_kms(&dev_priv->drm, 1065 1022 "restored AUD_FREQ_CNTRL to 0x%x\n", 1066 - dev_priv->audio_freq_cntrl); 1023 + dev_priv->audio.freq_cntrl); 1067 1024 } 1068 1025 1069 1026 /* Force CDCLK to 2*BCLK as long as we need audio powered. */ ··· 1084 1041 struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 1085 1042 1086 1043 /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. 
*/ 1087 - if (--dev_priv->audio_power_refcount == 0) 1044 + if (--dev_priv->audio.power_refcount == 0) 1088 1045 if (IS_GEMINILAKE(dev_priv)) 1089 1046 glk_force_audio_cdclk(dev_priv, false); 1090 1047 ··· 1136 1093 /* 1137 1094 * get the intel_encoder according to the parameter port and pipe 1138 1095 * intel_encoder is saved by the index of pipe 1139 - * MST & (pipe >= 0): return the av_enc_map[pipe], 1096 + * MST & (pipe >= 0): return the audio.encoder_map[pipe], 1140 1097 * when port is matched 1141 1098 * MST & (pipe < 0): this is invalid 1142 1099 * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry) ··· 1151 1108 /* MST */ 1152 1109 if (pipe >= 0) { 1153 1110 if (drm_WARN_ON(&dev_priv->drm, 1154 - pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) 1111 + pipe >= ARRAY_SIZE(dev_priv->audio.encoder_map))) 1155 1112 return NULL; 1156 1113 1157 - encoder = dev_priv->av_enc_map[pipe]; 1114 + encoder = dev_priv->audio.encoder_map[pipe]; 1158 1115 /* 1159 1116 * when bootup, audio driver may not know it is 1160 1117 * MST or not. So it will poll all the port & pipe ··· 1170 1127 return NULL; 1171 1128 1172 1129 for_each_pipe(dev_priv, pipe) { 1173 - encoder = dev_priv->av_enc_map[pipe]; 1130 + encoder = dev_priv->audio.encoder_map[pipe]; 1174 1131 if (encoder == NULL) 1175 1132 continue; 1176 1133 ··· 1188 1145 int pipe, int rate) 1189 1146 { 1190 1147 struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 1191 - struct i915_audio_component *acomp = dev_priv->audio_component; 1148 + struct i915_audio_component *acomp = dev_priv->audio.component; 1192 1149 struct intel_encoder *encoder; 1193 1150 struct intel_crtc *crtc; 1194 1151 unsigned long cookie; ··· 1198 1155 return 0; 1199 1156 1200 1157 cookie = i915_audio_component_get_power(kdev); 1201 - mutex_lock(&dev_priv->av_mutex); 1158 + mutex_lock(&dev_priv->audio.mutex); 1202 1159 1203 1160 /* 1. 
get the pipe */ 1204 1161 encoder = get_saved_enc(dev_priv, port, pipe); ··· 1217 1174 hsw_audio_config_update(encoder, crtc->config); 1218 1175 1219 1176 unlock: 1220 - mutex_unlock(&dev_priv->av_mutex); 1177 + mutex_unlock(&dev_priv->audio.mutex); 1221 1178 i915_audio_component_put_power(kdev, cookie); 1222 1179 return err; 1223 1180 } ··· 1231 1188 const u8 *eld; 1232 1189 int ret = -EINVAL; 1233 1190 1234 - mutex_lock(&dev_priv->av_mutex); 1191 + mutex_lock(&dev_priv->audio.mutex); 1235 1192 1236 1193 intel_encoder = get_saved_enc(dev_priv, port, pipe); 1237 1194 if (!intel_encoder) { 1238 1195 drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n", 1239 1196 port_name(port)); 1240 - mutex_unlock(&dev_priv->av_mutex); 1197 + mutex_unlock(&dev_priv->audio.mutex); 1241 1198 return ret; 1242 1199 } 1243 1200 ··· 1249 1206 memcpy(buf, eld, min(max_bytes, ret)); 1250 1207 } 1251 1208 1252 - mutex_unlock(&dev_priv->av_mutex); 1209 + mutex_unlock(&dev_priv->audio.mutex); 1253 1210 return ret; 1254 1211 } 1255 1212 ··· 1284 1241 BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); 1285 1242 for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) 1286 1243 acomp->aud_sample_rate[i] = 0; 1287 - dev_priv->audio_component = acomp; 1244 + dev_priv->audio.component = acomp; 1288 1245 drm_modeset_unlock_all(&dev_priv->drm); 1289 1246 1290 1247 return 0; ··· 1299 1256 drm_modeset_lock_all(&dev_priv->drm); 1300 1257 acomp->base.ops = NULL; 1301 1258 acomp->base.dev = NULL; 1302 - dev_priv->audio_component = NULL; 1259 + dev_priv->audio.component = NULL; 1303 1260 drm_modeset_unlock_all(&dev_priv->drm); 1304 1261 1305 1262 device_link_remove(hda_kdev, i915_kdev); 1306 1263 1307 - if (dev_priv->audio_power_refcount) 1264 + if (dev_priv->audio.power_refcount) 1308 1265 drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n", 1309 - dev_priv->audio_power_refcount); 1266 + dev_priv->audio.power_refcount); 1310 1267 } 1311 1268 1312 1269 static const struct component_ops 
i915_audio_component_bind_ops = { ··· 1370 1327 drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n", 1371 1328 aud_freq, aud_freq_init); 1372 1329 1373 - dev_priv->audio_freq_cntrl = aud_freq; 1330 + dev_priv->audio.freq_cntrl = aud_freq; 1374 1331 } 1375 1332 1376 - dev_priv->audio_component_registered = true; 1333 + /* init with current cdclk */ 1334 + intel_audio_cdclk_change_post(dev_priv); 1335 + 1336 + dev_priv->audio.component_registered = true; 1377 1337 } 1378 1338 1379 1339 /** ··· 1388 1342 */ 1389 1343 static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv) 1390 1344 { 1391 - if (!dev_priv->audio_component_registered) 1345 + if (!dev_priv->audio.component_registered) 1392 1346 return; 1393 1347 1394 1348 component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops); 1395 - dev_priv->audio_component_registered = false; 1349 + dev_priv->audio.component_registered = false; 1396 1350 } 1397 1351 1398 1352 /** ··· 1414 1368 */ 1415 1369 void intel_audio_deinit(struct drm_i915_private *dev_priv) 1416 1370 { 1417 - if ((dev_priv)->lpe_audio.platdev != NULL) 1371 + if ((dev_priv)->audio.lpe.platdev != NULL) 1418 1372 intel_lpe_audio_teardown(dev_priv); 1419 1373 else 1420 1374 i915_audio_component_cleanup(dev_priv);
+3 -1
drivers/gpu/drm/i915/display/intel_audio.h
··· 11 11 struct intel_crtc_state; 12 12 struct intel_encoder; 13 13 14 - void intel_init_audio_hooks(struct drm_i915_private *dev_priv); 14 + void intel_audio_hooks_init(struct drm_i915_private *dev_priv); 15 15 void intel_audio_codec_enable(struct intel_encoder *encoder, 16 16 const struct intel_crtc_state *crtc_state, 17 17 const struct drm_connector_state *conn_state); 18 18 void intel_audio_codec_disable(struct intel_encoder *encoder, 19 19 const struct intel_crtc_state *old_crtc_state, 20 20 const struct drm_connector_state *old_conn_state); 21 + void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv); 22 + void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv); 21 23 void intel_audio_init(struct drm_i915_private *dev_priv); 22 24 void intel_audio_deinit(struct drm_i915_private *dev_priv); 23 25
+181 -34
drivers/gpu/drm/i915/display/intel_bw.c
··· 27 27 u8 num_points; 28 28 u8 num_psf_points; 29 29 u8 t_bl; 30 + u8 max_numchannels; 31 + u8 channel_width; 32 + u8 deinterleave; 30 33 }; 31 34 32 35 static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, ··· 45 42 dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */ 46 43 else 47 44 dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */ 48 - sp->dclk = dclk_ratio * dclk_reference; 45 + sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000); 49 46 50 47 val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); 51 48 if (val & DG1_GEAR_TYPE) ··· 72 69 int point) 73 70 { 74 71 u32 val = 0, val2 = 0; 72 + u16 dclk; 75 73 int ret; 76 74 77 75 ret = sandybridge_pcode_read(dev_priv, ··· 82 78 if (ret) 83 79 return ret; 84 80 85 - sp->dclk = val & 0xffff; 81 + dclk = val & 0xffff; 82 + sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 500 : 0), 1000); 86 83 sp->t_rp = (val & 0xff0000) >> 16; 87 84 sp->t_rcd = (val & 0xff000000) >> 24; 88 85 ··· 138 133 } 139 134 140 135 static int icl_get_qgv_points(struct drm_i915_private *dev_priv, 141 - struct intel_qgv_info *qi) 136 + struct intel_qgv_info *qi, 137 + bool is_y_tile) 142 138 { 143 139 const struct dram_info *dram_info = &dev_priv->dram_info; 144 140 int i, ret; ··· 147 141 qi->num_points = dram_info->num_qgv_points; 148 142 qi->num_psf_points = dram_info->num_psf_gv_points; 149 143 150 - if (DISPLAY_VER(dev_priv) == 12) 144 + if (DISPLAY_VER(dev_priv) >= 12) 151 145 switch (dram_info->type) { 152 146 case INTEL_DRAM_DDR4: 153 - qi->t_bl = 4; 147 + qi->t_bl = is_y_tile ? 8 : 4; 148 + qi->max_numchannels = 2; 149 + qi->channel_width = 64; 150 + qi->deinterleave = is_y_tile ? 1 : 2; 154 151 break; 155 152 case INTEL_DRAM_DDR5: 156 - qi->t_bl = 8; 153 + qi->t_bl = is_y_tile ? 16 : 8; 154 + qi->max_numchannels = 4; 155 + qi->channel_width = 32; 156 + qi->deinterleave = is_y_tile ? 
1 : 2; 157 + break; 158 + case INTEL_DRAM_LPDDR4: 159 + if (IS_ROCKETLAKE(dev_priv)) { 160 + qi->t_bl = 8; 161 + qi->max_numchannels = 4; 162 + qi->channel_width = 32; 163 + qi->deinterleave = 2; 164 + break; 165 + } 166 + fallthrough; 167 + case INTEL_DRAM_LPDDR5: 168 + qi->t_bl = 16; 169 + qi->max_numchannels = 8; 170 + qi->channel_width = 16; 171 + qi->deinterleave = is_y_tile ? 2 : 4; 157 172 break; 158 173 default: 159 174 qi->t_bl = 16; 175 + qi->max_numchannels = 1; 160 176 break; 161 177 } 162 - else if (DISPLAY_VER(dev_priv) == 11) 178 + else if (DISPLAY_VER(dev_priv) == 11) { 163 179 qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8; 180 + qi->max_numchannels = 1; 181 + } 164 182 165 183 if (drm_WARN_ON(&dev_priv->drm, 166 184 qi->num_points > ARRAY_SIZE(qi->points))) ··· 221 191 } 222 192 223 193 return 0; 224 - } 225 - 226 - static int icl_calc_bw(int dclk, int num, int den) 227 - { 228 - /* multiples of 16.666MHz (100/6) */ 229 - return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6); 230 194 } 231 195 232 196 static int adl_calc_psf_bw(int clk) ··· 264 240 }; 265 241 266 242 static const struct intel_sa_info rkl_sa_info = { 267 - .deburst = 16, 243 + .deburst = 8, 268 244 .deprogbwlimit = 20, /* GB/s */ 269 245 .displayrtids = 128, 270 246 .derating = 10, ··· 289 265 struct intel_qgv_info qi = {}; 290 266 bool is_y_tile = true; /* assume y tile may be used */ 291 267 int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); 292 - int deinterleave; 293 - int ipqdepth, ipqdepthpch; 268 + int ipqdepth, ipqdepthpch = 16; 294 269 int dclk_max; 295 270 int maxdebw; 271 + int num_groups = ARRAY_SIZE(dev_priv->max_bw); 296 272 int i, ret; 297 273 298 - ret = icl_get_qgv_points(dev_priv, &qi); 274 + ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); 299 275 if (ret) { 300 276 drm_dbg_kms(&dev_priv->drm, 301 277 "Failed to get memory subsystem information, ignoring bandwidth limits"); 302 278 return ret; 303 279 } 304 280 305 - deinterleave 
= DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); 306 281 dclk_max = icl_sagv_max_dclk(&qi); 307 - 308 - ipqdepthpch = 16; 309 - 310 - maxdebw = min(sa->deprogbwlimit * 1000, 311 - icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */ 282 + maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10); 312 283 ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels); 284 + qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); 313 285 314 - for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { 286 + for (i = 0; i < num_groups; i++) { 315 287 struct intel_bw_info *bi = &dev_priv->max_bw[i]; 316 288 int clpchgroup; 317 289 int j; 318 290 319 - clpchgroup = (sa->deburst * deinterleave / num_channels) << i; 291 + clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; 320 292 bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; 321 293 322 294 bi->num_qgv_points = qi.num_points; ··· 330 310 */ 331 311 ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + 332 312 (clpchgroup - 1) * qi.t_bl + sp->t_rdpre); 333 - bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct); 313 + bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct); 314 + 315 + bi->deratedbw[j] = min(maxdebw, 316 + bw * (100 - sa->derating) / 100); 317 + 318 + drm_dbg_kms(&dev_priv->drm, 319 + "BW%d / QGV %d: num_planes=%d deratedbw=%u\n", 320 + i, j, bi->num_planes, bi->deratedbw[j]); 321 + } 322 + } 323 + /* 324 + * In case if SAGV is disabled in BIOS, we always get 1 325 + * SAGV point, but we can't send PCode commands to restrict it 326 + * as it will fail and pointless anyway. 
327 + */ 328 + if (qi.num_points == 1) 329 + dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; 330 + else 331 + dev_priv->sagv_status = I915_SAGV_ENABLED; 332 + 333 + return 0; 334 + } 335 + 336 + static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) 337 + { 338 + struct intel_qgv_info qi = {}; 339 + const struct dram_info *dram_info = &dev_priv->dram_info; 340 + bool is_y_tile = true; /* assume y tile may be used */ 341 + int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); 342 + int ipqdepth, ipqdepthpch = 16; 343 + int dclk_max; 344 + int maxdebw, peakbw; 345 + int clperchgroup; 346 + int num_groups = ARRAY_SIZE(dev_priv->max_bw); 347 + int i, ret; 348 + 349 + ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); 350 + if (ret) { 351 + drm_dbg_kms(&dev_priv->drm, 352 + "Failed to get memory subsystem information, ignoring bandwidth limits"); 353 + return ret; 354 + } 355 + 356 + if (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5) 357 + num_channels *= 2; 358 + 359 + qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 
4 : 2); 360 + 361 + if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12) 362 + qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1); 363 + 364 + if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels) 365 + drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels."); 366 + if (qi.max_numchannels != 0) 367 + num_channels = min_t(u8, num_channels, qi.max_numchannels); 368 + 369 + dclk_max = icl_sagv_max_dclk(&qi); 370 + 371 + peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max; 372 + maxdebw = min(sa->deprogbwlimit * 1000, peakbw * 6 / 10); /* 60% */ 373 + 374 + ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels); 375 + /* 376 + * clperchgroup = 4kpagespermempage * clperchperblock, 377 + * clperchperblock = 8 / num_channels * interleave 378 + */ 379 + clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave; 380 + 381 + for (i = 0; i < num_groups; i++) { 382 + struct intel_bw_info *bi = &dev_priv->max_bw[i]; 383 + struct intel_bw_info *bi_next; 384 + int clpchgroup; 385 + int j; 386 + 387 + if (i < num_groups - 1) 388 + bi_next = &dev_priv->max_bw[i + 1]; 389 + 390 + clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; 391 + 392 + if (i < num_groups - 1 && clpchgroup < clperchgroup) 393 + bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; 394 + else 395 + bi_next->num_planes = 0; 396 + 397 + bi->num_qgv_points = qi.num_points; 398 + bi->num_psf_gv_points = qi.num_psf_points; 399 + 400 + for (j = 0; j < qi.num_points; j++) { 401 + const struct intel_qgv_point *sp = &qi.points[j]; 402 + int ct, bw; 403 + 404 + /* 405 + * Max row cycle time 406 + * 407 + * FIXME what is the logic behind the 408 + * assumed burst length? 
409 + */ 410 + ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + 411 + (clpchgroup - 1) * qi.t_bl + sp->t_rdpre); 412 + bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct); 334 413 335 414 bi->deratedbw[j] = min(maxdebw, 336 415 bw * (100 - sa->derating) / 100); ··· 448 329 "BW%d / PSF GV %d: num_planes=%d bw=%u\n", 449 330 i, j, bi->num_planes, bi->psf_bw[j]); 450 331 } 451 - 452 - if (bi->num_planes == 1) 453 - break; 454 332 } 455 333 456 334 /* ··· 511 395 return 0; 512 396 } 513 397 398 + static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv, 399 + int num_planes, int qgv_point) 400 + { 401 + int i; 402 + 403 + /* 404 + * Let's return max bw for 0 planes 405 + */ 406 + num_planes = max(1, num_planes); 407 + 408 + for (i = ARRAY_SIZE(dev_priv->max_bw) - 1; i >= 0; i--) { 409 + const struct intel_bw_info *bi = 410 + &dev_priv->max_bw[i]; 411 + 412 + /* 413 + * Pcode will not expose all QGV points when 414 + * SAGV is forced to off/min/med/max. 415 + */ 416 + if (qgv_point >= bi->num_qgv_points) 417 + return UINT_MAX; 418 + 419 + if (num_planes <= bi->num_planes) 420 + return bi->deratedbw[qgv_point]; 421 + } 422 + 423 + return dev_priv->max_bw[0].deratedbw[qgv_point]; 424 + } 425 + 514 426 static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv, 515 427 int psf_gv_point) 516 428 { ··· 556 412 if (IS_DG2(dev_priv)) 557 413 dg2_get_bw_info(dev_priv); 558 414 else if (IS_ALDERLAKE_P(dev_priv)) 559 - icl_get_bw_info(dev_priv, &adlp_sa_info); 415 + tgl_get_bw_info(dev_priv, &adlp_sa_info); 560 416 else if (IS_ALDERLAKE_S(dev_priv)) 561 - icl_get_bw_info(dev_priv, &adls_sa_info); 417 + tgl_get_bw_info(dev_priv, &adls_sa_info); 562 418 else if (IS_ROCKETLAKE(dev_priv)) 563 - icl_get_bw_info(dev_priv, &rkl_sa_info); 419 + tgl_get_bw_info(dev_priv, &rkl_sa_info); 564 420 else if (DISPLAY_VER(dev_priv) == 12) 565 - icl_get_bw_info(dev_priv, &tgl_sa_info); 421 + tgl_get_bw_info(dev_priv, &tgl_sa_info); 566 422 else if (DISPLAY_VER(dev_priv) 
== 11) 567 423 icl_get_bw_info(dev_priv, &icl_sa_info); 568 424 } ··· 890 746 for (i = 0; i < num_qgv_points; i++) { 891 747 unsigned int max_data_rate; 892 748 893 - max_data_rate = icl_max_bw(dev_priv, num_active_planes, i); 749 + if (DISPLAY_VER(dev_priv) > 11) 750 + max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i); 751 + else 752 + max_data_rate = icl_max_bw(dev_priv, num_active_planes, i); 894 753 /* 895 754 * We need to know which qgv point gives us 896 755 * maximum bandwidth in order to disable SAGV
+5
drivers/gpu/drm/i915/display/intel_cdclk.c
··· 24 24 #include <linux/time.h> 25 25 26 26 #include "intel_atomic.h" 27 + #include "intel_audio.h" 27 28 #include "intel_bw.h" 28 29 #include "intel_cdclk.h" 29 30 #include "intel_de.h" ··· 1976 1975 intel_psr_pause(intel_dp); 1977 1976 } 1978 1977 1978 + intel_audio_cdclk_change_pre(dev_priv); 1979 + 1979 1980 /* 1980 1981 * Lock aux/gmbus while we change cdclk in case those 1981 1982 * functions use cdclk. Not all platforms/ports do, ··· 2005 2002 2006 2003 intel_psr_resume(intel_dp); 2007 2004 } 2005 + 2006 + intel_audio_cdclk_change_post(dev_priv); 2008 2007 2009 2008 if (drm_WARN(&dev_priv->drm, 2010 2009 intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
+65 -65
drivers/gpu/drm/i915/display/intel_color.c
··· 26 26 #include "intel_de.h" 27 27 #include "intel_display_types.h" 28 28 #include "intel_dpll.h" 29 - #include "intel_dsi.h" 29 + #include "vlv_dsi_pll.h" 30 30 31 31 #define CTM_COEFF_SIGN (1ULL << 63) 32 32 ··· 552 552 lut = blob->data; 553 553 554 554 for (i = 0; i < 256; i++) 555 - intel_de_write(dev_priv, PALETTE(pipe, i), 556 - i9xx_lut_8(&lut[i])); 555 + intel_de_write_fw(dev_priv, PALETTE(pipe, i), 556 + i9xx_lut_8(&lut[i])); 557 557 } 558 558 559 559 static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) ··· 576 576 enum pipe pipe = crtc->pipe; 577 577 578 578 for (i = 0; i < lut_size - 1; i++) { 579 - intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0), 580 - i965_lut_10p6_ldw(&lut[i])); 581 - intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1), 582 - i965_lut_10p6_udw(&lut[i])); 579 + intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0), 580 + i965_lut_10p6_ldw(&lut[i])); 581 + intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1), 582 + i965_lut_10p6_udw(&lut[i])); 583 583 } 584 584 585 - intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red); 586 - intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green); 587 - intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue); 585 + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red); 586 + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green); 587 + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue); 588 588 } 589 589 590 590 static void i965_load_luts(const struct intel_crtc_state *crtc_state) ··· 618 618 lut = blob->data; 619 619 620 620 for (i = 0; i < 256; i++) 621 - intel_de_write(dev_priv, LGC_PALETTE(pipe, i), 622 - i9xx_lut_8(&lut[i])); 621 + intel_de_write_fw(dev_priv, LGC_PALETTE(pipe, i), 622 + i9xx_lut_8(&lut[i])); 623 623 } 624 624 625 625 static void ilk_load_lut_10(struct intel_crtc *crtc, ··· 631 631 enum pipe pipe = crtc->pipe; 632 632 633 633 for (i = 0; i < lut_size; i++) 634 - intel_de_write(dev_priv, PREC_PALETTE(pipe, i), 635 - 
ilk_lut_10(&lut[i])); 634 + intel_de_write_fw(dev_priv, PREC_PALETTE(pipe, i), 635 + ilk_lut_10(&lut[i])); 636 636 } 637 637 638 638 static void ilk_load_luts(const struct intel_crtc_state *crtc_state) ··· 681 681 const struct drm_color_lut *entry = 682 682 &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; 683 683 684 - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++); 685 - intel_de_write(dev_priv, PREC_PAL_DATA(pipe), 686 - ilk_lut_10(entry)); 684 + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), prec_index++); 685 + intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe), 686 + ilk_lut_10(entry)); 687 687 } 688 688 689 689 /* 690 690 * Reset the index, otherwise it prevents the legacy palette to be 691 691 * written properly. 692 692 */ 693 - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); 693 + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); 694 694 } 695 695 696 696 /* On BDW+ the index auto increment mode actually works */ ··· 704 704 int i, lut_size = drm_color_lut_size(blob); 705 705 enum pipe pipe = crtc->pipe; 706 706 707 - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 708 - prec_index | PAL_PREC_AUTO_INCREMENT); 707 + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 708 + prec_index | PAL_PREC_AUTO_INCREMENT); 709 709 710 710 for (i = 0; i < hw_lut_size; i++) { 711 711 /* We discard half the user entries in split gamma mode */ 712 712 const struct drm_color_lut *entry = 713 713 &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; 714 714 715 - intel_de_write(dev_priv, PREC_PAL_DATA(pipe), 716 - ilk_lut_10(entry)); 715 + intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe), 716 + ilk_lut_10(entry)); 717 717 } 718 718 719 719 /* 720 720 * Reset the index, otherwise it prevents the legacy palette to be 721 721 * written properly. 
722 722 */ 723 - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); 723 + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); 724 724 } 725 725 726 726 static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state) ··· 821 821 * ignore the index bits, so we need to reset it to index 0 822 822 * separately. 823 823 */ 824 - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 825 - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 826 - PRE_CSC_GAMC_AUTO_INCREMENT); 824 + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 825 + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 826 + PRE_CSC_GAMC_AUTO_INCREMENT); 827 827 828 828 for (i = 0; i < lut_size; i++) { 829 829 /* ··· 839 839 * ToDo: Extend to max 7.0. Enable 32 bit input value 840 840 * as compared to just 16 to achieve this. 841 841 */ 842 - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 843 - lut[i].green); 842 + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 843 + lut[i].green); 844 844 } 845 845 846 846 /* Clamp values > 1.0. */ 847 847 while (i++ < 35) 848 - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); 848 + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); 849 849 850 - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 850 + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 851 851 } 852 852 853 853 static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state) ··· 862 862 * ignore the index bits, so we need to reset it to index 0 863 863 * separately. 
864 864 */ 865 - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 866 - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 867 - PRE_CSC_GAMC_AUTO_INCREMENT); 865 + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 866 + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 867 + PRE_CSC_GAMC_AUTO_INCREMENT); 868 868 869 869 for (i = 0; i < lut_size; i++) { 870 870 u32 v = (i << 16) / (lut_size - 1); 871 871 872 - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v); 872 + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), v); 873 873 } 874 874 875 875 /* Clamp values > 1.0. */ 876 876 while (i++ < 35) 877 - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); 877 + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); 878 878 879 - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 879 + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); 880 880 } 881 881 882 882 static void glk_load_luts(const struct intel_crtc_state *crtc_state) ··· 1071 1071 enum pipe pipe = crtc->pipe; 1072 1072 1073 1073 for (i = 0; i < lut_size; i++) { 1074 - intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0), 1075 - chv_cgm_degamma_ldw(&lut[i])); 1076 - intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1), 1077 - chv_cgm_degamma_udw(&lut[i])); 1074 + intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0), 1075 + chv_cgm_degamma_ldw(&lut[i])); 1076 + intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1), 1077 + chv_cgm_degamma_udw(&lut[i])); 1078 1078 } 1079 1079 } 1080 1080 ··· 1105 1105 enum pipe pipe = crtc->pipe; 1106 1106 1107 1107 for (i = 0; i < lut_size; i++) { 1108 - intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0), 1109 - chv_cgm_gamma_ldw(&lut[i])); 1110 - intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1), 1111 - chv_cgm_gamma_udw(&lut[i])); 1108 + intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0), 1109 + chv_cgm_gamma_ldw(&lut[i])); 1110 + intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1), 1111 + 
chv_cgm_gamma_udw(&lut[i])); 1112 1112 } 1113 1113 } 1114 1114 ··· 1131 1131 else 1132 1132 i965_load_luts(crtc_state); 1133 1133 1134 - intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe), 1135 - crtc_state->cgm_mode); 1134 + intel_de_write_fw(dev_priv, CGM_PIPE_MODE(crtc->pipe), 1135 + crtc_state->cgm_mode); 1136 1136 } 1137 1137 1138 1138 void intel_color_load_luts(const struct intel_crtc_state *crtc_state) ··· 1808 1808 lut = blob->data; 1809 1809 1810 1810 for (i = 0; i < LEGACY_LUT_LENGTH; i++) { 1811 - u32 val = intel_de_read(dev_priv, PALETTE(pipe, i)); 1811 + u32 val = intel_de_read_fw(dev_priv, PALETTE(pipe, i)); 1812 1812 1813 1813 i9xx_lut_8_pack(&lut[i], val); 1814 1814 } ··· 1843 1843 lut = blob->data; 1844 1844 1845 1845 for (i = 0; i < lut_size - 1; i++) { 1846 - u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0)); 1847 - u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1)); 1846 + u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0)); 1847 + u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1)); 1848 1848 1849 1849 i965_lut_10p6_pack(&lut[i], ldw, udw); 1850 1850 } 1851 1851 1852 - lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0))); 1853 - lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1))); 1854 - lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2))); 1852 + lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 0))); 1853 + lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 1))); 1854 + lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 2))); 1855 1855 1856 1856 return blob; 1857 1857 } ··· 1886 1886 lut = blob->data; 1887 1887 1888 1888 for (i = 0; i < lut_size; i++) { 1889 - u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0)); 1890 - u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1)); 1889 + u32 ldw = 
intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0)); 1890 + u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1)); 1891 1891 1892 1892 chv_cgm_gamma_pack(&lut[i], ldw, udw); 1893 1893 } ··· 1922 1922 lut = blob->data; 1923 1923 1924 1924 for (i = 0; i < LEGACY_LUT_LENGTH; i++) { 1925 - u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i)); 1925 + u32 val = intel_de_read_fw(dev_priv, LGC_PALETTE(pipe, i)); 1926 1926 1927 1927 i9xx_lut_8_pack(&lut[i], val); 1928 1928 } ··· 1947 1947 lut = blob->data; 1948 1948 1949 1949 for (i = 0; i < lut_size; i++) { 1950 - u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i)); 1950 + u32 val = intel_de_read_fw(dev_priv, PREC_PALETTE(pipe, i)); 1951 1951 1952 1952 ilk_lut_10_pack(&lut[i], val); 1953 1953 } ··· 1999 1999 2000 2000 lut = blob->data; 2001 2001 2002 - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 2003 - prec_index | PAL_PREC_AUTO_INCREMENT); 2002 + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 2003 + prec_index | PAL_PREC_AUTO_INCREMENT); 2004 2004 2005 2005 for (i = 0; i < lut_size; i++) { 2006 - u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe)); 2006 + u32 val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe)); 2007 2007 2008 2008 ilk_lut_10_pack(&lut[i], val); 2009 2009 } 2010 2010 2011 - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); 2011 + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); 2012 2012 2013 2013 return blob; 2014 2014 } ··· 2050 2050 2051 2051 lut = blob->data; 2052 2052 2053 - intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 2054 - PAL_PREC_AUTO_INCREMENT); 2053 + intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 2054 + PAL_PREC_AUTO_INCREMENT); 2055 2055 2056 2056 for (i = 0; i < 9; i++) { 2057 - u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); 2058 - u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); 2057 + u32 ldw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); 2058 + u32 udw = 
intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); 2059 2059 2060 2060 icl_lut_multi_seg_pack(&lut[i], ldw, udw); 2061 2061 } 2062 2062 2063 - intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0); 2063 + intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0); 2064 2064 2065 2065 /* 2066 2066 * FIXME readouts from PAL_PREC_DATA register aren't giving
+1 -1
drivers/gpu/drm/i915/display/intel_combo_phy.c
··· 301 301 302 302 val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy)); 303 303 val &= ~PWR_DOWN_LN_MASK; 304 - val |= lane_mask << PWR_DOWN_LN_SHIFT; 304 + val |= lane_mask; 305 305 intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val); 306 306 } 307 307
+6 -7
drivers/gpu/drm/i915/display/intel_crt.c
··· 45 45 #include "intel_fifo_underrun.h" 46 46 #include "intel_gmbus.h" 47 47 #include "intel_hotplug.h" 48 + #include "intel_pch_display.h" 48 49 49 50 /* Here's the desired hotplug mode */ 50 51 #define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ ··· 144 143 static void hsw_crt_get_config(struct intel_encoder *encoder, 145 144 struct intel_crtc_state *pipe_config) 146 145 { 147 - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 146 + lpt_pch_get_config(pipe_config); 148 147 149 148 hsw_ddi_get_config(encoder, pipe_config); 150 149 ··· 153 152 DRM_MODE_FLAG_PVSYNC | 154 153 DRM_MODE_FLAG_NVSYNC); 155 154 pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder); 156 - 157 - pipe_config->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); 158 155 } 159 156 160 157 /* Note: The caller is required to filter out dpms modes not supported by the ··· 246 247 const struct intel_crtc_state *old_crtc_state, 247 248 const struct drm_connector_state *old_conn_state) 248 249 { 250 + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 249 251 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 250 252 251 253 intel_crtc_vblank_off(old_crtc_state); ··· 261 261 262 262 pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state); 263 263 264 - lpt_disable_pch_transcoder(dev_priv); 265 - lpt_disable_iclkip(dev_priv); 264 + lpt_pch_disable(state, crtc); 266 265 267 - intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state); 266 + hsw_fdi_disable(encoder); 268 267 269 268 drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder); 270 269 ··· 315 316 316 317 intel_enable_transcoder(crtc_state); 317 318 318 - lpt_pch_enable(crtc_state); 319 + lpt_pch_enable(state, crtc); 319 320 320 321 intel_crtc_vblank_on(crtc_state); 321 322
+98 -9
drivers/gpu/drm/i915/display/intel_crtc.c
··· 3 3 * Copyright © 2020 Intel Corporation 4 4 */ 5 5 #include <linux/kernel.h> 6 + #include <linux/pm_qos.h> 6 7 #include <linux/slab.h> 7 8 8 9 #include <drm/drm_atomic_helper.h> 9 10 #include <drm/drm_fourcc.h> 10 11 #include <drm/drm_plane.h> 11 12 #include <drm/drm_plane_helper.h> 13 + #include <drm/drm_vblank_work.h> 12 14 13 15 #include "i915_trace.h" 14 16 #include "i915_vgpu.h" 15 - 17 + #include "icl_dsi.h" 16 18 #include "intel_atomic.h" 17 19 #include "intel_atomic_plane.h" 18 20 #include "intel_color.h" ··· 168 166 static void intel_crtc_destroy(struct drm_crtc *_crtc) 169 167 { 170 168 struct intel_crtc *crtc = to_intel_crtc(_crtc); 169 + 170 + cpu_latency_qos_remove_request(&crtc->vblank_pm_qos); 171 171 172 172 drm_crtc_cleanup(&crtc->base); 173 173 kfree(crtc); ··· 348 344 349 345 intel_crtc_crc_init(crtc); 350 346 347 + cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE); 348 + 351 349 drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe); 352 350 353 351 return 0; ··· 358 352 intel_crtc_free(crtc); 359 353 360 354 return ret; 355 + } 356 + 357 + static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state) 358 + { 359 + return crtc_state->hw.active && 360 + !intel_crtc_needs_modeset(crtc_state) && 361 + !crtc_state->preload_luts && 362 + (crtc_state->uapi.color_mgmt_changed || 363 + crtc_state->update_pipe); 364 + } 365 + 366 + static void intel_crtc_vblank_work(struct kthread_work *base) 367 + { 368 + struct drm_vblank_work *work = to_drm_vblank_work(base); 369 + struct intel_crtc_state *crtc_state = 370 + container_of(work, typeof(*crtc_state), vblank_work); 371 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 372 + 373 + trace_intel_crtc_vblank_work_start(crtc); 374 + 375 + intel_color_load_luts(crtc_state); 376 + 377 + if (crtc_state->uapi.event) { 378 + spin_lock_irq(&crtc->base.dev->event_lock); 379 + drm_crtc_send_vblank_event(&crtc->base, 
crtc_state->uapi.event); 380 + crtc_state->uapi.event = NULL; 381 + spin_unlock_irq(&crtc->base.dev->event_lock); 382 + } 383 + 384 + trace_intel_crtc_vblank_work_end(crtc); 385 + } 386 + 387 + static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state) 388 + { 389 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 390 + 391 + drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base, 392 + intel_crtc_vblank_work); 393 + /* 394 + * Interrupt latency is critical for getting the vblank 395 + * work executed as early as possible during the vblank. 396 + */ 397 + cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0); 398 + } 399 + 400 + void intel_wait_for_vblank_workers(struct intel_atomic_state *state) 401 + { 402 + struct intel_crtc_state *crtc_state; 403 + struct intel_crtc *crtc; 404 + int i; 405 + 406 + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { 407 + if (!intel_crtc_needs_vblank_work(crtc_state)) 408 + continue; 409 + 410 + drm_vblank_work_flush(&crtc_state->vblank_work); 411 + cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 412 + PM_QOS_DEFAULT_VALUE); 413 + } 361 414 } 362 415 363 416 int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, ··· 452 387 * until a subsequent call to intel_pipe_update_end(). That is done to 453 388 * avoid random delays. 
454 389 */ 455 - void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) 390 + void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state) 456 391 { 457 392 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); 458 393 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); ··· 467 402 if (new_crtc_state->uapi.async_flip) 468 403 return; 469 404 470 - if (new_crtc_state->vrr.enable) 471 - vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state); 472 - else 405 + if (intel_crtc_needs_vblank_work(new_crtc_state)) 406 + intel_crtc_vblank_work_init(new_crtc_state); 407 + 408 + if (new_crtc_state->vrr.enable) { 409 + if (intel_vrr_is_push_sent(new_crtc_state)) 410 + vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state); 411 + else 412 + vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state); 413 + } else { 473 414 vblank_start = intel_mode_vblank_start(adjusted_mode); 415 + } 474 416 475 417 /* FIXME needs to be calibrated sensibly */ 476 418 min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, ··· 626 554 * Would be slightly nice to just grab the vblank count and arm the 627 555 * event outside of the critical section - the spinlock might spin for a 628 556 * while ... */ 629 - if (new_crtc_state->uapi.event) { 557 + if (intel_crtc_needs_vblank_work(new_crtc_state)) { 558 + drm_vblank_work_schedule(&new_crtc_state->vblank_work, 559 + drm_crtc_accurate_vblank_count(&crtc->base) + 1, 560 + false); 561 + } else if (new_crtc_state->uapi.event) { 630 562 drm_WARN_ON(&dev_priv->drm, 631 563 drm_crtc_vblank_get(&crtc->base) != 0); 632 564 ··· 642 566 new_crtc_state->uapi.event = NULL; 643 567 } 644 568 645 - local_irq_enable(); 646 - 647 - /* Send VRR Push to terminate Vblank */ 569 + /* 570 + * Send VRR Push to terminate Vblank. 
If we are already in vblank 571 + * this has to be done _after_ sampling the frame counter, as 572 + * otherwise the push would immediately terminate the vblank and 573 + * the sampled frame counter would correspond to the next frame 574 + * instead of the current frame. 575 + * 576 + * There is a tiny race here (iff vblank evasion failed us) where 577 + * we might sample the frame counter just before vmax vblank start 578 + * but the push would be sent just after it. That would cause the 579 + * push to affect the next frame instead of the current frame, 580 + * which would cause the next frame to terminate already at vmin 581 + * vblank start instead of vmax vblank start. 582 + */ 648 583 intel_vrr_send_push(new_crtc_state); 584 + 585 + local_irq_enable(); 649 586 650 587 if (intel_vgpu_active(dev_priv)) 651 588 return;
+7
drivers/gpu/drm/i915/display/intel_crtc.h
··· 9 9 #include <linux/types.h> 10 10 11 11 enum pipe; 12 + struct drm_display_mode; 12 13 struct drm_i915_private; 14 + struct intel_atomic_state; 13 15 struct intel_crtc; 14 16 struct intel_crtc_state; 15 17 18 + int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, 19 + int usecs); 16 20 u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state); 17 21 int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe); 18 22 struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc); ··· 25 21 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc); 26 22 void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state); 27 23 void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state); 24 + void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state); 25 + void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); 26 + void intel_wait_for_vblank_workers(struct intel_atomic_state *state); 28 27 29 28 #endif
+38 -31
drivers/gpu/drm/i915/display/intel_cursor.c
··· 28 28 DRM_FORMAT_ARGB8888, 29 29 }; 30 30 31 - static const u64 cursor_format_modifiers[] = { 32 - DRM_FORMAT_MOD_LINEAR, 33 - DRM_FORMAT_MOD_INVALID 34 - }; 35 - 36 31 static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 37 32 { 38 33 struct drm_i915_private *dev_priv = ··· 190 195 { 191 196 return CURSOR_ENABLE | 192 197 CURSOR_FORMAT_ARGB | 193 - CURSOR_STRIDE(plane_state->view.color_plane[0].stride); 198 + CURSOR_STRIDE(plane_state->view.color_plane[0].mapping_stride); 194 199 } 195 200 196 201 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) ··· 229 234 } 230 235 231 236 drm_WARN_ON(&i915->drm, plane_state->uapi.visible && 232 - plane_state->view.color_plane[0].stride != fb->pitches[0]); 237 + plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]); 233 238 234 239 switch (fb->pitches[0]) { 235 240 case 256: ··· 248 253 return 0; 249 254 } 250 255 251 - static void i845_update_cursor(struct intel_plane *plane, 252 - const struct intel_crtc_state *crtc_state, 253 - const struct intel_plane_state *plane_state) 256 + /* TODO: split into noarm+arm pair */ 257 + static void i845_cursor_update_arm(struct intel_plane *plane, 258 + const struct intel_crtc_state *crtc_state, 259 + const struct intel_plane_state *plane_state) 254 260 { 255 261 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 256 262 u32 cntl = 0, base = 0, pos = 0, size = 0; ··· 294 298 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 295 299 } 296 300 297 - static void i845_disable_cursor(struct intel_plane *plane, 298 - const struct intel_crtc_state *crtc_state) 301 + static void i845_cursor_disable_arm(struct intel_plane *plane, 302 + const struct intel_crtc_state *crtc_state) 299 303 { 300 - i845_update_cursor(plane, crtc_state, NULL); 304 + i845_cursor_update_arm(plane, crtc_state, NULL); 301 305 } 302 306 303 307 static bool i845_cursor_get_hw_state(struct intel_plane *plane, ··· 451 455 } 452 456 453 457 
drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible && 454 - plane_state->view.color_plane[0].stride != fb->pitches[0]); 458 + plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]); 455 459 456 460 if (fb->pitches[0] != 457 461 drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { ··· 484 488 return 0; 485 489 } 486 490 487 - static void i9xx_update_cursor(struct intel_plane *plane, 488 - const struct intel_crtc_state *crtc_state, 489 - const struct intel_plane_state *plane_state) 491 + /* TODO: split into noarm+arm pair */ 492 + static void i9xx_cursor_update_arm(struct intel_plane *plane, 493 + const struct intel_crtc_state *crtc_state, 494 + const struct intel_plane_state *plane_state) 490 495 { 491 496 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 492 497 enum pipe pipe = plane->pipe; ··· 559 562 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 560 563 } 561 564 562 - static void i9xx_disable_cursor(struct intel_plane *plane, 563 - const struct intel_crtc_state *crtc_state) 565 + static void i9xx_cursor_disable_arm(struct intel_plane *plane, 566 + const struct intel_crtc_state *crtc_state) 564 567 { 565 - i9xx_update_cursor(plane, crtc_state, NULL); 568 + i9xx_cursor_update_arm(plane, crtc_state, NULL); 566 569 } 567 570 568 571 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, ··· 602 605 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, 603 606 u32 format, u64 modifier) 604 607 { 605 - return modifier == DRM_FORMAT_MOD_LINEAR && 606 - format == DRM_FORMAT_ARGB8888; 608 + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) 609 + return false; 610 + 611 + return format == DRM_FORMAT_ARGB8888; 607 612 } 608 613 609 614 static int ··· 716 717 */ 717 718 crtc_state->active_planes = new_crtc_state->active_planes; 718 719 719 - if (new_plane_state->uapi.visible) 720 - intel_update_plane(plane, crtc_state, new_plane_state); 721 - else 722 - 
intel_disable_plane(plane, crtc_state); 720 + if (new_plane_state->uapi.visible) { 721 + intel_plane_update_noarm(plane, crtc_state, new_plane_state); 722 + intel_plane_update_arm(plane, crtc_state, new_plane_state); 723 + } else { 724 + intel_plane_disable_arm(plane, crtc_state); 725 + } 723 726 724 727 intel_plane_unpin_fb(old_plane_state); 725 728 ··· 755 754 { 756 755 struct intel_plane *cursor; 757 756 int ret, zpos; 757 + u64 *modifiers; 758 758 759 759 cursor = intel_plane_alloc(); 760 760 if (IS_ERR(cursor)) ··· 768 766 769 767 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 770 768 cursor->max_stride = i845_cursor_max_stride; 771 - cursor->update_plane = i845_update_cursor; 772 - cursor->disable_plane = i845_disable_cursor; 769 + cursor->update_arm = i845_cursor_update_arm; 770 + cursor->disable_arm = i845_cursor_disable_arm; 773 771 cursor->get_hw_state = i845_cursor_get_hw_state; 774 772 cursor->check_plane = i845_check_cursor; 775 773 } else { 776 774 cursor->max_stride = i9xx_cursor_max_stride; 777 - cursor->update_plane = i9xx_update_cursor; 778 - cursor->disable_plane = i9xx_disable_cursor; 775 + cursor->update_arm = i9xx_cursor_update_arm; 776 + cursor->disable_arm = i9xx_cursor_disable_arm; 779 777 cursor->get_hw_state = i9xx_cursor_get_hw_state; 780 778 cursor->check_plane = i9xx_check_cursor; 781 779 } ··· 786 784 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 787 785 cursor->cursor.size = ~0; 788 786 787 + modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_NONE); 788 + 789 789 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 790 790 0, &intel_cursor_plane_funcs, 791 791 intel_cursor_formats, 792 792 ARRAY_SIZE(intel_cursor_formats), 793 - cursor_format_modifiers, 793 + modifiers, 794 794 DRM_PLANE_TYPE_CURSOR, 795 795 "cursor %c", pipe_name(pipe)); 796 + 797 + kfree(modifiers); 798 + 796 799 if (ret) 797 800 goto fail; 798 801
+140 -202
drivers/gpu/drm/i915/display/intel_ddi.c
··· 321 321 { 322 322 int dotclock; 323 323 324 + /* CRT dotclock is determined via other means */ 324 325 if (pipe_config->has_pch_encoder) 325 - dotclock = intel_dotclock_calculate(pipe_config->port_clock, 326 - &pipe_config->fdi_m_n); 327 - else if (intel_crtc_has_dp_encoder(pipe_config)) 326 + return; 327 + 328 + if (intel_crtc_has_dp_encoder(pipe_config)) 328 329 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 329 330 &pipe_config->dp_m_n); 330 331 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) ··· 1040 1039 const struct intel_crtc_state *crtc_state) 1041 1040 { 1042 1041 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1043 - int level = intel_ddi_level(encoder, crtc_state, 0); 1044 1042 const struct intel_ddi_buf_trans *trans; 1045 1043 enum phy phy = intel_port_to_phy(dev_priv, encoder->port); 1046 1044 int n_entries, ln; ··· 1068 1068 intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); 1069 1069 1070 1070 /* Program PORT_TX_DW2 */ 1071 - val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); 1072 - val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | 1073 - RCOMP_SCALAR_MASK); 1074 - val |= SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel); 1075 - val |= SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel); 1076 - /* Program Rcomp scalar for every table entry */ 1077 - val |= RCOMP_SCALAR(0x98); 1078 - intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val); 1071 + for (ln = 0; ln < 4; ln++) { 1072 + int level = intel_ddi_level(encoder, crtc_state, ln); 1073 + 1074 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy), 1075 + SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK, 1076 + SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) | 1077 + SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) | 1078 + RCOMP_SCALAR(0x98)); 1079 + } 1079 1080 1080 1081 /* Program PORT_TX_DW4 */ 1081 1082 /* We cannot write to GRP. It would overwrite individual loadgen. 
*/ 1082 1083 for (ln = 0; ln < 4; ln++) { 1083 - val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy)); 1084 - val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | 1085 - CURSOR_COEFF_MASK); 1086 - val |= POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1); 1087 - val |= POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2); 1088 - val |= CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff); 1089 - intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val); 1084 + int level = intel_ddi_level(encoder, crtc_state, ln); 1085 + 1086 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), 1087 + POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK, 1088 + POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) | 1089 + POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) | 1090 + CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff)); 1090 1091 } 1091 1092 1092 1093 /* Program PORT_TX_DW7 */ 1093 - val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN(0, phy)); 1094 - val &= ~N_SCALAR_MASK; 1095 - val |= N_SCALAR(trans->entries[level].icl.dw7_n_scalar); 1096 - intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val); 1094 + for (ln = 0; ln < 4; ln++) { 1095 + int level = intel_ddi_level(encoder, crtc_state, ln); 1096 + 1097 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy), 1098 + N_SCALAR_MASK, 1099 + N_SCALAR(trans->entries[level].icl.dw7_n_scalar)); 1100 + } 1097 1101 } 1098 1102 1099 1103 static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder, ··· 1128 1124 * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) 1129 1125 */ 1130 1126 for (ln = 0; ln < 4; ln++) { 1131 - val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy)); 1132 - val &= ~LOADGEN_SELECT; 1133 - val |= icl_combo_phy_loadgen_select(crtc_state, ln); 1134 - intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val); 1127 + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), 1128 + LOADGEN_SELECT, 1129 + icl_combo_phy_loadgen_select(crtc_state, ln)); 1135 1130 } 
1136 1131 1137 1132 /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ 1138 - val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); 1139 - val |= SUS_CLOCK_CONFIG; 1140 - intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val); 1133 + intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 1134 + 0, SUS_CLOCK_CONFIG); 1141 1135 1142 1136 /* 4. Clear training enable to change swing values */ 1143 1137 val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); ··· 1156 1154 { 1157 1155 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1158 1156 enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); 1159 - int level = intel_ddi_level(encoder, crtc_state, 0); 1160 1157 const struct intel_ddi_buf_trans *trans; 1161 1158 int n_entries, ln; 1162 - u32 val; 1163 1159 1164 1160 if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) 1165 1161 return; ··· 1166 1166 if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) 1167 1167 return; 1168 1168 1169 - /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. 
*/ 1170 1169 for (ln = 0; ln < 2; ln++) { 1171 - val = intel_de_read(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port)); 1172 - val &= ~CRI_USE_FS32; 1173 - intel_de_write(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), val); 1174 - 1175 - val = intel_de_read(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port)); 1176 - val &= ~CRI_USE_FS32; 1177 - intel_de_write(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), val); 1170 + intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), 1171 + CRI_USE_FS32, 0); 1172 + intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), 1173 + CRI_USE_FS32, 0); 1178 1174 } 1179 1175 1180 1176 /* Program MG_TX_SWINGCTRL with values from vswing table */ 1181 1177 for (ln = 0; ln < 2; ln++) { 1182 - val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port)); 1183 - val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; 1184 - val |= CRI_TXDEEMPH_OVERRIDE_17_12( 1185 - trans->entries[level].mg.cri_txdeemph_override_17_12); 1186 - intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val); 1178 + int level; 1187 1179 1188 - val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port)); 1189 - val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; 1190 - val |= CRI_TXDEEMPH_OVERRIDE_17_12( 1191 - trans->entries[level].mg.cri_txdeemph_override_17_12); 1192 - intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val); 1180 + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); 1181 + 1182 + intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), 1183 + CRI_TXDEEMPH_OVERRIDE_17_12_MASK, 1184 + CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); 1185 + 1186 + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); 1187 + 1188 + intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), 1189 + CRI_TXDEEMPH_OVERRIDE_17_12_MASK, 1190 + CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); 1193 1191 } 1194 1192 1195 1193 /* Program MG_TX_DRVCTRL with values from vswing table */ 1196 1194 for (ln = 0; ln < 2; ln++) { 1197 - val = 
intel_de_read(dev_priv, MG_TX1_DRVCTRL(ln, tc_port)); 1198 - val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | 1199 - CRI_TXDEEMPH_OVERRIDE_5_0_MASK); 1200 - val |= CRI_TXDEEMPH_OVERRIDE_5_0( 1201 - trans->entries[level].mg.cri_txdeemph_override_5_0) | 1202 - CRI_TXDEEMPH_OVERRIDE_11_6( 1203 - trans->entries[level].mg.cri_txdeemph_override_11_6) | 1204 - CRI_TXDEEMPH_OVERRIDE_EN; 1205 - intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val); 1195 + int level; 1206 1196 1207 - val = intel_de_read(dev_priv, MG_TX2_DRVCTRL(ln, tc_port)); 1208 - val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | 1209 - CRI_TXDEEMPH_OVERRIDE_5_0_MASK); 1210 - val |= CRI_TXDEEMPH_OVERRIDE_5_0( 1211 - trans->entries[level].mg.cri_txdeemph_override_5_0) | 1212 - CRI_TXDEEMPH_OVERRIDE_11_6( 1213 - trans->entries[level].mg.cri_txdeemph_override_11_6) | 1214 - CRI_TXDEEMPH_OVERRIDE_EN; 1215 - intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val); 1197 + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); 1198 + 1199 + intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), 1200 + CRI_TXDEEMPH_OVERRIDE_11_6_MASK | 1201 + CRI_TXDEEMPH_OVERRIDE_5_0_MASK, 1202 + CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | 1203 + CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) | 1204 + CRI_TXDEEMPH_OVERRIDE_EN); 1205 + 1206 + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); 1207 + 1208 + intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), 1209 + CRI_TXDEEMPH_OVERRIDE_11_6_MASK | 1210 + CRI_TXDEEMPH_OVERRIDE_5_0_MASK, 1211 + CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | 1212 + CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) | 1213 + CRI_TXDEEMPH_OVERRIDE_EN); 1216 1214 1217 1215 /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ 1218 1216 } ··· 1221 1223 * values from table for which TX1 and TX2 enabled. 
1222 1224 */ 1223 1225 for (ln = 0; ln < 2; ln++) { 1224 - val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port)); 1225 - if (crtc_state->port_clock < 300000) 1226 - val |= CFG_LOW_RATE_LKREN_EN; 1227 - else 1228 - val &= ~CFG_LOW_RATE_LKREN_EN; 1229 - intel_de_write(dev_priv, MG_CLKHUB(ln, tc_port), val); 1226 + intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port), 1227 + CFG_LOW_RATE_LKREN_EN, 1228 + crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0); 1230 1229 } 1231 1230 1232 1231 /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */ 1233 1232 for (ln = 0; ln < 2; ln++) { 1234 - val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port)); 1235 - val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; 1236 - if (crtc_state->port_clock <= 500000) { 1237 - val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; 1238 - } else { 1239 - val |= CFG_AMI_CK_DIV_OVERRIDE_EN | 1240 - CFG_AMI_CK_DIV_OVERRIDE_VAL(1); 1241 - } 1242 - intel_de_write(dev_priv, MG_TX1_DCC(ln, tc_port), val); 1233 + intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port), 1234 + CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | 1235 + CFG_AMI_CK_DIV_OVERRIDE_EN, 1236 + crtc_state->port_clock > 500000 ? 1237 + CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | 1238 + CFG_AMI_CK_DIV_OVERRIDE_EN : 0); 1243 1239 1244 - val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port)); 1245 - val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; 1246 - if (crtc_state->port_clock <= 500000) { 1247 - val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; 1248 - } else { 1249 - val |= CFG_AMI_CK_DIV_OVERRIDE_EN | 1250 - CFG_AMI_CK_DIV_OVERRIDE_VAL(1); 1251 - } 1252 - intel_de_write(dev_priv, MG_TX2_DCC(ln, tc_port), val); 1240 + intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port), 1241 + CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | 1242 + CFG_AMI_CK_DIV_OVERRIDE_EN, 1243 + crtc_state->port_clock > 500000 ? 
1244 + CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | 1245 + CFG_AMI_CK_DIV_OVERRIDE_EN : 0); 1253 1246 } 1254 1247 1255 1248 /* Program MG_TX_PISO_READLOAD with values from vswing table */ 1256 1249 for (ln = 0; ln < 2; ln++) { 1257 - val = intel_de_read(dev_priv, 1258 - MG_TX1_PISO_READLOAD(ln, tc_port)); 1259 - val |= CRI_CALCINIT; 1260 - intel_de_write(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port), 1261 - val); 1262 - 1263 - val = intel_de_read(dev_priv, 1264 - MG_TX2_PISO_READLOAD(ln, tc_port)); 1265 - val |= CRI_CALCINIT; 1266 - intel_de_write(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port), 1267 - val); 1250 + intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port), 1251 + 0, CRI_CALCINIT); 1252 + intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port), 1253 + 0, CRI_CALCINIT); 1268 1254 } 1269 1255 } 1270 1256 ··· 1257 1275 { 1258 1276 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1259 1277 enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); 1260 - int level = intel_ddi_level(encoder, crtc_state, 0); 1261 1278 const struct intel_ddi_buf_trans *trans; 1262 - u32 val, dpcnt_mask, dpcnt_val; 1263 1279 int n_entries, ln; 1264 1280 1265 1281 if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) ··· 1267 1287 if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) 1268 1288 return; 1269 1289 1270 - dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK | 1271 - DKL_TX_DE_EMPAHSIS_COEFF_MASK | 1272 - DKL_TX_VSWING_CONTROL_MASK); 1273 - dpcnt_val = DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing); 1274 - dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis); 1275 - dpcnt_val |= DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot); 1276 - 1277 1290 for (ln = 0; ln < 2; ln++) { 1291 + int level; 1292 + 1278 1293 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), 1279 1294 HIP_INDEX_VAL(tc_port, ln)); 1280 1295 1281 1296 intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0); 1282 1297 1283 - /* All the registers are RMW */ 1284 - val 
= intel_de_read(dev_priv, DKL_TX_DPCNTL0(tc_port)); 1285 - val &= ~dpcnt_mask; 1286 - val |= dpcnt_val; 1287 - intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val); 1298 + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); 1288 1299 1289 - val = intel_de_read(dev_priv, DKL_TX_DPCNTL1(tc_port)); 1290 - val &= ~dpcnt_mask; 1291 - val |= dpcnt_val; 1292 - intel_de_write(dev_priv, DKL_TX_DPCNTL1(tc_port), val); 1300 + intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), 1301 + DKL_TX_PRESHOOT_COEFF_MASK | 1302 + DKL_TX_DE_EMPAHSIS_COEFF_MASK | 1303 + DKL_TX_VSWING_CONTROL_MASK, 1304 + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | 1305 + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | 1306 + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); 1293 1307 1294 - val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port)); 1295 - val &= ~DKL_TX_DP20BITMODE; 1296 - intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val); 1308 + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); 1309 + 1310 + intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), 1311 + DKL_TX_PRESHOOT_COEFF_MASK | 1312 + DKL_TX_DE_EMPAHSIS_COEFF_MASK | 1313 + DKL_TX_VSWING_CONTROL_MASK, 1314 + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | 1315 + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | 1316 + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); 1317 + 1318 + intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), 1319 + DKL_TX_DP20BITMODE, 0); 1297 1320 } 1298 1321 } 1299 1322 ··· 1921 1938 encoder->enable_clock(encoder, crtc_state); 1922 1939 } 1923 1940 1924 - static void intel_ddi_disable_clock(struct intel_encoder *encoder) 1941 + void intel_ddi_disable_clock(struct intel_encoder *encoder) 1925 1942 { 1926 1943 if (encoder->disable_clock) 1927 1944 encoder->disable_clock(encoder); ··· 2368 2385 2369 2386 /* 5.k Configure and enable FEC if needed */ 2370 2387 intel_ddi_enable_fec(encoder, crtc_state); 2371 - intel_dsc_enable(encoder, 
crtc_state); 2388 + 2389 + intel_dsc_dp_pps_write(encoder, crtc_state); 2390 + 2391 + intel_dsc_enable(crtc_state); 2372 2392 } 2373 2393 2374 2394 static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, ··· 2505 2519 2506 2520 /* 7.l Configure and enable FEC if needed */ 2507 2521 intel_ddi_enable_fec(encoder, crtc_state); 2522 + 2523 + intel_dsc_dp_pps_write(encoder, crtc_state); 2524 + 2508 2525 if (!crtc_state->bigjoiner) 2509 - intel_dsc_enable(encoder, crtc_state); 2526 + intel_dsc_enable(crtc_state); 2510 2527 } 2511 2528 2512 2529 static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, ··· 2574 2585 if (!is_mst) 2575 2586 intel_ddi_enable_pipe_clock(encoder, crtc_state); 2576 2587 2588 + intel_dsc_dp_pps_write(encoder, crtc_state); 2589 + 2577 2590 if (!crtc_state->bigjoiner) 2578 - intel_dsc_enable(encoder, crtc_state); 2591 + intel_dsc_enable(crtc_state); 2579 2592 } 2580 2593 2581 2594 static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, ··· 2815 2824 } 2816 2825 2817 2826 if (old_crtc_state->bigjoiner_linked_crtc) { 2818 - struct intel_atomic_state *state = 2819 - to_intel_atomic_state(old_crtc_state->uapi.state); 2820 - struct intel_crtc *slave = 2827 + struct intel_crtc *slave_crtc = 2821 2828 old_crtc_state->bigjoiner_linked_crtc; 2822 2829 const struct intel_crtc_state *old_slave_crtc_state = 2823 - intel_atomic_get_old_crtc_state(state, slave); 2830 + intel_atomic_get_old_crtc_state(state, slave_crtc); 2824 2831 2825 2832 intel_crtc_vblank_off(old_slave_crtc_state); 2826 2833 ··· 2853 2864 2854 2865 if (is_tc_port) 2855 2866 intel_tc_port_put_link(dig_port); 2856 - } 2857 - 2858 - void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, 2859 - struct intel_encoder *encoder, 2860 - const struct intel_crtc_state *old_crtc_state, 2861 - const struct drm_connector_state *old_conn_state) 2862 - { 2863 - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2864 - u32 val; 2865 - 2866 - /* 2867 
- * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) 2868 - * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, 2869 - * step 13 is the correct place for it. Step 18 is where it was 2870 - * originally before the BUN. 2871 - */ 2872 - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 2873 - val &= ~FDI_RX_ENABLE; 2874 - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); 2875 - 2876 - intel_disable_ddi_buf(encoder, old_crtc_state); 2877 - intel_ddi_disable_clock(encoder); 2878 - 2879 - val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); 2880 - val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); 2881 - val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); 2882 - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); 2883 - 2884 - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 2885 - val &= ~FDI_PCDCLK; 2886 - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); 2887 - 2888 - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 2889 - val &= ~FDI_RX_PLL_ENABLE; 2890 - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); 2891 2867 } 2892 2868 2893 2869 static void trans_port_sync_stop_link_train(struct intel_atomic_state *state, ··· 3049 3095 3050 3096 intel_dp->link_trained = false; 3051 3097 3098 + if (old_crtc_state->has_audio) 3099 + intel_audio_codec_disable(encoder, 3100 + old_crtc_state, old_conn_state); 3101 + 3102 + intel_drrs_disable(intel_dp, old_crtc_state); 3103 + intel_psr_disable(intel_dp, old_crtc_state); 3052 3104 intel_edp_backlight_off(old_conn_state); 3053 3105 /* Disable the decompression in DP Sink */ 3054 3106 intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state, ··· 3072 3112 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3073 3113 struct drm_connector *connector = old_conn_state->connector; 3074 3114 3115 + if (old_crtc_state->has_audio) 3116 + intel_audio_codec_disable(encoder, 3117 + old_crtc_state, old_conn_state); 3118 + 3075 3119 if (!intel_hdmi_handle_sink_scrambling(encoder, 
connector, 3076 3120 false, false)) 3077 3121 drm_dbg_kms(&i915->drm, 3078 3122 "[CONNECTOR:%d:%s] Failed to reset sink scrambling/TMDS bit clock ratio\n", 3079 3123 connector->base.id, connector->name); 3080 - } 3081 - 3082 - static void intel_pre_disable_ddi(struct intel_atomic_state *state, 3083 - struct intel_encoder *encoder, 3084 - const struct intel_crtc_state *old_crtc_state, 3085 - const struct drm_connector_state *old_conn_state) 3086 - { 3087 - struct intel_dp *intel_dp; 3088 - 3089 - if (old_crtc_state->has_audio) 3090 - intel_audio_codec_disable(encoder, old_crtc_state, 3091 - old_conn_state); 3092 - 3093 - if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) 3094 - return; 3095 - 3096 - intel_dp = enc_to_intel_dp(encoder); 3097 - intel_drrs_disable(intel_dp, old_crtc_state); 3098 - intel_psr_disable(intel_dp, old_crtc_state); 3099 3124 } 3100 3125 3101 3126 static void intel_disable_ddi(struct intel_atomic_state *state, ··· 3140 3195 3141 3196 intel_tc_port_get_link(enc_to_dig_port(encoder), 3142 3197 required_lanes); 3143 - if (crtc_state && crtc_state->hw.active) 3198 + if (crtc_state && crtc_state->hw.active) { 3199 + struct intel_crtc *slave_crtc = crtc_state->bigjoiner_linked_crtc; 3200 + 3144 3201 intel_update_active_dpll(state, crtc, encoder); 3202 + 3203 + if (slave_crtc) 3204 + intel_update_active_dpll(state, slave_crtc, encoder); 3205 + } 3145 3206 } 3146 3207 3147 3208 static void ··· 3503 3552 if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder))) 3504 3553 return; 3505 3554 3506 - if (pipe_config->bigjoiner_slave) { 3507 - /* read out pipe settings from master */ 3508 - enum transcoder save = pipe_config->cpu_transcoder; 3509 - 3510 - /* Our own transcoder needs to be disabled when reading it in intel_ddi_read_func_ctl() */ 3511 - WARN_ON(pipe_config->output_types); 3512 - pipe_config->cpu_transcoder = (enum transcoder)pipe_config->bigjoiner_linked_crtc->pipe; 3513 - intel_ddi_read_func_ctl(encoder, pipe_config); 3514 
- pipe_config->cpu_transcoder = save; 3515 - } else { 3516 - intel_ddi_read_func_ctl(encoder, pipe_config); 3517 - } 3555 + intel_ddi_read_func_ctl(encoder, pipe_config); 3518 3556 3519 3557 intel_ddi_mso_get_config(encoder, pipe_config); 3520 3558 ··· 3531 3591 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; 3532 3592 } 3533 3593 3534 - if (!pipe_config->bigjoiner_slave) 3535 - ddi_dotclock_get(pipe_config); 3594 + ddi_dotclock_get(pipe_config); 3536 3595 3537 3596 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 3538 3597 pipe_config->lane_lat_optim_mask = ··· 4411 4472 encoder->enable = intel_enable_ddi; 4412 4473 encoder->pre_pll_enable = intel_ddi_pre_pll_enable; 4413 4474 encoder->pre_enable = intel_ddi_pre_enable; 4414 - encoder->pre_disable = intel_pre_disable_ddi; 4415 4475 encoder->disable = intel_disable_ddi; 4416 4476 encoder->post_disable = intel_ddi_post_disable; 4417 4477 encoder->update_pipe = intel_ddi_update_pipe;
+4 -1
drivers/gpu/drm/i915/display/intel_ddi.h
··· 6 6 #ifndef __INTEL_DDI_H__ 7 7 #define __INTEL_DDI_H__ 8 8 9 - #include "intel_display.h" 10 9 #include "i915_reg.h" 11 10 12 11 struct drm_connector_state; 13 12 struct drm_i915_private; 13 + struct intel_atomic_state; 14 14 struct intel_connector; 15 15 struct intel_crtc; 16 16 struct intel_crtc_state; ··· 18 18 struct intel_dpll_hw_state; 19 19 struct intel_encoder; 20 20 struct intel_shared_dpll; 21 + enum pipe; 22 + enum port; 21 23 enum transcoder; 22 24 23 25 i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder, ··· 32 30 const struct drm_connector_state *old_conn_state); 33 31 void intel_ddi_enable_clock(struct intel_encoder *encoder, 34 32 const struct intel_crtc_state *crtc_state); 33 + void intel_ddi_disable_clock(struct intel_encoder *encoder); 35 34 void intel_ddi_get_clock(struct intel_encoder *encoder, 36 35 struct intel_crtc_state *crtc_state, 37 36 struct intel_shared_dpll *pll);
+306 -1494
drivers/gpu/drm/i915/display/intel_display.c
··· 70 70 71 71 #include "gt/gen8_ppgtt.h" 72 72 73 - #include "pxp/intel_pxp.h" 74 - 75 73 #include "g4x_dp.h" 76 74 #include "g4x_hdmi.h" 77 75 #include "i915_drv.h" 76 + #include "icl_dsi.h" 78 77 #include "intel_acpi.h" 79 78 #include "intel_atomic.h" 80 79 #include "intel_atomic_plane.h" ··· 95 96 #include "intel_hotplug.h" 96 97 #include "intel_overlay.h" 97 98 #include "intel_panel.h" 99 + #include "intel_pch_display.h" 100 + #include "intel_pch_refclk.h" 98 101 #include "intel_pcode.h" 99 102 #include "intel_pipe_crc.h" 100 103 #include "intel_plane_initial.h" ··· 104 103 #include "intel_pps.h" 105 104 #include "intel_psr.h" 106 105 #include "intel_quirks.h" 107 - #include "intel_sbi.h" 108 106 #include "intel_sprite.h" 109 107 #include "intel_tc.h" 110 108 #include "intel_vga.h" 111 109 #include "i9xx_plane.h" 112 110 #include "skl_scaler.h" 113 111 #include "skl_universal_plane.h" 112 + #include "vlv_dsi_pll.h" 114 113 #include "vlv_sideband.h" 115 - 116 - static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 117 - struct intel_crtc_state *pipe_config); 118 - static void ilk_pch_clock_get(struct intel_crtc *crtc, 119 - struct intel_crtc_state *pipe_config); 114 + #include "vlv_dsi.h" 120 115 121 116 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); 122 117 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); ··· 338 341 is_trans_port_sync_slave(crtc_state); 339 342 } 340 343 344 + static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state) 345 + { 346 + if (crtc_state->bigjoiner_slave) 347 + return crtc_state->bigjoiner_linked_crtc; 348 + else 349 + return to_intel_crtc(crtc_state->uapi.crtc); 350 + } 351 + 341 352 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, 342 353 enum pipe pipe) 343 354 { ··· 459 454 assert_plane_disabled(plane); 460 455 } 461 456 462 - void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 463 - enum 
pipe pipe) 464 - { 465 - u32 val; 466 - bool enabled; 467 - 468 - val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); 469 - enabled = !!(val & TRANS_ENABLE); 470 - I915_STATE_WARN(enabled, 471 - "transcoder assertion failed, should be off on pipe %c but is still active\n", 472 - pipe_name(pipe)); 473 - } 474 - 475 - static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 476 - enum pipe pipe, enum port port, 477 - i915_reg_t dp_reg) 478 - { 479 - enum pipe port_pipe; 480 - bool state; 481 - 482 - state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); 483 - 484 - I915_STATE_WARN(state && port_pipe == pipe, 485 - "PCH DP %c enabled on transcoder %c, should be disabled\n", 486 - port_name(port), pipe_name(pipe)); 487 - 488 - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, 489 - "IBX PCH DP %c still using transcoder B\n", 490 - port_name(port)); 491 - } 492 - 493 - static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 494 - enum pipe pipe, enum port port, 495 - i915_reg_t hdmi_reg) 496 - { 497 - enum pipe port_pipe; 498 - bool state; 499 - 500 - state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); 501 - 502 - I915_STATE_WARN(state && port_pipe == pipe, 503 - "PCH HDMI %c enabled on transcoder %c, should be disabled\n", 504 - port_name(port), pipe_name(pipe)); 505 - 506 - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, 507 - "IBX PCH HDMI %c still using transcoder B\n", 508 - port_name(port)); 509 - } 510 - 511 - static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 512 - enum pipe pipe) 513 - { 514 - enum pipe port_pipe; 515 - 516 - assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); 517 - assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); 518 - assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); 519 - 520 - I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && 521 - port_pipe == pipe, 522 - "PCH VGA enabled on 
transcoder %c, should be disabled\n", 523 - pipe_name(pipe)); 524 - 525 - I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && 526 - port_pipe == pipe, 527 - "PCH LVDS enabled on transcoder %c, should be disabled\n", 528 - pipe_name(pipe)); 529 - 530 - /* PCH SDVOB multiplex with HDMIB */ 531 - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); 532 - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); 533 - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); 534 - } 535 - 536 457 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 537 458 struct intel_digital_port *dig_port, 538 459 unsigned int expected_mask) ··· 491 560 dig_port->base.base.base.id, dig_port->base.base.name, 492 561 intel_de_read(dev_priv, dpll_reg) & port_mask, 493 562 expected_mask); 494 - } 495 - 496 - static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) 497 - { 498 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 499 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 500 - enum pipe pipe = crtc->pipe; 501 - i915_reg_t reg; 502 - u32 val, pipeconf_val; 503 - 504 - /* Make sure PCH DPLL is enabled */ 505 - assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); 506 - 507 - /* FDI must be feeding us bits for PCH ports */ 508 - assert_fdi_tx_enabled(dev_priv, pipe); 509 - assert_fdi_rx_enabled(dev_priv, pipe); 510 - 511 - if (HAS_PCH_CPT(dev_priv)) { 512 - reg = TRANS_CHICKEN2(pipe); 513 - val = intel_de_read(dev_priv, reg); 514 - /* 515 - * Workaround: Set the timing override bit 516 - * before enabling the pch transcoder. 
517 - */ 518 - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 519 - /* Configure frame start delay to match the CPU */ 520 - val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 521 - val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 522 - intel_de_write(dev_priv, reg, val); 523 - } 524 - 525 - reg = PCH_TRANSCONF(pipe); 526 - val = intel_de_read(dev_priv, reg); 527 - pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); 528 - 529 - if (HAS_PCH_IBX(dev_priv)) { 530 - /* Configure frame start delay to match the CPU */ 531 - val &= ~TRANS_FRAME_START_DELAY_MASK; 532 - val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 533 - 534 - /* 535 - * Make the BPC in transcoder be consistent with 536 - * that in pipeconf reg. For HDMI we must use 8bpc 537 - * here for both 8bpc and 12bpc. 538 - */ 539 - val &= ~PIPECONF_BPC_MASK; 540 - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 541 - val |= PIPECONF_8BPC; 542 - else 543 - val |= pipeconf_val & PIPECONF_BPC_MASK; 544 - } 545 - 546 - val &= ~TRANS_INTERLACE_MASK; 547 - if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { 548 - if (HAS_PCH_IBX(dev_priv) && 549 - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 550 - val |= TRANS_LEGACY_INTERLACED_ILK; 551 - else 552 - val |= TRANS_INTERLACED; 553 - } else { 554 - val |= TRANS_PROGRESSIVE; 555 - } 556 - 557 - intel_de_write(dev_priv, reg, val | TRANS_ENABLE); 558 - if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) 559 - drm_err(&dev_priv->drm, "failed to enable transcoder %c\n", 560 - pipe_name(pipe)); 561 - } 562 - 563 - static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 564 - enum transcoder cpu_transcoder) 565 - { 566 - u32 val, pipeconf_val; 567 - 568 - /* FDI must be feeding us bits for PCH ports */ 569 - assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); 570 - assert_fdi_rx_enabled(dev_priv, PIPE_A); 571 - 572 - val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); 
573 - /* Workaround: set timing override bit. */ 574 - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 575 - /* Configure frame start delay to match the CPU */ 576 - val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 577 - val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 578 - intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); 579 - 580 - val = TRANS_ENABLE; 581 - pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); 582 - 583 - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 584 - PIPECONF_INTERLACED_ILK) 585 - val |= TRANS_INTERLACED; 586 - else 587 - val |= TRANS_PROGRESSIVE; 588 - 589 - intel_de_write(dev_priv, LPT_TRANSCONF, val); 590 - if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, 591 - TRANS_STATE_ENABLE, 100)) 592 - drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); 593 - } 594 - 595 - static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv, 596 - enum pipe pipe) 597 - { 598 - i915_reg_t reg; 599 - u32 val; 600 - 601 - /* FDI relies on the transcoder */ 602 - assert_fdi_tx_disabled(dev_priv, pipe); 603 - assert_fdi_rx_disabled(dev_priv, pipe); 604 - 605 - /* Ports must be off as well */ 606 - assert_pch_ports_disabled(dev_priv, pipe); 607 - 608 - reg = PCH_TRANSCONF(pipe); 609 - val = intel_de_read(dev_priv, reg); 610 - val &= ~TRANS_ENABLE; 611 - intel_de_write(dev_priv, reg, val); 612 - /* wait for PCH transcoder off, transcoder state */ 613 - if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) 614 - drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", 615 - pipe_name(pipe)); 616 - 617 - if (HAS_PCH_CPT(dev_priv)) { 618 - /* Workaround: Clear the timing override chicken bit again. 
*/ 619 - reg = TRANS_CHICKEN2(pipe); 620 - val = intel_de_read(dev_priv, reg); 621 - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 622 - intel_de_write(dev_priv, reg, val); 623 - } 624 - } 625 - 626 - void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 627 - { 628 - u32 val; 629 - 630 - val = intel_de_read(dev_priv, LPT_TRANSCONF); 631 - val &= ~TRANS_ENABLE; 632 - intel_de_write(dev_priv, LPT_TRANSCONF, val); 633 - /* wait for PCH transcoder off, transcoder state */ 634 - if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, 635 - TRANS_STATE_ENABLE, 50)) 636 - drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); 637 - 638 - /* Workaround: clear timing override bit. */ 639 - val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); 640 - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 641 - intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); 642 563 } 643 564 644 565 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) ··· 606 823 intel_wait_for_pipe_off(old_crtc_state); 607 824 } 608 825 609 - bool 610 - intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, 611 - u64 modifier) 612 - { 613 - return info->is_yuv && 614 - info->num_planes == (is_ccs_modifier(modifier) ? 
4 : 2); 615 - } 616 - 617 826 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 618 827 { 619 828 unsigned int size = 0; ··· 625 850 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { 626 851 unsigned int plane_size; 627 852 628 - plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height; 853 + if (rem_info->plane[i].linear) 854 + plane_size = rem_info->plane[i].size; 855 + else 856 + plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height; 857 + 629 858 if (plane_size == 0) 630 859 continue; 631 860 ··· 648 869 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 649 870 650 871 return DISPLAY_VER(dev_priv) < 4 || 651 - (plane->has_fbc && 872 + (plane->fbc && 652 873 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL); 653 874 } 654 875 ··· 664 885 { 665 886 const struct drm_framebuffer *fb = state->hw.fb; 666 887 unsigned int cpp = fb->format->cpp[color_plane]; 667 - unsigned int pitch = state->view.color_plane[color_plane].stride; 888 + unsigned int pitch = state->view.color_plane[color_plane].mapping_stride; 668 889 669 890 return y * pitch + x * cpp; 670 891 } ··· 681 902 { 682 903 *x += state->view.color_plane[color_plane].x; 683 904 *y += state->view.color_plane[color_plane].y; 684 - } 685 - 686 - /* 687 - * From the Sky Lake PRM: 688 - * "The Color Control Surface (CCS) contains the compression status of 689 - * the cache-line pairs. The compression state of the cache-line pair 690 - * is specified by 2 bits in the CCS. Each CCS cache-line represents 691 - * an area on the main surface of 16 x16 sets of 128 byte Y-tiled 692 - * cache-line-pairs. CCS is always Y tiled." 693 - * 694 - * Since cache line pairs refers to horizontally adjacent cache lines, 695 - * each cache line in the CCS corresponds to an area of 32x16 cache 696 - * lines on the main surface. Since each pixel is 4 bytes, this gives 697 - * us a ratio of one byte in the CCS for each 8x16 pixels in the 698 - * main surface. 
699 - */ 700 - static const struct drm_format_info skl_ccs_formats[] = { 701 - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 702 - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 703 - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 704 - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 705 - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 706 - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 707 - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 708 - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 709 - }; 710 - 711 - /* 712 - * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the 713 - * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles 714 - * in the main surface. With 4 byte pixels and each Y-tile having dimensions of 715 - * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in 716 - * the main surface. 717 - */ 718 - static const struct drm_format_info gen12_ccs_formats[] = { 719 - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 720 - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 721 - .hsub = 1, .vsub = 1, }, 722 - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 723 - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 724 - .hsub = 1, .vsub = 1, }, 725 - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 726 - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 727 - .hsub = 1, .vsub = 1, .has_alpha = true }, 728 - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 729 - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 730 - .hsub = 1, .vsub = 1, .has_alpha = true }, 731 - { .format = DRM_FORMAT_YUYV, .num_planes = 2, 732 - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 733 - .hsub = 2, .vsub = 1, .is_yuv = true }, 734 - { .format = DRM_FORMAT_YVYU, 
.num_planes = 2, 735 - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 736 - .hsub = 2, .vsub = 1, .is_yuv = true }, 737 - { .format = DRM_FORMAT_UYVY, .num_planes = 2, 738 - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 739 - .hsub = 2, .vsub = 1, .is_yuv = true }, 740 - { .format = DRM_FORMAT_VYUY, .num_planes = 2, 741 - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 742 - .hsub = 2, .vsub = 1, .is_yuv = true }, 743 - { .format = DRM_FORMAT_XYUV8888, .num_planes = 2, 744 - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 745 - .hsub = 1, .vsub = 1, .is_yuv = true }, 746 - { .format = DRM_FORMAT_NV12, .num_planes = 4, 747 - .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, 748 - .hsub = 2, .vsub = 2, .is_yuv = true }, 749 - { .format = DRM_FORMAT_P010, .num_planes = 4, 750 - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 751 - .hsub = 2, .vsub = 2, .is_yuv = true }, 752 - { .format = DRM_FORMAT_P012, .num_planes = 4, 753 - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 754 - .hsub = 2, .vsub = 2, .is_yuv = true }, 755 - { .format = DRM_FORMAT_P016, .num_planes = 4, 756 - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 757 - .hsub = 2, .vsub = 2, .is_yuv = true }, 758 - }; 759 - 760 - /* 761 - * Same as gen12_ccs_formats[] above, but with additional surface used 762 - * to pass Clear Color information in plane 2 with 64 bits of data. 
763 - */ 764 - static const struct drm_format_info gen12_ccs_cc_formats[] = { 765 - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, 766 - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, 767 - .hsub = 1, .vsub = 1, }, 768 - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, 769 - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, 770 - .hsub = 1, .vsub = 1, }, 771 - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, 772 - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, 773 - .hsub = 1, .vsub = 1, .has_alpha = true }, 774 - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, 775 - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, 776 - .hsub = 1, .vsub = 1, .has_alpha = true }, 777 - }; 778 - 779 - static const struct drm_format_info * 780 - lookup_format_info(const struct drm_format_info formats[], 781 - int num_formats, u32 format) 782 - { 783 - int i; 784 - 785 - for (i = 0; i < num_formats; i++) { 786 - if (formats[i].format == format) 787 - return &formats[i]; 788 - } 789 - 790 - return NULL; 791 - } 792 - 793 - static const struct drm_format_info * 794 - intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 795 - { 796 - switch (cmd->modifier[0]) { 797 - case I915_FORMAT_MOD_Y_TILED_CCS: 798 - case I915_FORMAT_MOD_Yf_TILED_CCS: 799 - return lookup_format_info(skl_ccs_formats, 800 - ARRAY_SIZE(skl_ccs_formats), 801 - cmd->pixel_format); 802 - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 803 - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 804 - return lookup_format_info(gen12_ccs_formats, 805 - ARRAY_SIZE(gen12_ccs_formats), 806 - cmd->pixel_format); 807 - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: 808 - return lookup_format_info(gen12_ccs_cc_formats, 809 - ARRAY_SIZE(gen12_ccs_cc_formats), 810 - cmd->pixel_format); 811 - default: 812 - return NULL; 813 - } 814 905 } 815 906 816 907 u32 
intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, ··· 784 1135 if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes) 785 1136 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 786 1137 787 - intel_disable_plane(plane, crtc_state); 1138 + intel_plane_disable_arm(plane, crtc_state); 788 1139 intel_wait_for_vblank(dev_priv, crtc->pipe); 789 1140 } 790 1141 ··· 959 1310 clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); 960 1311 } 961 1312 962 - static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state) 963 - { 964 - if (crtc_state->pch_pfit.enabled && 965 - (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) || 966 - crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) || 967 - crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)) 968 - return false; 969 - 970 - if (crtc_state->dsc.compression_enable) 971 - return false; 972 - 973 - if (crtc_state->has_psr2) 974 - return false; 975 - 976 - if (crtc_state->splitter.enable) 977 - return false; 978 - 979 - return true; 980 - } 981 - 982 1313 static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) 983 1314 { 984 1315 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); ··· 982 1353 */ 983 1354 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; 984 1355 985 - if (IS_DG2(dev_priv)) { 986 - /* 987 - * Underrun recovery must always be disabled on DG2. However 988 - * the chicken bit meaning is inverted compared to other 989 - * platforms. 990 - */ 1356 + /* 1357 + * Underrun recovery must always be disabled on display 13+. 1358 + * DG2 chicken bit meaning is inverted compared to other platforms. 
1359 + */ 1360 + if (IS_DG2(dev_priv)) 991 1361 tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2; 992 - } else if (DISPLAY_VER(dev_priv) >= 13) { 993 - if (underrun_recovery_supported(crtc_state)) 994 - tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP; 995 - else 996 - tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; 997 - } 1362 + else if (DISPLAY_VER(dev_priv) >= 13) 1363 + tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; 998 1364 999 1365 intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp); 1000 1366 } ··· 1019 1395 return false; 1020 1396 } 1021 1397 1022 - void lpt_disable_iclkip(struct drm_i915_private *dev_priv) 1023 - { 1024 - u32 temp; 1025 - 1026 - intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE); 1027 - 1028 - mutex_lock(&dev_priv->sb_lock); 1029 - 1030 - temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 1031 - temp |= SBI_SSCCTL_DISABLE; 1032 - intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 1033 - 1034 - mutex_unlock(&dev_priv->sb_lock); 1035 - } 1036 - 1037 - /* Program iCLKIP clock to the desired frequency */ 1038 - static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) 1039 - { 1040 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1041 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1042 - int clock = crtc_state->hw.adjusted_mode.crtc_clock; 1043 - u32 divsel, phaseinc, auxdiv, phasedir = 0; 1044 - u32 temp; 1045 - 1046 - lpt_disable_iclkip(dev_priv); 1047 - 1048 - /* The iCLK virtual clock root frequency is in MHz, 1049 - * but the adjusted_mode->crtc_clock in in KHz. To get the 1050 - * divisors, it is necessary to divide one by another, so we 1051 - * convert the virtual clock precision to KHz here for higher 1052 - * precision. 
1053 - */ 1054 - for (auxdiv = 0; auxdiv < 2; auxdiv++) { 1055 - u32 iclk_virtual_root_freq = 172800 * 1000; 1056 - u32 iclk_pi_range = 64; 1057 - u32 desired_divisor; 1058 - 1059 - desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 1060 - clock << auxdiv); 1061 - divsel = (desired_divisor / iclk_pi_range) - 2; 1062 - phaseinc = desired_divisor % iclk_pi_range; 1063 - 1064 - /* 1065 - * Near 20MHz is a corner case which is 1066 - * out of range for the 7-bit divisor 1067 - */ 1068 - if (divsel <= 0x7f) 1069 - break; 1070 - } 1071 - 1072 - /* This should not happen with any sane values */ 1073 - drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 1074 - ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 1075 - drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) & 1076 - ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 1077 - 1078 - drm_dbg_kms(&dev_priv->drm, 1079 - "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 1080 - clock, auxdiv, divsel, phasedir, phaseinc); 1081 - 1082 - mutex_lock(&dev_priv->sb_lock); 1083 - 1084 - /* Program SSCDIVINTPHASE6 */ 1085 - temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 1086 - temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 1087 - temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 1088 - temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 1089 - temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 1090 - temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 1091 - temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 1092 - intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 1093 - 1094 - /* Program SSCAUXDIV */ 1095 - temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 1096 - temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 1097 - temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 1098 - intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 1099 - 1100 - /* Enable modulator and associated divider */ 1101 - temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 1102 - temp &= ~SBI_SSCCTL_DISABLE; 1103 - 
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 1104 - 1105 - mutex_unlock(&dev_priv->sb_lock); 1106 - 1107 - /* Wait for initialization time */ 1108 - udelay(24); 1109 - 1110 - intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); 1111 - } 1112 - 1113 - int lpt_get_iclkip(struct drm_i915_private *dev_priv) 1114 - { 1115 - u32 divsel, phaseinc, auxdiv; 1116 - u32 iclk_virtual_root_freq = 172800 * 1000; 1117 - u32 iclk_pi_range = 64; 1118 - u32 desired_divisor; 1119 - u32 temp; 1120 - 1121 - if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) 1122 - return 0; 1123 - 1124 - mutex_lock(&dev_priv->sb_lock); 1125 - 1126 - temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 1127 - if (temp & SBI_SSCCTL_DISABLE) { 1128 - mutex_unlock(&dev_priv->sb_lock); 1129 - return 0; 1130 - } 1131 - 1132 - temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 1133 - divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> 1134 - SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; 1135 - phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> 1136 - SBI_SSCDIVINTPHASE_INCVAL_SHIFT; 1137 - 1138 - temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 1139 - auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> 1140 - SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; 1141 - 1142 - mutex_unlock(&dev_priv->sb_lock); 1143 - 1144 - desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; 1145 - 1146 - return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 1147 - desired_divisor << auxdiv); 1148 - } 1149 - 1150 - static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, 1151 - enum pipe pch_transcoder) 1152 - { 1153 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1154 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1155 - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1156 - 1157 - intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), 1158 - intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); 1159 - 
intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), 1160 - intel_de_read(dev_priv, HBLANK(cpu_transcoder))); 1161 - intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), 1162 - intel_de_read(dev_priv, HSYNC(cpu_transcoder))); 1163 - 1164 - intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), 1165 - intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); 1166 - intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), 1167 - intel_de_read(dev_priv, VBLANK(cpu_transcoder))); 1168 - intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), 1169 - intel_de_read(dev_priv, VSYNC(cpu_transcoder))); 1170 - intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), 1171 - intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); 1172 - } 1173 - 1174 1398 /* 1175 1399 * Finds the encoder associated with the given CRTC. This can only be 1176 1400 * used when we know that the CRTC isn't feeding multiple encoders! ··· 1027 1555 intel_get_crtc_new_encoder(const struct intel_atomic_state *state, 1028 1556 const struct intel_crtc_state *crtc_state) 1029 1557 { 1030 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1031 1558 const struct drm_connector_state *connector_state; 1032 1559 const struct drm_connector *connector; 1033 1560 struct intel_encoder *encoder = NULL; 1561 + struct intel_crtc *master_crtc; 1034 1562 int num_encoders = 0; 1035 1563 int i; 1036 1564 1565 + master_crtc = intel_master_crtc(crtc_state); 1566 + 1037 1567 for_each_new_connector_in_state(&state->base, connector, connector_state, i) { 1038 - if (connector_state->crtc != &crtc->base) 1568 + if (connector_state->crtc != &master_crtc->base) 1039 1569 continue; 1040 1570 1041 1571 encoder = to_intel_encoder(connector_state->best_encoder); ··· 1046 1572 1047 1573 drm_WARN(encoder->base.dev, num_encoders != 1, 1048 1574 "%d encoders for pipe %c\n", 1049 - num_encoders, pipe_name(crtc->pipe)); 1575 + num_encoders, pipe_name(master_crtc->pipe)); 1050 1576 1051 1577 return encoder; 1052 
- } 1053 - 1054 - /* 1055 - * Enable PCH resources required for PCH ports: 1056 - * - PCH PLLs 1057 - * - FDI training & RX/TX 1058 - * - update transcoder timings 1059 - * - DP transcoding bits 1060 - * - transcoder 1061 - */ 1062 - static void ilk_pch_enable(const struct intel_atomic_state *state, 1063 - const struct intel_crtc_state *crtc_state) 1064 - { 1065 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1066 - struct drm_device *dev = crtc->base.dev; 1067 - struct drm_i915_private *dev_priv = to_i915(dev); 1068 - enum pipe pipe = crtc->pipe; 1069 - u32 temp; 1070 - 1071 - assert_pch_transcoder_disabled(dev_priv, pipe); 1072 - 1073 - /* For PCH output, training FDI link */ 1074 - intel_fdi_link_train(crtc, crtc_state); 1075 - 1076 - /* We need to program the right clock selection before writing the pixel 1077 - * mutliplier into the DPLL. */ 1078 - if (HAS_PCH_CPT(dev_priv)) { 1079 - u32 sel; 1080 - 1081 - temp = intel_de_read(dev_priv, PCH_DPLL_SEL); 1082 - temp |= TRANS_DPLL_ENABLE(pipe); 1083 - sel = TRANS_DPLLB_SEL(pipe); 1084 - if (crtc_state->shared_dpll == 1085 - intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 1086 - temp |= sel; 1087 - else 1088 - temp &= ~sel; 1089 - intel_de_write(dev_priv, PCH_DPLL_SEL, temp); 1090 - } 1091 - 1092 - /* XXX: pch pll's can be enabled any time before we enable the PCH 1093 - * transcoder, and we actually should do this to not upset any PCH 1094 - * transcoder that already use the clock when we share it. 1095 - * 1096 - * Note that enable_shared_dpll tries to do the right thing, but 1097 - * get_shared_dpll unconditionally resets the pll - we need that to have 1098 - * the right LVDS enable sequence. 
*/ 1099 - intel_enable_shared_dpll(crtc_state); 1100 - 1101 - /* set transcoder timing, panel must allow it */ 1102 - assert_pps_unlocked(dev_priv, pipe); 1103 - ilk_pch_transcoder_set_timings(crtc_state, pipe); 1104 - 1105 - intel_fdi_normal_train(crtc); 1106 - 1107 - /* For PCH DP, enable TRANS_DP_CTL */ 1108 - if (HAS_PCH_CPT(dev_priv) && 1109 - intel_crtc_has_dp_encoder(crtc_state)) { 1110 - const struct drm_display_mode *adjusted_mode = 1111 - &crtc_state->hw.adjusted_mode; 1112 - u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 1113 - i915_reg_t reg = TRANS_DP_CTL(pipe); 1114 - enum port port; 1115 - 1116 - temp = intel_de_read(dev_priv, reg); 1117 - temp &= ~(TRANS_DP_PORT_SEL_MASK | 1118 - TRANS_DP_SYNC_MASK | 1119 - TRANS_DP_BPC_MASK); 1120 - temp |= TRANS_DP_OUTPUT_ENABLE; 1121 - temp |= bpc << 9; /* same format but at 11:9 */ 1122 - 1123 - if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1124 - temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 1125 - if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 1126 - temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 1127 - 1128 - port = intel_get_crtc_new_encoder(state, crtc_state)->port; 1129 - drm_WARN_ON(dev, port < PORT_B || port > PORT_D); 1130 - temp |= TRANS_DP_PORT_SEL(port); 1131 - 1132 - intel_de_write(dev_priv, reg, temp); 1133 - } 1134 - 1135 - ilk_enable_pch_transcoder(crtc_state); 1136 - } 1137 - 1138 - void lpt_pch_enable(const struct intel_crtc_state *crtc_state) 1139 - { 1140 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1141 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1142 - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 1143 - 1144 - assert_pch_transcoder_disabled(dev_priv, PIPE_A); 1145 - 1146 - lpt_program_iclkip(crtc_state); 1147 - 1148 - /* Set transcoder timing. 
*/ 1149 - ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); 1150 - 1151 - lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 1152 1578 } 1153 1579 1154 1580 static void cpt_verify_modeset(struct drm_i915_private *dev_priv, ··· 1428 2054 intel_atomic_get_new_crtc_state(state, crtc); 1429 2055 enum pipe pipe = crtc->pipe; 1430 2056 2057 + intel_psr_pre_plane_update(state, crtc); 2058 + 1431 2059 if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state)) 1432 2060 hsw_disable_ips(old_crtc_state); 1433 2061 ··· 1541 2165 !(update_mask & BIT(plane->id))) 1542 2166 continue; 1543 2167 1544 - intel_disable_plane(plane, new_crtc_state); 2168 + intel_plane_disable_arm(plane, new_crtc_state); 1545 2169 1546 2170 if (old_plane_state->uapi.visible) 1547 2171 fb_bits |= plane->frontbuffer_bit; ··· 1575 2199 1576 2200 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 1577 2201 { 2202 + struct drm_i915_private *i915 = to_i915(state->base.dev); 2203 + struct intel_crtc_state *new_crtc_state, *old_crtc_state; 2204 + struct intel_crtc *crtc; 1578 2205 struct drm_connector_state *new_conn_state; 1579 2206 struct drm_connector *connector; 1580 2207 int i; 2208 + 2209 + /* 2210 + * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits. 2211 + * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook. 
2212 + */ 2213 + if (i915->dpll.mgr) { 2214 + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 2215 + if (intel_crtc_needs_modeset(new_crtc_state)) 2216 + continue; 2217 + 2218 + new_crtc_state->shared_dpll = old_crtc_state->shared_dpll; 2219 + new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state; 2220 + } 2221 + } 2222 + 2223 + if (!state->modeset) 2224 + return; 1581 2225 1582 2226 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 1583 2227 i) { ··· 1624 2228 struct drm_connector_state *new_conn_state; 1625 2229 struct drm_connector *connector; 1626 2230 int i; 2231 + 2232 + if (!state->modeset) 2233 + return; 1627 2234 1628 2235 for_each_new_connector_in_state(&state->base, connector, new_conn_state, 1629 2236 i) { ··· 1712 2313 encoder->enable(state, encoder, 1713 2314 crtc_state, conn_state); 1714 2315 intel_opregion_notify_encoder(encoder, true); 1715 - } 1716 - } 1717 - 1718 - static void intel_encoders_pre_disable(struct intel_atomic_state *state, 1719 - struct intel_crtc *crtc) 1720 - { 1721 - const struct intel_crtc_state *old_crtc_state = 1722 - intel_atomic_get_old_crtc_state(state, crtc); 1723 - const struct drm_connector_state *old_conn_state; 1724 - struct drm_connector *conn; 1725 - int i; 1726 - 1727 - for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 1728 - struct intel_encoder *encoder = 1729 - to_intel_encoder(old_conn_state->best_encoder); 1730 - 1731 - if (old_conn_state->crtc != &crtc->base) 1732 - continue; 1733 - 1734 - if (encoder->pre_disable) 1735 - encoder->pre_disable(state, encoder, old_crtc_state, 1736 - old_conn_state); 1737 2316 } 1738 2317 } 1739 2318 ··· 1809 2432 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1810 2433 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 1811 2434 1812 - plane->disable_plane(plane, crtc_state); 2435 + plane->disable_arm(plane, crtc_state); 1813 2436 } 1814 2437 1815 2438 static 
void ilk_crtc_enable(struct intel_atomic_state *state, ··· 1877 2500 intel_enable_transcoder(new_crtc_state); 1878 2501 1879 2502 if (new_crtc_state->has_pch_encoder) 1880 - ilk_pch_enable(state, new_crtc_state); 2503 + ilk_pch_enable(state, crtc); 1881 2504 1882 2505 intel_crtc_vblank_on(new_crtc_state); 1883 2506 ··· 1969 2592 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, 1970 2593 const struct intel_crtc_state *crtc_state) 1971 2594 { 1972 - struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc); 1973 - struct drm_i915_private *dev_priv = to_i915(master->base.dev); 2595 + struct drm_i915_private *dev_priv = to_i915(state->base.dev); 1974 2596 struct intel_crtc_state *master_crtc_state; 2597 + struct intel_crtc *master_crtc; 1975 2598 struct drm_connector_state *conn_state; 1976 2599 struct drm_connector *conn; 1977 2600 struct intel_encoder *encoder = NULL; 1978 2601 int i; 1979 2602 1980 - if (crtc_state->bigjoiner_slave) 1981 - master = crtc_state->bigjoiner_linked_crtc; 1982 - 1983 - master_crtc_state = intel_atomic_get_new_crtc_state(state, master); 2603 + master_crtc = intel_master_crtc(crtc_state); 2604 + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); 1984 2605 1985 2606 for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 1986 - if (conn_state->crtc != &master->base) 2607 + if (conn_state->crtc != &master_crtc->base) 1987 2608 continue; 1988 2609 1989 2610 encoder = to_intel_encoder(conn_state->best_encoder); 1990 2611 break; 1991 2612 } 1992 2613 1993 - if (!crtc_state->bigjoiner_slave) { 1994 - /* need to enable VDSC, which we skipped in pre-enable */ 1995 - intel_dsc_enable(encoder, crtc_state); 1996 - } else { 1997 - /* 1998 - * Enable sequence steps 1-7 on bigjoiner master 1999 - */ 2000 - intel_encoders_pre_pll_enable(state, master); 2001 - if (master_crtc_state->shared_dpll) 2002 - intel_enable_shared_dpll(master_crtc_state); 2003 - intel_encoders_pre_enable(state, 
master); 2614 + /* 2615 + * Enable sequence steps 1-7 on bigjoiner master 2616 + */ 2617 + if (crtc_state->bigjoiner_slave) 2618 + intel_encoders_pre_pll_enable(state, master_crtc); 2004 2619 2005 - /* and DSC on slave */ 2006 - intel_dsc_enable(NULL, crtc_state); 2007 - } 2620 + if (crtc_state->shared_dpll) 2621 + intel_enable_shared_dpll(crtc_state); 2622 + 2623 + if (crtc_state->bigjoiner_slave) 2624 + intel_encoders_pre_enable(state, master_crtc); 2625 + 2626 + /* need to enable VDSC, which we skipped in pre-enable */ 2627 + intel_dsc_enable(crtc_state); 2008 2628 2009 2629 if (DISPLAY_VER(dev_priv) >= 13) 2010 2630 intel_uncompressed_joiner_enable(crtc_state); ··· 2148 2774 ilk_pfit_disable(old_crtc_state); 2149 2775 2150 2776 if (old_crtc_state->has_pch_encoder) 2151 - ilk_fdi_disable(crtc); 2777 + ilk_pch_disable(state, crtc); 2152 2778 2153 2779 intel_encoders_post_disable(state, crtc); 2154 2780 2155 - if (old_crtc_state->has_pch_encoder) { 2156 - ilk_disable_pch_transcoder(dev_priv, pipe); 2157 - 2158 - if (HAS_PCH_CPT(dev_priv)) { 2159 - i915_reg_t reg; 2160 - u32 temp; 2161 - 2162 - /* disable TRANS_DP_CTL */ 2163 - reg = TRANS_DP_CTL(pipe); 2164 - temp = intel_de_read(dev_priv, reg); 2165 - temp &= ~(TRANS_DP_OUTPUT_ENABLE | 2166 - TRANS_DP_PORT_SEL_MASK); 2167 - temp |= TRANS_DP_PORT_SEL_NONE; 2168 - intel_de_write(dev_priv, reg, temp); 2169 - 2170 - /* disable DPLL_SEL */ 2171 - temp = intel_de_read(dev_priv, PCH_DPLL_SEL); 2172 - temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 2173 - intel_de_write(dev_priv, PCH_DPLL_SEL, temp); 2174 - } 2175 - 2176 - ilk_fdi_pll_disable(crtc); 2177 - } 2781 + if (old_crtc_state->has_pch_encoder) 2782 + ilk_pch_post_disable(state, crtc); 2178 2783 2179 2784 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 2180 2785 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); ··· 2162 2809 static void hsw_crtc_disable(struct intel_atomic_state *state, 2163 2810 struct intel_crtc *crtc) 2164 
2811 { 2812 + const struct intel_crtc_state *old_crtc_state = 2813 + intel_atomic_get_old_crtc_state(state, crtc); 2814 + 2165 2815 /* 2166 2816 * FIXME collapse everything to one hook. 2167 2817 * Need care with mst->ddi interactions. 2168 2818 */ 2169 - intel_encoders_disable(state, crtc); 2170 - intel_encoders_post_disable(state, crtc); 2819 + if (!old_crtc_state->bigjoiner_slave) { 2820 + intel_encoders_disable(state, crtc); 2821 + intel_encoders_post_disable(state, crtc); 2822 + } 2171 2823 } 2172 2824 2173 2825 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) ··· 3664 4306 return ret; 3665 4307 } 3666 4308 3667 - static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) 3668 - { 3669 - struct intel_encoder *encoder; 3670 - int i; 3671 - u32 val, final; 3672 - bool has_lvds = false; 3673 - bool has_cpu_edp = false; 3674 - bool has_panel = false; 3675 - bool has_ck505 = false; 3676 - bool can_ssc = false; 3677 - bool using_ssc_source = false; 3678 - 3679 - /* We need to take the global config into account */ 3680 - for_each_intel_encoder(&dev_priv->drm, encoder) { 3681 - switch (encoder->type) { 3682 - case INTEL_OUTPUT_LVDS: 3683 - has_panel = true; 3684 - has_lvds = true; 3685 - break; 3686 - case INTEL_OUTPUT_EDP: 3687 - has_panel = true; 3688 - if (encoder->port == PORT_A) 3689 - has_cpu_edp = true; 3690 - break; 3691 - default: 3692 - break; 3693 - } 3694 - } 3695 - 3696 - if (HAS_PCH_IBX(dev_priv)) { 3697 - has_ck505 = dev_priv->vbt.display_clock_mode; 3698 - can_ssc = has_ck505; 3699 - } else { 3700 - has_ck505 = false; 3701 - can_ssc = true; 3702 - } 3703 - 3704 - /* Check if any DPLLs are using the SSC source */ 3705 - for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { 3706 - u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); 3707 - 3708 - if (!(temp & DPLL_VCO_ENABLE)) 3709 - continue; 3710 - 3711 - if ((temp & PLL_REF_INPUT_MASK) == 3712 - PLLB_REF_INPUT_SPREADSPECTRUMIN) { 3713 - using_ssc_source = true; 3714 - 
break; 3715 - } 3716 - } 3717 - 3718 - drm_dbg_kms(&dev_priv->drm, 3719 - "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 3720 - has_panel, has_lvds, has_ck505, using_ssc_source); 3721 - 3722 - /* Ironlake: try to setup display ref clock before DPLL 3723 - * enabling. This is only under driver's control after 3724 - * PCH B stepping, previous chipset stepping should be 3725 - * ignoring this setting. 3726 - */ 3727 - val = intel_de_read(dev_priv, PCH_DREF_CONTROL); 3728 - 3729 - /* As we must carefully and slowly disable/enable each source in turn, 3730 - * compute the final state we want first and check if we need to 3731 - * make any changes at all. 3732 - */ 3733 - final = val; 3734 - final &= ~DREF_NONSPREAD_SOURCE_MASK; 3735 - if (has_ck505) 3736 - final |= DREF_NONSPREAD_CK505_ENABLE; 3737 - else 3738 - final |= DREF_NONSPREAD_SOURCE_ENABLE; 3739 - 3740 - final &= ~DREF_SSC_SOURCE_MASK; 3741 - final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 3742 - final &= ~DREF_SSC1_ENABLE; 3743 - 3744 - if (has_panel) { 3745 - final |= DREF_SSC_SOURCE_ENABLE; 3746 - 3747 - if (intel_panel_use_ssc(dev_priv) && can_ssc) 3748 - final |= DREF_SSC1_ENABLE; 3749 - 3750 - if (has_cpu_edp) { 3751 - if (intel_panel_use_ssc(dev_priv) && can_ssc) 3752 - final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 3753 - else 3754 - final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 3755 - } else 3756 - final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 3757 - } else if (using_ssc_source) { 3758 - final |= DREF_SSC_SOURCE_ENABLE; 3759 - final |= DREF_SSC1_ENABLE; 3760 - } 3761 - 3762 - if (final == val) 3763 - return; 3764 - 3765 - /* Always enable nonspread source */ 3766 - val &= ~DREF_NONSPREAD_SOURCE_MASK; 3767 - 3768 - if (has_ck505) 3769 - val |= DREF_NONSPREAD_CK505_ENABLE; 3770 - else 3771 - val |= DREF_NONSPREAD_SOURCE_ENABLE; 3772 - 3773 - if (has_panel) { 3774 - val &= ~DREF_SSC_SOURCE_MASK; 3775 - val |= DREF_SSC_SOURCE_ENABLE; 3776 - 3777 - /* SSC must be turned on before enabling the CPU output */ 
3778 - if (intel_panel_use_ssc(dev_priv) && can_ssc) { 3779 - drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); 3780 - val |= DREF_SSC1_ENABLE; 3781 - } else 3782 - val &= ~DREF_SSC1_ENABLE; 3783 - 3784 - /* Get SSC going before enabling the outputs */ 3785 - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 3786 - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 3787 - udelay(200); 3788 - 3789 - val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 3790 - 3791 - /* Enable CPU source on CPU attached eDP */ 3792 - if (has_cpu_edp) { 3793 - if (intel_panel_use_ssc(dev_priv) && can_ssc) { 3794 - drm_dbg_kms(&dev_priv->drm, 3795 - "Using SSC on eDP\n"); 3796 - val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 3797 - } else 3798 - val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 3799 - } else 3800 - val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 3801 - 3802 - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 3803 - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 3804 - udelay(200); 3805 - } else { 3806 - drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n"); 3807 - 3808 - val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 3809 - 3810 - /* Turn off CPU output */ 3811 - val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 3812 - 3813 - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 3814 - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 3815 - udelay(200); 3816 - 3817 - if (!using_ssc_source) { 3818 - drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n"); 3819 - 3820 - /* Turn off the SSC source */ 3821 - val &= ~DREF_SSC_SOURCE_MASK; 3822 - val |= DREF_SSC_SOURCE_DISABLE; 3823 - 3824 - /* Turn off SSC1 */ 3825 - val &= ~DREF_SSC1_ENABLE; 3826 - 3827 - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); 3828 - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); 3829 - udelay(200); 3830 - } 3831 - } 3832 - 3833 - BUG_ON(val != final); 3834 - } 3835 - 3836 - /* Implements 3 different sequences from BSpec chapter "Display iCLK 3837 - * Programming" based on the parameters passed: 3838 - * - Sequence to enable CLKOUT_DP 3839 - * - 
Sequence to enable CLKOUT_DP without spread 3840 - * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 3841 - */ 3842 - static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 3843 - bool with_spread, bool with_fdi) 3844 - { 3845 - u32 reg, tmp; 3846 - 3847 - if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, 3848 - "FDI requires downspread\n")) 3849 - with_spread = true; 3850 - if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && 3851 - with_fdi, "LP PCH doesn't have FDI\n")) 3852 - with_fdi = false; 3853 - 3854 - mutex_lock(&dev_priv->sb_lock); 3855 - 3856 - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 3857 - tmp &= ~SBI_SSCCTL_DISABLE; 3858 - tmp |= SBI_SSCCTL_PATHALT; 3859 - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 3860 - 3861 - udelay(24); 3862 - 3863 - if (with_spread) { 3864 - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 3865 - tmp &= ~SBI_SSCCTL_PATHALT; 3866 - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 3867 - 3868 - if (with_fdi) 3869 - lpt_fdi_program_mphy(dev_priv); 3870 - } 3871 - 3872 - reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 3873 - tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 3874 - tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 3875 - intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 3876 - 3877 - mutex_unlock(&dev_priv->sb_lock); 3878 - } 3879 - 3880 - /* Sequence to disable CLKOUT_DP */ 3881 - void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 3882 - { 3883 - u32 reg, tmp; 3884 - 3885 - mutex_lock(&dev_priv->sb_lock); 3886 - 3887 - reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; 3888 - tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 3889 - tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 3890 - intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 3891 - 3892 - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 3893 - if (!(tmp & SBI_SSCCTL_DISABLE)) { 3894 - if (!(tmp & SBI_SSCCTL_PATHALT)) { 3895 - tmp |= SBI_SSCCTL_PATHALT; 3896 - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 3897 - udelay(32); 3898 - } 3899 - tmp |= SBI_SSCCTL_DISABLE; 3900 - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 3901 - } 3902 - 3903 - mutex_unlock(&dev_priv->sb_lock); 3904 - } 3905 - 3906 - #define BEND_IDX(steps) ((50 + (steps)) / 5) 3907 - 3908 - static const u16 sscdivintphase[] = { 3909 - [BEND_IDX( 50)] = 0x3B23, 3910 - [BEND_IDX( 45)] = 0x3B23, 3911 - [BEND_IDX( 40)] = 0x3C23, 3912 - [BEND_IDX( 35)] = 0x3C23, 3913 - [BEND_IDX( 30)] = 0x3D23, 3914 - [BEND_IDX( 25)] = 0x3D23, 3915 - [BEND_IDX( 20)] = 0x3E23, 3916 - [BEND_IDX( 15)] = 0x3E23, 3917 - [BEND_IDX( 10)] = 0x3F23, 3918 - [BEND_IDX( 5)] = 0x3F23, 3919 - [BEND_IDX( 0)] = 0x0025, 3920 - [BEND_IDX( -5)] = 0x0025, 3921 - [BEND_IDX(-10)] = 0x0125, 3922 - [BEND_IDX(-15)] = 0x0125, 3923 - [BEND_IDX(-20)] = 0x0225, 3924 - [BEND_IDX(-25)] = 0x0225, 3925 - [BEND_IDX(-30)] = 0x0325, 3926 - [BEND_IDX(-35)] = 0x0325, 3927 - [BEND_IDX(-40)] = 0x0425, 3928 - [BEND_IDX(-45)] = 0x0425, 3929 - [BEND_IDX(-50)] = 0x0525, 3930 - }; 3931 - 3932 - /* 3933 - * Bend CLKOUT_DP 3934 - * steps -50 to 50 inclusive, in steps of 5 3935 - * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 3936 - * change in clock period = -(steps / 10) * 5.787 ps 3937 - */ 3938 - static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 3939 - { 3940 - u32 tmp; 3941 - int idx = BEND_IDX(steps); 3942 - 3943 - if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) 3944 - return; 3945 - 3946 - if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) 3947 - return; 3948 - 3949 - 
mutex_lock(&dev_priv->sb_lock); 3950 - 3951 - if (steps % 10 != 0) 3952 - tmp = 0xAAAAAAAB; 3953 - else 3954 - tmp = 0x00000000; 3955 - intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 3956 - 3957 - tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 3958 - tmp &= 0xffff0000; 3959 - tmp |= sscdivintphase[idx]; 3960 - intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 3961 - 3962 - mutex_unlock(&dev_priv->sb_lock); 3963 - } 3964 - 3965 - #undef BEND_IDX 3966 - 3967 - static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) 3968 - { 3969 - u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); 3970 - u32 ctl = intel_de_read(dev_priv, SPLL_CTL); 3971 - 3972 - if ((ctl & SPLL_PLL_ENABLE) == 0) 3973 - return false; 3974 - 3975 - if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && 3976 - (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 3977 - return true; 3978 - 3979 - if (IS_BROADWELL(dev_priv) && 3980 - (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) 3981 - return true; 3982 - 3983 - return false; 3984 - } 3985 - 3986 - static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, 3987 - enum intel_dpll_id id) 3988 - { 3989 - u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); 3990 - u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); 3991 - 3992 - if ((ctl & WRPLL_PLL_ENABLE) == 0) 3993 - return false; 3994 - 3995 - if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) 3996 - return true; 3997 - 3998 - if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && 3999 - (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && 4000 - (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) 4001 - return true; 4002 - 4003 - return false; 4004 - } 4005 - 4006 - static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 4007 - { 4008 - struct intel_encoder *encoder; 4009 - bool has_fdi = false; 4010 - 4011 - for_each_intel_encoder(&dev_priv->drm, encoder) { 4012 - switch (encoder->type) { 4013 - case INTEL_OUTPUT_ANALOG: 4014 - has_fdi = true; 4015 - break; 4016 - 
default: 4017 - break; 4018 - } 4019 - } 4020 - 4021 - /* 4022 - * The BIOS may have decided to use the PCH SSC 4023 - * reference so we must not disable it until the 4024 - * relevant PLLs have stopped relying on it. We'll 4025 - * just leave the PCH SSC reference enabled in case 4026 - * any active PLL is using it. It will get disabled 4027 - * after runtime suspend if we don't have FDI. 4028 - * 4029 - * TODO: Move the whole reference clock handling 4030 - * to the modeset sequence proper so that we can 4031 - * actually enable/disable/reconfigure these things 4032 - * safely. To do that we need to introduce a real 4033 - * clock hierarchy. That would also allow us to do 4034 - * clock bending finally. 4035 - */ 4036 - dev_priv->pch_ssc_use = 0; 4037 - 4038 - if (spll_uses_pch_ssc(dev_priv)) { 4039 - drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); 4040 - dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); 4041 - } 4042 - 4043 - if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { 4044 - drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); 4045 - dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); 4046 - } 4047 - 4048 - if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { 4049 - drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); 4050 - dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); 4051 - } 4052 - 4053 - if (dev_priv->pch_ssc_use) 4054 - return; 4055 - 4056 - if (has_fdi) { 4057 - lpt_bend_clkout_dp(dev_priv, 0); 4058 - lpt_enable_clkout_dp(dev_priv, true, true); 4059 - } else { 4060 - lpt_disable_clkout_dp(dev_priv); 4061 - } 4062 - } 4063 - 4064 - /* 4065 - * Initialize reference clocks when the driver loads 4066 - */ 4067 - void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 4068 - { 4069 - if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 4070 - ilk_init_pch_refclk(dev_priv); 4071 - else if (HAS_PCH_LPT(dev_priv)) 4072 - lpt_init_pch_refclk(dev_priv); 4073 - } 4074 - 4075 4309 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) 4076 
4310 { 4077 4311 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); ··· 3928 4978 &pipe_config->dp_m2_n2); 3929 4979 } 3930 4980 3931 - static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, 3932 - struct intel_crtc_state *pipe_config) 4981 + void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, 4982 + struct intel_crtc_state *pipe_config) 3933 4983 { 3934 4984 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 3935 4985 &pipe_config->fdi_m_n, NULL); ··· 4066 5116 i9xx_get_pipe_color_config(pipe_config); 4067 5117 intel_color_get_config(pipe_config); 4068 5118 4069 - if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 4070 - struct intel_shared_dpll *pll; 4071 - enum intel_dpll_id pll_id; 4072 - bool pll_active; 5119 + pipe_config->pixel_multiplier = 1; 4073 5120 4074 - pipe_config->has_pch_encoder = true; 4075 - 4076 - tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe)); 4077 - pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 4078 - FDI_DP_PORT_WIDTH_SHIFT) + 1; 4079 - 4080 - ilk_get_fdi_m_n_config(crtc, pipe_config); 4081 - 4082 - if (HAS_PCH_IBX(dev_priv)) { 4083 - /* 4084 - * The pipe->pch transcoder and pch transcoder->pll 4085 - * mapping is fixed. 
4086 - */ 4087 - pll_id = (enum intel_dpll_id) crtc->pipe; 4088 - } else { 4089 - tmp = intel_de_read(dev_priv, PCH_DPLL_SEL); 4090 - if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 4091 - pll_id = DPLL_ID_PCH_PLL_B; 4092 - else 4093 - pll_id= DPLL_ID_PCH_PLL_A; 4094 - } 4095 - 4096 - pipe_config->shared_dpll = 4097 - intel_get_shared_dpll_by_id(dev_priv, pll_id); 4098 - pll = pipe_config->shared_dpll; 4099 - 4100 - pll_active = intel_dpll_get_hw_state(dev_priv, pll, 4101 - &pipe_config->dpll_hw_state); 4102 - drm_WARN_ON(dev, !pll_active); 4103 - 4104 - tmp = pipe_config->dpll_hw_state.dpll; 4105 - pipe_config->pixel_multiplier = 4106 - ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 4107 - >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 4108 - 4109 - ilk_pch_clock_get(crtc, pipe_config); 4110 - } else { 4111 - pipe_config->pixel_multiplier = 1; 4112 - } 5121 + ilk_pch_get_config(pipe_config); 4113 5122 4114 5123 intel_get_transcoder_timings(crtc, pipe_config); 4115 5124 intel_get_pipe_src_size(crtc, pipe_config); ··· 4081 5172 intel_display_power_put(dev_priv, power_domain, wakeref); 4082 5173 4083 5174 return ret; 5175 + } 5176 + 5177 + static u8 bigjoiner_pipes(struct drm_i915_private *i915) 5178 + { 5179 + if (DISPLAY_VER(i915) >= 12) 5180 + return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); 5181 + else if (DISPLAY_VER(i915) >= 11) 5182 + return BIT(PIPE_B) | BIT(PIPE_C); 5183 + else 5184 + return 0; 4084 5185 } 4085 5186 4086 5187 static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, ··· 4106 5187 tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); 4107 5188 4108 5189 return tmp & TRANS_DDI_FUNC_ENABLE; 5190 + } 5191 + 5192 + static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv) 5193 + { 5194 + u8 master_pipes = 0, slave_pipes = 0; 5195 + struct intel_crtc *crtc; 5196 + 5197 + for_each_intel_crtc(&dev_priv->drm, crtc) { 5198 + enum intel_display_power_domain power_domain; 5199 + enum pipe pipe = crtc->pipe; 5200 
+ intel_wakeref_t wakeref; 5201 + 5202 + if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0) 5203 + continue; 5204 + 5205 + power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); 5206 + with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 5207 + u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 5208 + 5209 + if (!(tmp & BIG_JOINER_ENABLE)) 5210 + continue; 5211 + 5212 + if (tmp & MASTER_BIG_JOINER_ENABLE) 5213 + master_pipes |= BIT(pipe); 5214 + else 5215 + slave_pipes |= BIT(pipe); 5216 + } 5217 + 5218 + if (DISPLAY_VER(dev_priv) < 13) 5219 + continue; 5220 + 5221 + power_domain = POWER_DOMAIN_PIPE(pipe); 5222 + with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { 5223 + u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); 5224 + 5225 + if (tmp & UNCOMPRESSED_JOINER_MASTER) 5226 + master_pipes |= BIT(pipe); 5227 + if (tmp & UNCOMPRESSED_JOINER_SLAVE) 5228 + slave_pipes |= BIT(pipe); 5229 + } 5230 + } 5231 + 5232 + /* Bigjoiner pipes should always be consecutive master and slave */ 5233 + drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1, 5234 + "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n", 5235 + master_pipes, slave_pipes); 5236 + 5237 + return slave_pipes; 4109 5238 } 4110 5239 4111 5240 static u8 hsw_panel_transcoders(struct drm_i915_private *i915) ··· 4217 5250 enabled_transcoders |= BIT(cpu_transcoder); 4218 5251 } 4219 5252 5253 + /* single pipe or bigjoiner master */ 4220 5254 cpu_transcoder = (enum transcoder) crtc->pipe; 4221 5255 if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 4222 5256 enabled_transcoders |= BIT(cpu_transcoder); 5257 + 5258 + /* bigjoiner slave -> consider the master pipe's transcoder as well */ 5259 + if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) { 5260 + cpu_transcoder = (enum transcoder) crtc->pipe - 1; 5261 + if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) 5262 + enabled_transcoders |= 
BIT(cpu_transcoder); 5263 + } 4223 5264 4224 5265 return enabled_transcoders; 4225 5266 } ··· 4349 5374 return transcoder_is_dsi(pipe_config->cpu_transcoder); 4350 5375 } 4351 5376 4352 - static void hsw_get_ddi_port_state(struct intel_crtc *crtc, 4353 - struct intel_crtc_state *pipe_config) 4354 - { 4355 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4356 - enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 4357 - enum port port; 4358 - u32 tmp; 4359 - 4360 - if (transcoder_is_dsi(cpu_transcoder)) { 4361 - port = (cpu_transcoder == TRANSCODER_DSI_A) ? 4362 - PORT_A : PORT_B; 4363 - } else { 4364 - tmp = intel_de_read(dev_priv, 4365 - TRANS_DDI_FUNC_CTL(cpu_transcoder)); 4366 - if (!(tmp & TRANS_DDI_FUNC_ENABLE)) 4367 - return; 4368 - if (DISPLAY_VER(dev_priv) >= 12) 4369 - port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 4370 - else 4371 - port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); 4372 - } 4373 - 4374 - /* 4375 - * Haswell has only FDI/PCH transcoder A. It is which is connected to 4376 - * DDI E. So just check whether this pipe is wired to DDI E and whether 4377 - * the PCH transcoder is on. 
4378 - */ 4379 - if (DISPLAY_VER(dev_priv) < 9 && 4380 - (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) { 4381 - pipe_config->has_pch_encoder = true; 4382 - 4383 - tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 4384 - pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 4385 - FDI_DP_PORT_WIDTH_SHIFT) + 1; 4386 - 4387 - ilk_get_fdi_m_n_config(crtc, pipe_config); 4388 - } 4389 - } 4390 - 4391 5377 static bool hsw_get_pipe_config(struct intel_crtc *crtc, 4392 5378 struct intel_crtc_state *pipe_config) 4393 5379 { ··· 4375 5439 if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable) 4376 5440 intel_uncompressed_joiner_get_config(pipe_config); 4377 5441 4378 - if (!active) { 4379 - /* bigjoiner slave doesn't enable transcoder */ 4380 - if (!pipe_config->bigjoiner_slave) 4381 - goto out; 5442 + if (!active) 5443 + goto out; 4382 5444 4383 - active = true; 4384 - pipe_config->pixel_multiplier = 1; 4385 - 4386 - /* we cannot read out most state, so don't bother.. */ 4387 - pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE; 4388 - } else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 4389 - DISPLAY_VER(dev_priv) >= 11) { 4390 - hsw_get_ddi_port_state(crtc, pipe_config); 5445 + if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || 5446 + DISPLAY_VER(dev_priv) >= 11) 4391 5447 intel_get_transcoder_timings(crtc, pipe_config); 4392 - } 4393 5448 4394 5449 if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) 4395 5450 intel_vrr_get_config(crtc, pipe_config); ··· 4448 5521 } 4449 5522 } 4450 5523 4451 - if (pipe_config->bigjoiner_slave) { 4452 - /* Cannot be read out as a slave, set to 0. 
*/ 4453 - pipe_config->pixel_multiplier = 0; 4454 - } else if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 5524 + if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 4455 5525 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 4456 5526 pipe_config->pixel_multiplier = 4457 5527 intel_de_read(dev_priv, ··· 4706 5782 } 4707 5783 4708 5784 /* Returns the clock of the currently programmed mode of the given pipe. */ 4709 - static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 4710 - struct intel_crtc_state *pipe_config) 5785 + void i9xx_crtc_clock_get(struct intel_crtc *crtc, 5786 + struct intel_crtc_state *pipe_config) 4711 5787 { 4712 5788 struct drm_device *dev = crtc->base.dev; 4713 5789 struct drm_i915_private *dev_priv = to_i915(dev); ··· 4815 5891 return 0; 4816 5892 4817 5893 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); 4818 - } 4819 - 4820 - static void ilk_pch_clock_get(struct intel_crtc *crtc, 4821 - struct intel_crtc_state *pipe_config) 4822 - { 4823 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4824 - 4825 - /* read out port_clock from the DPLL */ 4826 - i9xx_crtc_clock_get(crtc, pipe_config); 4827 - 4828 - /* 4829 - * In case there is an active pipe without active ports, 4830 - * we may need some idea for the dotclock anyway. 4831 - * Calculate one based on the FDI configuration. 4832 - */ 4833 - pipe_config->hw.adjusted_mode.crtc_clock = 4834 - intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 4835 - &pipe_config->fdi_m_n); 4836 5894 } 4837 5895 4838 5896 /* Returns the currently programmed mode of the given encoder. 
*/ ··· 5151 6245 linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; 5152 6246 linked_state->color_ctl = plane_state->color_ctl; 5153 6247 linked_state->view = plane_state->view; 6248 + linked_state->decrypt = plane_state->decrypt; 5154 6249 5155 6250 intel_plane_copy_hw_state(linked_state, plane_state); 5156 6251 linked_state->uapi.src = plane_state->uapi.src; ··· 5278 6371 crtc_state->update_wm_post = true; 5279 6372 5280 6373 if (mode_changed && crtc_state->hw.enable && 5281 - dev_priv->dpll_funcs && 5282 - !crtc_state->bigjoiner_slave && 5283 6374 !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { 5284 6375 ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state); 5285 6376 if (ret) ··· 5833 6928 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, 5834 6929 struct intel_crtc_state *crtc_state) 5835 6930 { 5836 - const struct intel_crtc_state *from_crtc_state = crtc_state; 6931 + const struct intel_crtc_state *master_crtc_state; 6932 + struct intel_crtc *master_crtc; 5837 6933 5838 - if (crtc_state->bigjoiner_slave) { 5839 - from_crtc_state = intel_atomic_get_new_crtc_state(state, 5840 - crtc_state->bigjoiner_linked_crtc); 6934 + master_crtc = intel_master_crtc(crtc_state); 6935 + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); 5841 6936 5842 - /* No need to copy state if the master state is unchanged */ 5843 - if (!from_crtc_state) 5844 - return; 5845 - } 5846 - 5847 - intel_crtc_copy_color_blobs(crtc_state, from_crtc_state); 6937 + /* No need to copy state if the master state is unchanged */ 6938 + if (master_crtc_state) 6939 + intel_crtc_copy_color_blobs(crtc_state, master_crtc_state); 5848 6940 } 5849 6941 5850 6942 static void ··· 5884 6982 const struct intel_crtc_state *from_crtc_state) 5885 6983 { 5886 6984 struct intel_crtc_state *saved_state; 5887 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5888 6985 5889 6986 saved_state = kmemdup(from_crtc_state, 
sizeof(*saved_state), GFP_KERNEL); 5890 6987 if (!saved_state) ··· 5913 7012 crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0; 5914 7013 crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc); 5915 7014 crtc_state->bigjoiner_slave = true; 5916 - crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe; 5917 - crtc_state->has_audio = false; 7015 + crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder; 7016 + crtc_state->has_audio = from_crtc_state->has_audio; 5918 7017 5919 7018 return 0; 5920 7019 } ··· 6510 7609 6511 7610 PIPE_CONF_CHECK_X(output_types); 6512 7611 6513 - /* FIXME do the readout properly and get rid of this quirk */ 6514 - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { 6515 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay); 6516 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal); 6517 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start); 6518 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end); 6519 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start); 6520 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end); 7612 + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay); 7613 + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal); 7614 + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start); 7615 + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end); 7616 + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start); 7617 + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end); 6521 7618 6522 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay); 6523 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal); 6524 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start); 6525 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end); 6526 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start); 6527 - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end); 7619 + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay); 7620 + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal); 7621 + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start); 7622 + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end); 7623 
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start); 7624 + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end); 6528 7625 6529 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); 6530 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); 6531 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); 6532 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); 6533 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); 6534 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); 7626 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); 7627 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); 7628 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); 7629 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); 7630 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); 7631 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); 6535 7632 6536 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); 6537 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); 6538 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); 6539 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); 6540 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); 6541 - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); 7633 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); 7634 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); 7635 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); 7636 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); 7637 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); 7638 + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); 6542 7639 6543 - PIPE_CONF_CHECK_I(pixel_multiplier); 7640 + PIPE_CONF_CHECK_I(pixel_multiplier); 6544 7641 7642 + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 7643 + DRM_MODE_FLAG_INTERLACE); 7644 + 7645 + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 6545 7646 PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 6546 - DRM_MODE_FLAG_INTERLACE); 6547 - 6548 - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 6549 - 
PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 6550 - DRM_MODE_FLAG_PHSYNC); 6551 - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 6552 - DRM_MODE_FLAG_NHSYNC); 6553 - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 6554 - DRM_MODE_FLAG_PVSYNC); 6555 - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 6556 - DRM_MODE_FLAG_NVSYNC); 6557 - } 7647 + DRM_MODE_FLAG_PHSYNC); 7648 + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 7649 + DRM_MODE_FLAG_NHSYNC); 7650 + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 7651 + DRM_MODE_FLAG_PVSYNC); 7652 + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, 7653 + DRM_MODE_FLAG_NVSYNC); 6558 7654 } 6559 7655 6560 7656 PIPE_CONF_CHECK_I(output_format); ··· 6563 7665 PIPE_CONF_CHECK_BOOL(hdmi_scrambling); 6564 7666 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); 6565 7667 PIPE_CONF_CHECK_BOOL(has_infoframe); 6566 - /* FIXME do the readout properly and get rid of this quirk */ 6567 - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) 6568 - PIPE_CONF_CHECK_BOOL(fec_enable); 7668 + PIPE_CONF_CHECK_BOOL(fec_enable); 6569 7669 6570 7670 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 6571 7671 ··· 6592 7696 } 6593 7697 6594 7698 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 6595 - /* FIXME do the readout properly and get rid of this quirk */ 6596 - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) 6597 - PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 7699 + PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 6598 7700 6599 7701 PIPE_CONF_CHECK_X(gamma_mode); 6600 7702 if (IS_CHERRYVIEW(dev_priv)) ··· 6619 7725 6620 7726 PIPE_CONF_CHECK_BOOL(double_wide); 6621 7727 6622 - if (dev_priv->dpll.mgr) 7728 + if (dev_priv->dpll.mgr) { 6623 7729 PIPE_CONF_CHECK_P(shared_dpll); 6624 7730 6625 - /* FIXME do the readout properly and get rid of this quirk */ 6626 - if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { 6627 7731 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 6628 7732 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 6629 7733 
PIPE_CONF_CHECK_X(dpll_hw_state.fp0); ··· 6655 7763 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); 6656 7764 } 6657 7765 6658 - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { 6659 - PIPE_CONF_CHECK_X(dsi_pll.ctrl); 6660 - PIPE_CONF_CHECK_X(dsi_pll.div); 7766 + PIPE_CONF_CHECK_X(dsi_pll.ctrl); 7767 + PIPE_CONF_CHECK_X(dsi_pll.div); 6661 7768 6662 - if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) 6663 - PIPE_CONF_CHECK_I(pipe_bpp); 7769 + if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) 7770 + PIPE_CONF_CHECK_I(pipe_bpp); 6664 7771 6665 - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); 6666 - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); 6667 - PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 7772 + PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); 7773 + PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); 7774 + PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 6668 7775 6669 - PIPE_CONF_CHECK_I(min_voltage_level); 6670 - } 7776 + PIPE_CONF_CHECK_I(min_voltage_level); 6671 7777 6672 7778 if (current_config->has_psr || pipe_config->has_psr) 6673 7779 PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, ··· 6939 8049 struct intel_encoder *encoder; 6940 8050 struct intel_crtc_state *pipe_config = old_crtc_state; 6941 8051 struct drm_atomic_state *state = old_crtc_state->uapi.state; 6942 - struct intel_crtc *master = crtc; 8052 + struct intel_crtc *master_crtc; 6943 8053 6944 8054 __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); 6945 8055 intel_crtc_free_hw_state(old_crtc_state); ··· 6967 8077 "(expected %i, found %i)\n", 6968 8078 new_crtc_state->hw.active, crtc->active); 6969 8079 6970 - if (new_crtc_state->bigjoiner_slave) 6971 - master = new_crtc_state->bigjoiner_linked_crtc; 8080 + master_crtc = intel_master_crtc(new_crtc_state); 6972 8081 6973 - for_each_encoder_on_crtc(dev, &master->base, encoder) { 8082 + for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) { 6974 8083 enum pipe pipe; 6975 8084 bool active; 
6976 8085 ··· 6979 8090 encoder->base.base.id, active, 6980 8091 new_crtc_state->hw.active); 6981 8092 6982 - I915_STATE_WARN(active && master->pipe != pipe, 8093 + I915_STATE_WARN(active && master_crtc->pipe != pipe, 6983 8094 "Encoder connected to wrong pipe %c\n", 6984 8095 pipe_name(pipe)); 6985 8096 ··· 6989 8100 6990 8101 if (!new_crtc_state->hw.active) 6991 8102 return; 6992 - 6993 - if (new_crtc_state->bigjoiner_slave) 6994 - /* No PLLs set for slave */ 6995 - pipe_config->shared_dpll = NULL; 6996 8103 6997 8104 intel_pipe_config_sanity_check(dev_priv, pipe_config); 6998 8105 ··· 7106 8221 return; 7107 8222 7108 8223 if (!new_crtc_state->hw.active) 7109 - return; 7110 - 7111 - if (new_crtc_state->bigjoiner_slave) 7112 8224 return; 7113 8225 7114 8226 encoder = intel_get_crtc_new_encoder(state, new_crtc_state); ··· 7489 8607 return 0; 7490 8608 } 7491 8609 7492 - static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj) 7493 - { 7494 - struct drm_i915_private *i915 = to_i915(obj->base.dev); 7495 - 7496 - return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0; 7497 - } 7498 - 7499 - static bool pxp_is_borked(struct drm_i915_gem_object *obj) 7500 - { 7501 - return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj); 7502 - } 7503 - 7504 8610 static int intel_atomic_check_planes(struct intel_atomic_state *state) 7505 8611 { 7506 8612 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 7507 8613 struct intel_crtc_state *old_crtc_state, *new_crtc_state; 7508 8614 struct intel_plane_state *plane_state; 7509 8615 struct intel_plane *plane; 7510 - struct intel_plane_state *new_plane_state; 7511 - struct intel_plane_state *old_plane_state; 7512 8616 struct intel_crtc *crtc; 7513 - const struct drm_framebuffer *fb; 7514 8617 int i, ret; 7515 8618 7516 8619 ret = icl_add_linked_planes(state); ··· 7541 8674 ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); 7542 8675 if (ret) 7543 8676 return ret; 7544 - } 7545 
- 7546 - for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 7547 - new_plane_state = intel_atomic_get_new_plane_state(state, plane); 7548 - old_plane_state = intel_atomic_get_old_plane_state(state, plane); 7549 - fb = new_plane_state->hw.fb; 7550 - if (fb) { 7551 - new_plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb)); 7552 - new_plane_state->force_black = pxp_is_borked(intel_fb_obj(fb)); 7553 - } else { 7554 - new_plane_state->decrypt = old_plane_state->decrypt; 7555 - new_plane_state->force_black = old_plane_state->force_black; 7556 - } 7557 8677 } 7558 8678 7559 8679 return 0; ··· 7644 8790 struct intel_crtc_state *new_crtc_state) 7645 8791 { 7646 8792 struct intel_crtc_state *slave_crtc_state, *master_crtc_state; 7647 - struct intel_crtc *slave, *master; 8793 + struct intel_crtc *slave_crtc, *master_crtc; 7648 8794 7649 8795 /* slave being enabled, is master is still claiming this crtc? */ 7650 8796 if (old_crtc_state->bigjoiner_slave) { 7651 - slave = crtc; 7652 - master = old_crtc_state->bigjoiner_linked_crtc; 7653 - master_crtc_state = intel_atomic_get_new_crtc_state(state, master); 8797 + slave_crtc = crtc; 8798 + master_crtc = old_crtc_state->bigjoiner_linked_crtc; 8799 + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); 7654 8800 if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state)) 7655 8801 goto claimed; 7656 8802 } ··· 7658 8804 if (!new_crtc_state->bigjoiner) 7659 8805 return 0; 7660 8806 7661 - slave = intel_dsc_get_bigjoiner_secondary(crtc); 7662 - if (!slave) { 8807 + slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc); 8808 + if (!slave_crtc) { 7663 8809 DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires " 7664 8810 "CRTC + 1 to be used, doesn't exist\n", 7665 8811 crtc->base.base.id, crtc->base.name); 7666 8812 return -EINVAL; 7667 8813 } 7668 8814 7669 - new_crtc_state->bigjoiner_linked_crtc = slave; 7670 - slave_crtc_state = 
intel_atomic_get_crtc_state(&state->base, slave); 7671 - master = crtc; 8815 + new_crtc_state->bigjoiner_linked_crtc = slave_crtc; 8816 + slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc); 8817 + master_crtc = crtc; 7672 8818 if (IS_ERR(slave_crtc_state)) 7673 8819 return PTR_ERR(slave_crtc_state); 7674 8820 ··· 7677 8823 goto claimed; 7678 8824 7679 8825 DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n", 7680 - slave->base.base.id, slave->base.name); 8826 + slave_crtc->base.base.id, slave_crtc->base.name); 7681 8827 7682 8828 return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state); 7683 8829 7684 8830 claimed: 7685 8831 DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but " 7686 8832 "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n", 7687 - slave->base.base.id, slave->base.name, 7688 - master->base.base.id, master->base.name); 8833 + slave_crtc->base.base.id, slave_crtc->base.name, 8834 + master_crtc->base.base.id, master_crtc->base.name); 7689 8835 return -EINVAL; 7690 8836 } 7691 8837 ··· 7719 8865 * correspond to the last vblank and have no relation to the actual time when 7720 8866 * the flip done event was sent. 7721 8867 */ 7722 - static int intel_atomic_check_async(struct intel_atomic_state *state) 8868 + static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc) 7723 8869 { 7724 8870 struct drm_i915_private *i915 = to_i915(state->base.dev); 7725 8871 const struct intel_crtc_state *old_crtc_state, *new_crtc_state; 7726 8872 const struct intel_plane_state *new_plane_state, *old_plane_state; 7727 - struct intel_crtc *crtc; 7728 8873 struct intel_plane *plane; 7729 8874 int i; 7730 8875 7731 - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 7732 - new_crtc_state, i) { 7733 - if (intel_crtc_needs_modeset(new_crtc_state)) { 7734 - drm_dbg_kms(&i915->drm, "Modeset Required. 
Async flip not supported\n"); 7735 - return -EINVAL; 7736 - } 8876 + old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 8877 + new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 7737 8878 7738 - if (!new_crtc_state->hw.active) { 7739 - drm_dbg_kms(&i915->drm, "CRTC inactive\n"); 7740 - return -EINVAL; 7741 - } 7742 - if (old_crtc_state->active_planes != new_crtc_state->active_planes) { 7743 - drm_dbg_kms(&i915->drm, 7744 - "Active planes cannot be changed during async flip\n"); 7745 - return -EINVAL; 7746 - } 8879 + if (intel_crtc_needs_modeset(new_crtc_state)) { 8880 + drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n"); 8881 + return -EINVAL; 8882 + } 8883 + 8884 + if (!new_crtc_state->hw.active) { 8885 + drm_dbg_kms(&i915->drm, "CRTC inactive\n"); 8886 + return -EINVAL; 8887 + } 8888 + if (old_crtc_state->active_planes != new_crtc_state->active_planes) { 8889 + drm_dbg_kms(&i915->drm, 8890 + "Active planes cannot be changed during async flip\n"); 8891 + return -EINVAL; 7747 8892 } 7748 8893 7749 8894 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 7750 8895 new_plane_state, i) { 8896 + if (plane->pipe != crtc->pipe) 8897 + continue; 8898 + 7751 8899 /* 7752 8900 * TODO: Async flip is only supported through the page flip IOCTL 7753 8901 * as of now. So support currently added for primary plane only. 
··· 7776 8920 return -EINVAL; 7777 8921 } 7778 8922 7779 - if (old_plane_state->view.color_plane[0].stride != 7780 - new_plane_state->view.color_plane[0].stride) { 8923 + if (new_plane_state->hw.fb->format->num_planes > 1) { 8924 + drm_dbg_kms(&i915->drm, 8925 + "Planar formats not supported with async flips\n"); 8926 + return -EINVAL; 8927 + } 8928 + 8929 + if (old_plane_state->view.color_plane[0].mapping_stride != 8930 + new_plane_state->view.color_plane[0].mapping_stride) { 7781 8931 drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n"); 7782 8932 return -EINVAL; 7783 8933 } ··· 8074 9212 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 8075 9213 new_crtc_state, i) { 8076 9214 if (new_crtc_state->uapi.async_flip) { 8077 - ret = intel_atomic_check_async(state); 9215 + ret = intel_atomic_check_async(state, crtc); 8078 9216 if (ret) 8079 9217 goto fail; 8080 9218 } ··· 8279 9417 8280 9418 intel_fbc_update(state, crtc); 8281 9419 9420 + intel_update_planes_on_crtc(state, crtc); 9421 + 8282 9422 /* Perform vblank evasion around commit operation */ 8283 9423 intel_pipe_update_start(new_crtc_state); 8284 9424 8285 9425 commit_pipe_pre_planes(state, crtc); 8286 9426 8287 9427 if (DISPLAY_VER(dev_priv) >= 9) 8288 - skl_update_planes_on_crtc(state, crtc); 9428 + skl_arm_planes_on_crtc(state, crtc); 8289 9429 else 8290 - i9xx_update_planes_on_crtc(state, crtc); 9430 + i9xx_arm_planes_on_crtc(state, crtc); 8291 9431 8292 9432 commit_pipe_post_planes(state, crtc); 8293 9433 ··· 8312 9448 struct intel_crtc *crtc) 8313 9449 { 8314 9450 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 8315 - 8316 - drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave); 8317 - 8318 - intel_encoders_pre_disable(state, crtc); 8319 - 8320 - intel_crtc_disable_planes(state, crtc); 8321 - 8322 - /* 8323 - * We still need special handling for disabling bigjoiner master 8324 - * and slaves since for slave we do not have encoder or plls 8325 - * so we 
dont need to disable those. 8326 - */ 8327 - if (old_crtc_state->bigjoiner) { 8328 - intel_crtc_disable_planes(state, 8329 - old_crtc_state->bigjoiner_linked_crtc); 8330 - old_crtc_state->bigjoiner_linked_crtc->active = false; 8331 - } 8332 9451 8333 9452 /* 8334 9453 * We need to disable pipe CRC before disabling the pipe, ··· 8337 9490 u32 handled = 0; 8338 9491 int i; 8339 9492 9493 + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 9494 + new_crtc_state, i) { 9495 + if (!intel_crtc_needs_modeset(new_crtc_state)) 9496 + continue; 9497 + 9498 + if (!old_crtc_state->hw.active) 9499 + continue; 9500 + 9501 + intel_pre_plane_update(state, crtc); 9502 + intel_crtc_disable_planes(state, crtc); 9503 + } 9504 + 8340 9505 /* Only disable port sync and MST slaves */ 8341 9506 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 8342 9507 new_crtc_state, i) { 8343 - if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner) 9508 + if (!intel_crtc_needs_modeset(new_crtc_state)) 8344 9509 continue; 8345 9510 8346 9511 if (!old_crtc_state->hw.active) ··· 8364 9505 * Slave vblanks are masked till Master Vblanks. 
8365 9506 */ 8366 9507 if (!is_trans_port_sync_slave(old_crtc_state) && 8367 - !intel_dp_mst_is_slave_trans(old_crtc_state)) 9508 + !intel_dp_mst_is_slave_trans(old_crtc_state) && 9509 + !old_crtc_state->bigjoiner_slave) 8368 9510 continue; 8369 9511 8370 - intel_pre_plane_update(state, crtc); 8371 9512 intel_old_crtc_state_disables(state, old_crtc_state, 8372 9513 new_crtc_state, crtc); 8373 9514 handled |= BIT(crtc->pipe); ··· 8377 9518 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 8378 9519 new_crtc_state, i) { 8379 9520 if (!intel_crtc_needs_modeset(new_crtc_state) || 8380 - (handled & BIT(crtc->pipe)) || 8381 - old_crtc_state->bigjoiner_slave) 9521 + (handled & BIT(crtc->pipe))) 8382 9522 continue; 8383 9523 8384 - intel_pre_plane_update(state, crtc); 8385 - if (old_crtc_state->bigjoiner) { 8386 - struct intel_crtc *slave = 8387 - old_crtc_state->bigjoiner_linked_crtc; 9524 + if (!old_crtc_state->hw.active) 9525 + continue; 8388 9526 8389 - intel_pre_plane_update(state, slave); 8390 - } 8391 - 8392 - if (old_crtc_state->hw.active) 8393 - intel_old_crtc_state_disables(state, old_crtc_state, 8394 - new_crtc_state, crtc); 9527 + intel_old_crtc_state_disables(state, old_crtc_state, 9528 + new_crtc_state, crtc); 8395 9529 } 8396 9530 } 8397 9531 ··· 8604 9752 8605 9753 for_each_new_intel_plane_in_state(state, plane, plane_state, i) { 8606 9754 struct drm_framebuffer *fb = plane_state->hw.fb; 9755 + int cc_plane; 8607 9756 int ret; 8608 9757 8609 - if (!fb || 8610 - fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC) 9758 + if (!fb) 9759 + continue; 9760 + 9761 + cc_plane = intel_fb_rc_ccs_cc_plane(fb); 9762 + if (cc_plane < 0) 8611 9763 continue; 8612 9764 8613 9765 /* ··· 8628 9772 * GPU write on it. 
8629 9773 */ 8630 9774 ret = i915_gem_object_read_from_page(intel_fb_obj(fb), 8631 - fb->offsets[2] + 16, 9775 + fb->offsets[cc_plane] + 16, 8632 9776 &plane_state->ccval, 8633 9777 sizeof(plane_state->ccval)); 8634 9778 /* The above could only fail if the FB obj has an unexpected backing store type. */ ··· 8696 9840 } 8697 9841 } 8698 9842 8699 - if (state->modeset) 8700 - intel_encoders_update_prepare(state); 9843 + intel_encoders_update_prepare(state); 8701 9844 8702 9845 intel_dbuf_pre_plane_update(state); 8703 - intel_psr_pre_plane_update(state); 8704 9846 8705 9847 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 8706 9848 if (new_crtc_state->uapi.async_flip) ··· 8708 9854 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 8709 9855 dev_priv->display->commit_modeset_enables(state); 8710 9856 8711 - if (state->modeset) { 8712 - intel_encoders_update_complete(state); 9857 + intel_encoders_update_complete(state); 8713 9858 9859 + if (state->modeset) 8714 9860 intel_set_cdclk_post_plane_update(state); 8715 - } 9861 + 9862 + intel_wait_for_vblank_workers(state); 8716 9863 8717 9864 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 8718 9865 * already, but still need the state for the delayed optimization. 
To ··· 8729 9874 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 8730 9875 if (new_crtc_state->uapi.async_flip) 8731 9876 intel_crtc_disable_flip_done(state, crtc); 8732 - 8733 - if (new_crtc_state->hw.active && 8734 - !intel_crtc_needs_modeset(new_crtc_state) && 8735 - !new_crtc_state->preload_luts && 8736 - (new_crtc_state->uapi.color_mgmt_changed || 8737 - new_crtc_state->update_pipe)) 8738 - intel_color_load_luts(new_crtc_state); 8739 9877 } 8740 9878 8741 9879 /* ··· 9428 10580 9429 10581 static const struct drm_mode_config_funcs intel_mode_funcs = { 9430 10582 .fb_create = intel_user_framebuffer_create, 9431 - .get_format_info = intel_get_format_info, 10583 + .get_format_info = intel_fb_get_format_info, 9432 10584 .output_poll_changed = intel_fbdev_output_poll_changed, 9433 10585 .mode_valid = intel_mode_valid, 9434 10586 .atomic_check = intel_atomic_check, ··· 9488 10640 return; 9489 10641 9490 10642 intel_init_cdclk_hooks(dev_priv); 9491 - intel_init_audio_hooks(dev_priv); 10643 + intel_audio_hooks_init(dev_priv); 9492 10644 9493 10645 intel_dpll_init_clock_hook(dev_priv); 9494 10646 ··· 10476 11628 struct intel_plane *plane; 10477 11629 int min_cdclk = 0; 10478 11630 10479 - if (crtc_state->bigjoiner_slave) 10480 - continue; 10481 - 10482 11631 if (crtc_state->hw.active) { 10483 11632 /* 10484 11633 * The initial mode needs to be set in order to keep ··· 10535 11690 intel_bw_crtc_update(bw_state, crtc_state); 10536 11691 10537 11692 intel_pipe_config_sanity_check(dev_priv, crtc_state); 10538 - 10539 - /* discard our incomplete slave state, copy it from master */ 10540 - if (crtc_state->bigjoiner && crtc_state->hw.active) { 10541 - struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc; 10542 - struct intel_crtc_state *slave_crtc_state = 10543 - to_intel_crtc_state(slave->base.state); 10544 - 10545 - copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state); 10546 - slave->base.mode = crtc->base.mode; 10547 - 10548 - 
cdclk_state->min_cdclk[slave->pipe] = min_cdclk; 10549 - cdclk_state->min_voltage_level[slave->pipe] = 10550 - crtc_state->min_voltage_level; 10551 - 10552 - for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) { 10553 - const struct intel_plane_state *plane_state = 10554 - to_intel_plane_state(plane->base.state); 10555 - 10556 - /* 10557 - * FIXME don't have the fb yet, so can't 10558 - * use intel_plane_data_rate() :( 10559 - */ 10560 - if (plane_state->uapi.visible) 10561 - crtc_state->data_rate[plane->id] = 10562 - 4 * crtc_state->pixel_rate; 10563 - else 10564 - crtc_state->data_rate[plane->id] = 0; 10565 - } 10566 - 10567 - intel_bw_crtc_update(bw_state, slave_crtc_state); 10568 - drm_calc_timestamping_constants(&slave->base, 10569 - &slave_crtc_state->hw.adjusted_mode); 10570 - } 10571 11693 } 10572 11694 } 10573 11695 ··· 10839 12027 destroy_workqueue(i915->flip_wq); 10840 12028 destroy_workqueue(i915->modeset_wq); 10841 12029 10842 - intel_fbc_cleanup_cfb(i915); 12030 + intel_fbc_cleanup(i915); 10843 12031 } 10844 12032 10845 12033 /* part #3: call after gem init */
+4 -13
drivers/gpu/drm/i915/display/intel_display.h
··· 521 521 int pixel_clock, int link_clock, 522 522 struct intel_link_m_n *m_n, 523 523 bool constant_n, bool fec_enable); 524 - void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); 525 524 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, 526 525 u32 pixel_format, u64 modifier); 527 526 enum drm_mode_status ··· 541 542 const char *name, u32 reg, int ref_freq); 542 543 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 543 544 const char *name, u32 reg); 544 - void lpt_pch_enable(const struct intel_crtc_state *crtc_state); 545 - void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv); 546 - void lpt_disable_iclkip(struct drm_i915_private *dev_priv); 547 545 void intel_init_display_hooks(struct drm_i915_private *dev_priv); 548 546 unsigned int intel_fb_xy_to_linear(int x, int y, 549 547 const struct intel_plane_state *state, ··· 576 580 intel_framebuffer_create(struct drm_i915_gem_object *obj, 577 581 struct drm_mode_fb_cmd2 *mode_cmd); 578 582 579 - void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 580 - enum pipe pipe); 581 - 582 - int lpt_get_iclkip(struct drm_i915_private *dev_priv); 583 583 bool intel_fuzzy_clock_check(int clock1, int clock2); 584 584 585 585 void intel_display_prepare_reset(struct drm_i915_private *dev_priv); ··· 584 592 struct intel_crtc_state *pipe_config); 585 593 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, 586 594 enum link_m_n_set m_n); 595 + void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, 596 + struct intel_crtc_state *pipe_config); 597 + void i9xx_crtc_clock_get(struct intel_crtc *crtc, 598 + struct intel_crtc_state *pipe_config); 587 599 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); 588 - 589 600 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); 590 601 void hsw_enable_ips(const struct intel_crtc_state *crtc_state); 591 602 void hsw_disable_ips(const struct intel_crtc_state 
*crtc_state); ··· 605 610 unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state); 606 611 607 612 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state); 608 - bool 609 - intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, 610 - u64 modifier); 611 613 612 614 struct intel_encoder * 613 615 intel_get_crtc_new_encoder(const struct intel_atomic_state *state, ··· 624 632 void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915); 625 633 void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915); 626 634 void intel_display_resume(struct drm_device *dev); 627 - void intel_init_pch_refclk(struct drm_i915_private *dev_priv); 628 635 int intel_modeset_all_pipes(struct intel_atomic_state *state); 629 636 630 637 /* modesetting asserts */
+9 -60
drivers/gpu/drm/i915/display/intel_display_debugfs.c
··· 52 52 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 53 53 mutex_lock(&fbc->lock); 54 54 55 - if (intel_fbc_is_active(dev_priv)) 55 + if (intel_fbc_is_active(fbc)) { 56 56 seq_puts(m, "FBC enabled\n"); 57 - else 57 + seq_printf(m, "Compressing: %s\n", 58 + yesno(intel_fbc_is_compressing(fbc))); 59 + } else { 58 60 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); 59 - 60 - if (intel_fbc_is_active(dev_priv)) { 61 - u32 mask; 62 - 63 - if (DISPLAY_VER(dev_priv) >= 8) 64 - mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK; 65 - else if (DISPLAY_VER(dev_priv) >= 7) 66 - mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; 67 - else if (DISPLAY_VER(dev_priv) >= 5) 68 - mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; 69 - else if (IS_G4X(dev_priv)) 70 - mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK; 71 - else 72 - mask = intel_de_read(dev_priv, FBC_STATUS) & 73 - (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); 74 - 75 - seq_printf(m, "Compressing: %s\n", yesno(mask)); 76 61 } 77 62 78 63 mutex_unlock(&fbc->lock); ··· 70 85 { 71 86 struct drm_i915_private *dev_priv = data; 72 87 73 - if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv)) 74 - return -ENODEV; 75 - 76 88 *val = dev_priv->fbc.false_color; 77 89 78 90 return 0; ··· 78 96 static int i915_fbc_false_color_set(void *data, u64 val) 79 97 { 80 98 struct drm_i915_private *dev_priv = data; 81 - u32 reg; 82 99 83 - if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv)) 84 - return -ENODEV; 85 - 86 - mutex_lock(&dev_priv->fbc.lock); 87 - 88 - reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL); 89 - dev_priv->fbc.false_color = val; 90 - 91 - intel_de_write(dev_priv, ILK_DPFC_CONTROL, 92 - val ? 
(reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR)); 93 - 94 - mutex_unlock(&dev_priv->fbc.lock); 95 - return 0; 100 + return intel_fbc_set_false_color(&dev_priv->fbc, val); 96 101 } 97 102 98 103 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, ··· 272 303 }; 273 304 val = intel_de_read(dev_priv, 274 305 EDP_PSR2_STATUS(intel_dp->psr.transcoder)); 275 - status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> 276 - EDP_PSR2_STATUS_STATE_SHIFT; 306 + status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); 277 307 if (status_val < ARRAY_SIZE(live_status)) 278 308 status = live_status[status_val]; 279 309 } else { ··· 471 503 472 504 static int i915_power_domain_info(struct seq_file *m, void *unused) 473 505 { 474 - struct drm_i915_private *dev_priv = node_to_i915(m->private); 475 - struct i915_power_domains *power_domains = &dev_priv->power_domains; 476 - int i; 506 + struct drm_i915_private *i915 = node_to_i915(m->private); 477 507 478 - mutex_lock(&power_domains->lock); 479 - 480 - seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 481 - for (i = 0; i < power_domains->power_well_count; i++) { 482 - struct i915_power_well *power_well; 483 - enum intel_display_power_domain power_domain; 484 - 485 - power_well = &power_domains->power_wells[i]; 486 - seq_printf(m, "%-25s %d\n", power_well->desc->name, 487 - power_well->count); 488 - 489 - for_each_power_domain(power_domain, power_well->desc->domains) 490 - seq_printf(m, " %-23s %d\n", 491 - intel_display_power_domain_str(power_domain), 492 - power_domains->domain_use_count[power_domain]); 493 - } 494 - 495 - mutex_unlock(&power_domains->lock); 508 + intel_display_power_debug(i915, m); 496 509 497 510 return 0; 498 511 } ··· 2044 2095 return ret; 2045 2096 } 2046 2097 2047 - ret = intel_fbc_reset_underrun(dev_priv); 2098 + ret = intel_fbc_reset_underrun(&dev_priv->fbc); 2048 2099 if (ret) 2049 2100 return ret; 2050 2101
+128 -4
drivers/gpu/drm/i915/display/intel_display_power.c
··· 15 15 #include "intel_dpio_phy.h" 16 16 #include "intel_dpll.h" 17 17 #include "intel_hotplug.h" 18 + #include "intel_pch_refclk.h" 18 19 #include "intel_pcode.h" 19 20 #include "intel_pm.h" 20 21 #include "intel_pps.h" ··· 23 22 #include "intel_tc.h" 24 23 #include "intel_vga.h" 25 24 #include "vlv_sideband.h" 25 + 26 + struct i915_power_well_ops { 27 + /* 28 + * Synchronize the well's hw state to match the current sw state, for 29 + * example enable/disable it based on the current refcount. Called 30 + * during driver init and resume time, possibly after first calling 31 + * the enable/disable handlers. 32 + */ 33 + void (*sync_hw)(struct drm_i915_private *dev_priv, 34 + struct i915_power_well *power_well); 35 + /* 36 + * Enable the well and resources that depend on it (for example 37 + * interrupts located on the well). Called after the 0->1 refcount 38 + * transition. 39 + */ 40 + void (*enable)(struct drm_i915_private *dev_priv, 41 + struct i915_power_well *power_well); 42 + /* 43 + * Disable the well and resources that depend on it. Called after 44 + * the 1->0 refcount transition. 45 + */ 46 + void (*disable)(struct drm_i915_private *dev_priv, 47 + struct i915_power_well *power_well); 48 + /* Returns the hw enabled state. */ 49 + bool (*is_enabled)(struct drm_i915_private *dev_priv, 50 + struct i915_power_well *power_well); 51 + }; 52 + 53 + struct i915_power_well_regs { 54 + i915_reg_t bios; 55 + i915_reg_t driver; 56 + i915_reg_t kvmr; 57 + i915_reg_t debug; 58 + }; 59 + 60 + /* Power well structure for haswell */ 61 + struct i915_power_well_desc { 62 + const char *name; 63 + bool always_on; 64 + u64 domains; 65 + /* unique identifier for this power well */ 66 + enum i915_power_well_id id; 67 + /* 68 + * Arbitraty data associated with this power well. Platform and power 69 + * well specific. 70 + */ 71 + union { 72 + struct { 73 + /* 74 + * request/status flag index in the PUNIT power well 75 + * control/status registers. 
76 + */ 77 + u8 idx; 78 + } vlv; 79 + struct { 80 + enum dpio_phy phy; 81 + } bxt; 82 + struct { 83 + const struct i915_power_well_regs *regs; 84 + /* 85 + * request/status flag index in the power well 86 + * constrol/status registers. 87 + */ 88 + u8 idx; 89 + /* Mask of pipes whose IRQ logic is backed by the pw */ 90 + u8 irq_pipe_mask; 91 + /* 92 + * Instead of waiting for the status bit to ack enables, 93 + * just wait a specific amount of time and then consider 94 + * the well enabled. 95 + */ 96 + u16 fixed_enable_delay; 97 + /* The pw is backing the VGA functionality */ 98 + bool has_vga:1; 99 + bool has_fuses:1; 100 + /* 101 + * The pw is for an ICL+ TypeC PHY port in 102 + * Thunderbolt mode. 103 + */ 104 + bool is_tc_tbt:1; 105 + } hsw; 106 + }; 107 + const struct i915_power_well_ops *ops; 108 + }; 109 + 110 + struct i915_power_well { 111 + const struct i915_power_well_desc *desc; 112 + /* power well enable/disable usage count */ 113 + int count; 114 + /* cached hw enabled state */ 115 + bool hw_enabled; 116 + }; 26 117 27 118 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 28 119 enum i915_power_well_id power_well_id); ··· 247 154 return "MODESET"; 248 155 case POWER_DOMAIN_GT_IRQ: 249 156 return "GT_IRQ"; 250 - case POWER_DOMAIN_DPLL_DC_OFF: 251 - return "DPLL_DC_OFF"; 157 + case POWER_DOMAIN_DC_OFF: 158 + return "DC_OFF"; 252 159 case POWER_DOMAIN_TC_COLD_OFF: 253 160 return "TC_COLD_OFF"; 254 161 default: ··· 527 434 528 435 pg = DISPLAY_VER(dev_priv) >= 11 ? 
ICL_PW_CTL_IDX_TO_PG(pw_idx) : 529 436 SKL_PW_CTL_IDX_TO_PG(pw_idx); 437 + 438 + /* Wa_16013190616:adlp */ 439 + if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1) 440 + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); 441 + 530 442 /* 531 443 * For PW1 we have to wait both for the PW0/PG0 fuse state 532 444 * before enabling the power well and PW1/PG1's own fuse ··· 992 894 sanitize_target_dc_state(struct drm_i915_private *dev_priv, 993 895 u32 target_dc_state) 994 896 { 995 - u32 states[] = { 897 + static const u32 states[] = { 996 898 DC_STATE_EN_UPTO_DC6, 997 899 DC_STATE_EN_UPTO_DC5, 998 900 DC_STATE_EN_DC3CO, ··· 2900 2802 ICL_PW_2_POWER_DOMAINS | \ 2901 2803 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2902 2804 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2903 - BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ 2805 + BIT_ULL(POWER_DOMAIN_DC_OFF) | \ 2904 2806 BIT_ULL(POWER_DOMAIN_INIT)) 2905 2807 2906 2808 #define ICL_DDI_IO_A_POWER_DOMAINS ( \ ··· 3203 3105 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3204 3106 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3205 3107 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3108 + BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 3206 3109 BIT_ULL(POWER_DOMAIN_INIT)) 3207 3110 3208 3111 #define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) ··· 6488 6389 } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { 6489 6390 hsw_disable_pc8(i915); 6490 6391 } 6392 + } 6393 + 6394 + void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m) 6395 + { 6396 + struct i915_power_domains *power_domains = &i915->power_domains; 6397 + int i; 6398 + 6399 + mutex_lock(&power_domains->lock); 6400 + 6401 + seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 6402 + for (i = 0; i < power_domains->power_well_count; i++) { 6403 + struct i915_power_well *power_well; 6404 + enum intel_display_power_domain power_domain; 6405 + 6406 + power_well = &power_domains->power_wells[i]; 6407 + seq_printf(m, "%-25s %d\n", power_well->desc->name, 6408 + power_well->count); 6409 
+ 6410 + for_each_power_domain(power_domain, power_well->desc->domains) 6411 + seq_printf(m, " %-23s %d\n", 6412 + intel_display_power_domain_str(power_domain), 6413 + power_domains->domain_use_count[power_domain]); 6414 + } 6415 + 6416 + mutex_unlock(&power_domains->lock); 6491 6417 }
+6 -96
drivers/gpu/drm/i915/display/intel_display_power.h
··· 6 6 #ifndef __INTEL_DISPLAY_POWER_H__ 7 7 #define __INTEL_DISPLAY_POWER_H__ 8 8 9 - #include "intel_display.h" 10 9 #include "intel_runtime_pm.h" 11 10 #include "i915_reg.h" 12 11 12 + enum dpio_channel; 13 + enum dpio_phy; 13 14 struct drm_i915_private; 15 + struct i915_power_well; 14 16 struct intel_encoder; 15 17 16 18 enum intel_display_power_domain { ··· 119 117 POWER_DOMAIN_GMBUS, 120 118 POWER_DOMAIN_MODESET, 121 119 POWER_DOMAIN_GT_IRQ, 122 - POWER_DOMAIN_DPLL_DC_OFF, 120 + POWER_DOMAIN_DC_OFF, 123 121 POWER_DOMAIN_TC_COLD_OFF, 124 122 POWER_DOMAIN_INIT, 125 123 ··· 156 154 #define POWER_DOMAIN_TRANSCODER(tran) \ 157 155 ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ 158 156 (tran) + POWER_DOMAIN_TRANSCODER_A) 159 - 160 - struct i915_power_well; 161 - 162 - struct i915_power_well_ops { 163 - /* 164 - * Synchronize the well's hw state to match the current sw state, for 165 - * example enable/disable it based on the current refcount. Called 166 - * during driver init and resume time, possibly after first calling 167 - * the enable/disable handlers. 168 - */ 169 - void (*sync_hw)(struct drm_i915_private *dev_priv, 170 - struct i915_power_well *power_well); 171 - /* 172 - * Enable the well and resources that depend on it (for example 173 - * interrupts located on the well). Called after the 0->1 refcount 174 - * transition. 175 - */ 176 - void (*enable)(struct drm_i915_private *dev_priv, 177 - struct i915_power_well *power_well); 178 - /* 179 - * Disable the well and resources that depend on it. Called after 180 - * the 1->0 refcount transition. 181 - */ 182 - void (*disable)(struct drm_i915_private *dev_priv, 183 - struct i915_power_well *power_well); 184 - /* Returns the hw enabled state. 
*/ 185 - bool (*is_enabled)(struct drm_i915_private *dev_priv, 186 - struct i915_power_well *power_well); 187 - }; 188 - 189 - struct i915_power_well_regs { 190 - i915_reg_t bios; 191 - i915_reg_t driver; 192 - i915_reg_t kvmr; 193 - i915_reg_t debug; 194 - }; 195 - 196 - /* Power well structure for haswell */ 197 - struct i915_power_well_desc { 198 - const char *name; 199 - bool always_on; 200 - u64 domains; 201 - /* unique identifier for this power well */ 202 - enum i915_power_well_id id; 203 - /* 204 - * Arbitraty data associated with this power well. Platform and power 205 - * well specific. 206 - */ 207 - union { 208 - struct { 209 - /* 210 - * request/status flag index in the PUNIT power well 211 - * control/status registers. 212 - */ 213 - u8 idx; 214 - } vlv; 215 - struct { 216 - enum dpio_phy phy; 217 - } bxt; 218 - struct { 219 - const struct i915_power_well_regs *regs; 220 - /* 221 - * request/status flag index in the power well 222 - * constrol/status registers. 223 - */ 224 - u8 idx; 225 - /* Mask of pipes whose IRQ logic is backed by the pw */ 226 - u8 irq_pipe_mask; 227 - /* 228 - * Instead of waiting for the status bit to ack enables, 229 - * just wait a specific amount of time and then consider 230 - * the well enabled. 231 - */ 232 - u16 fixed_enable_delay; 233 - /* The pw is backing the VGA functionality */ 234 - bool has_vga:1; 235 - bool has_fuses:1; 236 - /* 237 - * The pw is for an ICL+ TypeC PHY port in 238 - * Thunderbolt mode. 
239 - */ 240 - bool is_tc_tbt:1; 241 - } hsw; 242 - }; 243 - const struct i915_power_well_ops *ops; 244 - }; 245 - 246 - struct i915_power_well { 247 - const struct i915_power_well_desc *desc; 248 - /* power well enable/disable usage count */ 249 - int count; 250 - /* cached hw enabled state */ 251 - bool hw_enabled; 252 - }; 253 157 254 158 struct i915_power_domains { 255 159 /* ··· 298 390 { 299 391 intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask); 300 392 } 393 + 394 + void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m); 301 395 302 396 /* 303 397 * FIXME: We should probably switch this to a 0-based scheme to be consistent
+24 -27
drivers/gpu/drm/i915/display/intel_display_types.h
··· 28 28 29 29 #include <linux/async.h> 30 30 #include <linux/i2c.h> 31 + #include <linux/pm_qos.h> 31 32 #include <linux/pwm.h> 32 33 #include <linux/sched/clock.h> 33 34 ··· 42 41 #include <drm/drm_probe_helper.h> 43 42 #include <drm/drm_rect.h> 44 43 #include <drm/drm_vblank.h> 44 + #include <drm/drm_vblank_work.h> 45 45 #include <drm/i915_mei_hdcp_interface.h> 46 46 #include <media/cec-notifier.h> 47 47 ··· 51 49 struct drm_printer; 52 50 struct __intel_global_objs_state; 53 51 struct intel_ddi_buf_trans; 52 + struct intel_fbc; 54 53 55 54 /* 56 55 * Display related stuff ··· 118 115 * bytes for 0/180 degree rotation 119 116 * pixels for 90/270 degree rotation 120 117 */ 121 - unsigned int stride; 118 + unsigned int mapping_stride; 119 + unsigned int scanout_stride; 122 120 } color_plane[4]; 123 121 }; 124 122 ··· 198 194 void (*update_complete)(struct intel_atomic_state *, 199 195 struct intel_encoder *, 200 196 struct intel_crtc *); 201 - void (*pre_disable)(struct intel_atomic_state *, 202 - struct intel_encoder *, 203 - const struct intel_crtc_state *, 204 - const struct drm_connector_state *); 205 197 void (*disable)(struct intel_atomic_state *, 206 198 struct intel_encoder *, 207 199 const struct intel_crtc_state *, ··· 949 949 * accordingly. 
950 950 */ 951 951 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ 952 - #define PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE (1<<1) /* bigjoiner slave, partial readout */ 953 952 unsigned long quirks; 954 953 955 954 unsigned fb_bits; /* framebuffers to flip */ ··· 1240 1241 u8 link_count; 1241 1242 u8 pixel_overlap; 1242 1243 } splitter; 1244 + 1245 + /* for loading single buffered registers during vblank */ 1246 + struct drm_vblank_work vblank_work; 1243 1247 }; 1244 1248 1245 1249 enum intel_pipe_crc_source { ··· 1327 1325 /* scalers available on this crtc */ 1328 1326 int num_scalers; 1329 1327 1328 + /* for loading single buffered registers during vblank */ 1329 + struct pm_qos_request vblank_pm_qos; 1330 + 1330 1331 #ifdef CONFIG_DEBUG_FS 1331 1332 struct intel_pipe_crc pipe_crc; 1332 1333 #endif ··· 1340 1335 enum i9xx_plane_id i9xx_plane; 1341 1336 enum plane_id id; 1342 1337 enum pipe pipe; 1343 - bool has_fbc; 1344 - bool has_ccs; 1345 1338 bool need_async_flip_disable_wa; 1346 1339 u32 frontbuffer_bit; 1347 1340 1348 1341 struct { 1349 1342 u32 base, cntl, size; 1350 1343 } cursor; 1344 + 1345 + struct intel_fbc *fbc; 1351 1346 1352 1347 /* 1353 1348 * NOTE: Do not place new plane state fields here (e.g., when adding ··· 1367 1362 unsigned int (*max_stride)(struct intel_plane *plane, 1368 1363 u32 pixel_format, u64 modifier, 1369 1364 unsigned int rotation); 1370 - void (*update_plane)(struct intel_plane *plane, 1365 + /* Write all non-self arming plane registers */ 1366 + void (*update_noarm)(struct intel_plane *plane, 1371 1367 const struct intel_crtc_state *crtc_state, 1372 1368 const struct intel_plane_state *plane_state); 1373 - void (*disable_plane)(struct intel_plane *plane, 1374 - const struct intel_crtc_state *crtc_state); 1369 + /* Write all self-arming plane registers */ 1370 + void (*update_arm)(struct intel_plane *plane, 1371 + const struct intel_crtc_state *crtc_state, 1372 + const struct intel_plane_state 
*plane_state); 1373 + /* Disable the plane, must arm */ 1374 + void (*disable_arm)(struct intel_plane *plane, 1375 + const struct intel_crtc_state *crtc_state); 1375 1376 bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); 1376 1377 int (*check_plane)(struct intel_crtc_state *crtc_state, 1377 1378 struct intel_plane_state *plane_state); ··· 1574 1563 int num_sink_rates; 1575 1564 int sink_rates[DP_MAX_SUPPORTED_RATES]; 1576 1565 bool use_rate_select; 1566 + /* Max sink lane count as reported by DP_MAX_LANE_COUNT */ 1567 + int max_sink_lane_count; 1577 1568 /* intersection of source and sink rates */ 1578 1569 int num_common_rates; 1579 1570 int common_rates[DP_MAX_SUPPORTED_RATES]; ··· 2052 2039 to_intel_frontbuffer(struct drm_framebuffer *fb) 2053 2040 { 2054 2041 return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL; 2055 - } 2056 - 2057 - static inline bool is_ccs_modifier(u64 modifier) 2058 - { 2059 - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || 2060 - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || 2061 - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || 2062 - modifier == I915_FORMAT_MOD_Y_TILED_CCS || 2063 - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; 2064 - } 2065 - 2066 - static inline bool is_gen12_ccs_modifier(u64 modifier) 2067 - { 2068 - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || 2069 - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || 2070 - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; 2071 2042 } 2072 2043 2073 2044 #endif /* __INTEL_DISPLAY_TYPES_H__ */
+2
drivers/gpu/drm/i915/display/intel_dmc.h
··· 20 20 DMC_FW_MAIN = 0, 21 21 DMC_FW_PIPEA, 22 22 DMC_FW_PIPEB, 23 + DMC_FW_PIPEC, 24 + DMC_FW_PIPED, 23 25 DMC_FW_MAX 24 26 }; 25 27
+125 -36
drivers/gpu/drm/i915/display/intel_dp.c
··· 127 127 } 128 128 129 129 /* update sink rates from dpcd */ 130 - static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) 130 + static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp) 131 131 { 132 132 static const int dp_rates[] = { 133 133 162000, 270000, 540000, 810000 ··· 197 197 intel_dp->num_sink_rates = i; 198 198 } 199 199 200 + static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) 201 + { 202 + struct intel_connector *connector = intel_dp->attached_connector; 203 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 204 + struct intel_encoder *encoder = &intel_dig_port->base; 205 + 206 + intel_dp_set_dpcd_sink_rates(intel_dp); 207 + 208 + if (intel_dp->num_sink_rates) 209 + return; 210 + 211 + drm_err(&dp_to_i915(intel_dp)->drm, 212 + "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n", 213 + connector->base.base.id, connector->base.name, 214 + encoder->base.base.id, encoder->base.name); 215 + 216 + intel_dp_set_default_sink_rates(intel_dp); 217 + } 218 + 219 + static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp) 220 + { 221 + intel_dp->max_sink_lane_count = 1; 222 + } 223 + 224 + static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp) 225 + { 226 + struct intel_connector *connector = intel_dp->attached_connector; 227 + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 228 + struct intel_encoder *encoder = &intel_dig_port->base; 229 + 230 + intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); 231 + 232 + switch (intel_dp->max_sink_lane_count) { 233 + case 1: 234 + case 2: 235 + case 4: 236 + return; 237 + } 238 + 239 + drm_err(&dp_to_i915(intel_dp)->drm, 240 + "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n", 241 + connector->base.base.id, connector->base.name, 242 + encoder->base.base.id, encoder->base.name, 243 + intel_dp->max_sink_lane_count); 244 + 245 + 
intel_dp_set_default_max_sink_lane_count(intel_dp); 246 + } 247 + 200 248 /* Get length of rates array potentially limited by max_rate. */ 201 249 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) 202 250 { ··· 267 219 intel_dp->num_common_rates, max_rate); 268 220 } 269 221 222 + static int intel_dp_common_rate(struct intel_dp *intel_dp, int index) 223 + { 224 + if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm, 225 + index < 0 || index >= intel_dp->num_common_rates)) 226 + return 162000; 227 + 228 + return intel_dp->common_rates[index]; 229 + } 230 + 270 231 /* Theoretical max between source and sink */ 271 232 static int intel_dp_max_common_rate(struct intel_dp *intel_dp) 272 233 { 273 - return intel_dp->common_rates[intel_dp->num_common_rates - 1]; 234 + return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); 274 235 } 275 236 276 237 /* Theoretical max between source and sink */ ··· 287 230 { 288 231 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 289 232 int source_max = dig_port->max_lanes; 290 - int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); 233 + int sink_max = intel_dp->max_sink_lane_count; 291 234 int fia_max = intel_tc_port_fia_max_lane_count(dig_port); 292 235 int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); 293 236 ··· 299 242 300 243 int intel_dp_max_lane_count(struct intel_dp *intel_dp) 301 244 { 302 - return intel_dp->max_link_lane_count; 245 + switch (intel_dp->max_link_lane_count) { 246 + case 1: 247 + case 2: 248 + case 4: 249 + return intel_dp->max_link_lane_count; 250 + default: 251 + MISSING_CASE(intel_dp->max_link_lane_count); 252 + return 1; 253 + } 303 254 } 304 255 305 256 /* ··· 619 554 if (index > 0) { 620 555 if (intel_dp_is_edp(intel_dp) && 621 556 !intel_dp_can_link_train_fallback_for_edp(intel_dp, 622 - intel_dp->common_rates[index - 1], 557 + intel_dp_common_rate(intel_dp, index - 1), 623 558 lane_count)) { 624 559 drm_dbg_kms(&i915->drm, 625 560 
"Retrying Link training for eDP with same parameters\n"); 626 561 return 0; 627 562 } 628 - intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; 563 + intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1); 629 564 intel_dp->max_link_lane_count = lane_count; 630 565 } else if (lane_count > 1) { 631 566 if (intel_dp_is_edp(intel_dp) && ··· 1065 1000 int 1066 1001 intel_dp_max_link_rate(struct intel_dp *intel_dp) 1067 1002 { 1068 - struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1069 1003 int len; 1070 1004 1071 1005 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 1072 - if (drm_WARN_ON(&i915->drm, len <= 0)) 1073 - return 162000; 1074 1006 1075 - return intel_dp->common_rates[len - 1]; 1007 + return intel_dp_common_rate(intel_dp, len - 1); 1076 1008 } 1077 1009 1078 1010 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) ··· 1266 1204 output_bpp); 1267 1205 1268 1206 for (i = 0; i < intel_dp->num_common_rates; i++) { 1269 - link_rate = intel_dp->common_rates[i]; 1207 + link_rate = intel_dp_common_rate(intel_dp, i); 1270 1208 if (link_rate < limits->min_rate || 1271 1209 link_rate > limits->max_rate) 1272 1210 continue; ··· 1345 1283 else 1346 1284 vdsc_cfg->slice_height = 2; 1347 1285 1348 - ret = intel_dsc_compute_params(encoder, crtc_state); 1286 + ret = intel_dsc_compute_params(crtc_state); 1349 1287 if (ret) 1350 1288 return ret; 1351 1289 ··· 1514 1452 &pipe_config->hw.adjusted_mode; 1515 1453 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1516 1454 struct link_config_limits limits; 1517 - int common_len; 1518 1455 int ret; 1519 1456 1520 - common_len = intel_dp_common_len_rate_limit(intel_dp, 1521 - intel_dp->max_link_rate); 1522 - 1523 - /* No common link rates between source and sink */ 1524 - drm_WARN_ON(encoder->base.dev, common_len <= 0); 1525 - 1526 - limits.min_rate = intel_dp->common_rates[0]; 1527 - limits.max_rate = intel_dp->common_rates[common_len - 1]; 1457 + limits.min_rate = 
intel_dp_common_rate(intel_dp, 0); 1458 + limits.max_rate = intel_dp_max_link_rate(intel_dp); 1528 1459 1529 1460 limits.min_lane_count = 1; 1530 1461 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); ··· 2198 2143 return max_frl_rate; 2199 2144 } 2200 2145 2146 + static bool 2147 + intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp, 2148 + u8 max_frl_bw_mask, u8 *frl_trained_mask) 2149 + { 2150 + if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) && 2151 + drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL && 2152 + *frl_trained_mask >= max_frl_bw_mask) 2153 + return true; 2154 + 2155 + return false; 2156 + } 2157 + 2201 2158 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) 2202 2159 { 2203 2160 #define TIMEOUT_FRL_READY_MS 500 ··· 2219 2152 int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret; 2220 2153 u8 max_frl_bw_mask = 0, frl_trained_mask; 2221 2154 bool is_active; 2222 - 2223 - ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); 2224 - if (ret < 0) 2225 - return ret; 2226 2155 2227 2156 max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 2228 2157 drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); ··· 2231 2168 if (max_frl_bw <= 0) 2232 2169 return -EINVAL; 2233 2170 2171 + max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); 2172 + drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask); 2173 + 2174 + if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask)) 2175 + goto frl_trained; 2176 + 2234 2177 ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false); 2235 2178 if (ret < 0) 2236 2179 return ret; ··· 2246 2177 if (!is_active) 2247 2178 return -ETIMEDOUT; 2248 2179 2249 - max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); 2250 2180 ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, 2251 2181 DP_PCON_ENABLE_SEQUENTIAL_LINK); 2252 2182 if (ret < 0) ··· 2261 2193 * Wait for FRL to be completed 2262 2194 * Check if the HDMI Link 
is up and active. 2263 2195 */ 2264 - wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS); 2196 + wait_for(is_active = 2197 + intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask), 2198 + TIMEOUT_HDMI_LINK_ACTIVE_MS); 2265 2199 2266 2200 if (!is_active) 2267 2201 return -ETIMEDOUT; 2268 2202 2269 - /* Verify HDMI Link configuration shows FRL Mode */ 2270 - if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) != 2271 - DP_PCON_HDMI_MODE_FRL) { 2272 - drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n"); 2273 - return -EINVAL; 2274 - } 2275 - drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask); 2276 - 2203 + frl_trained: 2204 + drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask); 2277 2205 intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask); 2278 2206 intel_dp->frl.is_trained = true; 2279 2207 drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps); ··· 2285 2221 return true; 2286 2222 2287 2223 return false; 2224 + } 2225 + 2226 + static 2227 + int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp) 2228 + { 2229 + int ret; 2230 + u8 buf = 0; 2231 + 2232 + /* Set PCON source control mode */ 2233 + buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE; 2234 + 2235 + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); 2236 + if (ret < 0) 2237 + return ret; 2238 + 2239 + /* Set HDMI LINK ENABLE */ 2240 + buf |= DP_PCON_ENABLE_HDMI_LINK; 2241 + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); 2242 + if (ret < 0) 2243 + return ret; 2244 + 2245 + return 0; 2288 2246 } 2289 2247 2290 2248 void intel_dp_check_frl_training(struct intel_dp *intel_dp) ··· 2327 2241 int ret, mode; 2328 2242 2329 2243 drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); 2330 - ret = 
drm_dp_pcon_reset_frl_config(&intel_dp->aux); 2244 + ret = intel_dp_pcon_set_tmds_mode(intel_dp); 2331 2245 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); 2332 2246 2333 2247 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) ··· 2689 2603 intel_dp->use_rate_select = true; 2690 2604 else 2691 2605 intel_dp_set_sink_rates(intel_dp); 2606 + intel_dp_set_max_sink_lane_count(intel_dp); 2692 2607 2693 2608 intel_dp_set_common_rates(intel_dp); 2694 2609 intel_dp_reset_max_link_params(intel_dp); ··· 2735 2648 drm_dp_is_branch(intel_dp->dpcd)); 2736 2649 2737 2650 intel_dp_set_sink_rates(intel_dp); 2651 + intel_dp_set_max_sink_lane_count(intel_dp); 2738 2652 intel_dp_set_common_rates(intel_dp); 2739 2653 } 2740 2654 ··· 5102 5014 5103 5015 intel_dp_set_source_rates(intel_dp); 5104 5016 intel_dp_set_default_sink_rates(intel_dp); 5017 + intel_dp_set_default_max_sink_lane_count(intel_dp); 5105 5018 intel_dp_set_common_rates(intel_dp); 5106 5019 intel_dp_reset_max_link_params(intel_dp); 5107 5020
+11 -21
drivers/gpu/drm/i915/display/intel_dp_mst.c
··· 231 231 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 232 232 struct drm_connector_list_iter connector_list_iter; 233 233 struct intel_connector *connector_iter; 234 + int ret = 0; 234 235 235 236 if (DISPLAY_VER(dev_priv) < 12) 236 237 return 0; ··· 244 243 struct intel_digital_connector_state *conn_iter_state; 245 244 struct intel_crtc_state *crtc_state; 246 245 struct intel_crtc *crtc; 247 - int ret; 248 246 249 247 if (connector_iter->mst_port != connector->mst_port || 250 248 connector_iter == connector) ··· 252 252 conn_iter_state = intel_atomic_get_digital_connector_state(state, 253 253 connector_iter); 254 254 if (IS_ERR(conn_iter_state)) { 255 - drm_connector_list_iter_end(&connector_list_iter); 256 - return PTR_ERR(conn_iter_state); 255 + ret = PTR_ERR(conn_iter_state); 256 + break; 257 257 } 258 258 259 259 if (!conn_iter_state->base.crtc) ··· 262 262 crtc = to_intel_crtc(conn_iter_state->base.crtc); 263 263 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 264 264 if (IS_ERR(crtc_state)) { 265 - drm_connector_list_iter_end(&connector_list_iter); 266 - return PTR_ERR(crtc_state); 265 + ret = PTR_ERR(crtc_state); 266 + break; 267 267 } 268 268 269 269 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 270 - if (ret) { 271 - drm_connector_list_iter_end(&connector_list_iter); 272 - return ret; 273 - } 270 + if (ret) 271 + break; 274 272 crtc_state->uapi.mode_changed = true; 275 273 } 276 274 drm_connector_list_iter_end(&connector_list_iter); 277 275 278 - return 0; 276 + return ret; 279 277 } 280 278 281 279 static int ··· 346 348 drm_dp_check_act_status(&intel_dp->mst_mgr); 347 349 } 348 350 349 - static void intel_mst_pre_disable_dp(struct intel_atomic_state *state, 350 - struct intel_encoder *encoder, 351 - const struct intel_crtc_state *old_crtc_state, 352 - const struct drm_connector_state *old_conn_state) 353 - { 354 - if (old_crtc_state->has_audio) 355 - intel_audio_codec_disable(encoder, old_crtc_state, 
356 - old_conn_state); 357 - } 358 - 359 351 static void intel_mst_disable_dp(struct intel_atomic_state *state, 360 352 struct intel_encoder *encoder, 361 353 const struct intel_crtc_state *old_crtc_state, ··· 370 382 if (ret) { 371 383 drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret); 372 384 } 385 + if (old_crtc_state->has_audio) 386 + intel_audio_codec_disable(encoder, 387 + old_crtc_state, old_conn_state); 373 388 } 374 389 375 390 static void intel_mst_post_disable_dp(struct intel_atomic_state *state, ··· 907 916 908 917 intel_encoder->compute_config = intel_dp_mst_compute_config; 909 918 intel_encoder->compute_config_late = intel_dp_mst_compute_config_late; 910 - intel_encoder->pre_disable = intel_mst_pre_disable_dp; 911 919 intel_encoder->disable = intel_mst_disable_dp; 912 920 intel_encoder->post_disable = intel_mst_post_disable_dp; 913 921 intel_encoder->update_pipe = intel_ddi_update_pipe;
+4 -3
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
··· 26 26 #include "intel_dpio_phy.h" 27 27 #include "intel_dpll.h" 28 28 #include "intel_dpll_mgr.h" 29 + #include "intel_pch_refclk.h" 29 30 #include "intel_tc.h" 30 31 31 32 /** ··· 3741 3740 * domain. 3742 3741 */ 3743 3742 pll->wakeref = intel_display_power_get(dev_priv, 3744 - POWER_DOMAIN_DPLL_DC_OFF); 3743 + POWER_DOMAIN_DC_OFF); 3745 3744 } 3746 3745 3747 3746 icl_pll_power_enable(dev_priv, pll, enable_reg); ··· 3848 3847 3849 3848 if (IS_JSL_EHL(dev_priv) && 3850 3849 pll->info->id == DPLL_ID_EHL_DPLL4) 3851 - intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF, 3850 + intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, 3852 3851 pll->wakeref); 3853 3852 } 3854 3853 ··· 4232 4231 if (IS_JSL_EHL(i915) && pll->on && 4233 4232 pll->info->id == DPLL_ID_EHL_DPLL4) { 4234 4233 pll->wakeref = intel_display_power_get(i915, 4235 - POWER_DOMAIN_DPLL_DC_OFF); 4234 + POWER_DOMAIN_DC_OFF); 4236 4235 } 4237 4236 4238 4237 pll->state.pipe_mask = 0;
+1 -1
drivers/gpu/drm/i915/display/intel_dpll_mgr.h
··· 27 27 28 28 #include <linux/types.h> 29 29 30 - #include "intel_display.h" 31 30 #include "intel_wakeref.h" 32 31 33 32 /*FIXME: Move this to a more appropriate place. */ ··· 36 37 (void) (&__a == &__b); \ 37 38 __a > __b ? (__a - __b) : (__b - __a); }) 38 39 40 + enum tc_port; 39 41 struct drm_device; 40 42 struct drm_i915_private; 41 43 struct intel_atomic_state;
+58
drivers/gpu/drm/i915/display/intel_dpt.c
··· 167 167 i915_vma_put(dpt->vma); 168 168 } 169 169 170 + /** 171 + * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume 172 + * @i915: device instance 173 + * 174 + * Restore the memory mapping during system resume for all framebuffers which 175 + * are mapped to HW via a GGTT->DPT page table. The content of these page 176 + * tables are not stored in the hibernation image during S4 and S3RST->S4 177 + * transitions, so here we reprogram the PTE entries in those tables. 178 + * 179 + * This function must be called after the mappings in GGTT have been restored calling 180 + * i915_ggtt_resume(). 181 + */ 182 + void intel_dpt_resume(struct drm_i915_private *i915) 183 + { 184 + struct drm_framebuffer *drm_fb; 185 + 186 + if (!HAS_DISPLAY(i915)) 187 + return; 188 + 189 + mutex_lock(&i915->drm.mode_config.fb_lock); 190 + drm_for_each_fb(drm_fb, &i915->drm) { 191 + struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 192 + 193 + if (fb->dpt_vm) 194 + i915_ggtt_resume_vm(fb->dpt_vm); 195 + } 196 + mutex_unlock(&i915->drm.mode_config.fb_lock); 197 + } 198 + 199 + /** 200 + * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend 201 + * @i915: device instance 202 + * 203 + * Suspend the memory mapping during system suspend for all framebuffers which 204 + * are mapped to HW via a GGTT->DPT page table. 205 + * 206 + * This function must be called before the mappings in GGTT are suspended calling 207 + * i915_ggtt_suspend(). 
208 + */ 209 + void intel_dpt_suspend(struct drm_i915_private *i915) 210 + { 211 + struct drm_framebuffer *drm_fb; 212 + 213 + if (!HAS_DISPLAY(i915)) 214 + return; 215 + 216 + mutex_lock(&i915->drm.mode_config.fb_lock); 217 + 218 + drm_for_each_fb(drm_fb, &i915->drm) { 219 + struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 220 + 221 + if (fb->dpt_vm) 222 + i915_ggtt_suspend_vm(fb->dpt_vm); 223 + } 224 + 225 + mutex_unlock(&i915->drm.mode_config.fb_lock); 226 + } 227 + 170 228 struct i915_address_space * 171 229 intel_dpt_create(struct intel_framebuffer *fb) 172 230 {
+4
drivers/gpu/drm/i915/display/intel_dpt.h
··· 6 6 #ifndef __INTEL_DPT_H__ 7 7 #define __INTEL_DPT_H__ 8 8 9 + struct drm_i915_private; 10 + 9 11 struct i915_address_space; 10 12 struct i915_vma; 11 13 struct intel_framebuffer; ··· 15 13 void intel_dpt_destroy(struct i915_address_space *vm); 16 14 struct i915_vma *intel_dpt_pin(struct i915_address_space *vm); 17 15 void intel_dpt_unpin(struct i915_address_space *vm); 16 + void intel_dpt_suspend(struct drm_i915_private *i915); 17 + void intel_dpt_resume(struct drm_i915_private *i915); 18 18 struct i915_address_space * 19 19 intel_dpt_create(struct intel_framebuffer *fb); 20 20
+2 -2
drivers/gpu/drm/i915/display/intel_dsb.c
··· 100 100 u32 reg_val; 101 101 102 102 if (!dsb) { 103 - intel_de_write(dev_priv, reg, val); 103 + intel_de_write_fw(dev_priv, reg, val); 104 104 return; 105 105 } 106 106 buf = dsb->cmd_buf; ··· 177 177 178 178 dsb = crtc_state->dsb; 179 179 if (!dsb) { 180 - intel_de_write(dev_priv, reg, val); 180 + intel_de_write_fw(dev_priv, reg, val); 181 181 return; 182 182 } 183 183
-42
drivers/gpu/drm/i915/display/intel_dsi.h
··· 166 166 return enc_to_intel_dsi(encoder)->ports; 167 167 } 168 168 169 - /* icl_dsi.c */ 170 - void icl_dsi_init(struct drm_i915_private *dev_priv); 171 - void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); 172 - 173 - /* intel_dsi.c */ 174 169 int intel_dsi_bitrate(const struct intel_dsi *intel_dsi); 175 170 int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi); 176 171 enum drm_panel_orientation 177 172 intel_dsi_get_panel_orientation(struct intel_connector *connector); 178 - 179 - /* vlv_dsi.c */ 180 - void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); 181 - enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); 182 173 int intel_dsi_get_modes(struct drm_connector *connector); 183 174 enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, 184 175 struct drm_display_mode *mode); 185 176 struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, 186 177 const struct mipi_dsi_host_ops *funcs, 187 178 enum port port); 188 - void vlv_dsi_init(struct drm_i915_private *dev_priv); 189 - 190 - /* vlv_dsi_pll.c */ 191 - int vlv_dsi_pll_compute(struct intel_encoder *encoder, 192 - struct intel_crtc_state *config); 193 - void vlv_dsi_pll_enable(struct intel_encoder *encoder, 194 - const struct intel_crtc_state *config); 195 - void vlv_dsi_pll_disable(struct intel_encoder *encoder); 196 - u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, 197 - struct intel_crtc_state *config); 198 - void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); 199 - 200 - bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); 201 - int bxt_dsi_pll_compute(struct intel_encoder *encoder, 202 - struct intel_crtc_state *config); 203 - void bxt_dsi_pll_enable(struct intel_encoder *encoder, 204 - const struct intel_crtc_state *config); 205 - void bxt_dsi_pll_disable(struct intel_encoder *encoder); 206 - u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, 207 - struct 
intel_crtc_state *config); 208 - void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); 209 - 210 - void assert_dsi_pll_enabled(struct drm_i915_private *i915); 211 - void assert_dsi_pll_disabled(struct drm_i915_private *i915); 212 - 213 - /* intel_dsi_vbt.c */ 214 - bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); 215 - void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on); 216 - void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi); 217 - void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, 218 - enum mipi_seq seq_id); 219 - void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); 220 - void intel_dsi_log_params(struct intel_dsi *intel_dsi); 221 179 222 180 #endif /* _INTEL_DSI_H */
+4
drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
··· 71 71 u8 data[2] = {}; 72 72 enum port port; 73 73 size_t len = panel->backlight.max > U8_MAX ? 2 : 1; 74 + unsigned long mode_flags; 74 75 75 76 if (len == 1) { 76 77 data[0] = level; ··· 82 81 83 82 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { 84 83 dsi_device = intel_dsi->dsi_hosts[port]->device; 84 + mode_flags = dsi_device->mode_flags; 85 + dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM; 85 86 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 86 87 &data, len); 88 + dsi_device->mode_flags = mode_flags; 87 89 } 88 90 } 89 91
+2
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
··· 41 41 #include "i915_drv.h" 42 42 #include "intel_display_types.h" 43 43 #include "intel_dsi.h" 44 + #include "intel_dsi_vbt.h" 45 + #include "vlv_dsi.h" 44 46 #include "vlv_sideband.h" 45 47 46 48 #define MIPI_TRANSFER_MODE_SHIFT 0
+22
drivers/gpu/drm/i915/display/intel_dsi_vbt.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_DSI_VBT_H__ 7 + #define __INTEL_DSI_VBT_H__ 8 + 9 + #include <linux/types.h> 10 + 11 + enum mipi_seq; 12 + struct intel_dsi; 13 + 14 + bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); 15 + void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on); 16 + void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi); 17 + void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, 18 + enum mipi_seq seq_id); 19 + void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); 20 + void intel_dsi_log_params(struct intel_dsi *intel_dsi); 21 + 22 + #endif /* __INTEL_DSI_VBT_H__ */
+593 -113
drivers/gpu/drm/i915/display/intel_fb.c
··· 13 13 14 14 #define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a)) 15 15 16 - bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) 16 + /* 17 + * From the Sky Lake PRM: 18 + * "The Color Control Surface (CCS) contains the compression status of 19 + * the cache-line pairs. The compression state of the cache-line pair 20 + * is specified by 2 bits in the CCS. Each CCS cache-line represents 21 + * an area on the main surface of 16 x16 sets of 128 byte Y-tiled 22 + * cache-line-pairs. CCS is always Y tiled." 23 + * 24 + * Since cache line pairs refers to horizontally adjacent cache lines, 25 + * each cache line in the CCS corresponds to an area of 32x16 cache 26 + * lines on the main surface. Since each pixel is 4 bytes, this gives 27 + * us a ratio of one byte in the CCS for each 8x16 pixels in the 28 + * main surface. 29 + */ 30 + static const struct drm_format_info skl_ccs_formats[] = { 31 + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 32 + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 33 + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 34 + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 35 + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 36 + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 37 + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 38 + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, 39 + }; 40 + 41 + /* 42 + * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the 43 + * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles 44 + * in the main surface. With 4 byte pixels and each Y-tile having dimensions of 45 + * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in 46 + * the main surface. 
47 + */ 48 + static const struct drm_format_info gen12_ccs_formats[] = { 49 + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, 50 + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 51 + .hsub = 1, .vsub = 1, }, 52 + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, 53 + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 54 + .hsub = 1, .vsub = 1, }, 55 + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, 56 + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 57 + .hsub = 1, .vsub = 1, .has_alpha = true }, 58 + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, 59 + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 60 + .hsub = 1, .vsub = 1, .has_alpha = true }, 61 + { .format = DRM_FORMAT_YUYV, .num_planes = 2, 62 + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 63 + .hsub = 2, .vsub = 1, .is_yuv = true }, 64 + { .format = DRM_FORMAT_YVYU, .num_planes = 2, 65 + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 66 + .hsub = 2, .vsub = 1, .is_yuv = true }, 67 + { .format = DRM_FORMAT_UYVY, .num_planes = 2, 68 + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 69 + .hsub = 2, .vsub = 1, .is_yuv = true }, 70 + { .format = DRM_FORMAT_VYUY, .num_planes = 2, 71 + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 72 + .hsub = 2, .vsub = 1, .is_yuv = true }, 73 + { .format = DRM_FORMAT_XYUV8888, .num_planes = 2, 74 + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, 75 + .hsub = 1, .vsub = 1, .is_yuv = true }, 76 + { .format = DRM_FORMAT_NV12, .num_planes = 4, 77 + .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, 78 + .hsub = 2, .vsub = 2, .is_yuv = true }, 79 + { .format = DRM_FORMAT_P010, .num_planes = 4, 80 + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 81 + 
.hsub = 2, .vsub = 2, .is_yuv = true }, 82 + { .format = DRM_FORMAT_P012, .num_planes = 4, 83 + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 84 + .hsub = 2, .vsub = 2, .is_yuv = true }, 85 + { .format = DRM_FORMAT_P016, .num_planes = 4, 86 + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, 87 + .hsub = 2, .vsub = 2, .is_yuv = true }, 88 + }; 89 + 90 + /* 91 + * Same as gen12_ccs_formats[] above, but with additional surface used 92 + * to pass Clear Color information in plane 2 with 64 bits of data. 93 + */ 94 + static const struct drm_format_info gen12_ccs_cc_formats[] = { 95 + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, 96 + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, 97 + .hsub = 1, .vsub = 1, }, 98 + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, 99 + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, 100 + .hsub = 1, .vsub = 1, }, 101 + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, 102 + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, 103 + .hsub = 1, .vsub = 1, .has_alpha = true }, 104 + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, 105 + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, 106 + .hsub = 1, .vsub = 1, .has_alpha = true }, 107 + }; 108 + 109 + struct intel_modifier_desc { 110 + u64 modifier; 111 + struct { 112 + u8 from; 113 + u8 until; 114 + } display_ver; 115 + #define DISPLAY_VER_ALL { 0, -1 } 116 + 117 + const struct drm_format_info *formats; 118 + int format_count; 119 + #define FORMAT_OVERRIDE(format_list) \ 120 + .formats = format_list, \ 121 + .format_count = ARRAY_SIZE(format_list) 122 + 123 + u8 plane_caps; 124 + 125 + struct { 126 + u8 cc_planes:3; 127 + u8 packed_aux_planes:4; 128 + u8 planar_aux_planes:4; 129 + } ccs; 130 + }; 131 + 132 + #define 
INTEL_PLANE_CAP_CCS_MASK (INTEL_PLANE_CAP_CCS_RC | \ 133 + INTEL_PLANE_CAP_CCS_RC_CC | \ 134 + INTEL_PLANE_CAP_CCS_MC) 135 + #define INTEL_PLANE_CAP_TILING_MASK (INTEL_PLANE_CAP_TILING_X | \ 136 + INTEL_PLANE_CAP_TILING_Y | \ 137 + INTEL_PLANE_CAP_TILING_Yf) 138 + #define INTEL_PLANE_CAP_TILING_NONE 0 139 + 140 + static const struct intel_modifier_desc intel_modifiers[] = { 141 + { 142 + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, 143 + .display_ver = { 12, 13 }, 144 + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_MC, 145 + 146 + .ccs.packed_aux_planes = BIT(1), 147 + .ccs.planar_aux_planes = BIT(2) | BIT(3), 148 + 149 + FORMAT_OVERRIDE(gen12_ccs_formats), 150 + }, { 151 + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, 152 + .display_ver = { 12, 13 }, 153 + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC, 154 + 155 + .ccs.packed_aux_planes = BIT(1), 156 + 157 + FORMAT_OVERRIDE(gen12_ccs_formats), 158 + }, { 159 + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, 160 + .display_ver = { 12, 13 }, 161 + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC_CC, 162 + 163 + .ccs.cc_planes = BIT(2), 164 + .ccs.packed_aux_planes = BIT(1), 165 + 166 + FORMAT_OVERRIDE(gen12_ccs_cc_formats), 167 + }, { 168 + .modifier = I915_FORMAT_MOD_Yf_TILED_CCS, 169 + .display_ver = { 9, 11 }, 170 + .plane_caps = INTEL_PLANE_CAP_TILING_Yf | INTEL_PLANE_CAP_CCS_RC, 171 + 172 + .ccs.packed_aux_planes = BIT(1), 173 + 174 + FORMAT_OVERRIDE(skl_ccs_formats), 175 + }, { 176 + .modifier = I915_FORMAT_MOD_Y_TILED_CCS, 177 + .display_ver = { 9, 11 }, 178 + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC, 179 + 180 + .ccs.packed_aux_planes = BIT(1), 181 + 182 + FORMAT_OVERRIDE(skl_ccs_formats), 183 + }, { 184 + .modifier = I915_FORMAT_MOD_Yf_TILED, 185 + .display_ver = { 9, 11 }, 186 + .plane_caps = INTEL_PLANE_CAP_TILING_Yf, 187 + }, { 188 + .modifier = I915_FORMAT_MOD_Y_TILED, 189 + .display_ver = { 9, 13 }, 190 + .plane_caps = 
INTEL_PLANE_CAP_TILING_Y, 191 + }, { 192 + .modifier = I915_FORMAT_MOD_X_TILED, 193 + .display_ver = DISPLAY_VER_ALL, 194 + .plane_caps = INTEL_PLANE_CAP_TILING_X, 195 + }, { 196 + .modifier = DRM_FORMAT_MOD_LINEAR, 197 + .display_ver = DISPLAY_VER_ALL, 198 + }, 199 + }; 200 + 201 + static const struct intel_modifier_desc *lookup_modifier_or_null(u64 modifier) 17 202 { 18 - if (!is_ccs_modifier(fb->modifier)) 203 + int i; 204 + 205 + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) 206 + if (intel_modifiers[i].modifier == modifier) 207 + return &intel_modifiers[i]; 208 + 209 + return NULL; 210 + } 211 + 212 + static const struct intel_modifier_desc *lookup_modifier(u64 modifier) 213 + { 214 + const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier); 215 + 216 + if (WARN_ON(!md)) 217 + return &intel_modifiers[0]; 218 + 219 + return md; 220 + } 221 + 222 + static const struct drm_format_info * 223 + lookup_format_info(const struct drm_format_info formats[], 224 + int num_formats, u32 format) 225 + { 226 + int i; 227 + 228 + for (i = 0; i < num_formats; i++) { 229 + if (formats[i].format == format) 230 + return &formats[i]; 231 + } 232 + 233 + return NULL; 234 + } 235 + 236 + /** 237 + * intel_fb_get_format_info: Get a modifier specific format information 238 + * @cmd: FB add command structure 239 + * 240 + * Returns: 241 + * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0], 242 + * or %NULL if the modifier doesn't override the format. 
243 + */ 244 + const struct drm_format_info * 245 + intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 246 + { 247 + const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]); 248 + 249 + if (!md || !md->formats) 250 + return NULL; 251 + 252 + return lookup_format_info(md->formats, md->format_count, cmd->pixel_format); 253 + } 254 + 255 + static bool plane_caps_contain_any(u8 caps, u8 mask) 256 + { 257 + return caps & mask; 258 + } 259 + 260 + static bool plane_caps_contain_all(u8 caps, u8 mask) 261 + { 262 + return (caps & mask) == mask; 263 + } 264 + 265 + /** 266 + * intel_fb_is_ccs_modifier: Check if a modifier is a CCS modifier type 267 + * @modifier: Modifier to check 268 + * 269 + * Returns: 270 + * Returns %true if @modifier is a render, render with color clear or 271 + * media compression modifier. 272 + */ 273 + bool intel_fb_is_ccs_modifier(u64 modifier) 274 + { 275 + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, 276 + INTEL_PLANE_CAP_CCS_MASK); 277 + } 278 + 279 + /** 280 + * intel_fb_is_rc_ccs_cc_modifier: Check if a modifier is an RC CCS CC modifier type 281 + * @modifier: Modifier to check 282 + * 283 + * Returns: 284 + * Returns %true if @modifier is a render with color clear modifier. 285 + */ 286 + bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier) 287 + { 288 + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, 289 + INTEL_PLANE_CAP_CCS_RC_CC); 290 + } 291 + 292 + /** 293 + * intel_fb_is_mc_ccs_modifier: Check if a modifier is an MC CCS modifier type 294 + * @modifier: Modifier to check 295 + * 296 + * Returns: 297 + * Returns %true if @modifier is a media compression modifier. 
298 + */ 299 + bool intel_fb_is_mc_ccs_modifier(u64 modifier) 300 + { 301 + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, 302 + INTEL_PLANE_CAP_CCS_MC); 303 + } 304 + 305 + static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md, 306 + u8 display_ver_from, u8 display_ver_until) 307 + { 308 + return md->display_ver.from <= display_ver_until && 309 + display_ver_from <= md->display_ver.until; 310 + } 311 + 312 + static bool plane_has_modifier(struct drm_i915_private *i915, 313 + u8 plane_caps, 314 + const struct intel_modifier_desc *md) 315 + { 316 + if (!IS_DISPLAY_VER(i915, md->display_ver.from, md->display_ver.until)) 19 317 return false; 20 318 21 - return plane >= fb->format->num_planes / 2; 319 + if (!plane_caps_contain_all(plane_caps, md->plane_caps)) 320 + return false; 321 + 322 + return true; 22 323 } 23 324 24 - bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) 325 + /** 326 + * intel_fb_plane_get_modifiers: Get the modifiers for the given platform and plane capabilities 327 + * @i915: i915 device instance 328 + * @plane_caps: capabilities for the plane the modifiers are queried for 329 + * 330 + * Returns: 331 + * Returns the list of modifiers allowed by the @i915 platform and @plane_caps. 332 + * The caller must free the returned buffer. 
333 + */ 334 + u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915, 335 + u8 plane_caps) 25 336 { 26 - return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane); 337 + u64 *list, *p; 338 + int count = 1; /* +1 for invalid modifier terminator */ 339 + int i; 340 + 341 + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) { 342 + if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i])) 343 + count++; 344 + } 345 + 346 + list = kmalloc_array(count, sizeof(*list), GFP_KERNEL); 347 + if (drm_WARN_ON(&i915->drm, !list)) 348 + return NULL; 349 + 350 + p = list; 351 + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) { 352 + if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i])) 353 + *p++ = intel_modifiers[i].modifier; 354 + } 355 + *p++ = DRM_FORMAT_MOD_INVALID; 356 + 357 + return list; 27 358 } 28 359 29 - bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane) 360 + /** 361 + * intel_fb_plane_supports_modifier: Determine if a modifier is supported by the given plane 362 + * @plane: Plane to check the modifier support for 363 + * @modifier: The modifier to check the support for 364 + * 365 + * Returns: 366 + * %true if the @modifier is supported on @plane. 
367 + */ 368 + bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier) 30 369 { 31 - return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC && 32 - plane == 2; 370 + int i; 371 + 372 + for (i = 0; i < plane->base.modifier_count; i++) 373 + if (plane->base.modifiers[i] == modifier) 374 + return true; 375 + 376 + return false; 33 377 } 34 378 35 - bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) 379 + static bool format_is_yuv_semiplanar(const struct intel_modifier_desc *md, 380 + const struct drm_format_info *info) 381 + { 382 + int yuv_planes; 383 + 384 + if (!info->is_yuv) 385 + return false; 386 + 387 + if (plane_caps_contain_any(md->plane_caps, INTEL_PLANE_CAP_CCS_MASK)) 388 + yuv_planes = 4; 389 + else 390 + yuv_planes = 2; 391 + 392 + return info->num_planes == yuv_planes; 393 + } 394 + 395 + /** 396 + * intel_format_info_is_yuv_semiplanar: Check if the given format is YUV semiplanar 397 + * @info: format to check 398 + * @modifier: modifier used with the format 399 + * 400 + * Returns: 401 + * %true if @info / @modifier is YUV semiplanar. 402 + */ 403 + bool intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, 404 + u64 modifier) 405 + { 406 + return format_is_yuv_semiplanar(lookup_modifier(modifier), info); 407 + } 408 + 409 + static u8 ccs_aux_plane_mask(const struct intel_modifier_desc *md, 410 + const struct drm_format_info *format) 411 + { 412 + if (format_is_yuv_semiplanar(md, format)) 413 + return md->ccs.planar_aux_planes; 414 + else 415 + return md->ccs.packed_aux_planes; 416 + } 417 + 418 + /** 419 + * intel_fb_is_ccs_aux_plane: Check if a framebuffer color plane is a CCS AUX plane 420 + * @fb: Framebuffer 421 + * @color_plane: color plane index to check 422 + * 423 + * Returns: 424 + * Returns %true if @fb's color plane at index @color_plane is a CCS AUX plane. 
425 + */ 426 + bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane) 427 + { 428 + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); 429 + 430 + return ccs_aux_plane_mask(md, fb->format) & BIT(color_plane); 431 + } 432 + 433 + /** 434 + * intel_fb_is_gen12_ccs_aux_plane: Check if a framebuffer color plane is a GEN12 CCS AUX plane 435 + * @fb: Framebuffer 436 + * @color_plane: color plane index to check 437 + * 438 + * Returns: 439 + * Returns %true if @fb's color plane at index @color_plane is a GEN12 CCS AUX plane. 440 + */ 441 + static bool intel_fb_is_gen12_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane) 442 + { 443 + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); 444 + 445 + return check_modifier_display_ver_range(md, 12, 13) && 446 + ccs_aux_plane_mask(md, fb->format) & BIT(color_plane); 447 + } 448 + 449 + /** 450 + * intel_fb_rc_ccs_cc_plane: Get the CCS CC color plane index for a framebuffer 451 + * @fb: Framebuffer 452 + * 453 + * Returns: 454 + * Returns the index of the color clear plane for @fb, or -1 if @fb is not a 455 + * framebuffer using a render compression/color clear modifier. 
456 + */ 457 + int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb) 458 + { 459 + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); 460 + 461 + if (!md->ccs.cc_planes) 462 + return -1; 463 + 464 + drm_WARN_ON_ONCE(fb->dev, hweight8(md->ccs.cc_planes) > 1); 465 + 466 + return ilog2((int)md->ccs.cc_planes); 467 + } 468 + 469 + static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int color_plane) 470 + { 471 + return intel_fb_rc_ccs_cc_plane(fb) == color_plane; 472 + } 473 + 474 + static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) 36 475 { 37 476 return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && 38 477 color_plane == 1; ··· 480 41 bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane) 481 42 { 482 43 return fb->modifier == DRM_FORMAT_MOD_LINEAR || 483 - is_gen12_ccs_plane(fb, color_plane); 44 + intel_fb_is_gen12_ccs_aux_plane(fb, color_plane) || 45 + is_gen12_ccs_cc_plane(fb, color_plane); 484 46 } 485 47 486 48 int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) 487 49 { 488 - drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || 50 + drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) || 489 51 (main_plane && main_plane >= fb->format->num_planes / 2)); 490 52 491 53 return fb->format->num_planes / 2 + main_plane; ··· 494 54 495 55 int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) 496 56 { 497 - drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || 57 + drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) || 498 58 ccs_plane < fb->format->num_planes / 2); 499 59 500 60 if (is_gen12_ccs_cc_plane(fb, ccs_plane)) ··· 503 63 return ccs_plane - fb->format->num_planes / 2; 504 64 } 505 65 506 - static unsigned int gen12_aligned_scanout_stride(const struct intel_framebuffer *fb, 507 - int color_plane) 508 - { 509 - struct drm_i915_private *i915 = to_i915(fb->base.dev); 510 - unsigned int 
stride = fb->base.pitches[color_plane]; 511 - 512 - if (IS_ALDERLAKE_P(i915)) 513 - return roundup_pow_of_two(max(stride, 514 - 8u * intel_tile_width_bytes(&fb->base, color_plane))); 515 - 516 - return stride; 517 - } 518 - 519 66 static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_plane) 520 67 { 521 - struct drm_i915_private *i915 = to_i915(fb->base.dev); 522 68 int main_plane = skl_ccs_to_main_plane(&fb->base, ccs_plane); 523 69 unsigned int main_stride = fb->base.pitches[main_plane]; 524 70 unsigned int main_tile_width = intel_tile_width_bytes(&fb->base, main_plane); 525 - 526 - /* 527 - * On ADL-P the AUX stride must align with a power-of-two aligned main 528 - * surface stride. The stride of the allocated main surface object can 529 - * be less than this POT stride, which is then autopadded to the POT 530 - * size. 531 - */ 532 - if (IS_ALDERLAKE_P(i915)) 533 - main_stride = gen12_aligned_scanout_stride(fb, main_plane); 534 71 535 72 return DIV_ROUND_UP(main_stride, 4 * main_tile_width) * 64; 536 73 } ··· 516 99 { 517 100 struct drm_i915_private *i915 = to_i915(fb->dev); 518 101 519 - if (is_ccs_modifier(fb->modifier)) 102 + if (intel_fb_is_ccs_modifier(fb->modifier)) 520 103 return main_to_ccs_plane(fb, main_plane); 521 104 else if (DISPLAY_VER(i915) < 11 && 522 105 intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) ··· 545 128 else 546 129 return 512; 547 130 case I915_FORMAT_MOD_Y_TILED_CCS: 548 - if (is_ccs_plane(fb, color_plane)) 131 + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) 549 132 return 128; 550 133 fallthrough; 551 134 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 552 135 case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: 553 136 case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 554 - if (is_ccs_plane(fb, color_plane)) 137 + if (intel_fb_is_ccs_aux_plane(fb, color_plane) || 138 + is_gen12_ccs_cc_plane(fb, color_plane)) 555 139 return 64; 556 140 fallthrough; 557 141 case I915_FORMAT_MOD_Y_TILED: ··· 561 143 else 562 
144 return 512; 563 145 case I915_FORMAT_MOD_Yf_TILED_CCS: 564 - if (is_ccs_plane(fb, color_plane)) 146 + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) 565 147 return 128; 566 148 fallthrough; 567 149 case I915_FORMAT_MOD_Yf_TILED: ··· 617 199 { 618 200 intel_tile_dims(fb, color_plane, tile_width, tile_height); 619 201 620 - if (is_gen12_ccs_plane(fb, color_plane)) 202 + if (intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) 621 203 *tile_height = 1; 622 204 } 623 205 ··· 641 223 642 224 static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) 643 225 { 644 - switch (fb_modifier) { 645 - case I915_FORMAT_MOD_X_TILED: 646 - return I915_TILING_X; 647 - case I915_FORMAT_MOD_Y_TILED: 648 - case I915_FORMAT_MOD_Y_TILED_CCS: 649 - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 650 - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: 651 - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 226 + u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps & 227 + INTEL_PLANE_CAP_TILING_MASK; 228 + 229 + switch (tiling_caps) { 230 + case INTEL_PLANE_CAP_TILING_Y: 652 231 return I915_TILING_Y; 232 + case INTEL_PLANE_CAP_TILING_X: 233 + return I915_TILING_X; 234 + case INTEL_PLANE_CAP_TILING_Yf: 235 + case INTEL_PLANE_CAP_TILING_NONE: 236 + return I915_TILING_NONE; 653 237 default: 238 + MISSING_CASE(tiling_caps); 654 239 return I915_TILING_NONE; 655 240 } 656 241 } ··· 692 271 return 512 * 4096; 693 272 694 273 /* AUX_DIST needs only 4K alignment */ 695 - if (is_ccs_plane(fb, color_plane)) 274 + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) 696 275 return 4096; 697 276 698 277 if (is_semiplanar_uv_plane(fb, color_plane)) { ··· 751 330 * TODO: Deduct the subsampling from the char block for all CCS 752 331 * formats and planes. 
753 332 */ 754 - if (!is_gen12_ccs_plane(fb, color_plane)) { 333 + if (!intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) { 755 334 *hsub = fb->format->hsub; 756 335 *vsub = fb->format->vsub; 757 336 ··· 778 357 779 358 static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane, int *w, int *h) 780 359 { 781 - struct drm_i915_private *i915 = to_i915(fb->base.dev); 782 - int main_plane = is_ccs_plane(&fb->base, color_plane) ? 360 + int main_plane = intel_fb_is_ccs_aux_plane(&fb->base, color_plane) ? 783 361 skl_ccs_to_main_plane(&fb->base, color_plane) : 0; 784 362 unsigned int main_width = fb->base.width; 785 363 unsigned int main_height = fb->base.height; 786 364 int main_hsub, main_vsub; 787 365 int hsub, vsub; 788 - 789 - /* 790 - * On ADL-P the CCS AUX surface layout always aligns with the 791 - * power-of-two aligned main surface stride. The main surface 792 - * stride in the allocated FB object may not be power-of-two 793 - * sized, in which case it is auto-padded to the POT size. 
794 - */ 795 - if (IS_ALDERLAKE_P(i915) && is_ccs_plane(&fb->base, color_plane)) 796 - main_width = gen12_aligned_scanout_stride(fb, 0) / 797 - fb->base.format->cpp[0]; 798 366 799 367 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane); 800 368 intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane); ··· 819 409 return new_offset; 820 410 } 821 411 412 + static u32 intel_adjust_linear_offset(int *x, int *y, 413 + unsigned int cpp, 414 + unsigned int pitch, 415 + u32 old_offset, 416 + u32 new_offset) 417 + { 418 + old_offset += *y * pitch + *x * cpp; 419 + 420 + *y = (old_offset - new_offset) / pitch; 421 + *x = ((old_offset - new_offset) - *y * pitch) / cpp; 422 + 423 + return new_offset; 424 + } 425 + 822 426 static u32 intel_adjust_aligned_offset(int *x, int *y, 823 427 const struct drm_framebuffer *fb, 824 428 int color_plane, ··· 863 439 tile_size, pitch_tiles, 864 440 old_offset, new_offset); 865 441 } else { 866 - old_offset += *y * pitch + *x * cpp; 867 - 868 - *y = (old_offset - new_offset) / pitch; 869 - *x = ((old_offset - new_offset) - *y * pitch) / cpp; 442 + intel_adjust_linear_offset(x, y, cpp, pitch, 443 + old_offset, new_offset); 870 444 } 871 445 872 446 return new_offset; ··· 881 459 { 882 460 return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane, 883 461 state->hw.rotation, 884 - state->view.color_plane[color_plane].stride, 462 + state->view.color_plane[color_plane].mapping_stride, 885 463 old_offset, new_offset); 886 464 } 887 465 ··· 962 540 struct drm_i915_private *i915 = to_i915(intel_plane->base.dev); 963 541 const struct drm_framebuffer *fb = state->hw.fb; 964 542 unsigned int rotation = state->hw.rotation; 965 - int pitch = state->view.color_plane[color_plane].stride; 543 + int pitch = state->view.color_plane[color_plane].mapping_stride; 966 544 u32 alignment; 967 545 968 546 if (intel_plane->id == PLANE_CURSOR) ··· 984 562 u32 alignment; 985 563 986 564 if (DISPLAY_VER(i915) >= 12 && 
565 + !intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) && 987 566 is_semiplanar_uv_plane(fb, color_plane)) 988 567 alignment = intel_tile_row_size(fb, color_plane); 989 568 else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) ··· 1033 610 int ccs_x, ccs_y; 1034 611 int main_x, main_y; 1035 612 1036 - if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane)) 613 + if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane)) 1037 614 return 0; 1038 615 1039 616 /* ··· 1096 673 * The new CCS hash mode isn't compatible with remapping as 1097 674 * the virtual address of the pages affects the compressed data. 1098 675 */ 1099 - if (is_ccs_modifier(fb->modifier)) 676 + if (intel_fb_is_ccs_modifier(fb->modifier)) 1100 677 return false; 1101 678 1102 679 /* Linear needs a page aligned stride for remapping */ ··· 1122 699 static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation) 1123 700 { 1124 701 if (drm_rotation_90_or_270(rotation)) 1125 - return fb->rotated_view.color_plane[color_plane].stride; 702 + return fb->rotated_view.color_plane[color_plane].mapping_stride; 1126 703 else if (intel_fb_needs_pot_stride_remap(fb)) 1127 - return fb->remapped_view.color_plane[color_plane].stride; 704 + return fb->remapped_view.color_plane[color_plane].mapping_stride; 1128 705 else 1129 - return fb->normal_view.color_plane[color_plane].stride; 706 + return fb->normal_view.color_plane[color_plane].mapping_stride; 1130 707 } 1131 708 1132 709 static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) ··· 1237 814 unsigned int pitch_tiles) 1238 815 { 1239 816 if (intel_fb_needs_pot_stride_remap(fb)) { 1240 - unsigned int min_stride = is_ccs_plane(&fb->base, color_plane) ? 2 : 8; 1241 817 /* 1242 818 * ADL_P, the only platform needing a POT stride has a minimum 1243 - * of 8 main surface and 2 CCS AUX stride tiles. 819 + * of 8 main surface tiles. 
1244 820 */ 1245 - return roundup_pow_of_two(max(pitch_tiles, min_stride)); 821 + return roundup_pow_of_two(max(pitch_tiles, 8u)); 1246 822 } else { 1247 823 return pitch_tiles; 1248 824 } 825 + } 826 + 827 + static unsigned int 828 + plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane, 829 + unsigned int tile_width, 830 + unsigned int src_stride_tiles, unsigned int dst_stride_tiles) 831 + { 832 + unsigned int stride_tiles; 833 + 834 + if (IS_ALDERLAKE_P(to_i915(fb->base.dev))) 835 + stride_tiles = src_stride_tiles; 836 + else 837 + stride_tiles = dst_stride_tiles; 838 + 839 + return stride_tiles * tile_width * fb->base.format->cpp[color_plane]; 1249 840 } 1250 841 1251 842 static unsigned int ··· 1278 841 return DIV_ROUND_UP(y + dims->height, dims->tile_height); 1279 842 } 1280 843 844 + static unsigned int 845 + plane_view_linear_tiles(const struct intel_framebuffer *fb, int color_plane, 846 + const struct fb_plane_view_dims *dims, 847 + int x, int y) 848 + { 849 + struct drm_i915_private *i915 = to_i915(fb->base.dev); 850 + unsigned int size; 851 + 852 + size = (y + dims->height) * fb->base.pitches[color_plane] + 853 + x * fb->base.format->cpp[color_plane]; 854 + 855 + return DIV_ROUND_UP(size, intel_tile_size(i915)); 856 + } 857 + 1281 858 #define assign_chk_ovf(i915, var, val) ({ \ 1282 859 drm_WARN_ON(&(i915)->drm, overflows_type(val, var)); \ 1283 860 (var) = (val); \ 861 + }) 862 + 863 + #define assign_bfld_chk_ovf(i915, var, val) ({ \ 864 + (var) = (val); \ 865 + drm_WARN_ON(&(i915)->drm, (var) != (val)); \ 866 + (var); \ 1284 867 }) 1285 868 1286 869 static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_plane, ··· 1317 860 struct drm_rect r; 1318 861 u32 size = 0; 1319 862 1320 - assign_chk_ovf(i915, remap_info->offset, obj_offset); 1321 - assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims)); 1322 - assign_chk_ovf(i915, remap_info->width, 
plane_view_width_tiles(fb, color_plane, dims, x)); 1323 - assign_chk_ovf(i915, remap_info->height, plane_view_height_tiles(fb, color_plane, dims, y)); 863 + assign_bfld_chk_ovf(i915, remap_info->offset, obj_offset); 864 + 865 + if (intel_fb_is_gen12_ccs_aux_plane(&fb->base, color_plane)) { 866 + remap_info->linear = 1; 867 + 868 + assign_chk_ovf(i915, remap_info->size, 869 + plane_view_linear_tiles(fb, color_plane, dims, x, y)); 870 + } else { 871 + remap_info->linear = 0; 872 + 873 + assign_chk_ovf(i915, remap_info->src_stride, 874 + plane_view_src_stride_tiles(fb, color_plane, dims)); 875 + assign_chk_ovf(i915, remap_info->width, 876 + plane_view_width_tiles(fb, color_plane, dims, x)); 877 + assign_chk_ovf(i915, remap_info->height, 878 + plane_view_height_tiles(fb, color_plane, dims, y)); 879 + } 1324 880 1325 881 if (view->gtt.type == I915_GGTT_VIEW_ROTATED) { 882 + drm_WARN_ON(&i915->drm, remap_info->linear); 1326 883 check_array_bounds(i915, view->gtt.rotated.plane, color_plane); 1327 884 1328 885 assign_chk_ovf(i915, remap_info->dst_stride, ··· 1352 881 color_plane_info->x = r.x1; 1353 882 color_plane_info->y = r.y1; 1354 883 1355 - color_plane_info->stride = remap_info->dst_stride * tile_height; 884 + color_plane_info->mapping_stride = remap_info->dst_stride * tile_height; 885 + color_plane_info->scanout_stride = color_plane_info->mapping_stride; 1356 886 1357 887 size += remap_info->dst_stride * remap_info->width; 1358 888 ··· 1372 900 gtt_offset = aligned_offset; 1373 901 } 1374 902 1375 - assign_chk_ovf(i915, remap_info->dst_stride, 1376 - plane_view_dst_stride_tiles(fb, color_plane, remap_info->width)); 1377 - 1378 903 color_plane_info->x = x; 1379 904 color_plane_info->y = y; 1380 905 1381 - color_plane_info->stride = remap_info->dst_stride * tile_width * 1382 - fb->base.format->cpp[color_plane]; 906 + if (remap_info->linear) { 907 + color_plane_info->mapping_stride = fb->base.pitches[color_plane]; 908 + color_plane_info->scanout_stride = 
color_plane_info->mapping_stride; 1383 909 1384 - size += remap_info->dst_stride * remap_info->height; 910 + size += remap_info->size; 911 + } else { 912 + unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane, 913 + remap_info->width); 914 + 915 + assign_chk_ovf(i915, remap_info->dst_stride, dst_stride); 916 + color_plane_info->mapping_stride = dst_stride * 917 + tile_width * 918 + fb->base.format->cpp[color_plane]; 919 + color_plane_info->scanout_stride = 920 + plane_view_scanout_stride(fb, color_plane, tile_width, 921 + remap_info->src_stride, 922 + dst_stride); 923 + 924 + size += dst_stride * remap_info->height; 925 + } 1385 926 } 1386 927 1387 928 /* ··· 1402 917 * the x/y offsets. x,y will hold the first pixel of the framebuffer 1403 918 * plane from the start of the remapped/rotated gtt mapping. 1404 919 */ 1405 - intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y, 1406 - tile_width, tile_height, 1407 - tile_size, remap_info->dst_stride, 1408 - gtt_offset * tile_size, 0); 920 + if (remap_info->linear) 921 + intel_adjust_linear_offset(&color_plane_info->x, &color_plane_info->y, 922 + fb->base.format->cpp[color_plane], 923 + color_plane_info->mapping_stride, 924 + gtt_offset * tile_size, 0); 925 + else 926 + intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y, 927 + tile_width, tile_height, 928 + tile_size, remap_info->dst_stride, 929 + gtt_offset * tile_size, 0); 1409 930 1410 931 return size; 1411 932 } ··· 1424 933 const struct fb_plane_view_dims *dims, 1425 934 int x, int y) 1426 935 { 1427 - struct drm_i915_private *i915 = to_i915(fb->base.dev); 1428 936 unsigned int tiles; 1429 937 1430 938 if (is_surface_linear(&fb->base, color_plane)) { 1431 - unsigned int size; 1432 - 1433 - size = (y + dims->height) * fb->base.pitches[color_plane] + 1434 - x * fb->base.format->cpp[color_plane]; 1435 - tiles = DIV_ROUND_UP(size, intel_tile_size(i915)); 939 + tiles = plane_view_linear_tiles(fb, color_plane, dims, x, y); 
1436 940 } else { 1437 941 tiles = plane_view_src_stride_tiles(fb, color_plane, dims) * 1438 942 plane_view_height_tiles(fb, color_plane, dims, y); ··· 1516 1030 */ 1517 1031 fb->normal_view.color_plane[i].x = x; 1518 1032 fb->normal_view.color_plane[i].y = y; 1519 - fb->normal_view.color_plane[i].stride = fb->base.pitches[i]; 1033 + fb->normal_view.color_plane[i].mapping_stride = fb->base.pitches[i]; 1034 + fb->normal_view.color_plane[i].scanout_stride = 1035 + fb->normal_view.color_plane[i].mapping_stride; 1520 1036 1521 1037 offset = calc_plane_aligned_offset(fb, i, &x, &y); 1522 1038 ··· 1568 1080 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 1569 1081 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 1570 1082 1571 - drm_WARN_ON(&i915->drm, is_ccs_modifier(fb->modifier)); 1083 + drm_WARN_ON(&i915->drm, intel_fb_is_ccs_modifier(fb->modifier)); 1572 1084 1573 1085 /* Make src coordinates relative to the viewport */ 1574 1086 drm_rect_translate(&plane_state->uapi.src, ··· 1631 1143 * 1632 1144 * The new CCS hash mode makes remapping impossible 1633 1145 */ 1634 - if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) || 1146 + if (DISPLAY_VER(dev_priv) < 4 || intel_fb_is_ccs_modifier(modifier) || 1635 1147 intel_modifier_uses_dpt(dev_priv, modifier)) 1636 1148 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); 1637 1149 else if (DISPLAY_VER(dev_priv) >= 7) ··· 1656 1168 * we need the stride to be page aligned. 
1657 1169 */ 1658 1170 if (fb->pitches[color_plane] > max_stride && 1659 - !is_ccs_modifier(fb->modifier)) 1171 + !intel_fb_is_ccs_modifier(fb->modifier)) 1660 1172 return intel_tile_size(dev_priv); 1661 1173 else 1662 1174 return 64; 1663 1175 } 1664 1176 1665 1177 tile_width = intel_tile_width_bytes(fb, color_plane); 1666 - if (is_ccs_modifier(fb->modifier)) { 1667 - /* 1668 - * On ADL-P the stride must be either 8 tiles or a stride 1669 - * that is aligned to 16 tiles, required by the 16 tiles = 1670 - * 64 kbyte CCS AUX PTE granularity, allowing CCS FBs to be 1671 - * remapped. 1672 - */ 1673 - if (IS_ALDERLAKE_P(dev_priv)) 1674 - tile_width *= fb->pitches[0] <= tile_width * 8 ? 8 : 16; 1178 + if (intel_fb_is_ccs_modifier(fb->modifier)) { 1675 1179 /* 1676 1180 * On TGL the surface stride must be 4 tile aligned, mapped by 1677 1181 * one 64 byte cacheline on the CCS AUX surface. 1678 1182 */ 1679 - else if (DISPLAY_VER(dev_priv) >= 12) 1183 + if (DISPLAY_VER(dev_priv) >= 12) 1680 1184 tile_width *= 4; 1681 1185 /* 1682 1186 * Display WA #0531: skl,bxt,kbl,glk ··· 1704 1224 return 0; 1705 1225 1706 1226 /* FIXME other color planes? */ 1707 - stride = plane_state->view.color_plane[0].stride; 1227 + stride = plane_state->view.color_plane[0].mapping_stride; 1708 1228 max_stride = plane->max_stride(plane, fb->format->format, 1709 1229 fb->modifier, rotation); 1710 1230 ··· 1910 1430 goto err; 1911 1431 } 1912 1432 1913 - if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) { 1433 + if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) { 1914 1434 int ccs_aux_stride = gen12_ccs_aux_stride(intel_fb, i); 1915 1435 1916 1436 if (fb->pitches[i] != ccs_aux_stride) {
+27 -4
drivers/gpu/drm/i915/display/intel_fb.h
··· 6 6 #ifndef __INTEL_FB_H__ 7 7 #define __INTEL_FB_H__ 8 8 9 + #include <linux/bits.h> 9 10 #include <linux/types.h> 10 11 11 12 struct drm_device; ··· 17 16 struct drm_mode_fb_cmd2; 18 17 struct intel_fb_view; 19 18 struct intel_framebuffer; 19 + struct intel_plane; 20 20 struct intel_plane_state; 21 21 22 - bool is_ccs_plane(const struct drm_framebuffer *fb, int plane); 23 - bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane); 24 - bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane); 25 - bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane); 22 + #define INTEL_PLANE_CAP_NONE 0 23 + #define INTEL_PLANE_CAP_CCS_RC BIT(0) 24 + #define INTEL_PLANE_CAP_CCS_RC_CC BIT(1) 25 + #define INTEL_PLANE_CAP_CCS_MC BIT(2) 26 + #define INTEL_PLANE_CAP_TILING_X BIT(3) 27 + #define INTEL_PLANE_CAP_TILING_Y BIT(4) 28 + #define INTEL_PLANE_CAP_TILING_Yf BIT(5) 29 + 30 + bool intel_fb_is_ccs_modifier(u64 modifier); 31 + bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier); 32 + bool intel_fb_is_mc_ccs_modifier(u64 modifier); 33 + 34 + bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane); 35 + int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb); 36 + 37 + u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915, 38 + u8 plane_caps); 39 + bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier); 40 + 41 + const struct drm_format_info * 42 + intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd); 43 + 44 + bool 45 + intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, 46 + u64 modifier); 26 47 27 48 bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane); 28 49
+5 -7
drivers/gpu/drm/i915/display/intel_fb_pin.c
··· 142 142 if (ret) 143 143 goto err; 144 144 145 - if (!ret) { 146 - vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment, 147 - view, pinctl); 148 - if (IS_ERR(vma)) { 149 - ret = PTR_ERR(vma); 150 - goto err_unpin; 151 - } 145 + vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment, 146 + view, pinctl); 147 + if (IS_ERR(vma)) { 148 + ret = PTR_ERR(vma); 149 + goto err_unpin; 152 150 } 153 151 154 152 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
+631 -479
drivers/gpu/drm/i915/display/intel_fbc.c
··· 48 48 #include "intel_fbc.h" 49 49 #include "intel_frontbuffer.h" 50 50 51 + struct intel_fbc_funcs { 52 + void (*activate)(struct intel_fbc *fbc); 53 + void (*deactivate)(struct intel_fbc *fbc); 54 + bool (*is_active)(struct intel_fbc *fbc); 55 + bool (*is_compressing)(struct intel_fbc *fbc); 56 + void (*nuke)(struct intel_fbc *fbc); 57 + void (*program_cfb)(struct intel_fbc *fbc); 58 + void (*set_false_color)(struct intel_fbc *fbc, bool enable); 59 + }; 60 + 51 61 /* 52 62 * For SKL+, the plane source size used by the hardware is based on the value we 53 63 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value ··· 78 68 const struct drm_framebuffer *fb = plane_state->hw.fb; 79 69 unsigned int stride; 80 70 81 - stride = plane_state->view.color_plane[0].stride; 71 + stride = plane_state->view.color_plane[0].mapping_stride; 82 72 if (!drm_rotation_90_or_270(plane_state->hw.rotation)) 83 73 stride /= fb->format->cpp[0]; 84 74 ··· 94 84 } 95 85 96 86 /* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */ 97 - static unsigned int skl_fbc_min_cfb_stride(struct drm_i915_private *i915, 87 + static unsigned int skl_fbc_min_cfb_stride(struct intel_fbc *fbc, 98 88 const struct intel_fbc_state_cache *cache) 99 89 { 90 + struct drm_i915_private *i915 = fbc->i915; 100 91 unsigned int limit = 4; /* 1:4 compression limit is the worst case */ 101 92 unsigned int cpp = 4; /* FBC always 4 bytes per pixel */ 102 93 unsigned int height = 4; /* FBC segment is 4 lines */ ··· 124 113 } 125 114 126 115 /* properly aligned cfb stride in bytes, assuming 1:1 compression limit */ 127 - static unsigned int intel_fbc_cfb_stride(struct drm_i915_private *i915, 116 + static unsigned int intel_fbc_cfb_stride(struct intel_fbc *fbc, 128 117 const struct intel_fbc_state_cache *cache) 129 118 { 119 + struct drm_i915_private *i915 = fbc->i915; 130 120 unsigned int stride = _intel_fbc_cfb_stride(cache); 131 121 132 122 /* ··· 136 124 * that regardless of 
the compression limit we choose later. 137 125 */ 138 126 if (DISPLAY_VER(i915) >= 9) 139 - return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(i915, cache)); 127 + return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(fbc, cache)); 140 128 else 141 129 return stride; 142 130 } 143 131 144 - static unsigned int intel_fbc_cfb_size(struct drm_i915_private *dev_priv, 132 + static unsigned int intel_fbc_cfb_size(struct intel_fbc *fbc, 145 133 const struct intel_fbc_state_cache *cache) 146 134 { 135 + struct drm_i915_private *i915 = fbc->i915; 147 136 int lines = cache->plane.src_h; 148 137 149 - if (DISPLAY_VER(dev_priv) == 7) 138 + if (DISPLAY_VER(i915) == 7) 150 139 lines = min(lines, 2048); 151 - else if (DISPLAY_VER(dev_priv) >= 8) 140 + else if (DISPLAY_VER(i915) >= 8) 152 141 lines = min(lines, 2560); 153 142 154 - return lines * intel_fbc_cfb_stride(dev_priv, cache); 143 + return lines * intel_fbc_cfb_stride(fbc, cache); 155 144 } 156 145 157 - static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) 146 + static u32 i8xx_fbc_ctl(struct intel_fbc *fbc) 158 147 { 148 + const struct intel_fbc_reg_params *params = &fbc->params; 149 + struct drm_i915_private *i915 = fbc->i915; 150 + unsigned int cfb_stride; 151 + u32 fbc_ctl; 152 + 153 + cfb_stride = params->cfb_stride / fbc->limit; 154 + 155 + /* FBC_CTL wants 32B or 64B units */ 156 + if (DISPLAY_VER(i915) == 2) 157 + cfb_stride = (cfb_stride / 32) - 1; 158 + else 159 + cfb_stride = (cfb_stride / 64) - 1; 160 + 161 + fbc_ctl = FBC_CTL_PERIODIC | 162 + FBC_CTL_INTERVAL(params->interval) | 163 + FBC_CTL_STRIDE(cfb_stride); 164 + 165 + if (IS_I945GM(i915)) 166 + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 167 + 168 + if (params->fence_id >= 0) 169 + fbc_ctl |= FBC_CTL_FENCENO(params->fence_id); 170 + 171 + return fbc_ctl; 172 + } 173 + 174 + static u32 i965_fbc_ctl2(struct intel_fbc *fbc) 175 + { 176 + const struct intel_fbc_reg_params *params = &fbc->params; 177 + u32 fbc_ctl2; 178 + 
179 + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | 180 + FBC_CTL_PLANE(params->crtc.i9xx_plane); 181 + 182 + if (params->fence_id >= 0) 183 + fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN; 184 + 185 + return fbc_ctl2; 186 + } 187 + 188 + static void i8xx_fbc_deactivate(struct intel_fbc *fbc) 189 + { 190 + struct drm_i915_private *i915 = fbc->i915; 159 191 u32 fbc_ctl; 160 192 161 193 /* Disable compression */ 162 - fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); 194 + fbc_ctl = intel_de_read(i915, FBC_CONTROL); 163 195 if ((fbc_ctl & FBC_CTL_EN) == 0) 164 196 return; 165 197 166 198 fbc_ctl &= ~FBC_CTL_EN; 167 - intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); 199 + intel_de_write(i915, FBC_CONTROL, fbc_ctl); 168 200 169 201 /* Wait for compressing bit to clear */ 170 - if (intel_de_wait_for_clear(dev_priv, FBC_STATUS, 202 + if (intel_de_wait_for_clear(i915, FBC_STATUS, 171 203 FBC_STAT_COMPRESSING, 10)) { 172 - drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n"); 204 + drm_dbg_kms(&i915->drm, "FBC idle timed out\n"); 173 205 return; 174 206 } 175 207 } 176 208 177 - static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) 209 + static void i8xx_fbc_activate(struct intel_fbc *fbc) 178 210 { 179 - struct intel_fbc *fbc = &dev_priv->fbc; 180 211 const struct intel_fbc_reg_params *params = &fbc->params; 181 - int cfb_pitch; 212 + struct drm_i915_private *i915 = fbc->i915; 182 213 int i; 183 - u32 fbc_ctl; 184 - 185 - cfb_pitch = params->cfb_stride / fbc->limit; 186 - 187 - /* FBC_CTL wants 32B or 64B units */ 188 - if (DISPLAY_VER(dev_priv) == 2) 189 - cfb_pitch = (cfb_pitch / 32) - 1; 190 - else 191 - cfb_pitch = (cfb_pitch / 64) - 1; 192 214 193 215 /* Clear old tags */ 194 216 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 195 - intel_de_write(dev_priv, FBC_TAG(i), 0); 217 + intel_de_write(i915, FBC_TAG(i), 0); 196 218 197 - if (DISPLAY_VER(dev_priv) == 4) { 198 - u32 fbc_ctl2; 199 - 200 - /* Set it up... 
*/ 201 - fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM; 202 - fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane); 203 - if (params->fence_id >= 0) 204 - fbc_ctl2 |= FBC_CTL_CPU_FENCE; 205 - intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2); 206 - intel_de_write(dev_priv, FBC_FENCE_OFF, 219 + if (DISPLAY_VER(i915) == 4) { 220 + intel_de_write(i915, FBC_CONTROL2, 221 + i965_fbc_ctl2(fbc)); 222 + intel_de_write(i915, FBC_FENCE_OFF, 207 223 params->fence_y_offset); 208 224 } 209 225 210 - /* enable it... */ 211 - fbc_ctl = FBC_CTL_INTERVAL(params->interval); 212 - fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; 213 - if (IS_I945GM(dev_priv)) 214 - fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 215 - fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff); 216 - if (params->fence_id >= 0) 217 - fbc_ctl |= FBC_CTL_FENCENO(params->fence_id); 218 - intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); 226 + intel_de_write(i915, FBC_CONTROL, 227 + FBC_CTL_EN | i8xx_fbc_ctl(fbc)); 219 228 } 220 229 221 - static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) 230 + static bool i8xx_fbc_is_active(struct intel_fbc *fbc) 222 231 { 223 - return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN; 232 + return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN; 224 233 } 225 234 226 - static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915) 235 + static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc) 227 236 { 228 - switch (i915->fbc.limit) { 237 + return intel_de_read(fbc->i915, FBC_STATUS) & 238 + (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); 239 + } 240 + 241 + static void i8xx_fbc_nuke(struct intel_fbc *fbc) 242 + { 243 + struct intel_fbc_reg_params *params = &fbc->params; 244 + enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; 245 + struct drm_i915_private *dev_priv = fbc->i915; 246 + 247 + spin_lock_irq(&dev_priv->uncore.lock); 248 + intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 249 + intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane))); 250 + 
spin_unlock_irq(&dev_priv->uncore.lock); 251 + } 252 + 253 + static void i8xx_fbc_program_cfb(struct intel_fbc *fbc) 254 + { 255 + struct drm_i915_private *i915 = fbc->i915; 256 + 257 + GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start, 258 + fbc->compressed_fb.start, U32_MAX)); 259 + GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start, 260 + fbc->compressed_llb.start, U32_MAX)); 261 + 262 + intel_de_write(i915, FBC_CFB_BASE, 263 + i915->dsm.start + fbc->compressed_fb.start); 264 + intel_de_write(i915, FBC_LL_BASE, 265 + i915->dsm.start + fbc->compressed_llb.start); 266 + } 267 + 268 + static const struct intel_fbc_funcs i8xx_fbc_funcs = { 269 + .activate = i8xx_fbc_activate, 270 + .deactivate = i8xx_fbc_deactivate, 271 + .is_active = i8xx_fbc_is_active, 272 + .is_compressing = i8xx_fbc_is_compressing, 273 + .nuke = i8xx_fbc_nuke, 274 + .program_cfb = i8xx_fbc_program_cfb, 275 + }; 276 + 277 + static void i965_fbc_nuke(struct intel_fbc *fbc) 278 + { 279 + struct intel_fbc_reg_params *params = &fbc->params; 280 + enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; 281 + struct drm_i915_private *dev_priv = fbc->i915; 282 + 283 + spin_lock_irq(&dev_priv->uncore.lock); 284 + intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 285 + intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane))); 286 + spin_unlock_irq(&dev_priv->uncore.lock); 287 + } 288 + 289 + static const struct intel_fbc_funcs i965_fbc_funcs = { 290 + .activate = i8xx_fbc_activate, 291 + .deactivate = i8xx_fbc_deactivate, 292 + .is_active = i8xx_fbc_is_active, 293 + .is_compressing = i8xx_fbc_is_compressing, 294 + .nuke = i965_fbc_nuke, 295 + .program_cfb = i8xx_fbc_program_cfb, 296 + }; 297 + 298 + static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc) 299 + { 300 + switch (fbc->limit) { 229 301 default: 230 - MISSING_CASE(i915->fbc.limit); 302 + MISSING_CASE(fbc->limit); 231 303 fallthrough; 232 304 case 1: 233 305 return DPFC_CTL_LIMIT_1X; ··· 322 226 } 323 227 } 324 228 325 - static void 
g4x_fbc_activate(struct drm_i915_private *dev_priv) 229 + static u32 g4x_dpfc_ctl(struct intel_fbc *fbc) 326 230 { 327 - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; 328 - u32 dpfc_ctl; 329 - 330 - dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN; 331 - 332 - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); 333 - 334 - if (params->fence_id >= 0) { 335 - dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; 336 - intel_de_write(dev_priv, DPFC_FENCE_YOFF, 337 - params->fence_y_offset); 338 - } else { 339 - intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); 340 - } 341 - 342 - /* enable it... */ 343 - intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 344 - } 345 - 346 - static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) 347 - { 348 - u32 dpfc_ctl; 349 - 350 - /* Disable compression */ 351 - dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL); 352 - if (dpfc_ctl & DPFC_CTL_EN) { 353 - dpfc_ctl &= ~DPFC_CTL_EN; 354 - intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl); 355 - } 356 - } 357 - 358 - static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) 359 - { 360 - return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN; 361 - } 362 - 363 - static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv) 364 - { 365 - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; 366 - enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; 367 - 368 - spin_lock_irq(&dev_priv->uncore.lock); 369 - intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 370 - intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane))); 371 - spin_unlock_irq(&dev_priv->uncore.lock); 372 - } 373 - 374 - static void i965_fbc_recompress(struct drm_i915_private *dev_priv) 375 - { 376 - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; 377 - enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; 378 - 379 - spin_lock_irq(&dev_priv->uncore.lock); 380 - intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 381 - intel_de_read_fw(dev_priv, 
DSPSURF(i9xx_plane))); 382 - spin_unlock_irq(&dev_priv->uncore.lock); 383 - } 384 - 385 - /* This function forces a CFB recompression through the nuke operation. */ 386 - static void snb_fbc_recompress(struct drm_i915_private *dev_priv) 387 - { 388 - intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE); 389 - intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE); 390 - } 391 - 392 - static void intel_fbc_recompress(struct drm_i915_private *dev_priv) 393 - { 394 - struct intel_fbc *fbc = &dev_priv->fbc; 395 - 396 - trace_intel_fbc_nuke(fbc->crtc); 397 - 398 - if (DISPLAY_VER(dev_priv) >= 6) 399 - snb_fbc_recompress(dev_priv); 400 - else if (DISPLAY_VER(dev_priv) >= 4) 401 - i965_fbc_recompress(dev_priv); 402 - else 403 - i8xx_fbc_recompress(dev_priv); 404 - } 405 - 406 - static void ilk_fbc_activate(struct drm_i915_private *dev_priv) 407 - { 408 - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; 409 - u32 dpfc_ctl; 410 - 411 - dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane); 412 - 413 - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); 414 - 415 - if (params->fence_id >= 0) { 416 - dpfc_ctl |= DPFC_CTL_FENCE_EN; 417 - if (IS_IRONLAKE(dev_priv)) 418 - dpfc_ctl |= params->fence_id; 419 - if (IS_SANDYBRIDGE(dev_priv)) { 420 - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 421 - SNB_CPU_FENCE_ENABLE | params->fence_id); 422 - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 423 - params->fence_y_offset); 424 - } 425 - } else { 426 - if (IS_SANDYBRIDGE(dev_priv)) { 427 - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); 428 - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); 429 - } 430 - } 431 - 432 - intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, 433 - params->fence_y_offset); 434 - /* enable it... 
*/ 435 - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 436 - } 437 - 438 - static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) 439 - { 440 - u32 dpfc_ctl; 441 - 442 - /* Disable compression */ 443 - dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL); 444 - if (dpfc_ctl & DPFC_CTL_EN) { 445 - dpfc_ctl &= ~DPFC_CTL_EN; 446 - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl); 447 - } 448 - } 449 - 450 - static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) 451 - { 452 - return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN; 453 - } 454 - 455 - static void gen7_fbc_activate(struct drm_i915_private *dev_priv) 456 - { 457 - struct intel_fbc *fbc = &dev_priv->fbc; 458 231 const struct intel_fbc_reg_params *params = &fbc->params; 232 + struct drm_i915_private *i915 = fbc->i915; 459 233 u32 dpfc_ctl; 460 234 461 - if (DISPLAY_VER(dev_priv) >= 10) { 462 - u32 val = 0; 235 + dpfc_ctl = g4x_dpfc_ctl_limit(fbc) | 236 + DPFC_CTL_PLANE_G4X(params->crtc.i9xx_plane); 463 237 464 - if (params->override_cfb_stride) 465 - val |= FBC_STRIDE_OVERRIDE | 466 - FBC_STRIDE(params->override_cfb_stride / fbc->limit); 467 - 468 - intel_de_write(dev_priv, GLK_FBC_STRIDE, val); 469 - } else if (DISPLAY_VER(dev_priv) == 9) { 470 - u32 val = 0; 471 - 472 - /* Display WA #0529: skl, kbl, bxt. 
*/ 473 - if (params->override_cfb_stride) 474 - val |= CHICKEN_FBC_STRIDE_OVERRIDE | 475 - CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit); 476 - 477 - intel_de_rmw(dev_priv, CHICKEN_MISC_4, 478 - CHICKEN_FBC_STRIDE_OVERRIDE | 479 - CHICKEN_FBC_STRIDE_MASK, val); 480 - } 481 - 482 - dpfc_ctl = 0; 483 - if (IS_IVYBRIDGE(dev_priv)) 484 - dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane); 485 - 486 - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); 238 + if (IS_G4X(i915)) 239 + dpfc_ctl |= DPFC_CTL_SR_EN; 487 240 488 241 if (params->fence_id >= 0) { 489 - dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; 490 - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 491 - SNB_CPU_FENCE_ENABLE | params->fence_id); 492 - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 493 - params->fence_y_offset); 494 - } else if (dev_priv->ggtt.num_fences) { 495 - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); 496 - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); 242 + dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X; 243 + 244 + if (DISPLAY_VER(i915) < 6) 245 + dpfc_ctl |= DPFC_CTL_FENCENO(params->fence_id); 497 246 } 498 247 499 - if (dev_priv->fbc.false_color) 500 - dpfc_ctl |= FBC_CTL_FALSE_COLOR; 501 - 502 - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 248 + return dpfc_ctl; 503 249 } 504 250 505 - static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) 251 + static void g4x_fbc_activate(struct intel_fbc *fbc) 506 252 { 507 - if (DISPLAY_VER(dev_priv) >= 5) 508 - return ilk_fbc_is_active(dev_priv); 509 - else if (IS_GM45(dev_priv)) 510 - return g4x_fbc_is_active(dev_priv); 511 - else 512 - return i8xx_fbc_is_active(dev_priv); 253 + const struct intel_fbc_reg_params *params = &fbc->params; 254 + struct drm_i915_private *i915 = fbc->i915; 255 + 256 + intel_de_write(i915, DPFC_FENCE_YOFF, 257 + params->fence_y_offset); 258 + 259 + intel_de_write(i915, DPFC_CONTROL, 260 + DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); 513 261 } 514 262 515 - static void intel_fbc_hw_activate(struct 
drm_i915_private *dev_priv) 263 + static void g4x_fbc_deactivate(struct intel_fbc *fbc) 516 264 { 517 - struct intel_fbc *fbc = &dev_priv->fbc; 265 + struct drm_i915_private *i915 = fbc->i915; 266 + u32 dpfc_ctl; 518 267 268 + /* Disable compression */ 269 + dpfc_ctl = intel_de_read(i915, DPFC_CONTROL); 270 + if (dpfc_ctl & DPFC_CTL_EN) { 271 + dpfc_ctl &= ~DPFC_CTL_EN; 272 + intel_de_write(i915, DPFC_CONTROL, dpfc_ctl); 273 + } 274 + } 275 + 276 + static bool g4x_fbc_is_active(struct intel_fbc *fbc) 277 + { 278 + return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN; 279 + } 280 + 281 + static bool g4x_fbc_is_compressing(struct intel_fbc *fbc) 282 + { 283 + return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK; 284 + } 285 + 286 + static void g4x_fbc_program_cfb(struct intel_fbc *fbc) 287 + { 288 + struct drm_i915_private *i915 = fbc->i915; 289 + 290 + intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start); 291 + } 292 + 293 + static const struct intel_fbc_funcs g4x_fbc_funcs = { 294 + .activate = g4x_fbc_activate, 295 + .deactivate = g4x_fbc_deactivate, 296 + .is_active = g4x_fbc_is_active, 297 + .is_compressing = g4x_fbc_is_compressing, 298 + .nuke = i965_fbc_nuke, 299 + .program_cfb = g4x_fbc_program_cfb, 300 + }; 301 + 302 + static void ilk_fbc_activate(struct intel_fbc *fbc) 303 + { 304 + struct intel_fbc_reg_params *params = &fbc->params; 305 + struct drm_i915_private *i915 = fbc->i915; 306 + 307 + intel_de_write(i915, ILK_DPFC_FENCE_YOFF, 308 + params->fence_y_offset); 309 + 310 + intel_de_write(i915, ILK_DPFC_CONTROL, 311 + DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); 312 + } 313 + 314 + static void ilk_fbc_deactivate(struct intel_fbc *fbc) 315 + { 316 + struct drm_i915_private *i915 = fbc->i915; 317 + u32 dpfc_ctl; 318 + 319 + /* Disable compression */ 320 + dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL); 321 + if (dpfc_ctl & DPFC_CTL_EN) { 322 + dpfc_ctl &= ~DPFC_CTL_EN; 323 + intel_de_write(i915, ILK_DPFC_CONTROL, dpfc_ctl); 324 + } 325 + 
} 326 + 327 + static bool ilk_fbc_is_active(struct intel_fbc *fbc) 328 + { 329 + return intel_de_read(fbc->i915, ILK_DPFC_CONTROL) & DPFC_CTL_EN; 330 + } 331 + 332 + static bool ilk_fbc_is_compressing(struct intel_fbc *fbc) 333 + { 334 + return intel_de_read(fbc->i915, ILK_DPFC_STATUS) & DPFC_COMP_SEG_MASK; 335 + } 336 + 337 + static void ilk_fbc_program_cfb(struct intel_fbc *fbc) 338 + { 339 + struct drm_i915_private *i915 = fbc->i915; 340 + 341 + intel_de_write(i915, ILK_DPFC_CB_BASE, fbc->compressed_fb.start); 342 + } 343 + 344 + static const struct intel_fbc_funcs ilk_fbc_funcs = { 345 + .activate = ilk_fbc_activate, 346 + .deactivate = ilk_fbc_deactivate, 347 + .is_active = ilk_fbc_is_active, 348 + .is_compressing = ilk_fbc_is_compressing, 349 + .nuke = i965_fbc_nuke, 350 + .program_cfb = ilk_fbc_program_cfb, 351 + }; 352 + 353 + static void snb_fbc_program_fence(struct intel_fbc *fbc) 354 + { 355 + const struct intel_fbc_reg_params *params = &fbc->params; 356 + struct drm_i915_private *i915 = fbc->i915; 357 + u32 ctl = 0; 358 + 359 + if (params->fence_id >= 0) 360 + ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(params->fence_id); 361 + 362 + intel_de_write(i915, SNB_DPFC_CTL_SA, ctl); 363 + intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, params->fence_y_offset); 364 + } 365 + 366 + static void snb_fbc_activate(struct intel_fbc *fbc) 367 + { 368 + snb_fbc_program_fence(fbc); 369 + 370 + ilk_fbc_activate(fbc); 371 + } 372 + 373 + static void snb_fbc_nuke(struct intel_fbc *fbc) 374 + { 375 + struct drm_i915_private *i915 = fbc->i915; 376 + 377 + intel_de_write(i915, MSG_FBC_REND_STATE, FBC_REND_NUKE); 378 + intel_de_posting_read(i915, MSG_FBC_REND_STATE); 379 + } 380 + 381 + static const struct intel_fbc_funcs snb_fbc_funcs = { 382 + .activate = snb_fbc_activate, 383 + .deactivate = ilk_fbc_deactivate, 384 + .is_active = ilk_fbc_is_active, 385 + .is_compressing = ilk_fbc_is_compressing, 386 + .nuke = snb_fbc_nuke, 387 + .program_cfb = ilk_fbc_program_cfb, 388 + }; 
389 + 390 + static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc) 391 + { 392 + const struct intel_fbc_reg_params *params = &fbc->params; 393 + struct drm_i915_private *i915 = fbc->i915; 394 + u32 val = 0; 395 + 396 + if (params->override_cfb_stride) 397 + val |= FBC_STRIDE_OVERRIDE | 398 + FBC_STRIDE(params->override_cfb_stride / fbc->limit); 399 + 400 + intel_de_write(i915, GLK_FBC_STRIDE, val); 401 + } 402 + 403 + static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) 404 + { 405 + const struct intel_fbc_reg_params *params = &fbc->params; 406 + struct drm_i915_private *i915 = fbc->i915; 407 + u32 val = 0; 408 + 409 + /* Display WA #0529: skl, kbl, bxt. */ 410 + if (params->override_cfb_stride) 411 + val |= CHICKEN_FBC_STRIDE_OVERRIDE | 412 + CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit); 413 + 414 + intel_de_rmw(i915, CHICKEN_MISC_4, 415 + CHICKEN_FBC_STRIDE_OVERRIDE | 416 + CHICKEN_FBC_STRIDE_MASK, val); 417 + } 418 + 419 + static u32 ivb_dpfc_ctl(struct intel_fbc *fbc) 420 + { 421 + const struct intel_fbc_reg_params *params = &fbc->params; 422 + struct drm_i915_private *i915 = fbc->i915; 423 + u32 dpfc_ctl; 424 + 425 + dpfc_ctl = g4x_dpfc_ctl_limit(fbc); 426 + 427 + if (IS_IVYBRIDGE(i915)) 428 + dpfc_ctl |= DPFC_CTL_PLANE_IVB(params->crtc.i9xx_plane); 429 + 430 + if (params->fence_id >= 0) 431 + dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB; 432 + 433 + if (fbc->false_color) 434 + dpfc_ctl |= DPFC_CTL_FALSE_COLOR; 435 + 436 + return dpfc_ctl; 437 + } 438 + 439 + static void ivb_fbc_activate(struct intel_fbc *fbc) 440 + { 441 + struct drm_i915_private *i915 = fbc->i915; 442 + 443 + if (DISPLAY_VER(i915) >= 10) 444 + glk_fbc_program_cfb_stride(fbc); 445 + else if (DISPLAY_VER(i915) == 9) 446 + skl_fbc_program_cfb_stride(fbc); 447 + 448 + if (i915->ggtt.num_fences) 449 + snb_fbc_program_fence(fbc); 450 + 451 + intel_de_write(i915, ILK_DPFC_CONTROL, 452 + DPFC_CTL_EN | ivb_dpfc_ctl(fbc)); 453 + } 454 + 455 + static bool 
ivb_fbc_is_compressing(struct intel_fbc *fbc) 456 + { 457 + return intel_de_read(fbc->i915, ILK_DPFC_STATUS2) & DPFC_COMP_SEG_MASK_IVB; 458 + } 459 + 460 + static void ivb_fbc_set_false_color(struct intel_fbc *fbc, 461 + bool enable) 462 + { 463 + intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL, 464 + DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0); 465 + } 466 + 467 + static const struct intel_fbc_funcs ivb_fbc_funcs = { 468 + .activate = ivb_fbc_activate, 469 + .deactivate = ilk_fbc_deactivate, 470 + .is_active = ilk_fbc_is_active, 471 + .is_compressing = ivb_fbc_is_compressing, 472 + .nuke = snb_fbc_nuke, 473 + .program_cfb = ilk_fbc_program_cfb, 474 + .set_false_color = ivb_fbc_set_false_color, 475 + }; 476 + 477 + static bool intel_fbc_hw_is_active(struct intel_fbc *fbc) 478 + { 479 + return fbc->funcs->is_active(fbc); 480 + } 481 + 482 + static void intel_fbc_hw_activate(struct intel_fbc *fbc) 483 + { 519 484 trace_intel_fbc_activate(fbc->crtc); 520 485 521 486 fbc->active = true; 522 487 fbc->activated = true; 523 488 524 - if (DISPLAY_VER(dev_priv) >= 7) 525 - gen7_fbc_activate(dev_priv); 526 - else if (DISPLAY_VER(dev_priv) >= 5) 527 - ilk_fbc_activate(dev_priv); 528 - else if (IS_GM45(dev_priv)) 529 - g4x_fbc_activate(dev_priv); 530 - else 531 - i8xx_fbc_activate(dev_priv); 489 + fbc->funcs->activate(fbc); 532 490 } 533 491 534 - static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) 492 + static void intel_fbc_hw_deactivate(struct intel_fbc *fbc) 535 493 { 536 - struct intel_fbc *fbc = &dev_priv->fbc; 537 - 538 494 trace_intel_fbc_deactivate(fbc->crtc); 539 495 540 496 fbc->active = false; 541 497 542 - if (DISPLAY_VER(dev_priv) >= 5) 543 - ilk_fbc_deactivate(dev_priv); 544 - else if (IS_GM45(dev_priv)) 545 - g4x_fbc_deactivate(dev_priv); 546 - else 547 - i8xx_fbc_deactivate(dev_priv); 498 + fbc->funcs->deactivate(fbc); 499 + } 500 + 501 + bool intel_fbc_is_compressing(struct intel_fbc *fbc) 502 + { 503 + return 
fbc->funcs->is_compressing(fbc); 504 + } 505 + 506 + static void intel_fbc_nuke(struct intel_fbc *fbc) 507 + { 508 + trace_intel_fbc_nuke(fbc->crtc); 509 + 510 + fbc->funcs->nuke(fbc); 511 + } 512 + 513 + int intel_fbc_set_false_color(struct intel_fbc *fbc, bool enable) 514 + { 515 + if (!fbc->funcs || !fbc->funcs->set_false_color) 516 + return -ENODEV; 517 + 518 + mutex_lock(&fbc->lock); 519 + 520 + fbc->false_color = enable; 521 + 522 + fbc->funcs->set_false_color(fbc, enable); 523 + 524 + mutex_unlock(&fbc->lock); 525 + 526 + return 0; 548 527 } 549 528 550 529 /** 551 530 * intel_fbc_is_active - Is FBC active? 552 - * @dev_priv: i915 device instance 531 + * @fbc: The FBC instance 553 532 * 554 533 * This function is used to verify the current state of FBC. 555 534 * 556 535 * FIXME: This should be tracked in the plane config eventually 557 536 * instead of queried at runtime for most callers. 558 537 */ 559 - bool intel_fbc_is_active(struct drm_i915_private *dev_priv) 538 + bool intel_fbc_is_active(struct intel_fbc *fbc) 560 539 { 561 - return dev_priv->fbc.active; 540 + return fbc->active; 562 541 } 563 542 564 - static void intel_fbc_activate(struct drm_i915_private *dev_priv) 543 + static void intel_fbc_activate(struct intel_fbc *fbc) 565 544 { 566 - intel_fbc_hw_activate(dev_priv); 567 - intel_fbc_recompress(dev_priv); 545 + intel_fbc_hw_activate(fbc); 546 + intel_fbc_nuke(fbc); 568 547 } 569 548 570 - static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, 571 - const char *reason) 549 + static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason) 572 550 { 573 - struct intel_fbc *fbc = &dev_priv->fbc; 551 + struct drm_i915_private *i915 = fbc->i915; 574 552 575 - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); 553 + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); 576 554 577 555 if (fbc->active) 578 - intel_fbc_hw_deactivate(dev_priv); 556 + intel_fbc_hw_deactivate(fbc); 579 557 580 558 fbc->no_fbc_reason = 
reason; 581 559 } ··· 662 492 return BIT_ULL(32); 663 493 } 664 494 665 - static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv) 495 + static u64 intel_fbc_stolen_end(struct drm_i915_private *i915) 666 496 { 667 497 u64 end; 668 498 ··· 670 500 * reserved range size, so it always assumes the maximum (8mb) is used. 671 501 * If we enable FBC using a CFB on that memory range we'll get FIFO 672 502 * underruns, even if that range is not reserved by the BIOS. */ 673 - if (IS_BROADWELL(dev_priv) || (DISPLAY_VER(dev_priv) == 9 && 674 - !IS_BROXTON(dev_priv))) 675 - end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024; 503 + if (IS_BROADWELL(i915) || 504 + (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915))) 505 + end = resource_size(&i915->dsm) - 8 * 1024 * 1024; 676 506 else 677 507 end = U64_MAX; 678 508 679 - return min(end, intel_fbc_cfb_base_max(dev_priv)); 509 + return min(end, intel_fbc_cfb_base_max(i915)); 680 510 } 681 511 682 512 static int intel_fbc_min_limit(int fb_cpp) ··· 684 514 return fb_cpp == 2 ? 2 : 1; 685 515 } 686 516 687 - static int intel_fbc_max_limit(struct drm_i915_private *dev_priv) 517 + static int intel_fbc_max_limit(struct drm_i915_private *i915) 688 518 { 689 519 /* WaFbcOnly1to1Ratio:ctg */ 690 - if (IS_G4X(dev_priv)) 520 + if (IS_G4X(i915)) 691 521 return 1; 692 522 693 523 /* ··· 697 527 return 4; 698 528 } 699 529 700 - static int find_compression_limit(struct drm_i915_private *dev_priv, 530 + static int find_compression_limit(struct intel_fbc *fbc, 701 531 unsigned int size, int min_limit) 702 532 { 703 - struct intel_fbc *fbc = &dev_priv->fbc; 704 - u64 end = intel_fbc_stolen_end(dev_priv); 533 + struct drm_i915_private *i915 = fbc->i915; 534 + u64 end = intel_fbc_stolen_end(i915); 705 535 int ret, limit = min_limit; 706 536 707 537 size /= limit; 708 538 709 539 /* Try to over-allocate to reduce reallocations and fragmentation. 
*/ 710 - ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb, 540 + ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, 711 541 size <<= 1, 4096, 0, end); 712 542 if (ret == 0) 713 543 return limit; 714 544 715 - for (; limit <= intel_fbc_max_limit(dev_priv); limit <<= 1) { 716 - ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb, 545 + for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) { 546 + ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, 717 547 size >>= 1, 4096, 0, end); 718 548 if (ret == 0) 719 549 return limit; ··· 722 552 return 0; 723 553 } 724 554 725 - static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, 555 + static int intel_fbc_alloc_cfb(struct intel_fbc *fbc, 726 556 unsigned int size, int min_limit) 727 557 { 728 - struct intel_fbc *fbc = &dev_priv->fbc; 558 + struct drm_i915_private *i915 = fbc->i915; 729 559 int ret; 730 560 731 - drm_WARN_ON(&dev_priv->drm, 561 + drm_WARN_ON(&i915->drm, 732 562 drm_mm_node_allocated(&fbc->compressed_fb)); 733 - drm_WARN_ON(&dev_priv->drm, 563 + drm_WARN_ON(&i915->drm, 734 564 drm_mm_node_allocated(&fbc->compressed_llb)); 735 565 736 - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) { 737 - ret = i915_gem_stolen_insert_node(dev_priv, &fbc->compressed_llb, 566 + if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) { 567 + ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb, 738 568 4096, 4096); 739 569 if (ret) 740 570 goto err; 741 571 } 742 572 743 - ret = find_compression_limit(dev_priv, size, min_limit); 573 + ret = find_compression_limit(fbc, size, min_limit); 744 574 if (!ret) 745 575 goto err_llb; 746 576 else if (ret > min_limit) 747 - drm_info_once(&dev_priv->drm, 577 + drm_info_once(&i915->drm, 748 578 "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. 
Try to increase stolen memory size if available in BIOS.\n"); 749 579 750 580 fbc->limit = ret; 751 581 752 - drm_dbg_kms(&dev_priv->drm, 582 + drm_dbg_kms(&i915->drm, 753 583 "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n", 754 584 fbc->compressed_fb.size, fbc->limit); 755 585 ··· 757 587 758 588 err_llb: 759 589 if (drm_mm_node_allocated(&fbc->compressed_llb)) 760 - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb); 590 + i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); 761 591 err: 762 - if (drm_mm_initialized(&dev_priv->mm.stolen)) 763 - drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); 592 + if (drm_mm_initialized(&i915->mm.stolen)) 593 + drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); 764 594 return -ENOSPC; 765 595 } 766 596 767 - static void intel_fbc_program_cfb(struct drm_i915_private *dev_priv) 597 + static void intel_fbc_program_cfb(struct intel_fbc *fbc) 768 598 { 769 - struct intel_fbc *fbc = &dev_priv->fbc; 770 - 771 - if (DISPLAY_VER(dev_priv) >= 5) { 772 - intel_de_write(dev_priv, ILK_DPFC_CB_BASE, 773 - fbc->compressed_fb.start); 774 - } else if (IS_GM45(dev_priv)) { 775 - intel_de_write(dev_priv, DPFC_CB_BASE, 776 - fbc->compressed_fb.start); 777 - } else { 778 - GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, 779 - fbc->compressed_fb.start, 780 - U32_MAX)); 781 - GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, 782 - fbc->compressed_llb.start, 783 - U32_MAX)); 784 - 785 - intel_de_write(dev_priv, FBC_CFB_BASE, 786 - dev_priv->dsm.start + fbc->compressed_fb.start); 787 - intel_de_write(dev_priv, FBC_LL_BASE, 788 - dev_priv->dsm.start + fbc->compressed_llb.start); 789 - } 599 + 
fbc->funcs->program_cfb(fbc); 790 600 } 791 601 792 - static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) 602 + static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) 793 603 { 794 - struct intel_fbc *fbc = &dev_priv->fbc; 604 + struct drm_i915_private *i915 = fbc->i915; 795 605 796 - if (WARN_ON(intel_fbc_hw_is_active(dev_priv))) 606 + if (WARN_ON(intel_fbc_hw_is_active(fbc))) 797 607 return; 798 608 799 609 if (drm_mm_node_allocated(&fbc->compressed_llb)) 800 - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb); 610 + i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); 801 611 if (drm_mm_node_allocated(&fbc->compressed_fb)) 802 - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); 612 + i915_gem_stolen_remove_node(i915, &fbc->compressed_fb); 803 613 } 804 614 805 - void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) 615 + void intel_fbc_cleanup(struct drm_i915_private *i915) 806 616 { 807 - struct intel_fbc *fbc = &dev_priv->fbc; 617 + struct intel_fbc *fbc = &i915->fbc; 808 618 809 - if (!HAS_FBC(dev_priv)) 619 + if (!HAS_FBC(i915)) 810 620 return; 811 621 812 622 mutex_lock(&fbc->lock); 813 - __intel_fbc_cleanup_cfb(dev_priv); 623 + __intel_fbc_cleanup_cfb(fbc); 814 624 mutex_unlock(&fbc->lock); 815 625 } 816 626 817 - static bool stride_is_valid(struct drm_i915_private *dev_priv, 627 + static bool stride_is_valid(struct drm_i915_private *i915, 818 628 u64 modifier, unsigned int stride) 819 629 { 820 630 /* This should have been caught earlier. */ 821 - if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0)) 631 + if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0)) 822 632 return false; 823 633 824 634 /* Below are the additional FBC restrictions. 
*/ 825 635 if (stride < 512) 826 636 return false; 827 637 828 - if (DISPLAY_VER(dev_priv) == 2 || DISPLAY_VER(dev_priv) == 3) 638 + if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3) 829 639 return stride == 4096 || stride == 8192; 830 640 831 - if (DISPLAY_VER(dev_priv) == 4 && !IS_G4X(dev_priv) && stride < 2048) 641 + if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048) 832 642 return false; 833 643 834 644 /* Display WA #1105: skl,bxt,kbl,cfl,glk */ 835 - if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) && 645 + if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) && 836 646 modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) 837 647 return false; 838 648 ··· 822 672 return true; 823 673 } 824 674 825 - static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, 675 + static bool pixel_format_is_valid(struct drm_i915_private *i915, 826 676 u32 pixel_format) 827 677 { 828 678 switch (pixel_format) { ··· 832 682 case DRM_FORMAT_XRGB1555: 833 683 case DRM_FORMAT_RGB565: 834 684 /* 16bpp not supported on gen2 */ 835 - if (DISPLAY_VER(dev_priv) == 2) 685 + if (DISPLAY_VER(i915) == 2) 836 686 return false; 837 687 /* WaFbcOnly1to1Ratio:ctg */ 838 - if (IS_G4X(dev_priv)) 688 + if (IS_G4X(i915)) 839 689 return false; 840 690 return true; 841 691 default: ··· 843 693 } 844 694 } 845 695 846 - static bool rotation_is_valid(struct drm_i915_private *dev_priv, 696 + static bool rotation_is_valid(struct drm_i915_private *i915, 847 697 u32 pixel_format, unsigned int rotation) 848 698 { 849 - if (DISPLAY_VER(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 && 699 + if (DISPLAY_VER(i915) >= 9 && pixel_format == DRM_FORMAT_RGB565 && 850 700 drm_rotation_90_or_270(rotation)) 851 701 return false; 852 - else if (DISPLAY_VER(dev_priv) <= 4 && !IS_G4X(dev_priv) && 702 + else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) && 853 703 rotation != DRM_MODE_ROTATE_0) 854 704 return false; 855 705 ··· 862 712 * the X and Y offset registers. 
That's why we include the src x/y offsets 863 713 * instead of just looking at the plane size. 864 714 */ 865 - static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) 715 + static bool intel_fbc_hw_tracking_covers_screen(struct intel_fbc *fbc, 716 + struct intel_crtc *crtc) 866 717 { 867 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 868 - struct intel_fbc *fbc = &dev_priv->fbc; 718 + struct drm_i915_private *i915 = fbc->i915; 869 719 unsigned int effective_w, effective_h, max_w, max_h; 870 720 871 - if (DISPLAY_VER(dev_priv) >= 10) { 721 + if (DISPLAY_VER(i915) >= 10) { 872 722 max_w = 5120; 873 723 max_h = 4096; 874 - } else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { 724 + } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) { 875 725 max_w = 4096; 876 726 max_h = 4096; 877 - } else if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) { 727 + } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) { 878 728 max_w = 4096; 879 729 max_h = 2048; 880 730 } else { ··· 890 740 return effective_w <= max_w && effective_h <= max_h; 891 741 } 892 742 893 - static bool tiling_is_valid(struct drm_i915_private *dev_priv, 743 + static bool tiling_is_valid(struct drm_i915_private *i915, 894 744 u64 modifier) 895 745 { 896 746 switch (modifier) { 897 747 case DRM_FORMAT_MOD_LINEAR: 898 748 case I915_FORMAT_MOD_Y_TILED: 899 749 case I915_FORMAT_MOD_Yf_TILED: 900 - return DISPLAY_VER(dev_priv) >= 9; 750 + return DISPLAY_VER(i915) >= 9; 901 751 case I915_FORMAT_MOD_X_TILED: 902 752 return true; 903 753 default: ··· 909 759 const struct intel_crtc_state *crtc_state, 910 760 const struct intel_plane_state *plane_state) 911 761 { 912 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 913 - struct intel_fbc *fbc = &dev_priv->fbc; 762 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 763 + struct intel_fbc *fbc = &i915->fbc; 914 764 struct intel_fbc_state_cache *cache = &fbc->state_cache; 915 765 struct drm_framebuffer 
*fb = plane_state->hw.fb; 916 766 ··· 919 769 return; 920 770 921 771 cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags; 922 - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 772 + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 923 773 cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; 924 774 925 775 cache->plane.rotation = plane_state->hw.rotation; ··· 944 794 945 795 cache->fence_y_offset = intel_plane_fence_y_offset(plane_state); 946 796 947 - drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE && 797 + drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE && 948 798 !plane_state->ggtt_vma->fence); 949 799 950 800 if (plane_state->flags & PLANE_HAS_FENCE && ··· 956 806 cache->psr2_active = crtc_state->has_psr2; 957 807 } 958 808 959 - static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) 809 + static bool intel_fbc_cfb_size_changed(struct intel_fbc *fbc) 960 810 { 961 - struct intel_fbc *fbc = &dev_priv->fbc; 962 - 963 - return intel_fbc_cfb_size(dev_priv, &fbc->state_cache) > 811 + return intel_fbc_cfb_size(fbc, &fbc->state_cache) > 964 812 fbc->compressed_fb.size * fbc->limit; 965 813 } 966 814 967 - static u16 intel_fbc_override_cfb_stride(struct drm_i915_private *dev_priv, 815 + static u16 intel_fbc_override_cfb_stride(struct intel_fbc *fbc, 968 816 const struct intel_fbc_state_cache *cache) 969 817 { 970 818 unsigned int stride = _intel_fbc_cfb_stride(cache); 971 - unsigned int stride_aligned = intel_fbc_cfb_stride(dev_priv, cache); 819 + unsigned int stride_aligned = intel_fbc_cfb_stride(fbc, cache); 972 820 973 821 /* 974 822 * Override stride in 64 byte units per 4 line segment. ··· 976 828 * we always need to use the override there. 
977 829 */ 978 830 if (stride != stride_aligned || 979 - (DISPLAY_VER(dev_priv) == 9 && 831 + (DISPLAY_VER(fbc->i915) == 9 && 980 832 cache->fb.modifier == DRM_FORMAT_MOD_LINEAR)) 981 833 return stride_aligned * 4 / 64; 982 834 983 835 return 0; 984 836 } 985 837 986 - static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) 838 + static bool intel_fbc_can_enable(struct intel_fbc *fbc) 987 839 { 988 - struct intel_fbc *fbc = &dev_priv->fbc; 840 + struct drm_i915_private *i915 = fbc->i915; 989 841 990 - if (intel_vgpu_active(dev_priv)) { 842 + if (intel_vgpu_active(i915)) { 991 843 fbc->no_fbc_reason = "VGPU is active"; 992 844 return false; 993 845 } 994 846 995 - if (!dev_priv->params.enable_fbc) { 847 + if (!i915->params.enable_fbc) { 996 848 fbc->no_fbc_reason = "disabled per module param or by default"; 997 849 return false; 998 850 } ··· 1007 859 1008 860 static bool intel_fbc_can_activate(struct intel_crtc *crtc) 1009 861 { 1010 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1011 - struct intel_fbc *fbc = &dev_priv->fbc; 862 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 863 + struct intel_fbc *fbc = &i915->fbc; 1012 864 struct intel_fbc_state_cache *cache = &fbc->state_cache; 1013 865 1014 - if (!intel_fbc_can_enable(dev_priv)) 866 + if (!intel_fbc_can_enable(fbc)) 1015 867 return false; 1016 868 1017 869 if (!cache->plane.visible) { ··· 1032 884 return false; 1033 885 } 1034 886 1035 - if (!intel_fbc_hw_tracking_covers_screen(crtc)) { 887 + if (!intel_fbc_hw_tracking_covers_screen(fbc, crtc)) { 1036 888 fbc->no_fbc_reason = "mode too large for compression"; 1037 889 return false; 1038 890 } ··· 1054 906 * For now this will effectively disable FBC with 90/270 degree 1055 907 * rotation. 
1056 908 */ 1057 - if (DISPLAY_VER(dev_priv) < 9 && cache->fence_id < 0) { 909 + if (DISPLAY_VER(i915) < 9 && cache->fence_id < 0) { 1058 910 fbc->no_fbc_reason = "framebuffer not tiled or fenced"; 1059 911 return false; 1060 912 } 1061 913 1062 - if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) { 914 + if (!pixel_format_is_valid(i915, cache->fb.format->format)) { 1063 915 fbc->no_fbc_reason = "pixel format is invalid"; 1064 916 return false; 1065 917 } 1066 918 1067 - if (!rotation_is_valid(dev_priv, cache->fb.format->format, 919 + if (!rotation_is_valid(i915, cache->fb.format->format, 1068 920 cache->plane.rotation)) { 1069 921 fbc->no_fbc_reason = "rotation unsupported"; 1070 922 return false; 1071 923 } 1072 924 1073 - if (!tiling_is_valid(dev_priv, cache->fb.modifier)) { 925 + if (!tiling_is_valid(i915, cache->fb.modifier)) { 1074 926 fbc->no_fbc_reason = "tiling unsupported"; 1075 927 return false; 1076 928 } 1077 929 1078 - if (!stride_is_valid(dev_priv, cache->fb.modifier, 930 + if (!stride_is_valid(i915, cache->fb.modifier, 1079 931 cache->fb.stride * cache->fb.format->cpp[0])) { 1080 932 fbc->no_fbc_reason = "framebuffer stride not supported"; 1081 933 return false; ··· 1088 940 } 1089 941 1090 942 /* WaFbcExceedCdClockThreshold:hsw,bdw */ 1091 - if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && 1092 - cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { 943 + if ((IS_HASWELL(i915) || IS_BROADWELL(i915)) && 944 + cache->crtc.hsw_bdw_pixel_rate >= i915->cdclk.hw.cdclk * 95 / 100) { 1093 945 fbc->no_fbc_reason = "pixel rate is too big"; 1094 946 return false; 1095 947 } ··· 1104 956 * we didn't get any invalidate/deactivate calls, but this would require 1105 957 * a lot of tracking just for a specific case. If we conclude it's an 1106 958 * important case, we can implement it later. 
*/ 1107 - if (intel_fbc_cfb_size_changed(dev_priv)) { 959 + if (intel_fbc_cfb_size_changed(fbc)) { 1108 960 fbc->no_fbc_reason = "CFB requirements changed"; 1109 961 return false; 1110 962 } ··· 1114 966 * having a Y offset that isn't divisible by 4 causes FIFO underrun 1115 967 * and screen flicker. 1116 968 */ 1117 - if (DISPLAY_VER(dev_priv) >= 9 && 969 + if (DISPLAY_VER(i915) >= 9 && 1118 970 (fbc->state_cache.plane.adjusted_y & 3)) { 1119 971 fbc->no_fbc_reason = "plane Y offset is misaligned"; 1120 972 return false; 1121 973 } 1122 974 1123 975 /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ 1124 - if (DISPLAY_VER(dev_priv) >= 11 && 976 + if (DISPLAY_VER(i915) >= 11 && 1125 977 (cache->plane.src_h + cache->plane.adjusted_y) % 4) { 1126 978 fbc->no_fbc_reason = "plane height + offset is non-modulo of 4"; 1127 979 return false; ··· 1132 984 * Recommendation is to keep this combination disabled 1133 985 * Bspec: 50422 HSD: 14010260002 1134 986 */ 1135 - if (fbc->state_cache.psr2_active && DISPLAY_VER(dev_priv) >= 12) { 987 + if (fbc->state_cache.psr2_active && DISPLAY_VER(i915) >= 12) { 1136 988 fbc->no_fbc_reason = "not supported with PSR2"; 1137 989 return false; 1138 990 } ··· 1140 992 return true; 1141 993 } 1142 994 1143 - static void intel_fbc_get_reg_params(struct intel_crtc *crtc, 1144 - struct intel_fbc_reg_params *params) 995 + static void intel_fbc_get_reg_params(struct intel_fbc *fbc, 996 + struct intel_crtc *crtc) 1145 997 { 1146 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1147 - struct intel_fbc *fbc = &dev_priv->fbc; 1148 - struct intel_fbc_state_cache *cache = &fbc->state_cache; 998 + const struct intel_fbc_state_cache *cache = &fbc->state_cache; 999 + struct intel_fbc_reg_params *params = &fbc->params; 1149 1000 1150 1001 /* Since all our fields are integer types, use memset here so the 1151 1002 * comparison function can rely on memcmp because the padding will be ··· 1163 1016 params->fb.modifier = cache->fb.modifier; 1164 1017 
params->fb.stride = cache->fb.stride; 1165 1018 1166 - params->cfb_stride = intel_fbc_cfb_stride(dev_priv, cache); 1167 - params->cfb_size = intel_fbc_cfb_size(dev_priv, cache); 1168 - params->override_cfb_stride = intel_fbc_override_cfb_stride(dev_priv, cache); 1019 + params->cfb_stride = intel_fbc_cfb_stride(fbc, cache); 1020 + params->cfb_size = intel_fbc_cfb_size(fbc, cache); 1021 + params->override_cfb_stride = intel_fbc_override_cfb_stride(fbc, cache); 1169 1022 1170 1023 params->plane_visible = cache->plane.visible; 1171 1024 } ··· 1173 1026 static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) 1174 1027 { 1175 1028 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1176 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1177 - const struct intel_fbc *fbc = &dev_priv->fbc; 1029 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 1030 + struct intel_fbc *fbc = &i915->fbc; 1178 1031 const struct intel_fbc_state_cache *cache = &fbc->state_cache; 1179 1032 const struct intel_fbc_reg_params *params = &fbc->params; 1180 1033 ··· 1196 1049 if (params->fb.stride != cache->fb.stride) 1197 1050 return false; 1198 1051 1199 - if (params->cfb_stride != intel_fbc_cfb_stride(dev_priv, cache)) 1052 + if (params->cfb_stride != intel_fbc_cfb_stride(fbc, cache)) 1200 1053 return false; 1201 1054 1202 - if (params->cfb_size != intel_fbc_cfb_size(dev_priv, cache)) 1055 + if (params->cfb_size != intel_fbc_cfb_size(fbc, cache)) 1203 1056 return false; 1204 1057 1205 - if (params->override_cfb_stride != intel_fbc_override_cfb_stride(dev_priv, cache)) 1058 + if (params->override_cfb_stride != intel_fbc_override_cfb_stride(fbc, cache)) 1206 1059 return false; 1207 1060 1208 1061 return true; ··· 1216 1069 intel_atomic_get_new_crtc_state(state, crtc); 1217 1070 const struct intel_plane_state *plane_state = 1218 1071 intel_atomic_get_new_plane_state(state, plane); 1219 - struct drm_i915_private *dev_priv = 
to_i915(crtc->base.dev); 1220 - struct intel_fbc *fbc = &dev_priv->fbc; 1072 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 1073 + struct intel_fbc *fbc = plane->fbc; 1221 1074 const char *reason = "update pending"; 1222 1075 bool need_vblank_wait = false; 1223 1076 1224 - if (!plane->has_fbc || !plane_state) 1077 + if (!fbc || !plane_state) 1225 1078 return need_vblank_wait; 1226 1079 1227 1080 mutex_lock(&fbc->lock); ··· 1233 1086 fbc->flip_pending = true; 1234 1087 1235 1088 if (!intel_fbc_can_flip_nuke(crtc_state)) { 1236 - intel_fbc_deactivate(dev_priv, reason); 1089 + intel_fbc_deactivate(fbc, reason); 1237 1090 1238 1091 /* 1239 1092 * Display WA #1198: glk+ ··· 1249 1102 * if at least one frame has already passed. 1250 1103 */ 1251 1104 if (fbc->activated && 1252 - DISPLAY_VER(dev_priv) >= 10) 1105 + DISPLAY_VER(i915) >= 10) 1253 1106 need_vblank_wait = true; 1254 1107 fbc->activated = false; 1255 1108 } ··· 1259 1112 return need_vblank_wait; 1260 1113 } 1261 1114 1262 - /** 1263 - * __intel_fbc_disable - disable FBC 1264 - * @dev_priv: i915 device instance 1265 - * 1266 - * This is the low level function that actually disables FBC. Callers should 1267 - * grab the FBC lock. 
1268 - */ 1269 - static void __intel_fbc_disable(struct drm_i915_private *dev_priv) 1115 + static void __intel_fbc_disable(struct intel_fbc *fbc) 1270 1116 { 1271 - struct intel_fbc *fbc = &dev_priv->fbc; 1117 + struct drm_i915_private *i915 = fbc->i915; 1272 1118 struct intel_crtc *crtc = fbc->crtc; 1273 1119 1274 - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); 1275 - drm_WARN_ON(&dev_priv->drm, !fbc->crtc); 1276 - drm_WARN_ON(&dev_priv->drm, fbc->active); 1120 + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); 1121 + drm_WARN_ON(&i915->drm, !fbc->crtc); 1122 + drm_WARN_ON(&i915->drm, fbc->active); 1277 1123 1278 - drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n", 1124 + drm_dbg_kms(&i915->drm, "Disabling FBC on pipe %c\n", 1279 1125 pipe_name(crtc->pipe)); 1280 1126 1281 - __intel_fbc_cleanup_cfb(dev_priv); 1127 + __intel_fbc_cleanup_cfb(fbc); 1282 1128 1283 1129 fbc->crtc = NULL; 1284 1130 } 1285 1131 1286 1132 static void __intel_fbc_post_update(struct intel_crtc *crtc) 1287 1133 { 1288 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1289 - struct intel_fbc *fbc = &dev_priv->fbc; 1134 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 1135 + struct intel_fbc *fbc = &i915->fbc; 1290 1136 1291 - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); 1137 + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); 1292 1138 1293 1139 if (fbc->crtc != crtc) 1294 1140 return; 1295 1141 1296 1142 fbc->flip_pending = false; 1297 1143 1298 - if (!dev_priv->params.enable_fbc) { 1299 - intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); 1300 - __intel_fbc_disable(dev_priv); 1144 + if (!i915->params.enable_fbc) { 1145 + intel_fbc_deactivate(fbc, "disabled at runtime per module param"); 1146 + __intel_fbc_disable(fbc); 1301 1147 1302 1148 return; 1303 1149 } 1304 1150 1305 - intel_fbc_get_reg_params(crtc, &fbc->params); 1151 + intel_fbc_get_reg_params(fbc, crtc); 1306 1152 1307 1153 if 
(!intel_fbc_can_activate(crtc)) 1308 1154 return; 1309 1155 1310 1156 if (!fbc->busy_bits) 1311 - intel_fbc_activate(dev_priv); 1157 + intel_fbc_activate(fbc); 1312 1158 else 1313 - intel_fbc_deactivate(dev_priv, "frontbuffer write"); 1159 + intel_fbc_deactivate(fbc, "frontbuffer write"); 1314 1160 } 1315 1161 1316 1162 void intel_fbc_post_update(struct intel_atomic_state *state, 1317 1163 struct intel_crtc *crtc) 1318 1164 { 1319 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1320 1165 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 1321 1166 const struct intel_plane_state *plane_state = 1322 1167 intel_atomic_get_new_plane_state(state, plane); 1323 - struct intel_fbc *fbc = &dev_priv->fbc; 1168 + struct intel_fbc *fbc = plane->fbc; 1324 1169 1325 - if (!plane->has_fbc || !plane_state) 1170 + if (!fbc || !plane_state) 1326 1171 return; 1327 1172 1328 1173 mutex_lock(&fbc->lock); ··· 1330 1191 return fbc->possible_framebuffer_bits; 1331 1192 } 1332 1193 1333 - void intel_fbc_invalidate(struct drm_i915_private *dev_priv, 1194 + void intel_fbc_invalidate(struct drm_i915_private *i915, 1334 1195 unsigned int frontbuffer_bits, 1335 1196 enum fb_op_origin origin) 1336 1197 { 1337 - struct intel_fbc *fbc = &dev_priv->fbc; 1198 + struct intel_fbc *fbc = &i915->fbc; 1338 1199 1339 - if (!HAS_FBC(dev_priv)) 1200 + if (!HAS_FBC(i915)) 1340 1201 return; 1341 1202 1342 1203 if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) ··· 1347 1208 fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; 1348 1209 1349 1210 if (fbc->crtc && fbc->busy_bits) 1350 - intel_fbc_deactivate(dev_priv, "frontbuffer write"); 1211 + intel_fbc_deactivate(fbc, "frontbuffer write"); 1351 1212 1352 1213 mutex_unlock(&fbc->lock); 1353 1214 } 1354 1215 1355 - void intel_fbc_flush(struct drm_i915_private *dev_priv, 1216 + void intel_fbc_flush(struct drm_i915_private *i915, 1356 1217 unsigned int frontbuffer_bits, enum fb_op_origin origin) 1357 1218 
{ 1358 - struct intel_fbc *fbc = &dev_priv->fbc; 1219 + struct intel_fbc *fbc = &i915->fbc; 1359 1220 1360 - if (!HAS_FBC(dev_priv)) 1221 + if (!HAS_FBC(i915)) 1361 1222 return; 1362 1223 1363 1224 mutex_lock(&fbc->lock); ··· 1370 1231 if (!fbc->busy_bits && fbc->crtc && 1371 1232 (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { 1372 1233 if (fbc->active) 1373 - intel_fbc_recompress(dev_priv); 1234 + intel_fbc_nuke(fbc); 1374 1235 else if (!fbc->flip_pending) 1375 1236 __intel_fbc_post_update(fbc->crtc); 1376 1237 } ··· 1381 1242 1382 1243 /** 1383 1244 * intel_fbc_choose_crtc - select a CRTC to enable FBC on 1384 - * @dev_priv: i915 device instance 1245 + * @i915: i915 device instance 1385 1246 * @state: the atomic state structure 1386 1247 * 1387 1248 * This function looks at the proposed state for CRTCs and planes, then chooses ··· 1389 1250 * true. 1390 1251 * 1391 1252 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe 1392 - * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc. 1253 + * enable FBC for the chosen CRTC. If it does, it will set i915->fbc.crtc. 
1393 1254 */ 1394 - void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, 1255 + void intel_fbc_choose_crtc(struct drm_i915_private *i915, 1395 1256 struct intel_atomic_state *state) 1396 1257 { 1397 - struct intel_fbc *fbc = &dev_priv->fbc; 1258 + struct intel_fbc *fbc = &i915->fbc; 1398 1259 struct intel_plane *plane; 1399 1260 struct intel_plane_state *plane_state; 1400 1261 bool crtc_chosen = false; ··· 1407 1268 !intel_atomic_get_new_crtc_state(state, fbc->crtc)) 1408 1269 goto out; 1409 1270 1410 - if (!intel_fbc_can_enable(dev_priv)) 1271 + if (!intel_fbc_can_enable(fbc)) 1411 1272 goto out; 1412 1273 1413 1274 /* Simply choose the first CRTC that is compatible and has a visible ··· 1418 1279 struct intel_crtc_state *crtc_state; 1419 1280 struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); 1420 1281 1421 - if (!plane->has_fbc) 1282 + if (plane->fbc != fbc) 1422 1283 continue; 1423 1284 1424 1285 if (!plane_state->uapi.visible) ··· 1451 1312 static void intel_fbc_enable(struct intel_atomic_state *state, 1452 1313 struct intel_crtc *crtc) 1453 1314 { 1454 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1315 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 1455 1316 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 1456 1317 const struct intel_crtc_state *crtc_state = 1457 1318 intel_atomic_get_new_crtc_state(state, crtc); 1458 1319 const struct intel_plane_state *plane_state = 1459 1320 intel_atomic_get_new_plane_state(state, plane); 1460 - struct intel_fbc *fbc = &dev_priv->fbc; 1461 - struct intel_fbc_state_cache *cache = &fbc->state_cache; 1321 + struct intel_fbc *fbc = plane->fbc; 1322 + struct intel_fbc_state_cache *cache; 1462 1323 int min_limit; 1463 1324 1464 - if (!plane->has_fbc || !plane_state) 1325 + if (!fbc || !plane_state) 1465 1326 return; 1327 + 1328 + cache = &fbc->state_cache; 1466 1329 1467 1330 min_limit = intel_fbc_min_limit(plane_state->hw.fb ? 
1468 1331 plane_state->hw.fb->format->cpp[0] : 0); ··· 1476 1335 goto out; 1477 1336 1478 1337 if (fbc->limit >= min_limit && 1479 - !intel_fbc_cfb_size_changed(dev_priv)) 1338 + !intel_fbc_cfb_size_changed(fbc)) 1480 1339 goto out; 1481 1340 1482 - __intel_fbc_disable(dev_priv); 1341 + __intel_fbc_disable(fbc); 1483 1342 } 1484 1343 1485 - drm_WARN_ON(&dev_priv->drm, fbc->active); 1344 + drm_WARN_ON(&i915->drm, fbc->active); 1486 1345 1487 1346 intel_fbc_update_state_cache(crtc, crtc_state, plane_state); 1488 1347 ··· 1490 1349 if (!cache->plane.visible) 1491 1350 goto out; 1492 1351 1493 - if (intel_fbc_alloc_cfb(dev_priv, 1494 - intel_fbc_cfb_size(dev_priv, cache), min_limit)) { 1352 + if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(fbc, cache), min_limit)) { 1495 1353 cache->plane.visible = false; 1496 1354 fbc->no_fbc_reason = "not enough stolen memory"; 1497 1355 goto out; 1498 1356 } 1499 1357 1500 - drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", 1358 + drm_dbg_kms(&i915->drm, "Enabling FBC on pipe %c\n", 1501 1359 pipe_name(crtc->pipe)); 1502 1360 fbc->no_fbc_reason = "FBC enabled but not active yet\n"; 1503 1361 1504 1362 fbc->crtc = crtc; 1505 1363 1506 - intel_fbc_program_cfb(dev_priv); 1364 + intel_fbc_program_cfb(fbc); 1507 1365 out: 1508 1366 mutex_unlock(&fbc->lock); 1509 1367 } ··· 1515 1375 */ 1516 1376 void intel_fbc_disable(struct intel_crtc *crtc) 1517 1377 { 1518 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1519 1378 struct intel_plane *plane = to_intel_plane(crtc->base.primary); 1520 - struct intel_fbc *fbc = &dev_priv->fbc; 1379 + struct intel_fbc *fbc = plane->fbc; 1521 1380 1522 - if (!plane->has_fbc) 1381 + if (!fbc) 1523 1382 return; 1524 1383 1525 1384 mutex_lock(&fbc->lock); 1526 1385 if (fbc->crtc == crtc) 1527 - __intel_fbc_disable(dev_priv); 1386 + __intel_fbc_disable(fbc); 1528 1387 mutex_unlock(&fbc->lock); 1529 1388 } 1530 1389 ··· 1551 1412 1552 1413 /** 1553 1414 * intel_fbc_global_disable - globally 
disable FBC 1554 - * @dev_priv: i915 device instance 1415 + * @i915: i915 device instance 1555 1416 * 1556 1417 * This function disables FBC regardless of which CRTC is associated with it. 1557 1418 */ 1558 - void intel_fbc_global_disable(struct drm_i915_private *dev_priv) 1419 + void intel_fbc_global_disable(struct drm_i915_private *i915) 1559 1420 { 1560 - struct intel_fbc *fbc = &dev_priv->fbc; 1421 + struct intel_fbc *fbc = &i915->fbc; 1561 1422 1562 - if (!HAS_FBC(dev_priv)) 1423 + if (!HAS_FBC(i915)) 1563 1424 return; 1564 1425 1565 1426 mutex_lock(&fbc->lock); 1566 1427 if (fbc->crtc) { 1567 - drm_WARN_ON(&dev_priv->drm, fbc->crtc->active); 1568 - __intel_fbc_disable(dev_priv); 1428 + drm_WARN_ON(&i915->drm, fbc->crtc->active); 1429 + __intel_fbc_disable(fbc); 1569 1430 } 1570 1431 mutex_unlock(&fbc->lock); 1571 1432 } 1572 1433 1573 1434 static void intel_fbc_underrun_work_fn(struct work_struct *work) 1574 1435 { 1575 - struct drm_i915_private *dev_priv = 1436 + struct drm_i915_private *i915 = 1576 1437 container_of(work, struct drm_i915_private, fbc.underrun_work); 1577 - struct intel_fbc *fbc = &dev_priv->fbc; 1438 + struct intel_fbc *fbc = &i915->fbc; 1578 1439 1579 1440 mutex_lock(&fbc->lock); 1580 1441 ··· 1582 1443 if (fbc->underrun_detected || !fbc->crtc) 1583 1444 goto out; 1584 1445 1585 - drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n"); 1446 + drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n"); 1586 1447 fbc->underrun_detected = true; 1587 1448 1588 - intel_fbc_deactivate(dev_priv, "FIFO underrun"); 1449 + intel_fbc_deactivate(fbc, "FIFO underrun"); 1589 1450 out: 1590 1451 mutex_unlock(&fbc->lock); 1591 1452 } 1592 1453 1593 1454 /* 1594 1455 * intel_fbc_reset_underrun - reset FBC fifo underrun status. 1595 - * @dev_priv: i915 device instance 1456 + * @fbc: The FBC instance 1596 1457 * 1597 1458 * See intel_fbc_handle_fifo_underrun_irq(). 
For automated testing we 1598 1459 * want to re-enable FBC after an underrun to increase test coverage. 1599 1460 */ 1600 - int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv) 1461 + int intel_fbc_reset_underrun(struct intel_fbc *fbc) 1601 1462 { 1463 + struct drm_i915_private *i915 = fbc->i915; 1602 1464 int ret; 1603 1465 1604 - cancel_work_sync(&dev_priv->fbc.underrun_work); 1466 + cancel_work_sync(&fbc->underrun_work); 1605 1467 1606 - ret = mutex_lock_interruptible(&dev_priv->fbc.lock); 1468 + ret = mutex_lock_interruptible(&fbc->lock); 1607 1469 if (ret) 1608 1470 return ret; 1609 1471 1610 - if (dev_priv->fbc.underrun_detected) { 1611 - drm_dbg_kms(&dev_priv->drm, 1472 + if (fbc->underrun_detected) { 1473 + drm_dbg_kms(&i915->drm, 1612 1474 "Re-allowing FBC after fifo underrun\n"); 1613 - dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared"; 1475 + fbc->no_fbc_reason = "FIFO underrun cleared"; 1614 1476 } 1615 1477 1616 - dev_priv->fbc.underrun_detected = false; 1617 - mutex_unlock(&dev_priv->fbc.lock); 1478 + fbc->underrun_detected = false; 1479 + mutex_unlock(&fbc->lock); 1618 1480 1619 1481 return 0; 1620 1482 } 1621 1483 1622 1484 /** 1623 1485 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun 1624 - * @dev_priv: i915 device instance 1486 + * @fbc: The FBC instance 1625 1487 * 1626 1488 * Without FBC, most underruns are harmless and don't really cause too many 1627 1489 * problems, except for an annoying message on dmesg. With FBC, underruns can ··· 1634 1494 * 1635 1495 * This function is called from the IRQ handler. 
1636 1496 */ 1637 - void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) 1497 + void intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) 1638 1498 { 1639 - struct intel_fbc *fbc = &dev_priv->fbc; 1640 - 1641 - if (!HAS_FBC(dev_priv)) 1499 + if (!HAS_FBC(fbc->i915)) 1642 1500 return; 1643 1501 1644 1502 /* There's no guarantee that underrun_detected won't be set to true ··· 1660 1522 * space to change the value during runtime without sanitizing it again. IGT 1661 1523 * relies on being able to change i915.enable_fbc at runtime. 1662 1524 */ 1663 - static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) 1525 + static int intel_sanitize_fbc_option(struct drm_i915_private *i915) 1664 1526 { 1665 - if (dev_priv->params.enable_fbc >= 0) 1666 - return !!dev_priv->params.enable_fbc; 1527 + if (i915->params.enable_fbc >= 0) 1528 + return !!i915->params.enable_fbc; 1667 1529 1668 - if (!HAS_FBC(dev_priv)) 1530 + if (!HAS_FBC(i915)) 1669 1531 return 0; 1670 1532 1671 - if (IS_BROADWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 9) 1533 + if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9) 1672 1534 return 1; 1673 1535 1674 1536 return 0; 1675 1537 } 1676 1538 1677 - static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) 1539 + static bool need_fbc_vtd_wa(struct drm_i915_private *i915) 1678 1540 { 1679 1541 /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ 1680 1542 if (intel_vtd_active() && 1681 - (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { 1682 - drm_info(&dev_priv->drm, 1543 + (IS_SKYLAKE(i915) || IS_BROXTON(i915))) { 1544 + drm_info(&i915->drm, 1683 1545 "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); 1684 1546 return true; 1685 1547 } ··· 1689 1551 1690 1552 /** 1691 1553 * intel_fbc_init - Initialize FBC 1692 - * @dev_priv: the i915 device 1554 + * @i915: the i915 device 1693 1555 * 1694 1556 * This function might be called during PM init process. 
1695 1557 */ 1696 - void intel_fbc_init(struct drm_i915_private *dev_priv) 1558 + void intel_fbc_init(struct drm_i915_private *i915) 1697 1559 { 1698 - struct intel_fbc *fbc = &dev_priv->fbc; 1560 + struct intel_fbc *fbc = &i915->fbc; 1699 1561 1562 + fbc->i915 = i915; 1700 1563 INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); 1701 1564 mutex_init(&fbc->lock); 1702 1565 fbc->active = false; 1703 1566 1704 - if (!drm_mm_initialized(&dev_priv->mm.stolen)) 1705 - mkwrite_device_info(dev_priv)->display.has_fbc = false; 1567 + if (!drm_mm_initialized(&i915->mm.stolen)) 1568 + mkwrite_device_info(i915)->display.has_fbc = false; 1706 1569 1707 - if (need_fbc_vtd_wa(dev_priv)) 1708 - mkwrite_device_info(dev_priv)->display.has_fbc = false; 1570 + if (need_fbc_vtd_wa(i915)) 1571 + mkwrite_device_info(i915)->display.has_fbc = false; 1709 1572 1710 - dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv); 1711 - drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n", 1712 - dev_priv->params.enable_fbc); 1573 + i915->params.enable_fbc = intel_sanitize_fbc_option(i915); 1574 + drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", 1575 + i915->params.enable_fbc); 1713 1576 1714 - if (!HAS_FBC(dev_priv)) { 1577 + if (!HAS_FBC(i915)) { 1715 1578 fbc->no_fbc_reason = "unsupported by this chipset"; 1716 1579 return; 1717 1580 } 1718 1581 1582 + if (DISPLAY_VER(i915) >= 7) 1583 + fbc->funcs = &ivb_fbc_funcs; 1584 + else if (DISPLAY_VER(i915) == 6) 1585 + fbc->funcs = &snb_fbc_funcs; 1586 + else if (DISPLAY_VER(i915) == 5) 1587 + fbc->funcs = &ilk_fbc_funcs; 1588 + else if (IS_G4X(i915)) 1589 + fbc->funcs = &g4x_fbc_funcs; 1590 + else if (DISPLAY_VER(i915) == 4) 1591 + fbc->funcs = &i965_fbc_funcs; 1592 + else 1593 + fbc->funcs = &i8xx_fbc_funcs; 1594 + 1719 1595 /* We still don't have any sort of hardware state readout for FBC, so 1720 1596 * deactivate it in case the BIOS activated it to make sure software 1721 1597 * matches the hardware state. 
*/ 1722 - if (intel_fbc_hw_is_active(dev_priv)) 1723 - intel_fbc_hw_deactivate(dev_priv); 1598 + if (intel_fbc_hw_is_active(fbc)) 1599 + intel_fbc_hw_deactivate(fbc); 1724 1600 }
+7 -4
drivers/gpu/drm/i915/display/intel_fbc.h
··· 14 14 struct intel_atomic_state; 15 15 struct intel_crtc; 16 16 struct intel_crtc_state; 17 + struct intel_fbc; 17 18 struct intel_plane_state; 18 19 19 20 void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, 20 21 struct intel_atomic_state *state); 21 - bool intel_fbc_is_active(struct drm_i915_private *dev_priv); 22 + bool intel_fbc_is_active(struct intel_fbc *fbc); 23 + bool intel_fbc_is_compressing(struct intel_fbc *fbc); 22 24 bool intel_fbc_pre_update(struct intel_atomic_state *state, 23 25 struct intel_crtc *crtc); 24 26 void intel_fbc_post_update(struct intel_atomic_state *state, 25 27 struct intel_crtc *crtc); 26 28 void intel_fbc_init(struct drm_i915_private *dev_priv); 29 + void intel_fbc_cleanup(struct drm_i915_private *dev_priv); 27 30 void intel_fbc_update(struct intel_atomic_state *state, 28 31 struct intel_crtc *crtc); 29 32 void intel_fbc_disable(struct intel_crtc *crtc); ··· 36 33 enum fb_op_origin origin); 37 34 void intel_fbc_flush(struct drm_i915_private *dev_priv, 38 35 unsigned int frontbuffer_bits, enum fb_op_origin origin); 39 - void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv); 40 - void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv); 41 - int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv); 36 + void intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc); 37 + int intel_fbc_reset_underrun(struct intel_fbc *fbc); 38 + int intel_fbc_set_false_color(struct intel_fbc *fbc, bool enable); 42 39 43 40 #endif /* __INTEL_FBC_H__ */
+37 -99
drivers/gpu/drm/i915/display/intel_fdi.c
··· 8 8 #include "intel_de.h" 9 9 #include "intel_display_types.h" 10 10 #include "intel_fdi.h" 11 - #include "intel_sbi.h" 12 11 13 12 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 14 13 enum pipe pipe, bool state) ··· 886 887 DP_TP_CTL_ENABLE); 887 888 } 888 889 890 + void hsw_fdi_disable(struct intel_encoder *encoder) 891 + { 892 + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 893 + u32 val; 894 + 895 + /* 896 + * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) 897 + * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, 898 + * step 13 is the correct place for it. Step 18 is where it was 899 + * originally before the BUN. 900 + */ 901 + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 902 + val &= ~FDI_RX_ENABLE; 903 + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); 904 + 905 + val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); 906 + val &= ~DDI_BUF_CTL_ENABLE; 907 + intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val); 908 + 909 + intel_wait_ddi_buf_idle(dev_priv, PORT_E); 910 + 911 + intel_ddi_disable_clock(encoder); 912 + 913 + val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); 914 + val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); 915 + val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); 916 + intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); 917 + 918 + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 919 + val &= ~FDI_PCDCLK; 920 + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); 921 + 922 + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 923 + val &= ~FDI_RX_PLL_ENABLE; 924 + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); 925 + } 926 + 889 927 void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) 890 928 { 891 929 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); ··· 1040 1004 1041 1005 intel_de_posting_read(dev_priv, reg); 1042 1006 udelay(100); 1043 - } 1044 - 1045 - static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv) 
1046 - { 1047 - u32 tmp; 1048 - 1049 - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); 1050 - tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 1051 - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); 1052 - 1053 - if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & 1054 - FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 1055 - drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); 1056 - 1057 - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); 1058 - tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 1059 - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); 1060 - 1061 - if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & 1062 - FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 1063 - drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n"); 1064 - } 1065 - 1066 - /* WaMPhyProgramming:hsw */ 1067 - void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv) 1068 - { 1069 - u32 tmp; 1070 - 1071 - lpt_fdi_reset_mphy(dev_priv); 1072 - 1073 - tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 1074 - tmp &= ~(0xFF << 24); 1075 - tmp |= (0x12 << 24); 1076 - intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 1077 - 1078 - tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 1079 - tmp |= (1 << 11); 1080 - intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 1081 - 1082 - tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 1083 - tmp |= (1 << 11); 1084 - intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 1085 - 1086 - tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 1087 - tmp |= (1 << 24) | (1 << 21) | (1 << 18); 1088 - intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 1089 - 1090 - tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 1091 - tmp |= (1 << 24) | (1 << 21) | (1 << 18); 1092 - intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 1093 - 1094 - tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 1095 - tmp &= ~(7 << 13); 1096 - tmp |= (5 << 13); 1097 - intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 1098 - 1099 - tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 1100 - tmp &= ~(7 << 13); 1101 - tmp |= (5 << 13); 
1102 - intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 1103 - 1104 - tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 1105 - tmp &= ~0xFF; 1106 - tmp |= 0x1C; 1107 - intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 1108 - 1109 - tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 1110 - tmp &= ~0xFF; 1111 - tmp |= 0x1C; 1112 - intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 1113 - 1114 - tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 1115 - tmp &= ~(0xFF << 16); 1116 - tmp |= (0x1C << 16); 1117 - intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 1118 - 1119 - tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 1120 - tmp &= ~(0xFF << 16); 1121 - tmp |= (0x1C << 16); 1122 - intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 1123 - 1124 - tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 1125 - tmp |= (1 << 27); 1126 - intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 1127 - 1128 - tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 1129 - tmp |= (1 << 27); 1130 - intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 1131 - 1132 - tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 1133 - tmp &= ~(0xF << 28); 1134 - tmp |= (4 << 28); 1135 - intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 1136 - 1137 - tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 1138 - tmp &= ~(0xF << 28); 1139 - tmp |= (4 << 28); 1140 - intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 1141 1007 } 1142 1008 1143 1009 static const struct intel_fdi_funcs ilk_funcs = {
+1 -1
drivers/gpu/drm/i915/display/intel_fdi.h
··· 23 23 void intel_fdi_init_hook(struct drm_i915_private *dev_priv); 24 24 void hsw_fdi_link_train(struct intel_encoder *encoder, 25 25 const struct intel_crtc_state *crtc_state); 26 + void hsw_fdi_disable(struct intel_encoder *encoder); 26 27 void intel_fdi_pll_freq_update(struct drm_i915_private *i915); 27 - void lpt_fdi_program_mphy(struct drm_i915_private *i915); 28 28 29 29 void intel_fdi_link_train(struct intel_crtc *crtc, 30 30 const struct intel_crtc_state *crtc_state);
+1 -1
drivers/gpu/drm/i915/display/intel_fifo_underrun.c
··· 434 434 drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe)); 435 435 } 436 436 437 - intel_fbc_handle_fifo_underrun_irq(dev_priv); 437 + intel_fbc_handle_fifo_underrun_irq(&dev_priv->fbc); 438 438 } 439 439 440 440 /**
+11 -2
drivers/gpu/drm/i915/display/intel_gmbus.c
··· 334 334 algo->data = bus; 335 335 } 336 336 337 + static bool has_gmbus_irq(struct drm_i915_private *i915) 338 + { 339 + /* 340 + * encoder->shutdown() may want to use GMBUS 341 + * after irqs have already been disabled. 342 + */ 343 + return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915); 344 + } 345 + 337 346 static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) 338 347 { 339 348 DEFINE_WAIT(wait); ··· 353 344 * we also need to check for NAKs besides the hw ready/idle signal, we 354 345 * need to wake up periodically and check that ourselves. 355 346 */ 356 - if (!HAS_GMBUS_IRQ(dev_priv)) 347 + if (!has_gmbus_irq(dev_priv)) 357 348 irq_en = 0; 358 349 359 350 add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); ··· 384 375 385 376 /* Important: The hw handles only the first bit, so set only one! */ 386 377 irq_enable = 0; 387 - if (HAS_GMBUS_IRQ(dev_priv)) 378 + if (has_gmbus_irq(dev_priv)) 388 379 irq_enable = GMBUS_IDLE_EN; 389 380 390 381 add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
+72 -45
drivers/gpu/drm/i915/display/intel_hdmi.c
··· 1800 1800 READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI; 1801 1801 } 1802 1802 1803 + static bool intel_hdmi_is_ycbcr420(const struct intel_crtc_state *crtc_state) 1804 + { 1805 + return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420; 1806 + } 1807 + 1803 1808 static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, 1804 1809 bool respect_downstream_limits, 1805 1810 bool has_hdmi_sink) ··· 1869 1864 return MODE_OK; 1870 1865 } 1871 1866 1872 - static int intel_hdmi_port_clock(int clock, int bpc) 1867 + static int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output) 1873 1868 { 1869 + /* YCBCR420 TMDS rate requirement is half the pixel clock */ 1870 + if (ycbcr420_output) 1871 + clock /= 2; 1872 + 1874 1873 /* 1875 1874 * Need to adjust the port link by: 1876 1875 * 1.5x for 12bpc ··· 1883 1874 return clock * bpc / 8; 1884 1875 } 1885 1876 1886 - static bool intel_hdmi_bpc_possible(struct drm_connector *connector, 1887 - int bpc, bool has_hdmi_sink, bool ycbcr420_output) 1877 + static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc) 1888 1878 { 1889 - struct drm_i915_private *i915 = to_i915(connector->dev); 1879 + switch (bpc) { 1880 + case 12: 1881 + return !HAS_GMCH(i915); 1882 + case 10: 1883 + return DISPLAY_VER(i915) >= 11; 1884 + case 8: 1885 + return true; 1886 + default: 1887 + MISSING_CASE(bpc); 1888 + return false; 1889 + } 1890 + } 1891 + 1892 + static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector, 1893 + int bpc, bool has_hdmi_sink, bool ycbcr420_output) 1894 + { 1890 1895 const struct drm_display_info *info = &connector->display_info; 1891 1896 const struct drm_hdmi_info *hdmi = &info->hdmi; 1892 1897 1893 1898 switch (bpc) { 1894 1899 case 12: 1895 - if (HAS_GMCH(i915)) 1896 - return false; 1897 - 1898 1900 if (!has_hdmi_sink) 1899 1901 return false; 1900 1902 ··· 1914 1894 else 1915 1895 return info->edid_hdmi_dc_modes & 
DRM_EDID_HDMI_DC_36; 1916 1896 case 10: 1917 - if (DISPLAY_VER(i915) < 11) 1918 - return false; 1919 - 1920 1897 if (!has_hdmi_sink) 1921 1898 return false; 1922 1899 ··· 1933 1916 intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock, 1934 1917 bool has_hdmi_sink, bool ycbcr420_output) 1935 1918 { 1919 + struct drm_i915_private *i915 = to_i915(connector->dev); 1936 1920 struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); 1937 1921 enum drm_mode_status status; 1938 1922 1939 - if (ycbcr420_output) 1940 - clock /= 2; 1941 - 1942 1923 /* check if we can do 8bpc */ 1943 - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8), 1924 + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 8, ycbcr420_output), 1944 1925 true, has_hdmi_sink); 1945 1926 1946 1927 /* if we can't do 8bpc we may still be able to do 12bpc */ 1947 1928 if (status != MODE_OK && 1948 - intel_hdmi_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output)) 1949 - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12), 1929 + intel_hdmi_source_bpc_possible(i915, 12) && 1930 + intel_hdmi_sink_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output)) 1931 + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 12, ycbcr420_output), 1950 1932 true, has_hdmi_sink); 1951 1933 1952 1934 /* if we can't do 8,12bpc we may still be able to do 10bpc */ 1953 1935 if (status != MODE_OK && 1954 - intel_hdmi_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output)) 1955 - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10), 1936 + intel_hdmi_source_bpc_possible(i915, 10) && 1937 + intel_hdmi_sink_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output)) 1938 + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 10, ycbcr420_output), 1956 1939 true, has_hdmi_sink); 1957 1940 1958 1941 return status; ··· 2017 2000 if (connector_state->crtc != crtc_state->uapi.crtc) 2018 2001 
continue; 2019 2002 2020 - if (!intel_hdmi_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output)) 2003 + if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output)) 2021 2004 return false; 2022 2005 } 2023 2006 ··· 2032 2015 const struct drm_display_mode *adjusted_mode = 2033 2016 &crtc_state->hw.adjusted_mode; 2034 2017 2018 + if (!intel_hdmi_source_bpc_possible(dev_priv, bpc)) 2019 + return false; 2020 + 2035 2021 /* 2036 2022 * HDMI deep color affects the clocks, so it's only possible 2037 2023 * when not cloning with other encoder types. ··· 2043 2023 return false; 2044 2024 2045 2025 /* Display Wa_1405510057:icl,ehl */ 2046 - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && 2026 + if (intel_hdmi_is_ycbcr420(crtc_state) && 2047 2027 bpc == 10 && DISPLAY_VER(dev_priv) == 11 && 2048 2028 (adjusted_mode->crtc_hblank_end - 2049 2029 adjusted_mode->crtc_hblank_start) % 8 == 2) ··· 2051 2031 2052 2032 return intel_hdmi_deep_color_possible(crtc_state, bpc, 2053 2033 crtc_state->has_hdmi_sink, 2054 - crtc_state->output_format == 2055 - INTEL_OUTPUT_FORMAT_YCBCR420); 2034 + intel_hdmi_is_ycbcr420(crtc_state)); 2056 2035 } 2057 2036 2058 2037 static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, ··· 2059 2040 int clock) 2060 2041 { 2061 2042 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 2043 + bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state); 2062 2044 int bpc; 2063 2045 2064 2046 for (bpc = 12; bpc >= 10; bpc -= 2) { 2065 2047 if (hdmi_deep_color_possible(crtc_state, bpc) && 2066 2048 hdmi_port_clock_valid(intel_hdmi, 2067 - intel_hdmi_port_clock(clock, bpc), 2049 + intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output), 2068 2050 true, crtc_state->has_hdmi_sink) == MODE_OK) 2069 2051 return bpc; 2070 2052 } ··· 2085 2065 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 2086 2066 clock *= 2; 2087 2067 2088 - /* YCBCR420 TMDS rate requirement is half the pixel clock */ 2089 - if 
(crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2090 - clock /= 2; 2091 - 2092 2068 bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock); 2093 2069 2094 - crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc); 2070 + crtc_state->port_clock = intel_hdmi_tmds_clock(clock, bpc, 2071 + intel_hdmi_is_ycbcr420(crtc_state)); 2095 2072 2096 2073 /* 2097 2074 * pipe_bpp could already be below 8bpc due to ··· 2158 2141 return intel_conn_state->force_audio == HDMI_AUDIO_ON; 2159 2142 } 2160 2143 2144 + static enum intel_output_format 2145 + intel_hdmi_output_format(struct intel_connector *connector, 2146 + bool ycbcr_420_output) 2147 + { 2148 + if (connector->base.ycbcr_420_allowed && ycbcr_420_output) 2149 + return INTEL_OUTPUT_FORMAT_YCBCR420; 2150 + else 2151 + return INTEL_OUTPUT_FORMAT_RGB; 2152 + } 2153 + 2161 2154 static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, 2162 2155 struct intel_crtc_state *crtc_state, 2163 2156 const struct drm_connector_state *conn_state) 2164 2157 { 2165 - struct drm_connector *connector = conn_state->connector; 2166 - struct drm_i915_private *i915 = to_i915(connector->dev); 2158 + struct intel_connector *connector = to_intel_connector(conn_state->connector); 2167 2159 const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 2160 + const struct drm_display_info *info = &connector->base.display_info; 2161 + struct drm_i915_private *i915 = to_i915(connector->base.dev); 2162 + bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode); 2168 2163 int ret; 2169 - bool ycbcr_420_only; 2170 2164 2171 - ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, adjusted_mode); 2172 - if (connector->ycbcr_420_allowed && ycbcr_420_only) { 2173 - crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2174 - } else { 2175 - if (!connector->ycbcr_420_allowed && ycbcr_420_only) 2176 - drm_dbg_kms(&i915->drm, 2177 - "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. 
Falling back to RGB.\n"); 2165 + crtc_state->output_format = intel_hdmi_output_format(connector, ycbcr_420_only); 2166 + 2167 + if (ycbcr_420_only && !intel_hdmi_is_ycbcr420(crtc_state)) { 2168 + drm_dbg_kms(&i915->drm, 2169 + "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n"); 2178 2170 crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; 2179 2171 } 2180 2172 2181 2173 ret = intel_hdmi_compute_clock(encoder, crtc_state); 2182 2174 if (ret) { 2183 - if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 && 2184 - connector->ycbcr_420_allowed && 2185 - drm_mode_is_420_also(&connector->display_info, adjusted_mode)) { 2186 - crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; 2187 - ret = intel_hdmi_compute_clock(encoder, crtc_state); 2188 - } 2175 + if (intel_hdmi_is_ycbcr420(crtc_state) || 2176 + !connector->base.ycbcr_420_allowed || 2177 + !drm_mode_is_420_also(info, adjusted_mode)) 2178 + return ret; 2179 + 2180 + crtc_state->output_format = intel_hdmi_output_format(connector, true); 2181 + ret = intel_hdmi_compute_clock(encoder, crtc_state); 2189 2182 } 2190 2183 2191 2184 return ret; ··· 2235 2208 if (ret) 2236 2209 return ret; 2237 2210 2238 - if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { 2211 + if (intel_hdmi_is_ycbcr420(pipe_config)) { 2239 2212 ret = intel_panel_fitting(pipe_config, conn_state); 2240 2213 if (ret) 2241 2214 return ret;
+21 -21
drivers/gpu/drm/i915/display/intel_lpe_audio.c
··· 74 74 #include "intel_de.h" 75 75 #include "intel_lpe_audio.h" 76 76 77 - #define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL) 77 + #define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->audio.lpe.platdev != NULL) 78 78 79 79 static struct platform_device * 80 80 lpe_audio_platdev_create(struct drm_i915_private *dev_priv) ··· 96 96 return ERR_PTR(-ENOMEM); 97 97 } 98 98 99 - rsc[0].start = rsc[0].end = dev_priv->lpe_audio.irq; 99 + rsc[0].start = rsc[0].end = dev_priv->audio.lpe.irq; 100 100 rsc[0].flags = IORESOURCE_IRQ; 101 101 rsc[0].name = "hdmi-lpe-audio-irq"; 102 102 ··· 148 148 * than us fiddle with its internals. 149 149 */ 150 150 151 - platform_device_unregister(dev_priv->lpe_audio.platdev); 151 + platform_device_unregister(dev_priv->audio.lpe.platdev); 152 152 } 153 153 154 154 static void lpe_audio_irq_unmask(struct irq_data *d) ··· 167 167 168 168 static int lpe_audio_irq_init(struct drm_i915_private *dev_priv) 169 169 { 170 - int irq = dev_priv->lpe_audio.irq; 170 + int irq = dev_priv->audio.lpe.irq; 171 171 172 172 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); 173 173 irq_set_chip_and_handler_name(irq, ··· 204 204 { 205 205 int ret; 206 206 207 - dev_priv->lpe_audio.irq = irq_alloc_desc(0); 208 - if (dev_priv->lpe_audio.irq < 0) { 207 + dev_priv->audio.lpe.irq = irq_alloc_desc(0); 208 + if (dev_priv->audio.lpe.irq < 0) { 209 209 drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n", 210 - dev_priv->lpe_audio.irq); 211 - ret = dev_priv->lpe_audio.irq; 210 + dev_priv->audio.lpe.irq); 211 + ret = dev_priv->audio.lpe.irq; 212 212 goto err; 213 213 } 214 214 215 - drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq); 215 + drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->audio.lpe.irq); 216 216 217 217 ret = lpe_audio_irq_init(dev_priv); 218 218 ··· 223 223 goto err_free_irq; 224 224 } 225 225 226 - dev_priv->lpe_audio.platdev = lpe_audio_platdev_create(dev_priv); 226 + dev_priv->audio.lpe.platdev = 
lpe_audio_platdev_create(dev_priv); 227 227 228 - if (IS_ERR(dev_priv->lpe_audio.platdev)) { 229 - ret = PTR_ERR(dev_priv->lpe_audio.platdev); 228 + if (IS_ERR(dev_priv->audio.lpe.platdev)) { 229 + ret = PTR_ERR(dev_priv->audio.lpe.platdev); 230 230 drm_err(&dev_priv->drm, 231 231 "Failed to create lpe audio platform device: %d\n", 232 232 ret); ··· 241 241 242 242 return 0; 243 243 err_free_irq: 244 - irq_free_desc(dev_priv->lpe_audio.irq); 244 + irq_free_desc(dev_priv->audio.lpe.irq); 245 245 err: 246 - dev_priv->lpe_audio.irq = -1; 247 - dev_priv->lpe_audio.platdev = NULL; 246 + dev_priv->audio.lpe.irq = -1; 247 + dev_priv->audio.lpe.platdev = NULL; 248 248 return ret; 249 249 } 250 250 ··· 262 262 if (!HAS_LPE_AUDIO(dev_priv)) 263 263 return; 264 264 265 - ret = generic_handle_irq(dev_priv->lpe_audio.irq); 265 + ret = generic_handle_irq(dev_priv->audio.lpe.irq); 266 266 if (ret) 267 267 drm_err_ratelimited(&dev_priv->drm, 268 268 "error handling LPE audio irq: %d\n", ret); ··· 303 303 304 304 lpe_audio_platdev_destroy(dev_priv); 305 305 306 - irq_free_desc(dev_priv->lpe_audio.irq); 306 + irq_free_desc(dev_priv->audio.lpe.irq); 307 307 308 - dev_priv->lpe_audio.irq = -1; 309 - dev_priv->lpe_audio.platdev = NULL; 308 + dev_priv->audio.lpe.irq = -1; 309 + dev_priv->audio.lpe.platdev = NULL; 310 310 } 311 311 312 312 /** ··· 333 333 if (!HAS_LPE_AUDIO(dev_priv)) 334 334 return; 335 335 336 - pdata = dev_get_platdata(&dev_priv->lpe_audio.platdev->dev); 336 + pdata = dev_get_platdata(&dev_priv->audio.lpe.platdev->dev); 337 337 ppdata = &pdata->port[port - PORT_B]; 338 338 339 339 spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags); ··· 361 361 } 362 362 363 363 if (pdata->notify_audio_lpe) 364 - pdata->notify_audio_lpe(dev_priv->lpe_audio.platdev, port - PORT_B); 364 + pdata->notify_audio_lpe(dev_priv->audio.lpe.platdev, port - PORT_B); 365 365 366 366 spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags); 367 367 }
+501
drivers/gpu/drm/i915/display/intel_pch_display.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #include "g4x_dp.h" 7 + #include "intel_crt.h" 8 + #include "intel_de.h" 9 + #include "intel_display_types.h" 10 + #include "intel_fdi.h" 11 + #include "intel_lvds.h" 12 + #include "intel_pch_display.h" 13 + #include "intel_pch_refclk.h" 14 + #include "intel_pps.h" 15 + #include "intel_sdvo.h" 16 + 17 + static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 18 + enum pipe pipe, enum port port, 19 + i915_reg_t dp_reg) 20 + { 21 + enum pipe port_pipe; 22 + bool state; 23 + 24 + state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); 25 + 26 + I915_STATE_WARN(state && port_pipe == pipe, 27 + "PCH DP %c enabled on transcoder %c, should be disabled\n", 28 + port_name(port), pipe_name(pipe)); 29 + 30 + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, 31 + "IBX PCH DP %c still using transcoder B\n", 32 + port_name(port)); 33 + } 34 + 35 + static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 36 + enum pipe pipe, enum port port, 37 + i915_reg_t hdmi_reg) 38 + { 39 + enum pipe port_pipe; 40 + bool state; 41 + 42 + state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); 43 + 44 + I915_STATE_WARN(state && port_pipe == pipe, 45 + "PCH HDMI %c enabled on transcoder %c, should be disabled\n", 46 + port_name(port), pipe_name(pipe)); 47 + 48 + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, 49 + "IBX PCH HDMI %c still using transcoder B\n", 50 + port_name(port)); 51 + } 52 + 53 + static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 54 + enum pipe pipe) 55 + { 56 + enum pipe port_pipe; 57 + 58 + assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); 59 + assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); 60 + assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); 61 + 62 + I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && 63 + 
port_pipe == pipe, 64 + "PCH VGA enabled on transcoder %c, should be disabled\n", 65 + pipe_name(pipe)); 66 + 67 + I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && 68 + port_pipe == pipe, 69 + "PCH LVDS enabled on transcoder %c, should be disabled\n", 70 + pipe_name(pipe)); 71 + 72 + /* PCH SDVOB multiplex with HDMIB */ 73 + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); 74 + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); 75 + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); 76 + } 77 + 78 + static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 79 + enum pipe pipe) 80 + { 81 + u32 val; 82 + bool enabled; 83 + 84 + val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); 85 + enabled = !!(val & TRANS_ENABLE); 86 + I915_STATE_WARN(enabled, 87 + "transcoder assertion failed, should be off on pipe %c but is still active\n", 88 + pipe_name(pipe)); 89 + } 90 + 91 + static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, 92 + enum pipe pch_transcoder) 93 + { 94 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 95 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 96 + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 97 + 98 + intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), 99 + intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); 100 + intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), 101 + intel_de_read(dev_priv, HBLANK(cpu_transcoder))); 102 + intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), 103 + intel_de_read(dev_priv, HSYNC(cpu_transcoder))); 104 + 105 + intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), 106 + intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); 107 + intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), 108 + intel_de_read(dev_priv, VBLANK(cpu_transcoder))); 109 + intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), 110 + intel_de_read(dev_priv, 
VSYNC(cpu_transcoder))); 111 + intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), 112 + intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); 113 + } 114 + 115 + static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) 116 + { 117 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 118 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 119 + enum pipe pipe = crtc->pipe; 120 + i915_reg_t reg; 121 + u32 val, pipeconf_val; 122 + 123 + /* Make sure PCH DPLL is enabled */ 124 + assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); 125 + 126 + /* FDI must be feeding us bits for PCH ports */ 127 + assert_fdi_tx_enabled(dev_priv, pipe); 128 + assert_fdi_rx_enabled(dev_priv, pipe); 129 + 130 + if (HAS_PCH_CPT(dev_priv)) { 131 + reg = TRANS_CHICKEN2(pipe); 132 + val = intel_de_read(dev_priv, reg); 133 + /* 134 + * Workaround: Set the timing override bit 135 + * before enabling the pch transcoder. 136 + */ 137 + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 138 + /* Configure frame start delay to match the CPU */ 139 + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 140 + val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 141 + intel_de_write(dev_priv, reg, val); 142 + } 143 + 144 + reg = PCH_TRANSCONF(pipe); 145 + val = intel_de_read(dev_priv, reg); 146 + pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); 147 + 148 + if (HAS_PCH_IBX(dev_priv)) { 149 + /* Configure frame start delay to match the CPU */ 150 + val &= ~TRANS_FRAME_START_DELAY_MASK; 151 + val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 152 + 153 + /* 154 + * Make the BPC in transcoder be consistent with 155 + * that in pipeconf reg. For HDMI we must use 8bpc 156 + * here for both 8bpc and 12bpc. 
157 + */ 158 + val &= ~PIPECONF_BPC_MASK; 159 + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 160 + val |= PIPECONF_8BPC; 161 + else 162 + val |= pipeconf_val & PIPECONF_BPC_MASK; 163 + } 164 + 165 + val &= ~TRANS_INTERLACE_MASK; 166 + if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { 167 + if (HAS_PCH_IBX(dev_priv) && 168 + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) 169 + val |= TRANS_LEGACY_INTERLACED_ILK; 170 + else 171 + val |= TRANS_INTERLACED; 172 + } else { 173 + val |= TRANS_PROGRESSIVE; 174 + } 175 + 176 + intel_de_write(dev_priv, reg, val | TRANS_ENABLE); 177 + if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) 178 + drm_err(&dev_priv->drm, "failed to enable transcoder %c\n", 179 + pipe_name(pipe)); 180 + } 181 + 182 + static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) 183 + { 184 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 185 + enum pipe pipe = crtc->pipe; 186 + i915_reg_t reg; 187 + u32 val; 188 + 189 + /* FDI relies on the transcoder */ 190 + assert_fdi_tx_disabled(dev_priv, pipe); 191 + assert_fdi_rx_disabled(dev_priv, pipe); 192 + 193 + /* Ports must be off as well */ 194 + assert_pch_ports_disabled(dev_priv, pipe); 195 + 196 + reg = PCH_TRANSCONF(pipe); 197 + val = intel_de_read(dev_priv, reg); 198 + val &= ~TRANS_ENABLE; 199 + intel_de_write(dev_priv, reg, val); 200 + /* wait for PCH transcoder off, transcoder state */ 201 + if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) 202 + drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", 203 + pipe_name(pipe)); 204 + 205 + if (HAS_PCH_CPT(dev_priv)) { 206 + /* Workaround: Clear the timing override chicken bit again. 
*/ 207 + reg = TRANS_CHICKEN2(pipe); 208 + val = intel_de_read(dev_priv, reg); 209 + val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 210 + intel_de_write(dev_priv, reg, val); 211 + } 212 + } 213 + 214 + /* 215 + * Enable PCH resources required for PCH ports: 216 + * - PCH PLLs 217 + * - FDI training & RX/TX 218 + * - update transcoder timings 219 + * - DP transcoding bits 220 + * - transcoder 221 + */ 222 + void ilk_pch_enable(struct intel_atomic_state *state, 223 + struct intel_crtc *crtc) 224 + { 225 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 226 + const struct intel_crtc_state *crtc_state = 227 + intel_atomic_get_new_crtc_state(state, crtc); 228 + enum pipe pipe = crtc->pipe; 229 + u32 temp; 230 + 231 + assert_pch_transcoder_disabled(dev_priv, pipe); 232 + 233 + /* For PCH output, training FDI link */ 234 + intel_fdi_link_train(crtc, crtc_state); 235 + 236 + /* 237 + * We need to program the right clock selection 238 + * before writing the pixel multiplier into the DPLL. 239 + */ 240 + if (HAS_PCH_CPT(dev_priv)) { 241 + u32 sel; 242 + 243 + temp = intel_de_read(dev_priv, PCH_DPLL_SEL); 244 + temp |= TRANS_DPLL_ENABLE(pipe); 245 + sel = TRANS_DPLLB_SEL(pipe); 246 + if (crtc_state->shared_dpll == 247 + intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 248 + temp |= sel; 249 + else 250 + temp &= ~sel; 251 + intel_de_write(dev_priv, PCH_DPLL_SEL, temp); 252 + } 253 + 254 + /* 255 + * XXX: pch pll's can be enabled any time before we enable the PCH 256 + * transcoder, and we actually should do this to not upset any PCH 257 + * transcoder that already use the clock when we share it. 258 + * 259 + * Note that enable_shared_dpll tries to do the right thing, but 260 + * get_shared_dpll unconditionally resets the pll - we need that 261 + * to have the right LVDS enable sequence. 
262 + */ 263 + intel_enable_shared_dpll(crtc_state); 264 + 265 + /* set transcoder timing, panel must allow it */ 266 + assert_pps_unlocked(dev_priv, pipe); 267 + ilk_pch_transcoder_set_timings(crtc_state, pipe); 268 + 269 + intel_fdi_normal_train(crtc); 270 + 271 + /* For PCH DP, enable TRANS_DP_CTL */ 272 + if (HAS_PCH_CPT(dev_priv) && 273 + intel_crtc_has_dp_encoder(crtc_state)) { 274 + const struct drm_display_mode *adjusted_mode = 275 + &crtc_state->hw.adjusted_mode; 276 + u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 277 + i915_reg_t reg = TRANS_DP_CTL(pipe); 278 + enum port port; 279 + 280 + temp = intel_de_read(dev_priv, reg); 281 + temp &= ~(TRANS_DP_PORT_SEL_MASK | 282 + TRANS_DP_SYNC_MASK | 283 + TRANS_DP_BPC_MASK); 284 + temp |= TRANS_DP_OUTPUT_ENABLE; 285 + temp |= bpc << 9; /* same format but at 11:9 */ 286 + 287 + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 288 + temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 289 + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 290 + temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 291 + 292 + port = intel_get_crtc_new_encoder(state, crtc_state)->port; 293 + drm_WARN_ON(&dev_priv->drm, port < PORT_B || port > PORT_D); 294 + temp |= TRANS_DP_PORT_SEL(port); 295 + 296 + intel_de_write(dev_priv, reg, temp); 297 + } 298 + 299 + ilk_enable_pch_transcoder(crtc_state); 300 + } 301 + 302 + void ilk_pch_disable(struct intel_atomic_state *state, 303 + struct intel_crtc *crtc) 304 + { 305 + ilk_fdi_disable(crtc); 306 + } 307 + 308 + void ilk_pch_post_disable(struct intel_atomic_state *state, 309 + struct intel_crtc *crtc) 310 + { 311 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 312 + enum pipe pipe = crtc->pipe; 313 + 314 + ilk_disable_pch_transcoder(crtc); 315 + 316 + if (HAS_PCH_CPT(dev_priv)) { 317 + i915_reg_t reg; 318 + u32 temp; 319 + 320 + /* disable TRANS_DP_CTL */ 321 + reg = TRANS_DP_CTL(pipe); 322 + temp = intel_de_read(dev_priv, reg); 323 + temp &= ~(TRANS_DP_OUTPUT_ENABLE | 324 + 
TRANS_DP_PORT_SEL_MASK); 325 + temp |= TRANS_DP_PORT_SEL_NONE; 326 + intel_de_write(dev_priv, reg, temp); 327 + 328 + /* disable DPLL_SEL */ 329 + temp = intel_de_read(dev_priv, PCH_DPLL_SEL); 330 + temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 331 + intel_de_write(dev_priv, PCH_DPLL_SEL, temp); 332 + } 333 + 334 + ilk_fdi_pll_disable(crtc); 335 + } 336 + 337 + static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state) 338 + { 339 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 340 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 341 + 342 + /* read out port_clock from the DPLL */ 343 + i9xx_crtc_clock_get(crtc, crtc_state); 344 + 345 + /* 346 + * In case there is an active pipe without active ports, 347 + * we may need some idea for the dotclock anyway. 348 + * Calculate one based on the FDI configuration. 349 + */ 350 + crtc_state->hw.adjusted_mode.crtc_clock = 351 + intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, crtc_state), 352 + &crtc_state->fdi_m_n); 353 + } 354 + 355 + void ilk_pch_get_config(struct intel_crtc_state *crtc_state) 356 + { 357 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 358 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 359 + struct intel_shared_dpll *pll; 360 + enum pipe pipe = crtc->pipe; 361 + enum intel_dpll_id pll_id; 362 + bool pll_active; 363 + u32 tmp; 364 + 365 + if ((intel_de_read(dev_priv, PCH_TRANSCONF(pipe)) & TRANS_ENABLE) == 0) 366 + return; 367 + 368 + crtc_state->has_pch_encoder = true; 369 + 370 + tmp = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); 371 + crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 372 + FDI_DP_PORT_WIDTH_SHIFT) + 1; 373 + 374 + ilk_get_fdi_m_n_config(crtc, crtc_state); 375 + 376 + if (HAS_PCH_IBX(dev_priv)) { 377 + /* 378 + * The pipe->pch transcoder and pch transcoder->pll 379 + * mapping is fixed. 
380 + */ 381 + pll_id = (enum intel_dpll_id) pipe; 382 + } else { 383 + tmp = intel_de_read(dev_priv, PCH_DPLL_SEL); 384 + if (tmp & TRANS_DPLLB_SEL(pipe)) 385 + pll_id = DPLL_ID_PCH_PLL_B; 386 + else 387 + pll_id = DPLL_ID_PCH_PLL_A; 388 + } 389 + 390 + crtc_state->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, pll_id); 391 + pll = crtc_state->shared_dpll; 392 + 393 + pll_active = intel_dpll_get_hw_state(dev_priv, pll, 394 + &crtc_state->dpll_hw_state); 395 + drm_WARN_ON(&dev_priv->drm, !pll_active); 396 + 397 + tmp = crtc_state->dpll_hw_state.dpll; 398 + crtc_state->pixel_multiplier = 399 + ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 400 + >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 401 + 402 + ilk_pch_clock_get(crtc_state); 403 + } 404 + 405 + static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 406 + enum transcoder cpu_transcoder) 407 + { 408 + u32 val, pipeconf_val; 409 + 410 + /* FDI must be feeding us bits for PCH ports */ 411 + assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); 412 + assert_fdi_rx_enabled(dev_priv, PIPE_A); 413 + 414 + val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); 415 + /* Workaround: set timing override bit. 
*/ 416 + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 417 + /* Configure frame start delay to match the CPU */ 418 + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 419 + val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); 420 + intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); 421 + 422 + val = TRANS_ENABLE; 423 + pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); 424 + 425 + if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 426 + PIPECONF_INTERLACED_ILK) 427 + val |= TRANS_INTERLACED; 428 + else 429 + val |= TRANS_PROGRESSIVE; 430 + 431 + intel_de_write(dev_priv, LPT_TRANSCONF, val); 432 + if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, 433 + TRANS_STATE_ENABLE, 100)) 434 + drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); 435 + } 436 + 437 + static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 438 + { 439 + u32 val; 440 + 441 + val = intel_de_read(dev_priv, LPT_TRANSCONF); 442 + val &= ~TRANS_ENABLE; 443 + intel_de_write(dev_priv, LPT_TRANSCONF, val); 444 + /* wait for PCH transcoder off, transcoder state */ 445 + if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, 446 + TRANS_STATE_ENABLE, 50)) 447 + drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); 448 + 449 + /* Workaround: clear timing override bit. */ 450 + val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); 451 + val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 452 + intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); 453 + } 454 + 455 + void lpt_pch_enable(struct intel_atomic_state *state, 456 + struct intel_crtc *crtc) 457 + { 458 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 459 + const struct intel_crtc_state *crtc_state = 460 + intel_atomic_get_new_crtc_state(state, crtc); 461 + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 462 + 463 + assert_pch_transcoder_disabled(dev_priv, PIPE_A); 464 + 465 + lpt_program_iclkip(crtc_state); 466 + 467 + /* Set transcoder timing. 
*/ 468 + ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); 469 + 470 + lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 471 + } 472 + 473 + void lpt_pch_disable(struct intel_atomic_state *state, 474 + struct intel_crtc *crtc) 475 + { 476 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 477 + 478 + lpt_disable_pch_transcoder(dev_priv); 479 + 480 + lpt_disable_iclkip(dev_priv); 481 + } 482 + 483 + void lpt_pch_get_config(struct intel_crtc_state *crtc_state) 484 + { 485 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 486 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 487 + u32 tmp; 488 + 489 + if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0) 490 + return; 491 + 492 + crtc_state->has_pch_encoder = true; 493 + 494 + tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); 495 + crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 496 + FDI_DP_PORT_WIDTH_SHIFT) + 1; 497 + 498 + ilk_get_fdi_m_n_config(crtc, crtc_state); 499 + 500 + crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); 501 + }
+27
drivers/gpu/drm/i915/display/intel_pch_display.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _INTEL_PCH_DISPLAY_H_
#define _INTEL_PCH_DISPLAY_H_

struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;

/* ILK-IVB PCH (FDI + PCH transcoder) modeset/readout hooks */
void ilk_pch_enable(struct intel_atomic_state *state,
		    struct intel_crtc *crtc);
void ilk_pch_disable(struct intel_atomic_state *state,
		     struct intel_crtc *crtc);
void ilk_pch_post_disable(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
void ilk_pch_get_config(struct intel_crtc_state *crtc_state);

/* HSW/BDW LPT PCH (iCLKIP + single PCH transcoder) hooks */
void lpt_pch_enable(struct intel_atomic_state *state,
		    struct intel_crtc *crtc);
void lpt_pch_disable(struct intel_atomic_state *state,
		     struct intel_crtc *crtc);
void lpt_pch_get_config(struct intel_crtc_state *crtc_state);

#endif
+648
drivers/gpu/drm/i915/display/intel_pch_refclk.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"

/* Assert and then de-assert the FDI mPHY reset via SOUTH_CHICKEN2. */
static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}

/*
 * WaMPhyProgramming:hsw
 *
 * Reset the FDI mPHY and then program its tuning registers over the
 * sideband (SBI_MPHY). The register offsets and values are opaque
 * workaround magic; caller is expected to hold no sb_lock (the SBI
 * helpers handle their own access).
 */
static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	lpt_fdi_reset_mphy(dev_priv);

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

/* Gate the pixel clock and disable the iCLKIP SSC modulator. */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, auxdiv, divsel, phasedir, phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

/*
 * Read back the iCLKIP configuration and convert it to a clock in
 * KHz (the inverse of lpt_program_iclkip()'s divisor computation).
 * Returns 0 if the pixel clock is gated or the modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	/* FDI implies downspread; LP PCH has no FDI - sanitize the flags */
	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi)
			lpt_fdi_program_mphy(dev_priv);
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Put the path in alt mode before disabling the SSC */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

/* Map a bend amount in steps (-50..50, multiples of 5) to a table index */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SSCDIVINTPHASE low-word values for each supported bend amount */
static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX

/* Return true if the SPLL is currently running off the PCH SSC reference. */
static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);

	if ((ctl & SPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	if (IS_BROADWELL(dev_priv) &&
	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
		return true;

	return false;
}

/* Return true if WRPLL @id is currently running off the PCH SSC reference. */
static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
			       enum intel_dpll_id id)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));

	if ((ctl & WRPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
		return true;

	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	return false;
}

/*
 * LPT refclk init: record which PLLs the BIOS left running on the PCH
 * SSC reference, and only (re)configure CLKOUT_DP when none do.
 */
static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	/* Only the analog (CRT) output uses FDI on LPT */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	if (dev_priv->pch_ssc_use)
		return;

	if (has_fdi) {
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}

/*
 * ILK/CPT refclk init: compute the desired PCH_DREF_CONTROL state from
 * the connected outputs (LVDS/eDP/SSC usage) and carefully step the
 * hardware from the current state to it, source by source.
 */
static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else {
			val &= ~DREF_SSC1_ENABLE;
		}

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else {
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
			}
		} else {
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The stepped writes above must have converged on 'final' */
	BUG_ON(val != final);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
+21
drivers/gpu/drm/i915/display/intel_pch_refclk.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #ifndef _INTEL_PCH_REFCLK_H_ 7 + #define _INTEL_PCH_REFCLK_H_ 8 + 9 + #include <linux/types.h> 10 + 11 + struct drm_i915_private; 12 + struct intel_crtc_state; 13 + 14 + void lpt_program_iclkip(const struct intel_crtc_state *crtc_state); 15 + void lpt_disable_iclkip(struct drm_i915_private *dev_priv); 16 + int lpt_get_iclkip(struct drm_i915_private *dev_priv); 17 + 18 + void intel_init_pch_refclk(struct drm_i915_private *dev_priv); 19 + void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); 20 + 21 + #endif
+78 -47
drivers/gpu/drm/i915/display/intel_psr.c
··· 28 28 29 29 #include "i915_drv.h" 30 30 #include "intel_atomic.h" 31 + #include "intel_crtc.h" 31 32 #include "intel_de.h" 32 33 #include "intel_display_types.h" 33 34 #include "intel_dp_aux.h" 34 35 #include "intel_hdmi.h" 35 36 #include "intel_psr.h" 36 37 #include "intel_snps_phy.h" 37 - #include "intel_sprite.h" 38 38 #include "skl_universal_plane.h" 39 39 40 40 /** ··· 588 588 static bool 589 589 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) 590 590 { 591 - if (DISPLAY_VER(dev_priv) >= 12) 591 + if (IS_ALDERLAKE_P(dev_priv)) 592 + return trans == TRANSCODER_A || trans == TRANSCODER_B; 593 + else if (DISPLAY_VER(dev_priv) >= 12) 592 594 return trans == TRANSCODER_A; 593 595 else 594 596 return trans == TRANSCODER_EDP; ··· 1348 1346 */ 1349 1347 void intel_psr_pause(struct intel_dp *intel_dp) 1350 1348 { 1349 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1351 1350 struct intel_psr *psr = &intel_dp->psr; 1352 1351 1353 1352 if (!CAN_PSR(intel_dp)) ··· 1360 1357 mutex_unlock(&psr->lock); 1361 1358 return; 1362 1359 } 1360 + 1361 + /* If we ever hit this, we will need to add refcount to pause/resume */ 1362 + drm_WARN_ON(&dev_priv->drm, psr->paused); 1363 1363 1364 1364 intel_psr_exit(intel_dp); 1365 1365 intel_psr_wait_exit_locked(intel_dp); ··· 1469 1463 val |= plane_state->uapi.dst.x1; 1470 1464 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); 1471 1465 1472 - /* TODO: consider auxiliary surfaces */ 1473 - x = plane_state->uapi.src.x1 >> 16; 1474 - y = (plane_state->uapi.src.y1 >> 16) + clip->y1; 1466 + x = plane_state->view.color_plane[color_plane].x; 1467 + 1468 + /* 1469 + * From Bspec: UV surface Start Y Position = half of Y plane Y 1470 + * start position. 
1471 + */ 1472 + if (!color_plane) 1473 + y = plane_state->view.color_plane[color_plane].y + clip->y1; 1474 + else 1475 + y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2; 1476 + 1475 1477 val = y << 16 | x; 1478 + 1476 1479 intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), 1477 1480 val); 1478 1481 ··· 1573 1558 * also planes are not updated if they have a negative X 1574 1559 * position so for now doing a full update in this cases 1575 1560 * 1576 - * TODO: We are missing multi-planar formats handling, until it is 1577 - * implemented it will send full frame updates. 1578 - * 1579 1561 * Plane scaling and rotation is not supported by selective fetch and both 1580 1562 * properties can change without a modeset, so need to be check at every 1581 1563 * atomic commmit. ··· 1582 1570 if (plane_state->uapi.dst.y1 < 0 || 1583 1571 plane_state->uapi.dst.x1 < 0 || 1584 1572 plane_state->scaler_id >= 0 || 1585 - plane_state->hw.fb->format->num_planes > 1 || 1586 1573 plane_state->uapi.rotation != DRM_MODE_ROTATE_0) 1587 1574 return false; 1588 1575 ··· 1707 1696 for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, 1708 1697 new_plane_state, i) { 1709 1698 struct drm_rect *sel_fetch_area, inter; 1699 + struct intel_plane *linked = new_plane_state->planar_linked_plane; 1710 1700 1711 1701 if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc || 1712 1702 !new_plane_state->uapi.visible) ··· 1726 1714 sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1; 1727 1715 sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1; 1728 1716 crtc_state->update_planes |= BIT(plane->id); 1717 + 1718 + /* 1719 + * Sel_fetch_area is calculated for UV plane. Use 1720 + * same area for Y plane as well. 
1721 + */ 1722 + if (linked) { 1723 + struct intel_plane_state *linked_new_plane_state; 1724 + struct drm_rect *linked_sel_fetch_area; 1725 + 1726 + linked_new_plane_state = intel_atomic_get_plane_state(state, linked); 1727 + if (IS_ERR(linked_new_plane_state)) 1728 + return PTR_ERR(linked_new_plane_state); 1729 + 1730 + linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area; 1731 + linked_sel_fetch_area->y1 = sel_fetch_area->y1; 1732 + linked_sel_fetch_area->y2 = sel_fetch_area->y2; 1733 + crtc_state->update_planes |= BIT(linked->id); 1734 + } 1729 1735 } 1730 1736 1731 1737 skip_sel_fetch_set_loop: ··· 1751 1721 return 0; 1752 1722 } 1753 1723 1754 - static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state, 1755 - const struct intel_crtc_state *crtc_state) 1724 + void intel_psr_pre_plane_update(struct intel_atomic_state *state, 1725 + struct intel_crtc *crtc) 1756 1726 { 1727 + struct drm_i915_private *i915 = to_i915(state->base.dev); 1728 + const struct intel_crtc_state *crtc_state = 1729 + intel_atomic_get_new_crtc_state(state, crtc); 1757 1730 struct intel_encoder *encoder; 1731 + 1732 + if (!HAS_PSR(i915)) 1733 + return; 1758 1734 1759 1735 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, 1760 1736 crtc_state->uapi.encoder_mask) { ··· 1776 1740 * - All planes will go inactive 1777 1741 * - Changing between PSR versions 1778 1742 */ 1743 + needs_to_disable |= intel_crtc_needs_modeset(crtc_state); 1779 1744 needs_to_disable |= !crtc_state->has_psr; 1780 1745 needs_to_disable |= !crtc_state->active_planes; 1781 1746 needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled; ··· 1786 1749 1787 1750 mutex_unlock(&psr->lock); 1788 1751 } 1789 - } 1790 - 1791 - void intel_psr_pre_plane_update(const struct intel_atomic_state *state) 1792 - { 1793 - struct drm_i915_private *dev_priv = to_i915(state->base.dev); 1794 - struct intel_crtc_state *crtc_state; 1795 - struct intel_crtc *crtc; 1796 - int i; 1797 - 1798 - if 
(!HAS_PSR(dev_priv)) 1799 - return; 1800 - 1801 - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) 1802 - _intel_psr_pre_plane_update(state, crtc_state); 1803 1752 } 1804 1753 1805 1754 static void _intel_psr_post_plane_update(const struct intel_atomic_state *state, ··· 1832 1809 _intel_psr_post_plane_update(state, crtc_state); 1833 1810 } 1834 1811 1835 - /** 1836 - * psr_wait_for_idle - wait for PSR1 to idle 1837 - * @intel_dp: Intel DP 1838 - * @out_value: PSR status in case of failure 1839 - * 1840 - * Returns: 0 on success or -ETIMEOUT if PSR status does not idle. 1841 - * 1842 - */ 1843 - static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value) 1812 + static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp) 1813 + { 1814 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1815 + 1816 + /* 1817 + * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough. 1818 + * As all higher states has bit 4 of PSR2 state set we can just wait for 1819 + * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared. 1820 + */ 1821 + return intel_de_wait_for_clear(dev_priv, 1822 + EDP_PSR2_STATUS(intel_dp->psr.transcoder), 1823 + EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50); 1824 + } 1825 + 1826 + static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp) 1844 1827 { 1845 1828 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1846 1829 ··· 1856 1827 * exit training time + 1.5 ms of aux channel handshake. 50 ms is 1857 1828 * defensive enough to cover everything. 
1858 1829 */ 1859 - return __intel_wait_for_register(&dev_priv->uncore, 1860 - EDP_PSR_STATUS(intel_dp->psr.transcoder), 1861 - EDP_PSR_STATUS_STATE_MASK, 1862 - EDP_PSR_STATUS_STATE_IDLE, 2, 50, 1863 - out_value); 1830 + return intel_de_wait_for_clear(dev_priv, 1831 + EDP_PSR_STATUS(intel_dp->psr.transcoder), 1832 + EDP_PSR_STATUS_STATE_MASK, 50); 1864 1833 } 1865 1834 1866 1835 /** 1867 - * intel_psr_wait_for_idle - wait for PSR1 to idle 1836 + * intel_psr_wait_for_idle - wait for PSR be ready for a pipe update 1868 1837 * @new_crtc_state: new CRTC state 1869 1838 * 1870 1839 * This function is expected to be called from pipe_update_start() where it is ··· 1879 1852 for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder, 1880 1853 new_crtc_state->uapi.encoder_mask) { 1881 1854 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1882 - u32 psr_status; 1855 + int ret; 1883 1856 1884 1857 mutex_lock(&intel_dp->psr.lock); 1885 - if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) { 1858 + 1859 + if (!intel_dp->psr.enabled) { 1886 1860 mutex_unlock(&intel_dp->psr.lock); 1887 1861 continue; 1888 1862 } 1889 1863 1890 - /* when the PSR1 is enabled */ 1891 - if (psr_wait_for_idle(intel_dp, &psr_status)) 1892 - drm_err(&dev_priv->drm, 1893 - "PSR idle timed out 0x%x, atomic update may fail\n", 1894 - psr_status); 1864 + if (intel_dp->psr.psr2_enabled) 1865 + ret = _psr2_ready_for_pipe_update_locked(intel_dp); 1866 + else 1867 + ret = _psr1_ready_for_pipe_update_locked(intel_dp); 1868 + 1869 + if (ret) 1870 + drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n"); 1871 + 1895 1872 mutex_unlock(&intel_dp->psr.lock); 1896 1873 } 1897 1874 }
+2 -1
drivers/gpu/drm/i915/display/intel_psr.h
··· 20 20 struct intel_encoder; 21 21 22 22 void intel_psr_init_dpcd(struct intel_dp *intel_dp); 23 - void intel_psr_pre_plane_update(const struct intel_atomic_state *state); 23 + void intel_psr_pre_plane_update(struct intel_atomic_state *state, 24 + struct intel_crtc *crtc); 24 25 void intel_psr_post_plane_update(const struct intel_atomic_state *state); 25 26 void intel_psr_disable(struct intel_dp *intel_dp, 26 27 const struct intel_crtc_state *old_crtc_state);
+1 -1
drivers/gpu/drm/i915/display/intel_snps_phy.c
··· 58 58 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 59 59 const struct intel_ddi_buf_trans *trans; 60 60 enum phy phy = intel_port_to_phy(dev_priv, encoder->port); 61 - int level = intel_ddi_level(encoder, crtc_state, 0); 62 61 int n_entries, ln; 63 62 64 63 trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); ··· 65 66 return; 66 67 67 68 for (ln = 0; ln < 4; ln++) { 69 + int level = intel_ddi_level(encoder, crtc_state, ln); 68 70 u32 val = 0; 69 71 70 72 val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing);
+142 -115
drivers/gpu/drm/i915/display/intel_sprite.c
··· 45 45 #include "intel_atomic_plane.h" 46 46 #include "intel_de.h" 47 47 #include "intel_display_types.h" 48 + #include "intel_fb.h" 48 49 #include "intel_frontbuffer.h" 49 50 #include "intel_sprite.h" 50 51 #include "i9xx_plane.h" ··· 119 118 } 120 119 121 120 static void 122 - chv_update_csc(const struct intel_plane_state *plane_state) 121 + chv_sprite_update_csc(const struct intel_plane_state *plane_state) 123 122 { 124 123 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 125 124 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); ··· 191 190 #define COS_0 1 192 191 193 192 static void 194 - vlv_update_clrc(const struct intel_plane_state *plane_state) 193 + vlv_sprite_update_clrc(const struct intel_plane_state *plane_state) 195 194 { 196 195 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 197 196 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); ··· 394 393 return sprctl; 395 394 } 396 395 397 - static void vlv_update_gamma(const struct intel_plane_state *plane_state) 396 + static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state) 398 397 { 399 398 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 400 399 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); ··· 418 417 } 419 418 420 419 static void 421 - vlv_update_plane(struct intel_plane *plane, 422 - const struct intel_crtc_state *crtc_state, 423 - const struct intel_plane_state *plane_state) 420 + vlv_sprite_update_noarm(struct intel_plane *plane, 421 + const struct intel_crtc_state *crtc_state, 422 + const struct intel_plane_state *plane_state) 424 423 { 425 424 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 426 425 enum pipe pipe = plane->pipe; 427 426 enum plane_id plane_id = plane->id; 428 - u32 sprsurf_offset = plane_state->view.color_plane[0].offset; 429 - u32 linear_offset; 430 - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 431 427 int crtc_x = 
plane_state->uapi.dst.x1; 432 428 int crtc_y = plane_state->uapi.dst.y1; 433 429 u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); 434 430 u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); 435 - u32 x = plane_state->view.color_plane[0].x; 436 - u32 y = plane_state->view.color_plane[0].y; 437 431 unsigned long irqflags; 438 - u32 sprctl; 439 - 440 - sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state); 441 432 442 433 /* Sizes are 0 based */ 443 434 crtc_w--; 444 435 crtc_h--; 445 436 446 - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 447 - 448 437 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 449 438 450 439 intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), 451 - plane_state->view.color_plane[0].stride); 440 + plane_state->view.color_plane[0].mapping_stride); 452 441 intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), 453 442 (crtc_y << 16) | crtc_x); 454 443 intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), 455 444 (crtc_h << 16) | crtc_w); 456 - intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); 445 + 446 + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 447 + } 448 + 449 + static void 450 + vlv_sprite_update_arm(struct intel_plane *plane, 451 + const struct intel_crtc_state *crtc_state, 452 + const struct intel_plane_state *plane_state) 453 + { 454 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 455 + enum pipe pipe = plane->pipe; 456 + enum plane_id plane_id = plane->id; 457 + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 458 + u32 sprsurf_offset = plane_state->view.color_plane[0].offset; 459 + u32 x = plane_state->view.color_plane[0].x; 460 + u32 y = plane_state->view.color_plane[0].y; 461 + u32 sprctl, linear_offset; 462 + unsigned long irqflags; 463 + 464 + sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state); 465 + 466 + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 467 + 468 + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 457 469 
458 470 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) 459 - chv_update_csc(plane_state); 471 + chv_sprite_update_csc(plane_state); 460 472 461 473 if (key->flags) { 462 474 intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id), ··· 479 465 intel_de_write_fw(dev_priv, SPKEYMAXVAL(pipe, plane_id), 480 466 key->max_value); 481 467 } 468 + 469 + intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); 482 470 483 471 intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset); 484 472 intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x); ··· 494 478 intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 495 479 intel_plane_ggtt_offset(plane_state) + sprsurf_offset); 496 480 497 - vlv_update_clrc(plane_state); 498 - vlv_update_gamma(plane_state); 481 + vlv_sprite_update_clrc(plane_state); 482 + vlv_sprite_update_gamma(plane_state); 499 483 500 484 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 501 485 } 502 486 503 487 static void 504 - vlv_disable_plane(struct intel_plane *plane, 505 - const struct intel_crtc_state *crtc_state) 488 + vlv_sprite_disable_arm(struct intel_plane *plane, 489 + const struct intel_crtc_state *crtc_state) 506 490 { 507 491 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 508 492 enum pipe pipe = plane->pipe; ··· 518 502 } 519 503 520 504 static bool 521 - vlv_plane_get_hw_state(struct intel_plane *plane, 522 - enum pipe *pipe) 505 + vlv_sprite_get_hw_state(struct intel_plane *plane, 506 + enum pipe *pipe) 523 507 { 524 508 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 525 509 enum intel_display_power_domain power_domain; ··· 821 805 i++; 822 806 } 823 807 824 - static void ivb_update_gamma(const struct intel_plane_state *plane_state) 808 + static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state) 825 809 { 826 810 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 827 811 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 
··· 851 835 } 852 836 853 837 static void 854 - ivb_update_plane(struct intel_plane *plane, 855 - const struct intel_crtc_state *crtc_state, 856 - const struct intel_plane_state *plane_state) 838 + ivb_sprite_update_noarm(struct intel_plane *plane, 839 + const struct intel_crtc_state *crtc_state, 840 + const struct intel_plane_state *plane_state) 857 841 { 858 842 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 859 843 enum pipe pipe = plane->pipe; 860 - u32 sprsurf_offset = plane_state->view.color_plane[0].offset; 861 - u32 linear_offset; 862 - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 863 844 int crtc_x = plane_state->uapi.dst.x1; 864 845 int crtc_y = plane_state->uapi.dst.y1; 865 846 u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); 866 847 u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); 867 - u32 x = plane_state->view.color_plane[0].x; 868 - u32 y = plane_state->view.color_plane[0].y; 869 848 u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 870 849 u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 871 - u32 sprctl, sprscale = 0; 850 + u32 sprscale = 0; 872 851 unsigned long irqflags; 873 - 874 - sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state); 875 852 876 853 /* Sizes are 0 based */ 877 854 src_w--; ··· 875 866 if (crtc_w != src_w || crtc_h != src_h) 876 867 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 877 868 878 - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 879 - 880 869 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 881 870 882 871 intel_de_write_fw(dev_priv, SPRSTRIDE(pipe), 883 - plane_state->view.color_plane[0].stride); 872 + plane_state->view.color_plane[0].mapping_stride); 884 873 intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x); 885 874 intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w); 886 875 if (IS_IVYBRIDGE(dev_priv)) 887 876 intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale); 877 + 878 + 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 879 + } 880 + 881 + static void 882 + ivb_sprite_update_arm(struct intel_plane *plane, 883 + const struct intel_crtc_state *crtc_state, 884 + const struct intel_plane_state *plane_state) 885 + { 886 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 887 + enum pipe pipe = plane->pipe; 888 + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 889 + u32 sprsurf_offset = plane_state->view.color_plane[0].offset; 890 + u32 x = plane_state->view.color_plane[0].x; 891 + u32 y = plane_state->view.color_plane[0].y; 892 + u32 sprctl, linear_offset; 893 + unsigned long irqflags; 894 + 895 + sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state); 896 + 897 + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 898 + 899 + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 888 900 889 901 if (key->flags) { 890 902 intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value); ··· 932 902 intel_de_write_fw(dev_priv, SPRSURF(pipe), 933 903 intel_plane_ggtt_offset(plane_state) + sprsurf_offset); 934 904 935 - ivb_update_gamma(plane_state); 905 + ivb_sprite_update_gamma(plane_state); 936 906 937 907 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 938 908 } 939 909 940 910 static void 941 - ivb_disable_plane(struct intel_plane *plane, 942 - const struct intel_crtc_state *crtc_state) 911 + ivb_sprite_disable_arm(struct intel_plane *plane, 912 + const struct intel_crtc_state *crtc_state) 943 913 { 944 914 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 945 915 enum pipe pipe = plane->pipe; ··· 957 927 } 958 928 959 929 static bool 960 - ivb_plane_get_hw_state(struct intel_plane *plane, 961 - enum pipe *pipe) 930 + ivb_sprite_get_hw_state(struct intel_plane *plane, 931 + enum pipe *pipe) 962 932 { 963 933 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 964 934 enum intel_display_power_domain power_domain; ··· 1136 1106 return dvscntr; 1137 1107 } 1138 
1108 1139 - static void g4x_update_gamma(const struct intel_plane_state *plane_state) 1109 + static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state) 1140 1110 { 1141 1111 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 1142 1112 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); ··· 1166 1136 gamma[i] = (i << 10) / 16; 1167 1137 } 1168 1138 1169 - static void ilk_update_gamma(const struct intel_plane_state *plane_state) 1139 + static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state) 1170 1140 { 1171 1141 struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 1172 1142 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); ··· 1193 1163 } 1194 1164 1195 1165 static void 1196 - g4x_update_plane(struct intel_plane *plane, 1197 - const struct intel_crtc_state *crtc_state, 1198 - const struct intel_plane_state *plane_state) 1166 + g4x_sprite_update_noarm(struct intel_plane *plane, 1167 + const struct intel_crtc_state *crtc_state, 1168 + const struct intel_plane_state *plane_state) 1199 1169 { 1200 1170 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1201 1171 enum pipe pipe = plane->pipe; 1202 - u32 dvssurf_offset = plane_state->view.color_plane[0].offset; 1203 - u32 linear_offset; 1204 - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 1205 1172 int crtc_x = plane_state->uapi.dst.x1; 1206 1173 int crtc_y = plane_state->uapi.dst.y1; 1207 1174 u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); 1208 1175 u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); 1209 - u32 x = plane_state->view.color_plane[0].x; 1210 - u32 y = plane_state->view.color_plane[0].y; 1211 1176 u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 1212 1177 u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 1213 - u32 dvscntr, dvsscale = 0; 1178 + u32 dvsscale = 0; 1214 1179 unsigned long irqflags; 1215 - 1216 - dvscntr = plane_state->ctl | 
g4x_sprite_ctl_crtc(crtc_state); 1217 1180 1218 1181 /* Sizes are 0 based */ 1219 1182 src_w--; ··· 1217 1194 if (crtc_w != src_w || crtc_h != src_h) 1218 1195 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; 1219 1196 1220 - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 1221 - 1222 1197 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1223 1198 1224 1199 intel_de_write_fw(dev_priv, DVSSTRIDE(pipe), 1225 - plane_state->view.color_plane[0].stride); 1200 + plane_state->view.color_plane[0].mapping_stride); 1226 1201 intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x); 1227 1202 intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w); 1228 1203 intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale); 1204 + 1205 + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1206 + } 1207 + 1208 + static void 1209 + g4x_sprite_update_arm(struct intel_plane *plane, 1210 + const struct intel_crtc_state *crtc_state, 1211 + const struct intel_plane_state *plane_state) 1212 + { 1213 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1214 + enum pipe pipe = plane->pipe; 1215 + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 1216 + u32 dvssurf_offset = plane_state->view.color_plane[0].offset; 1217 + u32 x = plane_state->view.color_plane[0].x; 1218 + u32 y = plane_state->view.color_plane[0].y; 1219 + u32 dvscntr, linear_offset; 1220 + unsigned long irqflags; 1221 + 1222 + dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state); 1223 + 1224 + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 1225 + 1226 + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1229 1227 1230 1228 if (key->flags) { 1231 1229 intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value); ··· 1268 1224 intel_plane_ggtt_offset(plane_state) + dvssurf_offset); 1269 1225 1270 1226 if (IS_G4X(dev_priv)) 1271 - g4x_update_gamma(plane_state); 1227 + g4x_sprite_update_gamma(plane_state); 1272 1228 else 1273 - 
ilk_update_gamma(plane_state); 1229 + ilk_sprite_update_gamma(plane_state); 1274 1230 1275 1231 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1276 1232 } 1277 1233 1278 1234 static void 1279 - g4x_disable_plane(struct intel_plane *plane, 1280 - const struct intel_crtc_state *crtc_state) 1235 + g4x_sprite_disable_arm(struct intel_plane *plane, 1236 + const struct intel_crtc_state *crtc_state) 1281 1237 { 1282 1238 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1283 1239 enum pipe pipe = plane->pipe; ··· 1294 1250 } 1295 1251 1296 1252 static bool 1297 - g4x_plane_get_hw_state(struct intel_plane *plane, 1298 - enum pipe *pipe) 1253 + g4x_sprite_get_hw_state(struct intel_plane *plane, 1254 + enum pipe *pipe) 1299 1255 { 1300 1256 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1301 1257 enum intel_display_power_domain power_domain; ··· 1343 1299 int src_x, src_w, src_h, crtc_w, crtc_h; 1344 1300 const struct drm_display_mode *adjusted_mode = 1345 1301 &crtc_state->hw.adjusted_mode; 1346 - unsigned int stride = plane_state->view.color_plane[0].stride; 1302 + unsigned int stride = plane_state->view.color_plane[0].mapping_stride; 1347 1303 unsigned int cpp = fb->format->cpp[0]; 1348 1304 unsigned int width_bytes; 1349 1305 int min_width, min_height; ··· 1611 1567 return ret; 1612 1568 } 1613 1569 1614 - static const u32 g4x_plane_formats[] = { 1570 + static const u32 g4x_sprite_formats[] = { 1615 1571 DRM_FORMAT_XRGB8888, 1616 1572 DRM_FORMAT_YUYV, 1617 1573 DRM_FORMAT_YVYU, ··· 1619 1575 DRM_FORMAT_VYUY, 1620 1576 }; 1621 1577 1622 - static const u64 i9xx_plane_format_modifiers[] = { 1623 - I915_FORMAT_MOD_X_TILED, 1624 - DRM_FORMAT_MOD_LINEAR, 1625 - DRM_FORMAT_MOD_INVALID 1626 - }; 1627 - 1628 - static const u32 snb_plane_formats[] = { 1578 + static const u32 snb_sprite_formats[] = { 1629 1579 DRM_FORMAT_XRGB8888, 1630 1580 DRM_FORMAT_XBGR8888, 1631 1581 DRM_FORMAT_XRGB2101010, ··· 1632 1594 DRM_FORMAT_VYUY, 1633 1595 }; 1634 
1596 1635 - static const u32 vlv_plane_formats[] = { 1597 + static const u32 vlv_sprite_formats[] = { 1636 1598 DRM_FORMAT_C8, 1637 1599 DRM_FORMAT_RGB565, 1638 1600 DRM_FORMAT_XRGB8888, ··· 1667 1629 static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, 1668 1630 u32 format, u64 modifier) 1669 1631 { 1670 - switch (modifier) { 1671 - case DRM_FORMAT_MOD_LINEAR: 1672 - case I915_FORMAT_MOD_X_TILED: 1673 - break; 1674 - default: 1632 + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) 1675 1633 return false; 1676 - } 1677 1634 1678 1635 switch (format) { 1679 1636 case DRM_FORMAT_XRGB8888: ··· 1688 1655 static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, 1689 1656 u32 format, u64 modifier) 1690 1657 { 1691 - switch (modifier) { 1692 - case DRM_FORMAT_MOD_LINEAR: 1693 - case I915_FORMAT_MOD_X_TILED: 1694 - break; 1695 - default: 1658 + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) 1696 1659 return false; 1697 - } 1698 1660 1699 1661 switch (format) { 1700 1662 case DRM_FORMAT_XRGB8888: ··· 1714 1686 static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane, 1715 1687 u32 format, u64 modifier) 1716 1688 { 1717 - switch (modifier) { 1718 - case DRM_FORMAT_MOD_LINEAR: 1719 - case I915_FORMAT_MOD_X_TILED: 1720 - break; 1721 - default: 1689 + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) 1722 1690 return false; 1723 - } 1724 1691 1725 1692 switch (format) { 1726 1693 case DRM_FORMAT_C8: ··· 1785 1762 return plane; 1786 1763 1787 1764 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1788 - plane->update_plane = vlv_update_plane; 1789 - plane->disable_plane = vlv_disable_plane; 1790 - plane->get_hw_state = vlv_plane_get_hw_state; 1765 + plane->update_noarm = vlv_sprite_update_noarm; 1766 + plane->update_arm = vlv_sprite_update_arm; 1767 + plane->disable_arm = vlv_sprite_disable_arm; 1768 + plane->get_hw_state = vlv_sprite_get_hw_state; 1791 1769 
plane->check_plane = vlv_sprite_check; 1792 1770 plane->max_stride = i965_plane_max_stride; 1793 1771 plane->min_cdclk = vlv_plane_min_cdclk; ··· 1797 1773 formats = chv_pipe_b_sprite_formats; 1798 1774 num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats); 1799 1775 } else { 1800 - formats = vlv_plane_formats; 1801 - num_formats = ARRAY_SIZE(vlv_plane_formats); 1776 + formats = vlv_sprite_formats; 1777 + num_formats = ARRAY_SIZE(vlv_sprite_formats); 1802 1778 } 1803 - modifiers = i9xx_plane_format_modifiers; 1804 1779 1805 1780 plane_funcs = &vlv_sprite_funcs; 1806 1781 } else if (DISPLAY_VER(dev_priv) >= 7) { 1807 - plane->update_plane = ivb_update_plane; 1808 - plane->disable_plane = ivb_disable_plane; 1809 - plane->get_hw_state = ivb_plane_get_hw_state; 1782 + plane->update_noarm = ivb_sprite_update_noarm; 1783 + plane->update_arm = ivb_sprite_update_arm; 1784 + plane->disable_arm = ivb_sprite_disable_arm; 1785 + plane->get_hw_state = ivb_sprite_get_hw_state; 1810 1786 plane->check_plane = g4x_sprite_check; 1811 1787 1812 1788 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { ··· 1817 1793 plane->min_cdclk = ivb_sprite_min_cdclk; 1818 1794 } 1819 1795 1820 - formats = snb_plane_formats; 1821 - num_formats = ARRAY_SIZE(snb_plane_formats); 1822 - modifiers = i9xx_plane_format_modifiers; 1796 + formats = snb_sprite_formats; 1797 + num_formats = ARRAY_SIZE(snb_sprite_formats); 1823 1798 1824 1799 plane_funcs = &snb_sprite_funcs; 1825 1800 } else { 1826 - plane->update_plane = g4x_update_plane; 1827 - plane->disable_plane = g4x_disable_plane; 1828 - plane->get_hw_state = g4x_plane_get_hw_state; 1801 + plane->update_noarm = g4x_sprite_update_noarm; 1802 + plane->update_arm = g4x_sprite_update_arm; 1803 + plane->disable_arm = g4x_sprite_disable_arm; 1804 + plane->get_hw_state = g4x_sprite_get_hw_state; 1829 1805 plane->check_plane = g4x_sprite_check; 1830 1806 plane->max_stride = g4x_sprite_max_stride; 1831 1807 plane->min_cdclk = g4x_sprite_min_cdclk; 1832 1808 
1833 - modifiers = i9xx_plane_format_modifiers; 1834 1809 if (IS_SANDYBRIDGE(dev_priv)) { 1835 - formats = snb_plane_formats; 1836 - num_formats = ARRAY_SIZE(snb_plane_formats); 1810 + formats = snb_sprite_formats; 1811 + num_formats = ARRAY_SIZE(snb_sprite_formats); 1837 1812 1838 1813 plane_funcs = &snb_sprite_funcs; 1839 1814 } else { 1840 - formats = g4x_plane_formats; 1841 - num_formats = ARRAY_SIZE(g4x_plane_formats); 1815 + formats = g4x_sprite_formats; 1816 + num_formats = ARRAY_SIZE(g4x_sprite_formats); 1842 1817 1843 1818 plane_funcs = &g4x_sprite_funcs; 1844 1819 } ··· 1856 1833 plane->id = PLANE_SPRITE0 + sprite; 1857 1834 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); 1858 1835 1836 + modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X); 1837 + 1859 1838 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 1860 1839 0, plane_funcs, 1861 1840 formats, num_formats, modifiers, 1862 1841 DRM_PLANE_TYPE_OVERLAY, 1863 1842 "sprite %c", sprite_name(pipe, sprite)); 1843 + kfree(modifiers); 1844 + 1864 1845 if (ret) 1865 1846 goto fail; 1866 1847
-4
drivers/gpu/drm/i915/display/intel_sprite.h
··· 27 27 #define VBLANK_EVASION_TIME_US 100 28 28 #endif 29 29 30 - int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, 31 - int usecs); 32 30 struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, 33 31 enum pipe pipe, int plane); 34 32 int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, 35 33 struct drm_file *file_priv); 36 - void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); 37 - void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); 38 34 int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); 39 35 int chv_plane_check_rotation(const struct intel_plane_state *plane_state); 40 36
+31 -34
drivers/gpu/drm/i915/display/intel_vdsc.c
··· 442 442 } 443 443 } 444 444 445 - int intel_dsc_compute_params(struct intel_encoder *encoder, 446 - struct intel_crtc_state *pipe_config) 445 + int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) 447 446 { 448 - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 447 + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 448 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 449 449 struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config; 450 450 u16 compressed_bpp = pipe_config->dsc.compressed_bpp; 451 451 const struct rc_parameters *rc_params; ··· 598 598 pps_val |= DSC_422_ENABLE; 599 599 if (vdsc_cfg->vbr_enable) 600 600 pps_val |= DSC_VBR_ENABLE; 601 - drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val); 601 + drm_dbg_kms(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val); 602 602 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 603 603 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0, 604 604 pps_val); ··· 622 622 /* Populate PICTURE_PARAMETER_SET_1 registers */ 623 623 pps_val = 0; 624 624 pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel); 625 - drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val); 625 + drm_dbg_kms(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val); 626 626 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 627 627 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1, 628 628 pps_val); ··· 647 647 pps_val = 0; 648 648 pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | 649 649 DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); 650 - drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val); 650 + drm_dbg_kms(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val); 651 651 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 652 652 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2, 653 653 pps_val); ··· 672 672 pps_val = 0; 673 673 pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) | 674 674 DSC_SLICE_WIDTH(vdsc_cfg->slice_width); 675 - drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val); 675 + drm_dbg_kms(&dev_priv->drm, "PPS3 
= 0x%08x\n", pps_val); 676 676 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 677 677 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3, 678 678 pps_val); ··· 697 697 pps_val = 0; 698 698 pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) | 699 699 DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay); 700 - drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val); 700 + drm_dbg_kms(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val); 701 701 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 702 702 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4, 703 703 pps_val); ··· 722 722 pps_val = 0; 723 723 pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) | 724 724 DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval); 725 - drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val); 725 + drm_dbg_kms(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val); 726 726 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 727 727 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5, 728 728 pps_val); ··· 749 749 DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) | 750 750 DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) | 751 751 DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp); 752 - drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val); 752 + drm_dbg_kms(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val); 753 753 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 754 754 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6, 755 755 pps_val); ··· 774 774 pps_val = 0; 775 775 pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) | 776 776 DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset); 777 - drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val); 777 + drm_dbg_kms(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val); 778 778 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 779 779 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7, 780 780 pps_val); ··· 799 799 pps_val = 0; 800 800 pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) | 801 801 DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset); 802 - drm_info(&dev_priv->drm, 
"PPS8 = 0x%08x\n", pps_val); 802 + drm_dbg_kms(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val); 803 803 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 804 804 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8, 805 805 pps_val); ··· 824 824 pps_val = 0; 825 825 pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) | 826 826 DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); 827 - drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); 827 + drm_dbg_kms(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); 828 828 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 829 829 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9, 830 830 pps_val); ··· 851 851 DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) | 852 852 DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) | 853 853 DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST); 854 - drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val); 854 + drm_dbg_kms(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val); 855 855 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 856 856 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10, 857 857 pps_val); ··· 879 879 vdsc_cfg->slice_width) | 880 880 DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height / 881 881 vdsc_cfg->slice_height); 882 - drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val); 882 + drm_dbg_kms(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val); 883 883 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 884 884 intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16, 885 885 pps_val); ··· 906 906 rc_buf_thresh_dword[i / 4] |= 907 907 (u32)(vdsc_cfg->rc_buf_thresh[i] << 908 908 BITS_PER_BYTE * (i % 4)); 909 - drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i, 910 - rc_buf_thresh_dword[i / 4]); 909 + drm_dbg_kms(&dev_priv->drm, "RC_BUF_THRESH_%d = 0x%08x\n", i, 910 + rc_buf_thresh_dword[i / 4]); 911 911 } 912 912 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 913 913 intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0, ··· 963 963 RC_MAX_QP_SHIFT) | 964 964 (vdsc_cfg->rc_range_params[i].range_min_qp << 965 965 RC_MIN_QP_SHIFT)) 
<< 16 * (i % 2)); 966 - drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i, 967 - rc_range_params_dword[i / 2]); 966 + drm_dbg_kms(&dev_priv->drm, "RC_RANGE_PARAM_%d = 0x%08x\n", i, 967 + rc_range_params_dword[i / 2]); 968 968 } 969 969 if (!is_pipe_dsc(crtc, cpu_transcoder)) { 970 970 intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0, ··· 1055 1055 } 1056 1056 } 1057 1057 1058 - static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, 1059 - const struct intel_crtc_state *crtc_state) 1058 + void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, 1059 + const struct intel_crtc_state *crtc_state) 1060 1060 { 1061 1061 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 1062 1062 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1063 1063 struct mipi_dsi_device *dsi; 1064 1064 struct drm_dsc_picture_parameter_set pps; 1065 1065 enum port port; 1066 + 1067 + if (!crtc_state->dsc.compression_enable) 1068 + return; 1066 1069 1067 1070 drm_dsc_pps_payload_pack(&pps, vdsc_cfg); 1068 1071 ··· 1077 1074 } 1078 1075 } 1079 1076 1080 - static void intel_dsc_dp_pps_write(struct intel_encoder *encoder, 1081 - const struct intel_crtc_state *crtc_state) 1077 + void intel_dsc_dp_pps_write(struct intel_encoder *encoder, 1078 + const struct intel_crtc_state *crtc_state) 1082 1079 { 1083 - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1084 - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1080 + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 1085 1081 const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 1086 1082 struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; 1083 + 1084 + if (!crtc_state->dsc.compression_enable) 1085 + return; 1087 1086 1088 1087 /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ 1089 1088 drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header); ··· 1147 1142 } 1148 1143 } 1149 1144 1150 - void intel_dsc_enable(struct intel_encoder *encoder, 1151 - const 
struct intel_crtc_state *crtc_state) 1145 + void intel_dsc_enable(const struct intel_crtc_state *crtc_state) 1152 1146 { 1153 1147 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1154 1148 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); ··· 1158 1154 return; 1159 1155 1160 1156 intel_dsc_pps_configure(crtc_state); 1161 - 1162 - if (!crtc_state->bigjoiner_slave) { 1163 - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) 1164 - intel_dsc_dsi_pps_write(encoder, crtc_state); 1165 - else 1166 - intel_dsc_dp_pps_write(encoder, crtc_state); 1167 - } 1168 1157 1169 1158 dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE; 1170 1159 if (crtc_state->dsc.dsc_split) {
+6 -4
drivers/gpu/drm/i915/display/intel_vdsc.h
··· 15 15 16 16 bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state); 17 17 void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state); 18 - void intel_dsc_enable(struct intel_encoder *encoder, 19 - const struct intel_crtc_state *crtc_state); 18 + void intel_dsc_enable(const struct intel_crtc_state *crtc_state); 20 19 void intel_dsc_disable(const struct intel_crtc_state *crtc_state); 21 - int intel_dsc_compute_params(struct intel_encoder *encoder, 22 - struct intel_crtc_state *pipe_config); 20 + int intel_dsc_compute_params(struct intel_crtc_state *pipe_config); 23 21 void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state); 24 22 void intel_dsc_get_config(struct intel_crtc_state *crtc_state); 25 23 enum intel_display_power_domain 26 24 intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder); 27 25 struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc); 26 + void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, 27 + const struct intel_crtc_state *crtc_state); 28 + void intel_dsc_dp_pps_write(struct intel_encoder *encoder, 29 + const struct intel_crtc_state *crtc_state); 28 30 29 31 #endif /* __INTEL_VDSC_H__ */
+15 -3
drivers/gpu/drm/i915/display/intel_vrr.c
··· 60 60 * Between those two points the vblank exit starts (and hence registers get 61 61 * latched) ASAP after a push is sent. 62 62 * 63 - * framestart_delay is programmable 0-3. 63 + * framestart_delay is programmable 1-4. 64 64 */ 65 65 static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state) 66 66 { ··· 138 138 i915->window2_delay; 139 139 else 140 140 /* 141 - * FIXME: s/4/framestart_delay+1/ to get consistent 141 + * FIXME: s/4/framestart_delay/ to get consistent 142 142 * earliest/latest points for register latching regardless 143 143 * of the framestart_delay used? 144 144 * 145 145 * FIXME: this really needs the extra scanline to provide consistent 146 146 * behaviour for all framestart_delay values. Otherwise with 147 - * framestart_delay==3 we will end up extending the min vblank by 147 + * framestart_delay==4 we will end up extending the min vblank by 148 148 * one extra line. 149 149 */ 150 150 crtc_state->vrr.pipeline_full = ··· 191 191 192 192 intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 193 193 TRANS_PUSH_EN | TRANS_PUSH_SEND); 194 + } 195 + 196 + bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state) 197 + { 198 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 199 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 200 + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 201 + 202 + if (!crtc_state->vrr.enable) 203 + return false; 204 + 205 + return intel_de_read(dev_priv, TRANS_PUSH(cpu_transcoder)) & TRANS_PUSH_SEND; 194 206 } 195 207 196 208 void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+1
drivers/gpu/drm/i915/display/intel_vrr.h
··· 23 23 void intel_vrr_enable(struct intel_encoder *encoder, 24 24 const struct intel_crtc_state *crtc_state); 25 25 void intel_vrr_send_push(const struct intel_crtc_state *crtc_state); 26 + bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state); 26 27 void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state); 27 28 void intel_vrr_get_config(struct intel_crtc *crtc, 28 29 struct intel_crtc_state *crtc_state);
+1
drivers/gpu/drm/i915/display/skl_scaler.c
··· 4 4 */ 5 5 #include "intel_de.h" 6 6 #include "intel_display_types.h" 7 + #include "intel_fb.h" 7 8 #include "skl_scaler.h" 8 9 #include "skl_universal_plane.h" 9 10
+224 -238
drivers/gpu/drm/i915/display/skl_universal_plane.c
··· 163 163 DRM_FORMAT_XVYU16161616, 164 164 }; 165 165 166 - static const u64 skl_plane_format_modifiers_noccs[] = { 167 - I915_FORMAT_MOD_Yf_TILED, 168 - I915_FORMAT_MOD_Y_TILED, 169 - I915_FORMAT_MOD_X_TILED, 170 - DRM_FORMAT_MOD_LINEAR, 171 - DRM_FORMAT_MOD_INVALID 172 - }; 173 - 174 - static const u64 skl_plane_format_modifiers_ccs[] = { 175 - I915_FORMAT_MOD_Yf_TILED_CCS, 176 - I915_FORMAT_MOD_Y_TILED_CCS, 177 - I915_FORMAT_MOD_Yf_TILED, 178 - I915_FORMAT_MOD_Y_TILED, 179 - I915_FORMAT_MOD_X_TILED, 180 - DRM_FORMAT_MOD_LINEAR, 181 - DRM_FORMAT_MOD_INVALID 182 - }; 183 - 184 - static const u64 gen12_plane_format_modifiers_mc_ccs[] = { 185 - I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, 186 - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, 187 - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, 188 - I915_FORMAT_MOD_Y_TILED, 189 - I915_FORMAT_MOD_X_TILED, 190 - DRM_FORMAT_MOD_LINEAR, 191 - DRM_FORMAT_MOD_INVALID 192 - }; 193 - 194 - static const u64 gen12_plane_format_modifiers_rc_ccs[] = { 195 - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, 196 - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, 197 - I915_FORMAT_MOD_Y_TILED, 198 - I915_FORMAT_MOD_X_TILED, 199 - DRM_FORMAT_MOD_LINEAR, 200 - DRM_FORMAT_MOD_INVALID 201 - }; 202 - 203 - static const u64 adlp_step_a_plane_format_modifiers[] = { 204 - I915_FORMAT_MOD_Y_TILED, 205 - I915_FORMAT_MOD_X_TILED, 206 - DRM_FORMAT_MOD_LINEAR, 207 - DRM_FORMAT_MOD_INVALID 208 - }; 209 - 210 166 int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 211 167 { 212 168 switch (format) { ··· 589 633 { 590 634 const struct drm_framebuffer *fb = plane_state->hw.fb; 591 635 unsigned int rotation = plane_state->hw.rotation; 592 - u32 stride = plane_state->view.color_plane[color_plane].stride; 636 + u32 stride = plane_state->view.color_plane[color_plane].scanout_stride; 593 637 594 638 if (color_plane >= fb->format->num_planes) 595 639 return 0; ··· 598 642 } 599 643 600 644 static void 601 - skl_disable_plane(struct intel_plane *plane, 602 - const struct 
intel_crtc_state *crtc_state) 645 + skl_plane_disable_arm(struct intel_plane *plane, 646 + const struct intel_crtc_state *crtc_state) 603 647 { 604 648 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 605 649 enum plane_id plane_id = plane->id; ··· 941 985 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; 942 986 } 943 987 988 + if (plane_state->force_black) 989 + plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE; 990 + 944 991 return plane_color_ctl; 945 992 } 946 993 ··· 967 1008 } 968 1009 } 969 1010 970 - static void intel_load_plane_csc_black(struct intel_plane *intel_plane) 1011 + static u32 skl_plane_surf(const struct intel_plane_state *plane_state, 1012 + int color_plane) 971 1013 { 972 - struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 973 - enum pipe pipe = intel_plane->pipe; 974 - enum plane_id plane = intel_plane->id; 975 - u16 postoff = 0; 1014 + u32 plane_surf; 976 1015 977 - drm_dbg_kms(&dev_priv->drm, "plane color CTM to black %s:%d\n", 978 - intel_plane->base.name, plane); 979 - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 0), 0); 980 - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 1), 0); 1016 + plane_surf = intel_plane_ggtt_offset(plane_state) + 1017 + skl_surf_address(plane_state, color_plane); 981 1018 982 - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 2), 0); 983 - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 3), 0); 1019 + if (plane_state->decrypt) 1020 + plane_surf |= PLANE_SURF_DECRYPT; 984 1021 985 - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 4), 0); 986 - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 5), 0); 1022 + return plane_surf; 1023 + } 987 1024 988 - intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 0), 0); 989 - intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 1), 0); 990 - intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 2), 0); 1025 + static void icl_plane_csc_load_black(struct intel_plane 
*plane) 1026 + { 1027 + struct drm_i915_private *i915 = to_i915(plane->base.dev); 1028 + enum plane_id plane_id = plane->id; 1029 + enum pipe pipe = plane->pipe; 991 1030 992 - intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 0), postoff); 993 - intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 1), postoff); 994 - intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 2), postoff); 1031 + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0); 1032 + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0); 1033 + 1034 + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0); 1035 + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0); 1036 + 1037 + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0); 1038 + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0); 1039 + 1040 + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0); 1041 + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0); 1042 + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0); 1043 + 1044 + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0); 1045 + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0); 1046 + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); 995 1047 } 996 1048 997 1049 static void 998 - skl_program_plane(struct intel_plane *plane, 999 - const struct intel_crtc_state *crtc_state, 1000 - const struct intel_plane_state *plane_state, 1001 - int color_plane) 1050 + skl_program_plane_noarm(struct intel_plane *plane, 1051 + const struct intel_crtc_state *crtc_state, 1052 + const struct intel_plane_state *plane_state, 1053 + int color_plane) 1002 1054 { 1003 1055 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1004 1056 enum plane_id plane_id = plane->id; 1005 1057 enum pipe pipe = plane->pipe; 1006 - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 1007 - u32 surf_addr = skl_surf_address(plane_state, 
color_plane); 1008 1058 u32 stride = skl_plane_stride(plane_state, color_plane); 1009 1059 const struct drm_framebuffer *fb = plane_state->hw.fb; 1010 - int aux_plane = skl_main_to_aux_plane(fb, color_plane); 1011 1060 int crtc_x = plane_state->uapi.dst.x1; 1012 1061 int crtc_y = plane_state->uapi.dst.y1; 1013 - u32 x = plane_state->view.color_plane[color_plane].x; 1014 - u32 y = plane_state->view.color_plane[color_plane].y; 1015 1062 u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; 1016 1063 u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; 1017 - u8 alpha = plane_state->hw.alpha >> 8; 1018 - u32 plane_color_ctl = 0, aux_dist = 0; 1019 1064 unsigned long irqflags; 1020 - u32 keymsk, keymax, plane_surf; 1021 - u32 plane_ctl = plane_state->ctl; 1022 - 1023 - plane_ctl |= skl_plane_ctl_crtc(crtc_state); 1024 - 1025 - if (DISPLAY_VER(dev_priv) >= 10) 1026 - plane_color_ctl = plane_state->color_ctl | 1027 - glk_plane_color_ctl_crtc(crtc_state); 1028 1065 1029 1066 /* Sizes are 0 based */ 1030 1067 src_w--; 1031 1068 src_h--; 1032 - 1033 - keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); 1034 - 1035 - keymsk = key->channel_mask & 0x7ffffff; 1036 - if (alpha < 0xff) 1037 - keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; 1038 1069 1039 1070 /* The scaler will handle the output position */ 1040 1071 if (plane_state->scaler_id >= 0) { ··· 1032 1083 crtc_y = 0; 1033 1084 } 1034 1085 1035 - if (aux_plane) { 1036 - aux_dist = skl_surf_address(plane_state, aux_plane) - surf_addr; 1037 - 1038 - if (DISPLAY_VER(dev_priv) < 12) 1039 - aux_dist |= skl_plane_stride(plane_state, aux_plane); 1040 - } 1041 - 1042 1086 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1087 + 1088 + /* 1089 + * FIXME: pxp session invalidation can hit any time even at time of commit 1090 + * or after the commit, display content will be garbage. 
1091 + */ 1092 + if (plane_state->force_black) 1093 + icl_plane_csc_load_black(plane); 1043 1094 1044 1095 intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride); 1045 1096 intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), ··· 1047 1098 intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), 1048 1099 (src_h << 16) | src_w); 1049 1100 1050 - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); 1051 - 1052 1101 if (icl_is_hdr_plane(dev_priv, plane_id)) 1053 1102 intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 1054 1103 plane_state->cus_ctl); 1055 1104 1056 - if (DISPLAY_VER(dev_priv) >= 10) 1057 - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), 1058 - plane_color_ctl); 1059 - 1060 1105 if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) 1061 1106 icl_program_input_csc(plane, crtc_state, plane_state); 1062 1107 1063 - if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC) 1108 + if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) 1064 1109 intel_uncore_write64_fw(&dev_priv->uncore, 1065 1110 PLANE_CC_VAL(pipe, plane_id), plane_state->ccval); 1066 1111 1067 1112 skl_write_plane_wm(plane, crtc_state); 1113 + 1114 + intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); 1115 + 1116 + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1117 + } 1118 + 1119 + static void 1120 + skl_program_plane_arm(struct intel_plane *plane, 1121 + const struct intel_crtc_state *crtc_state, 1122 + const struct intel_plane_state *plane_state, 1123 + int color_plane) 1124 + { 1125 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1126 + enum plane_id plane_id = plane->id; 1127 + enum pipe pipe = plane->pipe; 1128 + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 1129 + const struct drm_framebuffer *fb = plane_state->hw.fb; 1130 + int aux_plane = skl_main_to_aux_plane(fb, color_plane); 1131 + u32 x = plane_state->view.color_plane[color_plane].x; 1132 + 
u32 y = plane_state->view.color_plane[color_plane].y; 1133 + u32 keymsk, keymax, aux_dist = 0, plane_color_ctl = 0; 1134 + u8 alpha = plane_state->hw.alpha >> 8; 1135 + u32 plane_ctl = plane_state->ctl; 1136 + unsigned long irqflags; 1137 + 1138 + plane_ctl |= skl_plane_ctl_crtc(crtc_state); 1139 + 1140 + if (DISPLAY_VER(dev_priv) >= 10) 1141 + plane_color_ctl = plane_state->color_ctl | 1142 + glk_plane_color_ctl_crtc(crtc_state); 1143 + 1144 + keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); 1145 + 1146 + keymsk = key->channel_mask & 0x7ffffff; 1147 + if (alpha < 0xff) 1148 + keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; 1149 + 1150 + if (aux_plane) { 1151 + aux_dist = skl_surf_address(plane_state, aux_plane) - 1152 + skl_surf_address(plane_state, color_plane); 1153 + 1154 + if (DISPLAY_VER(dev_priv) < 12) 1155 + aux_dist |= skl_plane_stride(plane_state, aux_plane); 1156 + } 1157 + 1158 + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1068 1159 1069 1160 intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), 1070 1161 key->min_value); ··· 1114 1125 intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), 1115 1126 (y << 16) | x); 1116 1127 1128 + intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); 1129 + 1117 1130 if (DISPLAY_VER(dev_priv) < 11) 1118 1131 intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), 1119 1132 (plane_state->view.color_plane[1].y << 16) | 1120 1133 plane_state->view.color_plane[1].x); 1121 1134 1122 - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); 1135 + if (DISPLAY_VER(dev_priv) >= 10) 1136 + intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); 1123 1137 1124 1138 /* 1125 1139 * Enable the scaler before the plane so that we don't 1126 1140 * get a catastrophic underrun even if the two operations 1127 1141 * end up happening in two different frames. 
1142 + * 1143 + * TODO: split into noarm+arm pair 1128 1144 */ 1129 1145 if (plane_state->scaler_id >= 0) 1130 1146 skl_program_plane_scaler(plane, crtc_state, plane_state); ··· 1140 1146 * the control register just before the surface register. 1141 1147 */ 1142 1148 intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); 1143 - plane_surf = intel_plane_ggtt_offset(plane_state) + surf_addr; 1144 - plane_color_ctl = intel_de_read_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id)); 1145 - 1146 - /* 1147 - * FIXME: pxp session invalidation can hit any time even at time of commit 1148 - * or after the commit, display content will be garbage. 1149 - */ 1150 - if (plane_state->decrypt) { 1151 - plane_surf |= PLANE_SURF_DECRYPT; 1152 - } else if (plane_state->force_black) { 1153 - intel_load_plane_csc_black(plane); 1154 - plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE; 1155 - } 1156 - 1157 - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), 1158 - plane_color_ctl); 1159 - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), plane_surf); 1149 + intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 1150 + skl_plane_surf(plane_state, color_plane)); 1160 1151 1161 1152 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1162 1153 } ··· 1156 1177 unsigned long irqflags; 1157 1178 enum plane_id plane_id = plane->id; 1158 1179 enum pipe pipe = plane->pipe; 1159 - u32 surf_addr = plane_state->view.color_plane[0].offset; 1160 1180 u32 plane_ctl = plane_state->ctl; 1161 1181 1162 1182 plane_ctl |= skl_plane_ctl_crtc(crtc_state); ··· 1167 1189 1168 1190 intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); 1169 1191 intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 1170 - intel_plane_ggtt_offset(plane_state) + surf_addr); 1192 + skl_plane_surf(plane_state, 0)); 1171 1193 1172 1194 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1173 1195 } 1174 1196 1175 1197 static void 1176 - skl_update_plane(struct intel_plane *plane, 1177 
- const struct intel_crtc_state *crtc_state, 1178 - const struct intel_plane_state *plane_state) 1198 + skl_plane_update_noarm(struct intel_plane *plane, 1199 + const struct intel_crtc_state *crtc_state, 1200 + const struct intel_plane_state *plane_state) 1179 1201 { 1180 1202 int color_plane = 0; 1181 1203 ··· 1183 1205 /* Program the UV plane on planar master */ 1184 1206 color_plane = 1; 1185 1207 1186 - skl_program_plane(plane, crtc_state, plane_state, color_plane); 1208 + skl_program_plane_noarm(plane, crtc_state, plane_state, color_plane); 1209 + } 1210 + 1211 + static void 1212 + skl_plane_update_arm(struct intel_plane *plane, 1213 + const struct intel_crtc_state *crtc_state, 1214 + const struct intel_plane_state *plane_state) 1215 + { 1216 + int color_plane = 0; 1217 + 1218 + if (plane_state->planar_linked_plane && !plane_state->planar_slave) 1219 + /* Program the UV plane on planar master */ 1220 + color_plane = 1; 1221 + 1222 + skl_program_plane_arm(plane, crtc_state, plane_state, color_plane); 1187 1223 } 1188 1224 1189 1225 static bool intel_format_is_p01x(u32 format) ··· 1224 1232 return 0; 1225 1233 1226 1234 if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) && 1227 - is_ccs_modifier(fb->modifier)) { 1235 + intel_fb_is_ccs_modifier(fb->modifier)) { 1228 1236 drm_dbg_kms(&dev_priv->drm, 1229 1237 "RC support only with 0/180 degree rotation (%x)\n", 1230 1238 rotation); ··· 1276 1284 /* Y-tiling is not supported in IF-ID Interlace mode */ 1277 1285 if (crtc_state->hw.enable && 1278 1286 crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE && 1279 - (fb->modifier == I915_FORMAT_MOD_Y_TILED || 1280 - fb->modifier == I915_FORMAT_MOD_Yf_TILED || 1281 - fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || 1282 - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS || 1283 - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || 1284 - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || 1285 - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) { 
1287 + fb->modifier != DRM_FORMAT_MOD_LINEAR && 1288 + fb->modifier != I915_FORMAT_MOD_X_TILED) { 1286 1289 drm_dbg_kms(&dev_priv->drm, 1287 1290 "Y/Yf tiling not supported in IF-ID mode\n"); 1288 1291 return -EINVAL; ··· 1474 1487 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 1475 1488 int cpp = fb->format->cpp[0]; 1476 1489 1477 - while ((*x + w) * cpp > plane_state->view.color_plane[0].stride) { 1490 + while ((*x + w) * cpp > plane_state->view.color_plane[0].mapping_stride) { 1478 1491 if (*offset == 0) { 1479 1492 drm_dbg_kms(&dev_priv->drm, 1480 1493 "Unable to find suitable display surface offset due to X-tiling\n"); ··· 1523 1536 * CCS AUX surface doesn't have its own x/y offsets, we must make sure 1524 1537 * they match with the main surface x/y offsets. 1525 1538 */ 1526 - if (is_ccs_modifier(fb->modifier)) { 1539 + if (intel_fb_is_ccs_modifier(fb->modifier)) { 1527 1540 while (!skl_check_main_ccs_coordinates(plane_state, x, y, 1528 1541 offset, aux_plane)) { 1529 1542 if (offset == 0) ··· 1587 1600 offset = intel_plane_compute_aligned_offset(&x, &y, 1588 1601 plane_state, uv_plane); 1589 1602 1590 - if (is_ccs_modifier(fb->modifier)) { 1603 + if (intel_fb_is_ccs_modifier(fb->modifier)) { 1591 1604 int ccs_plane = main_to_ccs_plane(fb, uv_plane); 1592 1605 u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset; 1593 1606 u32 alignment = intel_surf_alignment(fb, uv_plane); ··· 1643 1656 int hsub, vsub; 1644 1657 int x, y; 1645 1658 1646 - if (!is_ccs_plane(fb, ccs_plane) || 1647 - is_gen12_ccs_cc_plane(fb, ccs_plane)) 1659 + if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane)) 1648 1660 continue; 1649 1661 1650 1662 intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, ··· 1685 1699 * Handle the AUX surface first since the main surface setup depends on 1686 1700 * it. 
1687 1701 */ 1688 - if (is_ccs_modifier(fb->modifier)) { 1702 + if (intel_fb_is_ccs_modifier(fb->modifier)) { 1689 1703 ret = skl_check_ccs_aux_surface(plane_state); 1690 1704 if (ret) 1691 1705 return ret; ··· 1721 1735 default: 1722 1736 return true; 1723 1737 } 1738 + } 1739 + 1740 + static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj) 1741 + { 1742 + struct drm_i915_private *i915 = to_i915(obj->base.dev); 1743 + 1744 + return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0; 1745 + } 1746 + 1747 + static bool pxp_is_borked(struct drm_i915_gem_object *obj) 1748 + { 1749 + return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj); 1724 1750 } 1725 1751 1726 1752 static int skl_plane_check(struct intel_crtc_state *crtc_state, ··· 1778 1780 ret = skl_plane_check_nv12_rotation(plane_state); 1779 1781 if (ret) 1780 1782 return ret; 1783 + 1784 + if (DISPLAY_VER(dev_priv) >= 11) { 1785 + plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb)); 1786 + plane_state->force_black = pxp_is_borked(intel_fb_obj(fb)); 1787 + } 1781 1788 1782 1789 /* HW only has 8 bits pixel precision, disable plane if invisible */ 1783 1790 if (!(plane_state->hw.alpha >> 8)) ··· 1873 1870 } 1874 1871 } 1875 1872 1876 - static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, 1877 - enum pipe pipe, enum plane_id plane_id) 1878 - { 1879 - if (plane_id == PLANE_CURSOR) 1880 - return false; 1881 - 1882 - if (DISPLAY_VER(dev_priv) >= 11) 1883 - return true; 1884 - 1885 - if (IS_GEMINILAKE(dev_priv)) 1886 - return pipe != PIPE_C; 1887 - 1888 - return pipe != PIPE_C && 1889 - (plane_id == PLANE_PRIMARY || 1890 - plane_id == PLANE_SPRITE0); 1891 - } 1892 - 1893 1873 static bool skl_plane_format_mod_supported(struct drm_plane *_plane, 1894 1874 u32 format, u64 modifier) 1895 1875 { 1896 1876 struct intel_plane *plane = to_intel_plane(_plane); 1897 1877 1898 - switch (modifier) { 1899 - case DRM_FORMAT_MOD_LINEAR: 1900 - case I915_FORMAT_MOD_X_TILED: 
1901 - case I915_FORMAT_MOD_Y_TILED: 1902 - case I915_FORMAT_MOD_Yf_TILED: 1903 - break; 1904 - case I915_FORMAT_MOD_Y_TILED_CCS: 1905 - case I915_FORMAT_MOD_Yf_TILED_CCS: 1906 - if (!plane->has_ccs) 1907 - return false; 1908 - break; 1909 - default: 1878 + if (!intel_fb_plane_supports_modifier(plane, modifier)) 1910 1879 return false; 1911 - } 1912 1880 1913 1881 switch (format) { 1914 1882 case DRM_FORMAT_XRGB8888: 1915 1883 case DRM_FORMAT_XBGR8888: 1916 1884 case DRM_FORMAT_ARGB8888: 1917 1885 case DRM_FORMAT_ABGR8888: 1918 - if (is_ccs_modifier(modifier)) 1886 + if (intel_fb_is_ccs_modifier(modifier)) 1919 1887 return true; 1920 1888 fallthrough; 1921 1889 case DRM_FORMAT_RGB565: ··· 1927 1953 } 1928 1954 } 1929 1955 1930 - static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv, 1931 - enum plane_id plane_id) 1932 - { 1933 - /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ 1934 - if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) || 1935 - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D0)) 1936 - return false; 1937 - 1938 - /* Wa_22011186057 */ 1939 - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 1940 - return false; 1941 - 1942 - return plane_id < PLANE_SPRITE4; 1943 - } 1944 - 1945 1956 static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, 1946 1957 u32 format, u64 modifier) 1947 1958 { 1948 - struct drm_i915_private *dev_priv = to_i915(_plane->dev); 1949 1959 struct intel_plane *plane = to_intel_plane(_plane); 1950 1960 1951 - switch (modifier) { 1952 - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: 1953 - if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id)) 1954 - return false; 1955 - fallthrough; 1956 - case DRM_FORMAT_MOD_LINEAR: 1957 - case I915_FORMAT_MOD_X_TILED: 1958 - case I915_FORMAT_MOD_Y_TILED: 1959 - break; 1960 - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: 1961 - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: 1962 - /* Wa_22011186057 */ 1963 - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 1964 - 
return false; 1965 - break; 1966 - default: 1961 + if (!intel_fb_plane_supports_modifier(plane, modifier)) 1967 1962 return false; 1968 - } 1969 1963 1970 1964 switch (format) { 1971 1965 case DRM_FORMAT_XRGB8888: 1972 1966 case DRM_FORMAT_XBGR8888: 1973 1967 case DRM_FORMAT_ARGB8888: 1974 1968 case DRM_FORMAT_ABGR8888: 1975 - if (is_ccs_modifier(modifier)) 1969 + if (intel_fb_is_ccs_modifier(modifier)) 1976 1970 return true; 1977 1971 fallthrough; 1978 1972 case DRM_FORMAT_YUYV: ··· 1952 2010 case DRM_FORMAT_P010: 1953 2011 case DRM_FORMAT_P012: 1954 2012 case DRM_FORMAT_P016: 1955 - if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS) 2013 + if (intel_fb_is_mc_ccs_modifier(modifier)) 1956 2014 return true; 1957 2015 fallthrough; 1958 2016 case DRM_FORMAT_RGB565: ··· 1979 2037 default: 1980 2038 return false; 1981 2039 } 1982 - } 1983 - 1984 - static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv, 1985 - enum plane_id plane_id) 1986 - { 1987 - /* Wa_22011186057 */ 1988 - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 1989 - return adlp_step_a_plane_format_modifiers; 1990 - else if (gen12_plane_supports_mc_ccs(dev_priv, plane_id)) 1991 - return gen12_plane_format_modifiers_mc_ccs; 1992 - else 1993 - return gen12_plane_format_modifiers_rc_ccs; 1994 2040 } 1995 2041 1996 2042 static const struct drm_plane_funcs skl_plane_funcs = { ··· 2021 2091 spin_unlock_irq(&i915->irq_lock); 2022 2092 } 2023 2093 2094 + static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915, 2095 + enum pipe pipe, enum plane_id plane_id) 2096 + { 2097 + /* Wa_22011186057 */ 2098 + if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) 2099 + return false; 2100 + 2101 + if (DISPLAY_VER(i915) >= 11) 2102 + return true; 2103 + 2104 + if (IS_GEMINILAKE(i915)) 2105 + return pipe != PIPE_C; 2106 + 2107 + return pipe != PIPE_C && 2108 + (plane_id == PLANE_PRIMARY || 2109 + plane_id == PLANE_SPRITE0); 2110 + } 2111 + 2112 + static bool gen12_plane_has_mc_ccs(struct 
drm_i915_private *i915, 2113 + enum plane_id plane_id) 2114 + { 2115 + if (DISPLAY_VER(i915) < 12) 2116 + return false; 2117 + 2118 + /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ 2119 + if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || 2120 + IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0)) 2121 + return false; 2122 + 2123 + /* Wa_22011186057 */ 2124 + if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) 2125 + return false; 2126 + 2127 + return plane_id < PLANE_SPRITE4; 2128 + } 2129 + 2130 + static u8 skl_get_plane_caps(struct drm_i915_private *i915, 2131 + enum pipe pipe, enum plane_id plane_id) 2132 + { 2133 + u8 caps = INTEL_PLANE_CAP_TILING_X; 2134 + 2135 + if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915)) 2136 + caps |= INTEL_PLANE_CAP_TILING_Y; 2137 + if (DISPLAY_VER(i915) < 12) 2138 + caps |= INTEL_PLANE_CAP_TILING_Yf; 2139 + 2140 + if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) { 2141 + caps |= INTEL_PLANE_CAP_CCS_RC; 2142 + if (DISPLAY_VER(i915) >= 12) 2143 + caps |= INTEL_PLANE_CAP_CCS_RC_CC; 2144 + } 2145 + 2146 + if (gen12_plane_has_mc_ccs(i915, plane_id)) 2147 + caps |= INTEL_PLANE_CAP_CCS_MC; 2148 + 2149 + return caps; 2150 + } 2151 + 2024 2152 struct intel_plane * 2025 2153 skl_universal_plane_create(struct drm_i915_private *dev_priv, 2026 2154 enum pipe pipe, enum plane_id plane_id) ··· 2101 2113 plane->id = plane_id; 2102 2114 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id); 2103 2115 2104 - plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id); 2105 - if (plane->has_fbc) { 2106 - struct intel_fbc *fbc = &dev_priv->fbc; 2107 - 2108 - fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; 2109 - } 2116 + if (skl_plane_has_fbc(dev_priv, pipe, plane_id)) 2117 + plane->fbc = &dev_priv->fbc; 2118 + if (plane->fbc) 2119 + plane->fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; 2110 2120 2111 2121 if (DISPLAY_VER(dev_priv) >= 11) { 2112 2122 plane->min_width = icl_plane_min_width; ··· 2122 2136 } 2123 2137 2124 2138 
plane->max_stride = skl_plane_max_stride; 2125 - plane->update_plane = skl_update_plane; 2126 - plane->disable_plane = skl_disable_plane; 2139 + plane->update_noarm = skl_plane_update_noarm; 2140 + plane->update_arm = skl_plane_update_arm; 2141 + plane->disable_arm = skl_plane_disable_arm; 2127 2142 plane->get_hw_state = skl_plane_get_hw_state; 2128 2143 plane->check_plane = skl_plane_check; 2129 2144 ··· 2146 2159 formats = skl_get_plane_formats(dev_priv, pipe, 2147 2160 plane_id, &num_formats); 2148 2161 2149 - plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); 2150 - if (DISPLAY_VER(dev_priv) >= 12) { 2151 - modifiers = gen12_get_plane_modifiers(dev_priv, plane_id); 2162 + if (DISPLAY_VER(dev_priv) >= 12) 2152 2163 plane_funcs = &gen12_plane_funcs; 2153 - } else { 2154 - if (plane->has_ccs) 2155 - modifiers = skl_plane_format_modifiers_ccs; 2156 - else 2157 - modifiers = skl_plane_format_modifiers_noccs; 2164 + else 2158 2165 plane_funcs = &skl_plane_funcs; 2159 - } 2160 2166 2161 2167 if (plane_id == PLANE_PRIMARY) 2162 2168 plane_type = DRM_PLANE_TYPE_PRIMARY; 2163 2169 else 2164 2170 plane_type = DRM_PLANE_TYPE_OVERLAY; 2171 + 2172 + modifiers = intel_fb_plane_get_modifiers(dev_priv, 2173 + skl_get_plane_caps(dev_priv, pipe, plane_id)); 2165 2174 2166 2175 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 2167 2176 0, plane_funcs, ··· 2165 2182 plane_type, 2166 2183 "plane %d%c", plane_id + 1, 2167 2184 pipe_name(pipe)); 2185 + 2186 + kfree(modifiers); 2187 + 2168 2188 if (ret) 2169 2189 goto fail; 2170 2190
+8
drivers/gpu/drm/i915/display/vlv_dsi.c
··· 38 38 #include "intel_de.h" 39 39 #include "intel_display_types.h" 40 40 #include "intel_dsi.h" 41 + #include "intel_dsi_vbt.h" 41 42 #include "intel_fifo_underrun.h" 42 43 #include "intel_panel.h" 43 44 #include "skl_scaler.h" 45 + #include "vlv_dsi.h" 46 + #include "vlv_dsi_pll.h" 44 47 #include "vlv_sideband.h" 45 48 46 49 /* return pixels in terms of txbyteclkhs */ ··· 1261 1258 struct intel_crtc_state *pipe_config) 1262 1259 { 1263 1260 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1261 + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1264 1262 u32 pclk; 1263 + 1265 1264 drm_dbg_kms(&dev_priv->drm, "\n"); 1266 1265 1267 1266 pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); ··· 1274 1269 } else { 1275 1270 pclk = vlv_dsi_get_pclk(encoder, pipe_config); 1276 1271 } 1272 + 1273 + if (intel_dsi->dual_link) 1274 + pclk *= 2; 1277 1275 1278 1276 if (pclk) { 1279 1277 pipe_config->hw.adjusted_mode.crtc_clock = pclk;
+19
drivers/gpu/drm/i915/display/vlv_dsi.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #ifndef __VLV_DSI_H__ 7 + #define __VLV_DSI_H__ 8 + 9 + #include <linux/types.h> 10 + 11 + enum port; 12 + struct drm_i915_private; 13 + struct intel_dsi; 14 + 15 + void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); 16 + enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); 17 + void vlv_dsi_init(struct drm_i915_private *dev_priv); 18 + 19 + #endif /* __VLV_DSI_H__ */
+1
drivers/gpu/drm/i915/display/vlv_dsi_pll.c
··· 31 31 #include "intel_de.h" 32 32 #include "intel_display_types.h" 33 33 #include "intel_dsi.h" 34 + #include "vlv_dsi_pll.h" 34 35 #include "vlv_sideband.h" 35 36 36 37 static const u16 lfsr_converts[] = {
+38
drivers/gpu/drm/i915/display/vlv_dsi_pll.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #ifndef __VLV_DSI_PLL_H__ 7 + #define __VLV_DSI_PLL_H__ 8 + 9 + #include <linux/types.h> 10 + 11 + enum port; 12 + struct drm_i915_private; 13 + struct intel_crtc_state; 14 + struct intel_encoder; 15 + 16 + int vlv_dsi_pll_compute(struct intel_encoder *encoder, 17 + struct intel_crtc_state *config); 18 + void vlv_dsi_pll_enable(struct intel_encoder *encoder, 19 + const struct intel_crtc_state *config); 20 + void vlv_dsi_pll_disable(struct intel_encoder *encoder); 21 + u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, 22 + struct intel_crtc_state *config); 23 + void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); 24 + 25 + bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); 26 + int bxt_dsi_pll_compute(struct intel_encoder *encoder, 27 + struct intel_crtc_state *config); 28 + void bxt_dsi_pll_enable(struct intel_encoder *encoder, 29 + const struct intel_crtc_state *config); 30 + void bxt_dsi_pll_disable(struct intel_encoder *encoder); 31 + u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, 32 + struct intel_crtc_state *config); 33 + void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); 34 + 35 + void assert_dsi_pll_enabled(struct drm_i915_private *i915); 36 + void assert_dsi_pll_disabled(struct drm_i915_private *i915); 37 + 38 + #endif /* __VLV_DSI_PLL_H__ */
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_context.c
··· 479 479 if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) && 480 480 !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) { 481 481 drm_dbg(&i915->drm, 482 - "Bonding on gen12+ aside from TGL, RKL, and ADL_S not supported\n"); 482 + "Bonding not supported on this platform\n"); 483 483 return -ENODEV; 484 484 } 485 485
+181 -87
drivers/gpu/drm/i915/gt/intel_ggtt.c
··· 3 3 * Copyright © 2020 Intel Corporation 4 4 */ 5 5 6 + #include <linux/agp_backend.h> 6 7 #include <linux/stop_machine.h> 7 8 8 9 #include <asm/set_memory.h> 9 10 #include <asm/smp.h> 10 11 11 12 #include <drm/i915_drm.h> 13 + #include <drm/intel-gtt.h> 12 14 13 15 #include "gem/i915_gem_lmem.h" 14 16 ··· 118 116 return false; 119 117 } 120 118 121 - void i915_ggtt_suspend(struct i915_ggtt *ggtt) 119 + /** 120 + * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM 121 + * @vm: The VM to suspend the mappings for 122 + * 123 + * Suspend the memory mappings for all objects mapped to HW via the GGTT or a 124 + * DPT page table. 125 + */ 126 + void i915_ggtt_suspend_vm(struct i915_address_space *vm) 122 127 { 123 128 struct i915_vma *vma, *vn; 124 129 int open; 125 130 126 - mutex_lock(&ggtt->vm.mutex); 131 + drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); 132 + 133 + mutex_lock(&vm->mutex); 127 134 128 135 /* Skip rewriting PTE on VMA unbind. */ 129 - open = atomic_xchg(&ggtt->vm.open, 0); 136 + open = atomic_xchg(&vm->open, 0); 130 137 131 - list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { 138 + list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { 132 139 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 133 140 i915_vma_wait_for_bind(vma); 134 141 ··· 150 139 } 151 140 } 152 141 153 - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); 154 - ggtt->invalidate(ggtt); 155 - atomic_set(&ggtt->vm.open, open); 142 + vm->clear_range(vm, 0, vm->total); 156 143 157 - mutex_unlock(&ggtt->vm.mutex); 144 + atomic_set(&vm->open, open); 145 + 146 + mutex_unlock(&vm->mutex); 147 + } 148 + 149 + void i915_ggtt_suspend(struct i915_ggtt *ggtt) 150 + { 151 + i915_ggtt_suspend_vm(&ggtt->vm); 152 + ggtt->invalidate(ggtt); 158 153 159 154 intel_gt_check_and_clear_faults(ggtt->vm.gt); 160 155 } ··· 1270 1253 ggtt->invalidate(ggtt); 1271 1254 } 1272 1255 1273 - void i915_ggtt_resume(struct i915_ggtt *ggtt) 1256 + /** 1257 + * 
i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM 1258 + * @vm: The VM to restore the mappings for 1259 + * 1260 + * Restore the memory mappings for all objects mapped to HW via the GGTT or a 1261 + * DPT page table. 1262 + * 1263 + * Returns %true if restoring the mapping for any object that was in a write 1264 + * domain before suspend. 1265 + */ 1266 + bool i915_ggtt_resume_vm(struct i915_address_space *vm) 1274 1267 { 1275 1268 struct i915_vma *vma; 1276 - bool flush = false; 1269 + bool write_domain_objs = false; 1277 1270 int open; 1278 1271 1279 - intel_gt_check_and_clear_faults(ggtt->vm.gt); 1272 + drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); 1280 1273 1281 1274 /* First fill our portion of the GTT with scratch pages */ 1282 - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); 1275 + vm->clear_range(vm, 0, vm->total); 1283 1276 1284 1277 /* Skip rewriting PTE on VMA unbind. */ 1285 - open = atomic_xchg(&ggtt->vm.open, 0); 1278 + open = atomic_xchg(&vm->open, 0); 1286 1279 1287 1280 /* clflush objects bound into the GGTT and rebind them. */ 1288 - list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { 1281 + list_for_each_entry(vma, &vm->bound_list, vm_link) { 1289 1282 struct drm_i915_gem_object *obj = vma->obj; 1290 1283 unsigned int was_bound = 1291 1284 atomic_read(&vma->flags) & I915_VMA_BIND_MASK; 1292 1285 1293 1286 GEM_BUG_ON(!was_bound); 1294 - vma->ops->bind_vma(&ggtt->vm, NULL, vma, 1287 + vma->ops->bind_vma(vm, NULL, vma, 1295 1288 obj ? 
obj->cache_level : 0, 1296 1289 was_bound); 1297 1290 if (obj) { /* only used during resume => exclusive access */ 1298 - flush |= fetch_and_zero(&obj->write_domain); 1291 + write_domain_objs |= fetch_and_zero(&obj->write_domain); 1299 1292 obj->read_domains |= I915_GEM_DOMAIN_GTT; 1300 1293 } 1301 1294 } 1302 1295 1303 - atomic_set(&ggtt->vm.open, open); 1296 + atomic_set(&vm->open, open); 1297 + 1298 + return write_domain_objs; 1299 + } 1300 + 1301 + void i915_ggtt_resume(struct i915_ggtt *ggtt) 1302 + { 1303 + bool flush; 1304 + 1305 + intel_gt_check_and_clear_faults(ggtt->vm.gt); 1306 + 1307 + flush = i915_ggtt_resume_vm(&ggtt->vm); 1308 + 1304 1309 ggtt->invalidate(ggtt); 1305 1310 1306 1311 if (flush) ··· 1427 1388 } 1428 1389 1429 1390 static struct scatterlist * 1430 - remap_pages(struct drm_i915_gem_object *obj, 1431 - unsigned int offset, unsigned int alignment_pad, 1432 - unsigned int width, unsigned int height, 1433 - unsigned int src_stride, unsigned int dst_stride, 1434 - struct sg_table *st, struct scatterlist *sg) 1391 + add_padding_pages(unsigned int count, 1392 + struct sg_table *st, struct scatterlist *sg) 1393 + { 1394 + st->nents++; 1395 + 1396 + /* 1397 + * The DE ignores the PTEs for the padding tiles, the sg entry 1398 + * here is just a convenience to indicate how many padding PTEs 1399 + * to insert at this spot. 
1400 + */ 1401 + sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0); 1402 + sg_dma_address(sg) = 0; 1403 + sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE; 1404 + sg = sg_next(sg); 1405 + 1406 + return sg; 1407 + } 1408 + 1409 + static struct scatterlist * 1410 + remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj, 1411 + unsigned int offset, unsigned int alignment_pad, 1412 + unsigned int width, unsigned int height, 1413 + unsigned int src_stride, unsigned int dst_stride, 1414 + struct sg_table *st, struct scatterlist *sg, 1415 + unsigned int *gtt_offset) 1435 1416 { 1436 1417 unsigned int row; 1437 1418 1438 1419 if (!width || !height) 1439 1420 return sg; 1440 1421 1441 - if (alignment_pad) { 1442 - st->nents++; 1443 - 1444 - /* 1445 - * The DE ignores the PTEs for the padding tiles, the sg entry 1446 - * here is just a convenience to indicate how many padding PTEs 1447 - * to insert at this spot. 1448 - */ 1449 - sg_set_page(sg, NULL, alignment_pad * 4096, 0); 1450 - sg_dma_address(sg) = 0; 1451 - sg_dma_len(sg) = alignment_pad * 4096; 1452 - sg = sg_next(sg); 1453 - } 1422 + if (alignment_pad) 1423 + sg = add_padding_pages(alignment_pad, st, sg); 1454 1424 1455 1425 for (row = 0; row < height; row++) { 1456 1426 unsigned int left = width * I915_GTT_PAGE_SIZE; ··· 1496 1448 if (!left) 1497 1449 continue; 1498 1450 1499 - st->nents++; 1500 - 1501 - /* 1502 - * The DE ignores the PTEs for the padding tiles, the sg entry 1503 - * here is just a conenience to indicate how many padding PTEs 1504 - * to insert at this spot. 
1505 - */ 1506 - sg_set_page(sg, NULL, left, 0); 1507 - sg_dma_address(sg) = 0; 1508 - sg_dma_len(sg) = left; 1509 - sg = sg_next(sg); 1451 + sg = add_padding_pages(left >> PAGE_SHIFT, st, sg); 1510 1452 } 1453 + 1454 + *gtt_offset += alignment_pad + dst_stride * height; 1455 + 1456 + return sg; 1457 + } 1458 + 1459 + static struct scatterlist * 1460 + remap_contiguous_pages(struct drm_i915_gem_object *obj, 1461 + unsigned int obj_offset, 1462 + unsigned int count, 1463 + struct sg_table *st, struct scatterlist *sg) 1464 + { 1465 + struct scatterlist *iter; 1466 + unsigned int offset; 1467 + 1468 + iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset); 1469 + GEM_BUG_ON(!iter); 1470 + 1471 + do { 1472 + unsigned int len; 1473 + 1474 + len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), 1475 + count << PAGE_SHIFT); 1476 + sg_set_page(sg, NULL, len, 0); 1477 + sg_dma_address(sg) = 1478 + sg_dma_address(iter) + (offset << PAGE_SHIFT); 1479 + sg_dma_len(sg) = len; 1480 + 1481 + st->nents++; 1482 + count -= len >> PAGE_SHIFT; 1483 + if (count == 0) 1484 + return sg; 1485 + 1486 + sg = __sg_next(sg); 1487 + iter = __sg_next(iter); 1488 + offset = 0; 1489 + } while (1); 1490 + } 1491 + 1492 + static struct scatterlist * 1493 + remap_linear_color_plane_pages(struct drm_i915_gem_object *obj, 1494 + unsigned int obj_offset, unsigned int alignment_pad, 1495 + unsigned int size, 1496 + struct sg_table *st, struct scatterlist *sg, 1497 + unsigned int *gtt_offset) 1498 + { 1499 + if (!size) 1500 + return sg; 1501 + 1502 + if (alignment_pad) 1503 + sg = add_padding_pages(alignment_pad, st, sg); 1504 + 1505 + sg = remap_contiguous_pages(obj, obj_offset, size, st, sg); 1506 + sg = sg_next(sg); 1507 + 1508 + *gtt_offset += alignment_pad + size; 1509 + 1510 + return sg; 1511 + } 1512 + 1513 + static struct scatterlist * 1514 + remap_color_plane_pages(const struct intel_remapped_info *rem_info, 1515 + struct drm_i915_gem_object *obj, 1516 + int color_plane, 1517 + struct 
sg_table *st, struct scatterlist *sg, 1518 + unsigned int *gtt_offset) 1519 + { 1520 + unsigned int alignment_pad = 0; 1521 + 1522 + if (rem_info->plane_alignment) 1523 + alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset; 1524 + 1525 + if (rem_info->plane[color_plane].linear) 1526 + sg = remap_linear_color_plane_pages(obj, 1527 + rem_info->plane[color_plane].offset, 1528 + alignment_pad, 1529 + rem_info->plane[color_plane].size, 1530 + st, sg, 1531 + gtt_offset); 1532 + 1533 + else 1534 + sg = remap_tiled_color_plane_pages(obj, 1535 + rem_info->plane[color_plane].offset, 1536 + alignment_pad, 1537 + rem_info->plane[color_plane].width, 1538 + rem_info->plane[color_plane].height, 1539 + rem_info->plane[color_plane].src_stride, 1540 + rem_info->plane[color_plane].dst_stride, 1541 + st, sg, 1542 + gtt_offset); 1511 1543 1512 1544 return sg; 1513 1545 } ··· 1616 1488 st->nents = 0; 1617 1489 sg = st->sgl; 1618 1490 1619 - for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { 1620 - unsigned int alignment_pad = 0; 1621 - 1622 - if (rem_info->plane_alignment) 1623 - alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset; 1624 - 1625 - sg = remap_pages(obj, 1626 - rem_info->plane[i].offset, alignment_pad, 1627 - rem_info->plane[i].width, rem_info->plane[i].height, 1628 - rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride, 1629 - st, sg); 1630 - 1631 - gtt_offset += alignment_pad + 1632 - rem_info->plane[i].dst_stride * rem_info->plane[i].height; 1633 - } 1491 + for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) 1492 + sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset); 1634 1493 1635 1494 i915_sg_trim(st); 1636 1495 ··· 1639 1524 struct drm_i915_gem_object *obj) 1640 1525 { 1641 1526 struct sg_table *st; 1642 - struct scatterlist *sg, *iter; 1527 + struct scatterlist *sg; 1643 1528 unsigned int count = view->partial.size; 1644 - unsigned int offset; 1645 1529 int ret = -ENOMEM; 1646 1530 1647 1531 st = 
kmalloc(sizeof(*st), GFP_KERNEL); ··· 1651 1537 if (ret) 1652 1538 goto err_sg_alloc; 1653 1539 1654 - iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); 1655 - GEM_BUG_ON(!iter); 1656 - 1657 - sg = st->sgl; 1658 1540 st->nents = 0; 1659 - do { 1660 - unsigned int len; 1661 1541 1662 - len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), 1663 - count << PAGE_SHIFT); 1664 - sg_set_page(sg, NULL, len, 0); 1665 - sg_dma_address(sg) = 1666 - sg_dma_address(iter) + (offset << PAGE_SHIFT); 1667 - sg_dma_len(sg) = len; 1542 + sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl); 1668 1543 1669 - st->nents++; 1670 - count -= len >> PAGE_SHIFT; 1671 - if (count == 0) { 1672 - sg_mark_end(sg); 1673 - i915_sg_trim(st); /* Drop any unused tail entries. */ 1544 + sg_mark_end(sg); 1545 + i915_sg_trim(st); /* Drop any unused tail entries. */ 1674 1546 1675 - return st; 1676 - } 1677 - 1678 - sg = __sg_next(sg); 1679 - iter = __sg_next(iter); 1680 - offset = 0; 1681 - } while (1); 1547 + return st; 1682 1548 1683 1549 err_sg_alloc: 1684 1550 kfree(st);
+2
drivers/gpu/drm/i915/gt/intel_gt.c
··· 3 3 * Copyright © 2019 Intel Corporation 4 4 */ 5 5 6 + #include <drm/intel-gtt.h> 7 + 6 8 #include "intel_gt_debugfs.h" 7 9 8 10 #include "gem/i915_gem_lmem.h"
+2
drivers/gpu/drm/i915/gt/intel_gtt.h
··· 544 544 struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt, 545 545 unsigned long lmem_pt_obj_flags); 546 546 547 + void i915_ggtt_suspend_vm(struct i915_address_space *vm); 548 + bool i915_ggtt_resume_vm(struct i915_address_space *vm); 547 549 void i915_ggtt_suspend(struct i915_ggtt *gtt); 548 550 void i915_ggtt_resume(struct i915_ggtt *ggtt); 549 551
+24
drivers/gpu/drm/i915/i915_driver.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2019 Intel Corporation 4 + */ 5 + 6 + #ifndef __I915_DRIVER_H__ 7 + #define __I915_DRIVER_H__ 8 + 9 + #include <linux/pm.h> 10 + 11 + struct pci_dev; 12 + struct pci_device_id; 13 + struct drm_i915_private; 14 + 15 + extern const struct dev_pm_ops i915_pm_ops; 16 + 17 + int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 18 + void i915_driver_remove(struct drm_i915_private *i915); 19 + void i915_driver_shutdown(struct drm_i915_private *i915); 20 + 21 + int i915_driver_resume_switcheroo(struct drm_i915_private *i915); 22 + int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); 23 + 24 + #endif /* __I915_DRIVER_H__ */
+35 -8
drivers/gpu/drm/i915/i915_drv.c drivers/gpu/drm/i915/i915_driver.c
··· 29 29 30 30 #include <linux/acpi.h> 31 31 #include <linux/device.h> 32 - #include <linux/oom.h> 33 32 #include <linux/module.h> 33 + #include <linux/oom.h> 34 34 #include <linux/pci.h> 35 35 #include <linux/pm.h> 36 36 #include <linux/pm_runtime.h> ··· 48 48 #include "display/intel_acpi.h" 49 49 #include "display/intel_bw.h" 50 50 #include "display/intel_cdclk.h" 51 - #include "display/intel_dmc.h" 52 51 #include "display/intel_display_types.h" 52 + #include "display/intel_dmc.h" 53 53 #include "display/intel_dp.h" 54 + #include "display/intel_dpt.h" 54 55 #include "display/intel_fbdev.h" 55 56 #include "display/intel_hotplug.h" 56 57 #include "display/intel_overlay.h" 58 + #include "display/intel_pch_refclk.h" 57 59 #include "display/intel_pipe_crc.h" 58 60 #include "display/intel_pps.h" 59 61 #include "display/intel_sprite.h" ··· 72 70 #include "pxp/intel_pxp_pm.h" 73 71 74 72 #include "i915_debugfs.h" 73 + #include "i915_driver.h" 75 74 #include "i915_drv.h" 76 75 #include "i915_ioc32.h" 77 76 #include "i915_irq.h" ··· 92 89 #include "intel_region_ttm.h" 93 90 #include "vlv_suspend.h" 94 91 95 - static const struct drm_driver driver; 92 + static const struct drm_driver i915_drm_driver; 96 93 97 94 static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) 98 95 { ··· 325 322 mutex_init(&dev_priv->sb_lock); 326 323 cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); 327 324 328 - mutex_init(&dev_priv->av_mutex); 325 + mutex_init(&dev_priv->audio.mutex); 329 326 mutex_init(&dev_priv->wm.wm_mutex); 330 327 mutex_init(&dev_priv->pps_mutex); 331 328 mutex_init(&dev_priv->hdcp_comp_mutex); ··· 769 766 struct intel_device_info *device_info; 770 767 struct drm_i915_private *i915; 771 768 772 - i915 = devm_drm_dev_alloc(&pdev->dev, &driver, 769 + i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver, 773 770 struct drm_i915_private, drm); 774 771 if (IS_ERR(i915)) 775 772 return i915; ··· 1130 1127 1131 1128 intel_suspend_hw(dev_priv); 1132 
1129 1130 + /* Must be called before GGTT is suspended. */ 1131 + intel_dpt_suspend(dev_priv); 1133 1132 i915_ggtt_suspend(&dev_priv->ggtt); 1134 1133 1135 1134 i915_save_display(dev_priv); ··· 1188 1183 goto out; 1189 1184 } 1190 1185 1186 + /* 1187 + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX 1188 + * This should be totally removed when we handle the pci states properly 1189 + * on runtime PM and on s2idle cases. 1190 + */ 1191 + if (suspend_to_idle(dev_priv)) 1192 + pci_d3cold_disable(pdev); 1193 + 1191 1194 pci_disable_device(pdev); 1192 1195 /* 1193 1196 * During hibernation on some platforms the BIOS may try to access ··· 1220 1207 return ret; 1221 1208 } 1222 1209 1223 - int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) 1210 + int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, 1211 + pm_message_t state) 1224 1212 { 1225 1213 int error; 1226 1214 ··· 1257 1243 drm_err(&dev_priv->drm, "failed to re-enable GGTT\n"); 1258 1244 1259 1245 i915_ggtt_resume(&dev_priv->ggtt); 1246 + /* Must be called after GGTT is resumed. 
*/ 1247 + intel_dpt_resume(dev_priv); 1260 1248 1261 1249 intel_dmc_ucode_resume(dev_priv); 1262 1250 ··· 1360 1344 1361 1345 pci_set_master(pdev); 1362 1346 1347 + pci_d3cold_enable(pdev); 1348 + 1363 1349 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); 1364 1350 1365 1351 ret = vlv_resume_prepare(dev_priv, false); ··· 1382 1364 return ret; 1383 1365 } 1384 1366 1385 - int i915_resume_switcheroo(struct drm_i915_private *i915) 1367 + int i915_driver_resume_switcheroo(struct drm_i915_private *i915) 1386 1368 { 1387 1369 int ret; 1388 1370 ··· 1538 1520 { 1539 1521 struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 1540 1522 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 1523 + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 1541 1524 int ret; 1542 1525 1543 1526 if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) ··· 1584 1565 drm_err(&dev_priv->drm, 1585 1566 "Unclaimed access detected prior to suspending\n"); 1586 1567 1568 + /* 1569 + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX 1570 + * This should be totally removed when we handle the pci states properly 1571 + * on runtime PM and on s2idle cases. 
1572 + */ 1573 + pci_d3cold_disable(pdev); 1587 1574 rpm->suspended = true; 1588 1575 1589 1576 /* ··· 1628 1603 { 1629 1604 struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 1630 1605 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 1606 + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 1631 1607 int ret; 1632 1608 1633 1609 if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) ··· 1641 1615 1642 1616 intel_opregion_notify_adapter(dev_priv, PCI_D0); 1643 1617 rpm->suspended = false; 1618 + pci_d3cold_enable(pdev); 1644 1619 if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) 1645 1620 drm_dbg(&dev_priv->drm, 1646 1621 "Unclaimed access during suspend, bios?\n"); ··· 1804 1777 DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), 1805 1778 }; 1806 1779 1807 - static const struct drm_driver driver = { 1780 + static const struct drm_driver i915_drm_driver = { 1808 1781 /* Don't use MTRRs here; the Xserver or userspace app should 1809 1782 * deal with them for Intel hardware. 1810 1783 */
+31 -44
drivers/gpu/drm/i915/i915_drv.h
··· 50 50 #include <linux/stackdepot.h> 51 51 #include <linux/xarray.h> 52 52 53 - #include <drm/intel-gtt.h> 54 53 #include <drm/drm_gem.h> 55 54 #include <drm/drm_auth.h> 56 55 #include <drm/drm_cache.h> ··· 363 364 void (*read_luts)(struct intel_crtc_state *crtc_state); 364 365 }; 365 366 366 - struct intel_audio_funcs { 367 - void (*audio_codec_enable)(struct intel_encoder *encoder, 368 - const struct intel_crtc_state *crtc_state, 369 - const struct drm_connector_state *conn_state); 370 - void (*audio_codec_disable)(struct intel_encoder *encoder, 371 - const struct intel_crtc_state *old_crtc_state, 372 - const struct drm_connector_state *old_conn_state); 373 - }; 374 - 375 367 struct intel_cdclk_funcs { 376 368 void (*get_cdclk)(struct drm_i915_private *dev_priv, 377 369 struct intel_cdclk_config *cdclk_config); ··· 401 411 void (*commit_modeset_enables)(struct intel_atomic_state *state); 402 412 }; 403 413 414 + struct intel_fbc_funcs; 404 415 405 416 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ 406 417 407 418 struct intel_fbc { 419 + struct drm_i915_private *i915; 420 + const struct intel_fbc_funcs *funcs; 421 + 408 422 /* This is always the inner lock when overlapping with struct_mutex and 409 423 * it's the outer lock when overlapping with stolen_lock. 
*/ 410 424 struct mutex lock; ··· 822 828 struct ida mock_region_instances; 823 829 }; 824 830 831 + /* intel_audio.c private */ 832 + struct intel_audio_funcs; 833 + struct intel_audio_private { 834 + /* Display internal audio functions */ 835 + const struct intel_audio_funcs *funcs; 836 + 837 + /* hda/i915 audio component */ 838 + struct i915_audio_component *component; 839 + bool component_registered; 840 + /* mutex for audio/video sync */ 841 + struct mutex mutex; 842 + int power_refcount; 843 + u32 freq_cntrl; 844 + 845 + /* Used to save the pipe-to-encoder mapping for audio */ 846 + struct intel_encoder *encoder_map[I915_MAX_PIPES]; 847 + 848 + /* necessary resource sharing with HDMI LPE audio driver. */ 849 + struct { 850 + struct platform_device *platdev; 851 + int irq; 852 + } lpe; 853 + }; 854 + 825 855 struct drm_i915_private { 826 856 struct drm_device drm; 827 857 ··· 1013 995 /* Display internal color functions */ 1014 996 const struct intel_color_funcs *color_funcs; 1015 997 1016 - /* Display internal audio functions */ 1017 - const struct intel_audio_funcs *audio_funcs; 1018 - 1019 998 /* Display CDCLK functions */ 1020 999 const struct intel_cdclk_funcs *cdclk_funcs; 1021 1000 ··· 1098 1083 1099 1084 struct drm_property *broadcast_rgb_property; 1100 1085 struct drm_property *force_audio_property; 1101 - 1102 - /* hda/i915 audio component */ 1103 - struct i915_audio_component *audio_component; 1104 - bool audio_component_registered; 1105 - /** 1106 - * av_mutex - mutex for audio/video sync 1107 - * 1108 - */ 1109 - struct mutex av_mutex; 1110 - int audio_power_refcount; 1111 - u32 audio_freq_cntrl; 1112 1086 1113 1087 u32 fdi_rx_config; 1114 1088 ··· 1231 1227 1232 1228 bool ipc_enabled; 1233 1229 1234 - /* Used to save the pipe-to-encoder mapping for audio */ 1235 - struct intel_encoder *av_enc_map[I915_MAX_PIPES]; 1236 - 1237 - /* necessary resource sharing with HDMI LPE audio driver. 
*/ 1238 - struct { 1239 - struct platform_device *platdev; 1240 - int irq; 1241 - } lpe_audio; 1230 + struct intel_audio_private audio; 1242 1231 1243 1232 struct i915_pmu pmu; 1244 1233 ··· 1452 1455 #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) 1453 1456 #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) 1454 1457 #define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE) 1455 - #define IS_CANNONLAKE(dev_priv) 0 1456 1458 #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) 1457 1459 #define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \ 1458 1460 IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)) ··· 1741 1745 1742 1746 #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0) 1743 1747 1744 - #define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 12) 1748 + #define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 11) 1745 1749 1746 1750 #define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) 1747 1751 ··· 1785 1789 return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915); 1786 1790 } 1787 1791 1788 - /* i915_drv.c */ 1789 - extern const struct dev_pm_ops i915_pm_ops; 1790 - 1791 - int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 1792 - void i915_driver_remove(struct drm_i915_private *i915); 1793 - void i915_driver_shutdown(struct drm_i915_private *i915); 1794 - 1795 - int i915_resume_switcheroo(struct drm_i915_private *i915); 1796 - int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); 1797 - 1792 + /* i915_getparam.c */ 1798 1793 int i915_getparam_ioctl(struct drm_device *dev, void *data, 1799 1794 struct drm_file *file_priv); 1800 1795
+42
drivers/gpu/drm/i915/i915_iosf_mbi.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #ifndef __I915_IOSF_MBI_H__ 7 + #define __I915_IOSF_MBI_H__ 8 + 9 + #if IS_ENABLED(CONFIG_IOSF_MBI) 10 + #include <asm/iosf_mbi.h> 11 + #else 12 + 13 + /* Stubs to compile for all non-x86 archs */ 14 + #define MBI_PMIC_BUS_ACCESS_BEGIN 1 15 + #define MBI_PMIC_BUS_ACCESS_END 2 16 + 17 + struct notifier_block; 18 + 19 + static inline void iosf_mbi_punit_acquire(void) {} 20 + static inline void iosf_mbi_punit_release(void) {} 21 + static inline void iosf_mbi_assert_punit_acquired(void) {} 22 + 23 + static inline 24 + int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb) 25 + { 26 + return 0; 27 + } 28 + 29 + static inline int 30 + iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb) 31 + { 32 + return 0; 33 + } 34 + 35 + static inline 36 + int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb) 37 + { 38 + return 0; 39 + } 40 + #endif 41 + 42 + #endif /* __I915_IOSF_MBI_H__ */
+1 -1
drivers/gpu/drm/i915/i915_irq.c
··· 3016 3016 if (IS_CHERRYVIEW(dev_priv)) 3017 3017 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3018 3018 else 3019 - intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); 3019 + intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); 3020 3020 3021 3021 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3022 3022 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
+2 -2
drivers/gpu/drm/i915/i915_module.c
··· 24 24 25 25 /* 26 26 * Enable KMS by default, unless explicitly overriden by 27 - * either the i915.modeset prarameter or by the 28 - * vga_text_mode_force boot option. 27 + * either the i915.modeset parameter or by the 28 + * nomodeset boot option. 29 29 */ 30 30 31 31 if (i915_modparams.modeset == 0)
+18 -4
drivers/gpu/drm/i915/i915_pci.c
··· 27 27 #include <drm/drm_drv.h> 28 28 #include <drm/i915_pciids.h> 29 29 30 + #include "i915_driver.h" 30 31 #include "i915_drv.h" 31 32 #include "i915_pci.h" 32 33 ··· 145 144 .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \ 146 145 .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ 147 146 DRM_COLOR_LUT_EQUAL_CHANNELS, \ 147 + } 148 + #define ICL_COLORS \ 149 + .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145, \ 150 + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ 151 + DRM_COLOR_LUT_EQUAL_CHANNELS, \ 152 + .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ 148 153 } 149 154 150 155 /* Keep in gen based order, and chronological order within a gen */ ··· 818 811 [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ 819 812 }, \ 820 813 GEN(11), \ 821 - .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }, \ 814 + ICL_COLORS, \ 822 815 .dbuf.size = 2048, \ 823 816 .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \ 824 817 .display.has_dsc = 1, \ ··· 873 866 TGL_CURSOR_OFFSETS, \ 874 867 .has_global_mocs = 1, \ 875 868 .has_pxp = 1, \ 876 - .display.has_dsb = 1 869 + .display.has_dsb = 0 /* FIXME: LUT load is broken with DSB */ 877 870 878 871 static const struct intel_device_info tgl_info = { 879 872 GEN12_FEATURES, ··· 939 932 #define XE_LPD_FEATURES \ 940 933 .abox_mask = GENMASK(1, 0), \ 941 934 .color = { .degamma_lut_size = 0, .gamma_lut_size = 0 }, \ 942 - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ 943 - BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \ 944 935 .dbuf.size = 4096, \ 945 936 .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \ 946 937 BIT(DBUF_S4), \ ··· 960 955 [TRANSCODER_B] = PIPE_B_OFFSET, \ 961 956 [TRANSCODER_C] = PIPE_C_OFFSET, \ 962 957 [TRANSCODER_D] = PIPE_D_OFFSET, \ 958 + [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ 959 + [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ 963 960 }, \ 964 961 .trans_offsets = { \ 965 962 [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ 966 963 [TRANSCODER_B] = 
TRANSCODER_B_OFFSET, \ 967 964 [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ 968 965 [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ 966 + [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ 967 + [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ 969 968 }, \ 970 969 XE_LPD_CURSOR_OFFSETS 971 970 ··· 978 969 XE_LPD_FEATURES, 979 970 PLATFORM(INTEL_ALDERLAKE_P), 980 971 .require_force_probe = 1, 972 + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 973 + BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | 974 + BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), 981 975 .display.has_cdclk_crawl = 1, 982 976 .display.has_modular_fia = 1, 983 977 .display.has_psr_hw_tracking = 0, ··· 1050 1038 BIT(VECS0) | BIT(VECS1) | 1051 1039 BIT(VCS0) | BIT(VCS2), 1052 1040 .require_force_probe = 1, 1041 + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | 1042 + BIT(TRANSCODER_C) | BIT(TRANSCODER_D), 1053 1043 }; 1054 1044 1055 1045 #undef PLATFORM
+189 -171
drivers/gpu/drm/i915/i915_reg.h
··· 371 371 #define VLV_G3DCTL _MMIO(0x9024) 372 372 #define VLV_GSCKGCTL _MMIO(0x9028) 373 373 374 + #define FBC_LLC_READ_CTRL _MMIO(0x9044) 375 + #define FBC_LLC_FULLY_OPEN REG_BIT(30) 376 + 374 377 #define GEN6_MBCTL _MMIO(0x0907c) 375 378 #define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) 376 379 #define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) ··· 2798 2795 #define GEN12_AUX_ERR_DBG _MMIO(0x43f4) 2799 2796 2800 2797 #define FPGA_DBG _MMIO(0x42300) 2801 - #define FPGA_DBG_RM_NOCLAIM (1 << 31) 2798 + #define FPGA_DBG_RM_NOCLAIM REG_BIT(31) 2802 2799 2803 2800 #define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028) 2804 - #define CLAIM_ER_CLR (1 << 31) 2805 - #define CLAIM_ER_OVERFLOW (1 << 16) 2806 - #define CLAIM_ER_CTR_MASK 0xffff 2801 + #define CLAIM_ER_CLR REG_BIT(31) 2802 + #define CLAIM_ER_OVERFLOW REG_BIT(16) 2803 + #define CLAIM_ER_CTR_MASK REG_GENMASK(15, 0) 2807 2804 2808 2805 #define DERRMR _MMIO(0x44050) 2809 2806 /* Note that HBLANK events are reserved on bdw+ */ ··· 3310 3307 #define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */ 3311 3308 #define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */ 3312 3309 #define FBC_CONTROL _MMIO(0x3208) 3313 - #define FBC_CTL_EN REG_BIT(31) 3314 - #define FBC_CTL_PERIODIC REG_BIT(30) 3315 - #define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16) 3316 - #define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x)) 3317 - #define FBC_CTL_STOP_ON_MOD REG_BIT(15) 3318 - #define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */ 3319 - #define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm */ 3320 - #define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5) 3321 - #define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x)) 3322 - #define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0) 3323 - #define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x)) 3310 + #define FBC_CTL_EN REG_BIT(31) 3311 + #define FBC_CTL_PERIODIC REG_BIT(30) 3312 + #define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16) 3313 + #define FBC_CTL_INTERVAL(x) 
REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x)) 3314 + #define FBC_CTL_STOP_ON_MOD REG_BIT(15) 3315 + #define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */ 3316 + #define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */ 3317 + #define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5) 3318 + #define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x)) 3319 + #define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0) 3320 + #define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x)) 3324 3321 #define FBC_COMMAND _MMIO(0x320c) 3325 - #define FBC_CMD_COMPRESS (1 << 0) 3322 + #define FBC_CMD_COMPRESS REG_BIT(0) 3326 3323 #define FBC_STATUS _MMIO(0x3210) 3327 - #define FBC_STAT_COMPRESSING (1 << 31) 3328 - #define FBC_STAT_COMPRESSED (1 << 30) 3329 - #define FBC_STAT_MODIFIED (1 << 29) 3330 - #define FBC_STAT_CURRENT_LINE_SHIFT (0) 3331 - #define FBC_CONTROL2 _MMIO(0x3214) 3332 - #define FBC_CTL_FENCE_DBL (0 << 4) 3333 - #define FBC_CTL_IDLE_IMM (0 << 2) 3334 - #define FBC_CTL_IDLE_FULL (1 << 2) 3335 - #define FBC_CTL_IDLE_LINE (2 << 2) 3336 - #define FBC_CTL_IDLE_DEBUG (3 << 2) 3337 - #define FBC_CTL_CPU_FENCE (1 << 1) 3338 - #define FBC_CTL_PLANE(plane) ((plane) << 0) 3339 - #define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */ 3340 - #define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) 3324 + #define FBC_STAT_COMPRESSING REG_BIT(31) 3325 + #define FBC_STAT_COMPRESSED REG_BIT(30) 3326 + #define FBC_STAT_MODIFIED REG_BIT(29) 3327 + #define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0) 3328 + #define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */ 3329 + #define FBC_CTL_FENCE_DBL REG_BIT(4) 3330 + #define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2) 3331 + #define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0) 3332 + #define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1) 3333 + #define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2) 3334 + #define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3) 3335 + #define FBC_CTL_CPU_FENCE_EN REG_BIT(1) 3336 + #define 
FBC_CTL_PLANE_MASK REG_GENMASK(1, 0) 3337 + #define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane)) 3338 + #define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */ 3339 + #define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */ 3340 + #define FBC_MOD_NUM_MASK REG_GENMASK(31, 1) 3341 + #define FBC_MOD_NUM_VALID REG_BIT(0) 3342 + #define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 reisters */ 3343 + #define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */ 3344 + #define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0) 3345 + #define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1) 3346 + #define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2) 3347 + #define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3) 3341 3348 3342 3349 #define FBC_LL_SIZE (1536) 3343 3350 3344 - #define FBC_LLC_READ_CTRL _MMIO(0x9044) 3345 - #define FBC_LLC_FULLY_OPEN (1 << 30) 3346 - 3347 3351 /* Framebuffer compression for GM45+ */ 3348 3352 #define DPFC_CB_BASE _MMIO(0x3200) 3349 - #define DPFC_CONTROL _MMIO(0x3208) 3350 - #define DPFC_CTL_EN (1 << 31) 3351 - #define DPFC_CTL_PLANE(plane) ((plane) << 30) 3352 - #define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29) 3353 - #define DPFC_CTL_FENCE_EN (1 << 29) 3354 - #define IVB_DPFC_CTL_FENCE_EN (1 << 28) 3355 - #define DPFC_CTL_PERSISTENT_MODE (1 << 25) 3356 - #define DPFC_SR_EN (1 << 10) 3357 - #define DPFC_CTL_LIMIT_1X (0 << 6) 3358 - #define DPFC_CTL_LIMIT_2X (1 << 6) 3359 - #define DPFC_CTL_LIMIT_4X (2 << 6) 3360 - #define DPFC_RECOMP_CTL _MMIO(0x320c) 3361 - #define DPFC_RECOMP_STALL_EN (1 << 27) 3362 - #define DPFC_RECOMP_STALL_WM_SHIFT (16) 3363 - #define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) 3364 - #define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) 3365 - #define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) 3366 - #define DPFC_STATUS _MMIO(0x3210) 3367 - #define DPFC_INVAL_SEG_SHIFT (16) 3368 - #define DPFC_INVAL_SEG_MASK (0x07ff0000) 3369 - #define DPFC_COMP_SEG_SHIFT (0) 3370 - #define 
DPFC_COMP_SEG_MASK (0x000007ff) 3371 - #define DPFC_STATUS2 _MMIO(0x3214) 3372 - #define DPFC_FENCE_YOFF _MMIO(0x3218) 3373 - #define DPFC_CHICKEN _MMIO(0x3224) 3374 - #define DPFC_HT_MODIFY (1 << 31) 3375 - 3376 - /* Framebuffer compression for Ironlake */ 3377 3353 #define ILK_DPFC_CB_BASE _MMIO(0x43200) 3354 + #define DPFC_CONTROL _MMIO(0x3208) 3378 3355 #define ILK_DPFC_CONTROL _MMIO(0x43208) 3379 - #define FBC_CTL_FALSE_COLOR (1 << 10) 3380 - /* The bit 28-8 is reserved */ 3381 - #define DPFC_RESERVED (0x1FFFFF00) 3356 + #define DPFC_CTL_EN REG_BIT(31) 3357 + #define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */ 3358 + #define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane)) 3359 + #define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */ 3360 + #define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */ 3361 + #define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane)) 3362 + #define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */ 3363 + #define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */ 3364 + #define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */ 3365 + #define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */ 3366 + #define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */ 3367 + #define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6) 3368 + #define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0) 3369 + #define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1) 3370 + #define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2) 3371 + #define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0) 3372 + #define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence)) 3373 + #define DPFC_RECOMP_CTL _MMIO(0x320c) 3382 3374 #define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) 3375 + #define DPFC_RECOMP_STALL_EN REG_BIT(27) 3376 + #define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16) 3377 + #define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0) 3378 + #define DPFC_STATUS _MMIO(0x3210) 3383 3379 
#define ILK_DPFC_STATUS _MMIO(0x43210) 3384 - #define ILK_DPFC_COMP_SEG_MASK 0x7ff 3385 - #define IVB_FBC_STATUS2 _MMIO(0x43214) 3386 - #define IVB_FBC_COMP_SEG_MASK 0x7ff 3387 - #define BDW_FBC_COMP_SEG_MASK 0xfff 3380 + #define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16) 3381 + #define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0) 3382 + #define DPFC_STATUS2 _MMIO(0x3214) 3383 + #define ILK_DPFC_STATUS2 _MMIO(0x43214) 3384 + #define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0) 3385 + #define DPFC_FENCE_YOFF _MMIO(0x3218) 3388 3386 #define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) 3387 + #define DPFC_CHICKEN _MMIO(0x3224) 3389 3388 #define ILK_DPFC_CHICKEN _MMIO(0x43224) 3390 - #define ILK_DPFC_DISABLE_DUMMY0 (1 << 8) 3391 - #define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14) 3392 - #define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23) 3389 + #define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */ 3390 + #define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */ 3391 + #define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */ 3392 + #define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */ 3393 + 3393 3394 #define GLK_FBC_STRIDE _MMIO(0x43228) 3394 3395 #define FBC_STRIDE_OVERRIDE REG_BIT(15) 3395 3396 #define FBC_STRIDE_MASK REG_GENMASK(14, 0) 3396 3397 #define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x)) 3398 + 3397 3399 #define ILK_FBC_RT_BASE _MMIO(0x2128) 3398 - #define ILK_FBC_RT_VALID (1 << 0) 3399 - #define SNB_FBC_FRONT_BUFFER (1 << 1) 3400 + #define ILK_FBC_RT_VALID REG_BIT(0) 3401 + #define SNB_FBC_FRONT_BUFFER REG_BIT(1) 3400 3402 3401 3403 #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) 3402 3404 #define ILK_FBCQ_DIS (1 << 22) ··· 3425 3417 * The following two registers are of type GTTMMADR 3426 3418 */ 3427 3419 #define SNB_DPFC_CTL_SA _MMIO(0x100100) 3428 - #define SNB_CPU_FENCE_ENABLE (1 << 29) 3429 - #define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) 3420 + #define SNB_DPFC_FENCE_EN REG_BIT(29) 3421 + #define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0) 3422 + #define 
SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence)) 3423 + #define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) 3430 3424 3431 3425 /* Framebuffer compression for Ivybridge */ 3432 3426 #define IVB_FBC_RT_BASE _MMIO(0x7020) ··· 3438 3428 #define IPS_ENABLE (1 << 31) 3439 3429 3440 3430 #define MSG_FBC_REND_STATE _MMIO(0x50380) 3441 - #define FBC_REND_NUKE (1 << 2) 3442 - #define FBC_REND_CACHE_CLEAN (1 << 1) 3431 + #define FBC_REND_NUKE REG_BIT(2) 3432 + #define FBC_REND_CACHE_CLEAN REG_BIT(1) 3443 3433 3444 3434 /* 3445 3435 * GPIO regs ··· 4319 4309 4320 4310 /* Pipe A CRC regs */ 4321 4311 #define _PIPE_CRC_CTL_A 0x60050 4322 - #define PIPE_CRC_ENABLE (1 << 31) 4312 + #define PIPE_CRC_ENABLE REG_BIT(31) 4323 4313 /* skl+ source selection */ 4324 - #define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28) 4325 - #define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28) 4326 - #define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28) 4327 - #define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28) 4328 - #define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28) 4329 - #define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28) 4330 - #define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28) 4331 - #define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28) 4314 + #define PIPE_CRC_SOURCE_MASK_SKL REG_GENMASK(30, 28) 4315 + #define PIPE_CRC_SOURCE_PLANE_1_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 0) 4316 + #define PIPE_CRC_SOURCE_PLANE_2_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 2) 4317 + #define PIPE_CRC_SOURCE_DMUX_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 4) 4318 + #define PIPE_CRC_SOURCE_PLANE_3_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 6) 4319 + #define PIPE_CRC_SOURCE_PLANE_4_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 7) 4320 + #define PIPE_CRC_SOURCE_PLANE_5_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 5) 4321 + #define PIPE_CRC_SOURCE_PLANE_6_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 3) 4322 + #define PIPE_CRC_SOURCE_PLANE_7_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 1) 4332 4323 /* ivb+ source selection */ 4333 - #define 
PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) 4334 - #define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) 4335 - #define PIPE_CRC_SOURCE_PF_IVB (2 << 29) 4324 + #define PIPE_CRC_SOURCE_MASK_IVB REG_GENMASK(30, 29) 4325 + #define PIPE_CRC_SOURCE_PRIMARY_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 0) 4326 + #define PIPE_CRC_SOURCE_SPRITE_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 1) 4327 + #define PIPE_CRC_SOURCE_PF_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 2) 4336 4328 /* ilk+ source selection */ 4337 - #define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28) 4338 - #define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28) 4339 - #define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28) 4340 - /* embedded DP port on the north display block, reserved on ivb */ 4341 - #define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28) 4342 - #define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */ 4329 + #define PIPE_CRC_SOURCE_MASK_ILK REG_GENMASK(30, 28) 4330 + #define PIPE_CRC_SOURCE_PRIMARY_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 0) 4331 + #define PIPE_CRC_SOURCE_SPRITE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 1) 4332 + #define PIPE_CRC_SOURCE_PIPE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 2) 4333 + /* embedded DP port on the north display block */ 4334 + #define PIPE_CRC_SOURCE_PORT_A_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 4) 4335 + #define PIPE_CRC_SOURCE_FDI_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 5) 4343 4336 /* vlv source selection */ 4344 - #define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27) 4345 - #define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27) 4346 - #define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27) 4337 + #define PIPE_CRC_SOURCE_MASK_VLV REG_GENMASK(30, 27) 4338 + #define PIPE_CRC_SOURCE_PIPE_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 0) 4339 + #define PIPE_CRC_SOURCE_HDMIB_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 1) 4340 + #define PIPE_CRC_SOURCE_HDMIC_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 2) 4347 4341 /* with DP port the pipe source is invalid */ 4348 - #define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27) 4349 
- #define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27) 4350 - #define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27) 4342 + #define PIPE_CRC_SOURCE_DP_D_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 3) 4343 + #define PIPE_CRC_SOURCE_DP_B_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 6) 4344 + #define PIPE_CRC_SOURCE_DP_C_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 7) 4351 4345 /* gen3+ source selection */ 4352 - #define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28) 4353 - #define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28) 4354 - #define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28) 4346 + #define PIPE_CRC_SOURCE_MASK_I9XX REG_GENMASK(30, 28) 4347 + #define PIPE_CRC_SOURCE_PIPE_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 0) 4348 + #define PIPE_CRC_SOURCE_SDVOB_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 1) 4349 + #define PIPE_CRC_SOURCE_SDVOC_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 2) 4355 4350 /* with DP/TV port the pipe source is invalid */ 4356 - #define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28) 4357 - #define PIPE_CRC_SOURCE_TV_PRE (4 << 28) 4358 - #define PIPE_CRC_SOURCE_TV_POST (5 << 28) 4359 - #define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28) 4360 - #define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28) 4351 + #define PIPE_CRC_SOURCE_DP_D_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 3) 4352 + #define PIPE_CRC_SOURCE_TV_PRE REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 4) 4353 + #define PIPE_CRC_SOURCE_TV_POST REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 5) 4354 + #define PIPE_CRC_SOURCE_DP_B_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 6) 4355 + #define PIPE_CRC_SOURCE_DP_C_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 7) 4361 4356 /* gen2 doesn't have source selection bits */ 4362 - #define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30) 4357 + #define PIPE_CRC_INCLUDE_BORDER_I8XX REG_BIT(30) 4363 4358 4364 4359 #define _PIPE_CRC_RES_1_A_IVB 0x60064 4365 4360 #define _PIPE_CRC_RES_2_A_IVB 0x60068 ··· 4713 4698 #define PSR_EVENT_LPSP_MODE_EXIT (1 << 1) 4714 4699 #define PSR_EVENT_PSR_DISABLE (1 << 0) 4715 4700 4716 - #define 
_PSR2_STATUS_A 0x60940 4717 - #define _PSR2_STATUS_EDP 0x6f940 4718 - #define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) 4719 - #define EDP_PSR2_STATUS_STATE_MASK (0xf << 28) 4720 - #define EDP_PSR2_STATUS_STATE_SHIFT 28 4701 + #define _PSR2_STATUS_A 0x60940 4702 + #define _PSR2_STATUS_EDP 0x6f940 4703 + #define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) 4704 + #define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28) 4705 + #define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8) 4721 4706 4722 4707 #define _PSR2_SU_STATUS_A 0x60914 4723 4708 #define _PSR2_SU_STATUS_EDP 0x6f914 ··· 5014 4999 #define PORT_DFT2_G4X _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154) 5015 5000 #define DC_BALANCE_RESET_VLV (1 << 31) 5016 5001 #define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0)) 5017 - #define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */ 5018 - #define PIPE_B_SCRAMBLE_RESET (1 << 1) 5019 - #define PIPE_A_SCRAMBLE_RESET (1 << 0) 5002 + #define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */ 5003 + #define PIPE_B_SCRAMBLE_RESET REG_BIT(1) 5004 + #define PIPE_A_SCRAMBLE_RESET REG_BIT(0) 5020 5005 5021 5006 /* Gen 3 SDVO bits: */ 5022 5007 #define SDVO_ENABLE (1 << 31) ··· 6281 6266 #define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26) 6282 6267 6283 6268 #define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028) 6284 - #define PIPEB_LINE_COMPARE_INT_EN (1 << 29) 6285 - #define PIPEB_HLINE_INT_EN (1 << 28) 6286 - #define PIPEB_VBLANK_INT_EN (1 << 27) 6287 - #define SPRITED_FLIP_DONE_INT_EN (1 << 26) 6288 - #define SPRITEC_FLIP_DONE_INT_EN (1 << 25) 6289 - #define PLANEB_FLIP_DONE_INT_EN (1 << 24) 6290 - #define PIPE_PSR_INT_EN (1 << 22) 6291 - #define PIPEA_LINE_COMPARE_INT_EN (1 << 21) 6292 - #define PIPEA_HLINE_INT_EN (1 << 20) 6293 - #define PIPEA_VBLANK_INT_EN (1 << 19) 6294 - #define SPRITEB_FLIP_DONE_INT_EN (1 << 18) 6295 - #define SPRITEA_FLIP_DONE_INT_EN (1 << 17) 6296 - #define PLANEA_FLIPDONE_INT_EN (1 << 16) 6297 - 
#define PIPEC_LINE_COMPARE_INT_EN (1 << 13) 6298 - #define PIPEC_HLINE_INT_EN (1 << 12) 6299 - #define PIPEC_VBLANK_INT_EN (1 << 11) 6300 - #define SPRITEF_FLIPDONE_INT_EN (1 << 10) 6301 - #define SPRITEE_FLIPDONE_INT_EN (1 << 9) 6302 - #define PLANEC_FLIPDONE_INT_EN (1 << 8) 6269 + #define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29) 6270 + #define PIPEB_HLINE_INT_EN REG_BIT(28) 6271 + #define PIPEB_VBLANK_INT_EN REG_BIT(27) 6272 + #define SPRITED_FLIP_DONE_INT_EN REG_BIT(26) 6273 + #define SPRITEC_FLIP_DONE_INT_EN REG_BIT(25) 6274 + #define PLANEB_FLIP_DONE_INT_EN REG_BIT(24) 6275 + #define PIPE_PSR_INT_EN REG_BIT(22) 6276 + #define PIPEA_LINE_COMPARE_INT_EN REG_BIT(21) 6277 + #define PIPEA_HLINE_INT_EN REG_BIT(20) 6278 + #define PIPEA_VBLANK_INT_EN REG_BIT(19) 6279 + #define SPRITEB_FLIP_DONE_INT_EN REG_BIT(18) 6280 + #define SPRITEA_FLIP_DONE_INT_EN REG_BIT(17) 6281 + #define PLANEA_FLIPDONE_INT_EN REG_BIT(16) 6282 + #define PIPEC_LINE_COMPARE_INT_EN REG_BIT(13) 6283 + #define PIPEC_HLINE_INT_EN REG_BIT(12) 6284 + #define PIPEC_VBLANK_INT_EN REG_BIT(11) 6285 + #define SPRITEF_FLIPDONE_INT_EN REG_BIT(10) 6286 + #define SPRITEE_FLIPDONE_INT_EN REG_BIT(9) 6287 + #define PLANEC_FLIPDONE_INT_EN REG_BIT(8) 6303 6288 6304 6289 #define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */ 6305 - #define SPRITEF_INVALID_GTT_INT_EN (1 << 27) 6306 - #define SPRITEE_INVALID_GTT_INT_EN (1 << 26) 6307 - #define PLANEC_INVALID_GTT_INT_EN (1 << 25) 6308 - #define CURSORC_INVALID_GTT_INT_EN (1 << 24) 6309 - #define CURSORB_INVALID_GTT_INT_EN (1 << 23) 6310 - #define CURSORA_INVALID_GTT_INT_EN (1 << 22) 6311 - #define SPRITED_INVALID_GTT_INT_EN (1 << 21) 6312 - #define SPRITEC_INVALID_GTT_INT_EN (1 << 20) 6313 - #define PLANEB_INVALID_GTT_INT_EN (1 << 19) 6314 - #define SPRITEB_INVALID_GTT_INT_EN (1 << 18) 6315 - #define SPRITEA_INVALID_GTT_INT_EN (1 << 17) 6316 - #define PLANEA_INVALID_GTT_INT_EN (1 << 16) 6317 - #define DPINVGTT_EN_MASK 0xff0000 6318 - #define 
DPINVGTT_EN_MASK_CHV 0xfff0000 6319 - #define SPRITEF_INVALID_GTT_STATUS (1 << 11) 6320 - #define SPRITEE_INVALID_GTT_STATUS (1 << 10) 6321 - #define PLANEC_INVALID_GTT_STATUS (1 << 9) 6322 - #define CURSORC_INVALID_GTT_STATUS (1 << 8) 6323 - #define CURSORB_INVALID_GTT_STATUS (1 << 7) 6324 - #define CURSORA_INVALID_GTT_STATUS (1 << 6) 6325 - #define SPRITED_INVALID_GTT_STATUS (1 << 5) 6326 - #define SPRITEC_INVALID_GTT_STATUS (1 << 4) 6327 - #define PLANEB_INVALID_GTT_STATUS (1 << 3) 6328 - #define SPRITEB_INVALID_GTT_STATUS (1 << 2) 6329 - #define SPRITEA_INVALID_GTT_STATUS (1 << 1) 6330 - #define PLANEA_INVALID_GTT_STATUS (1 << 0) 6331 - #define DPINVGTT_STATUS_MASK 0xff 6332 - #define DPINVGTT_STATUS_MASK_CHV 0xfff 6290 + #define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16) 6291 + #define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16) 6292 + #define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27) 6293 + #define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26) 6294 + #define PLANEC_INVALID_GTT_INT_EN REG_BIT(25) 6295 + #define CURSORC_INVALID_GTT_INT_EN REG_BIT(24) 6296 + #define CURSORB_INVALID_GTT_INT_EN REG_BIT(23) 6297 + #define CURSORA_INVALID_GTT_INT_EN REG_BIT(22) 6298 + #define SPRITED_INVALID_GTT_INT_EN REG_BIT(21) 6299 + #define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20) 6300 + #define PLANEB_INVALID_GTT_INT_EN REG_BIT(19) 6301 + #define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18) 6302 + #define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17) 6303 + #define PLANEA_INVALID_GTT_INT_EN REG_BIT(16) 6304 + #define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0) 6305 + #define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0) 6306 + #define SPRITEF_INVALID_GTT_STATUS REG_BIT(11) 6307 + #define SPRITEE_INVALID_GTT_STATUS REG_BIT(10) 6308 + #define PLANEC_INVALID_GTT_STATUS REG_BIT(9) 6309 + #define CURSORC_INVALID_GTT_STATUS REG_BIT(8) 6310 + #define CURSORB_INVALID_GTT_STATUS REG_BIT(7) 6311 + #define CURSORA_INVALID_GTT_STATUS REG_BIT(6) 6312 + #define SPRITED_INVALID_GTT_STATUS REG_BIT(5) 6313 + #define 
SPRITEC_INVALID_GTT_STATUS REG_BIT(4) 6314 + #define PLANEB_INVALID_GTT_STATUS REG_BIT(3) 6315 + #define SPRITEB_INVALID_GTT_STATUS REG_BIT(2) 6316 + #define SPRITEA_INVALID_GTT_STATUS REG_BIT(1) 6317 + #define PLANEA_INVALID_GTT_STATUS REG_BIT(0) 6333 6318 6334 6319 #define DSPARB _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030) 6335 6320 #define DSPARB_CSTART_MASK (0x7f << 7) ··· 8278 8263 8279 8264 /* 8280 8265 * The below are numbered starting from "S1" on gen11/gen12, but starting 8281 - * with gen13 display, the bspec switches to a 0-based numbering scheme 8266 + * with display 13, the bspec switches to a 0-based numbering scheme 8282 8267 * (although the addresses stay the same so new S0 = old S1, new S1 = old S2). 8283 8268 * We'll just use the 0-based numbering here for all platforms since it's the 8284 8269 * way things will be named by the hardware team going forward, plus it's more ··· 8323 8308 #define RESET_PCH_HANDSHAKE_ENABLE (1 << 4) 8324 8309 8325 8310 #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) 8326 - #define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30) 8327 - #define ICL_DELAY_PMRSP (1 << 22) 8328 - #define MASK_WAKEMEM (1 << 13) 8311 + #define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30) 8312 + #define ICL_DELAY_PMRSP REG_BIT(22) 8313 + #define DISABLE_FLR_SRC REG_BIT(15) 8314 + #define MASK_WAKEMEM REG_BIT(13) 8329 8315 8330 8316 #define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434) 8331 8317 #define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27) ··· 9797 9781 #define AUD_PIN_BUF_CTL _MMIO(0x48414) 9798 9782 #define AUD_PIN_BUF_ENABLE REG_BIT(31) 9799 9783 9784 + #define AUD_TS_CDCLK_M _MMIO(0x65ea0) 9785 + #define AUD_TS_CDCLK_M_EN REG_BIT(31) 9786 + #define AUD_TS_CDCLK_N _MMIO(0x65ea4) 9787 + 9800 9788 /* Display Audio Config Reg */ 9801 9789 #define AUD_CONFIG_BE _MMIO(0x65ef0) 9802 9790 #define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe))) ··· 10232 10212 #define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT) 10233 10213 #define 
TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT) 10234 10214 #define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT) 10235 - #define TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) (((val) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT) 10236 - #define TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) ((((val) & TGL_TRANS_DDI_PORT_MASK) >> TGL_TRANS_DDI_PORT_SHIFT) - 1) 10237 10215 #define TRANS_DDI_MODE_SELECT_MASK (7 << 24) 10238 10216 #define TRANS_DDI_MODE_SELECT_HDMI (0 << 24) 10239 10217 #define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
+3 -2
drivers/gpu/drm/i915/i915_switcheroo.c
··· 5 5 6 6 #include <linux/vga_switcheroo.h> 7 7 8 + #include "i915_driver.h" 8 9 #include "i915_drv.h" 9 10 #include "i915_switcheroo.h" 10 11 ··· 25 24 i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; 26 25 /* i915 resume handler doesn't set to D0 */ 27 26 pci_set_power_state(pdev, PCI_D0); 28 - i915_resume_switcheroo(i915); 27 + i915_driver_resume_switcheroo(i915); 29 28 i915->drm.switch_power_state = DRM_SWITCH_POWER_ON; 30 29 } else { 31 30 drm_info(&i915->drm, "switched off\n"); 32 31 i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; 33 - i915_suspend_switcheroo(i915, pmm); 32 + i915_driver_suspend_switcheroo(i915, pmm); 34 33 i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF; 35 34 } 36 35 }
+73 -2
drivers/gpu/drm/i915/i915_trace.h
··· 288 288 289 289 /* plane updates */ 290 290 291 - TRACE_EVENT(intel_update_plane, 291 + TRACE_EVENT(intel_plane_update_noarm, 292 292 TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), 293 293 TP_ARGS(plane, crtc), 294 294 ··· 317 317 DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) 318 318 ); 319 319 320 - TRACE_EVENT(intel_disable_plane, 320 + TRACE_EVENT(intel_plane_update_arm, 321 + TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), 322 + TP_ARGS(plane, crtc), 323 + 324 + TP_STRUCT__entry( 325 + __field(enum pipe, pipe) 326 + __field(u32, frame) 327 + __field(u32, scanline) 328 + __array(int, src, 4) 329 + __array(int, dst, 4) 330 + __string(name, plane->name) 331 + ), 332 + 333 + TP_fast_assign( 334 + __assign_str(name, plane->name); 335 + __entry->pipe = crtc->pipe; 336 + __entry->frame = intel_crtc_get_vblank_counter(crtc); 337 + __entry->scanline = intel_get_crtc_scanline(crtc); 338 + memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); 339 + memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); 340 + ), 341 + 342 + TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, 343 + pipe_name(__entry->pipe), __get_str(name), 344 + __entry->frame, __entry->scanline, 345 + DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), 346 + DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) 347 + ); 348 + 349 + TRACE_EVENT(intel_plane_disable_arm, 321 350 TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), 322 351 TP_ARGS(plane, crtc), 323 352 ··· 432 403 ); 433 404 434 405 /* pipe updates */ 406 + 407 + TRACE_EVENT(intel_crtc_vblank_work_start, 408 + TP_PROTO(struct intel_crtc *crtc), 409 + TP_ARGS(crtc), 410 + 411 + TP_STRUCT__entry( 412 + __field(enum pipe, pipe) 413 + __field(u32, frame) 414 + __field(u32, scanline) 415 + ), 416 + 417 + TP_fast_assign( 418 + __entry->pipe = crtc->pipe; 419 + __entry->frame = intel_crtc_get_vblank_counter(crtc); 420 + __entry->scanline = 
intel_get_crtc_scanline(crtc); 421 + ), 422 + 423 + TP_printk("pipe %c, frame=%u, scanline=%u", 424 + pipe_name(__entry->pipe), __entry->frame, 425 + __entry->scanline) 426 + ); 427 + 428 + TRACE_EVENT(intel_crtc_vblank_work_end, 429 + TP_PROTO(struct intel_crtc *crtc), 430 + TP_ARGS(crtc), 431 + 432 + TP_STRUCT__entry( 433 + __field(enum pipe, pipe) 434 + __field(u32, frame) 435 + __field(u32, scanline) 436 + ), 437 + 438 + TP_fast_assign( 439 + __entry->pipe = crtc->pipe; 440 + __entry->frame = intel_crtc_get_vblank_counter(crtc); 441 + __entry->scanline = intel_get_crtc_scanline(crtc); 442 + ), 443 + 444 + TP_printk("pipe %c, frame=%u, scanline=%u", 445 + pipe_name(__entry->pipe), __entry->frame, 446 + __entry->scanline) 447 + ); 435 448 436 449 TRACE_EVENT(intel_pipe_update_start, 437 450 TP_PROTO(struct intel_crtc *crtc),
+14 -5
drivers/gpu/drm/i915/i915_vma_types.h
··· 97 97 98 98 struct intel_remapped_plane_info { 99 99 /* in gtt pages */ 100 - u32 offset; 101 - u16 width; 102 - u16 height; 103 - u16 src_stride; 104 - u16 dst_stride; 100 + u32 offset:31; 101 + u32 linear:1; 102 + union { 103 + /* in gtt pages for !linear */ 104 + struct { 105 + u16 width; 106 + u16 height; 107 + u16 src_stride; 108 + u16 dst_stride; 109 + }; 110 + 111 + /* in gtt pages for linear */ 112 + u32 size; 113 + }; 105 114 } __packed; 106 115 107 116 struct intel_remapped_info {
+25 -16
drivers/gpu/drm/i915/intel_pm.c
··· 37 37 #include "display/intel_bw.h" 38 38 #include "display/intel_de.h" 39 39 #include "display/intel_display_types.h" 40 + #include "display/intel_fb.h" 40 41 #include "display/intel_fbc.h" 41 42 #include "display/intel_sprite.h" 42 43 #include "display/skl_universal_plane.h" ··· 161 160 * Display WA #0883: bxt 162 161 */ 163 162 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | 164 - ILK_DPFC_DISABLE_DUMMY0); 163 + DPFC_DISABLE_DUMMY0); 165 164 } 166 165 167 166 static void glk_init_clock_gating(struct drm_i915_private *dev_priv) ··· 3063 3062 * The BIOS provided WM memory latency values are often 3064 3063 * inadequate for high resolution displays. Adjust them. 3065 3064 */ 3066 - changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | 3067 - ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | 3068 - ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); 3065 + changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12); 3066 + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12); 3067 + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); 3069 3068 3070 3069 if (!changed) 3071 3070 return; ··· 3375 3374 * enabled sometime later. 
3376 3375 */ 3377 3376 if (DISPLAY_VER(dev_priv) == 5 && !merged->fbc_wm_enabled && 3378 - intel_fbc_is_active(dev_priv)) { 3377 + intel_fbc_is_active(&dev_priv->fbc)) { 3379 3378 for (level = 2; level <= max_level; level++) { 3380 3379 struct intel_wm_level *wm = &merged->wm[level]; 3381 3380 ··· 5095 5094 } 5096 5095 } 5097 5096 5097 + static bool icl_need_wm1_wa(struct drm_i915_private *i915, 5098 + enum plane_id plane_id) 5099 + { 5100 + /* 5101 + * Wa_1408961008:icl, ehl 5102 + * Wa_14012656716:tgl, adl 5103 + * Underruns with WM1+ disabled 5104 + */ 5105 + return DISPLAY_VER(i915) == 11 || 5106 + (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR); 5107 + } 5108 + 5098 5109 static int 5099 5110 skl_allocate_plane_ddb(struct intel_atomic_state *state, 5100 5111 struct intel_crtc *crtc) ··· 5277 5264 skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level], 5278 5265 total[plane_id], uv_total[plane_id]); 5279 5266 5280 - /* 5281 - * Wa_1408961008:icl, ehl 5282 - * Underruns with WM1+ disabled 5283 - */ 5284 - if (DISPLAY_VER(dev_priv) == 11 && 5267 + if (icl_need_wm1_wa(dev_priv, plane_id) && 5285 5268 level == 1 && wm->wm[0].enable) { 5286 5269 wm->wm[level].blocks = wm->wm[0].blocks; 5287 5270 wm->wm[level].lines = wm->wm[0].lines; ··· 7443 7434 { 7444 7435 /* Wa_1409120013:icl,ehl */ 7445 7436 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, 7446 - ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); 7437 + DPFC_CHICKEN_COMP_DUMMY_PIXEL); 7447 7438 7448 7439 /*Wa_14010594013:icl, ehl */ 7449 7440 intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, ··· 7456 7447 if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) || 7457 7448 IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) 7458 7449 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, 7459 - ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); 7450 + DPFC_CHICKEN_COMP_DUMMY_PIXEL); 7460 7451 7461 7452 /* Wa_1409825376:tgl (pre-prod)*/ 7462 7453 if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) ··· 7518 7509 * Display 
WA #0873: cfl 7519 7510 */ 7520 7511 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | 7521 - ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 7512 + DPFC_NUKE_ON_ANY_MODIFICATION); 7522 7513 } 7523 7514 7524 7515 static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) ··· 7551 7542 * Display WA #0873: kbl 7552 7543 */ 7553 7544 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | 7554 - ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 7545 + DPFC_NUKE_ON_ANY_MODIFICATION); 7555 7546 } 7556 7547 7557 7548 static void skl_init_clock_gating(struct drm_i915_private *dev_priv) ··· 7578 7569 * Display WA #0873: skl 7579 7570 */ 7580 7571 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | 7581 - ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 7572 + DPFC_NUKE_ON_ANY_MODIFICATION); 7582 7573 7583 7574 /* 7584 7575 * WaFbcHighMemBwCorruptionAvoidance:skl 7585 7576 * Display WA #0883: skl 7586 7577 */ 7587 7578 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | 7588 - ILK_DPFC_DISABLE_DUMMY0); 7579 + DPFC_DISABLE_DUMMY0); 7589 7580 } 7590 7581 7591 7582 static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
+3
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 590 590 pm_runtime_use_autosuspend(kdev); 591 591 } 592 592 593 + /* Enable by default */ 594 + pm_runtime_allow(kdev); 595 + 593 596 /* 594 597 * The core calls the driver load handler with an RPM reference held. 595 598 * We drop that here and will reacquire it during unloading in
+1 -1
drivers/gpu/drm/i915/intel_uncore.c
··· 22 22 */ 23 23 24 24 #include <linux/pm_runtime.h> 25 - #include <asm/iosf_mbi.h> 26 25 27 26 #include "gt/intel_lrc_reg.h" /* for shadow reg list */ 28 27 29 28 #include "i915_drv.h" 29 + #include "i915_iosf_mbi.h" 30 30 #include "i915_trace.h" 31 31 #include "i915_vgpu.h" 32 32 #include "intel_pm.h"
+2 -1
drivers/gpu/drm/i915/pxp/intel_pxp_session.c
··· 3 3 * Copyright(c) 2020, Intel Corporation. All rights reserved. 4 4 */ 5 5 6 - #include "drm/i915_drm.h" 6 + #include <drm/i915_drm.h> 7 + 7 8 #include "i915_drv.h" 8 9 9 10 #include "intel_pxp.h"
+4 -2
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
··· 4 4 */ 5 5 6 6 #include <linux/component.h> 7 - #include "drm/i915_pxp_tee_interface.h" 8 - #include "drm/i915_component.h" 7 + 8 + #include <drm/i915_pxp_tee_interface.h> 9 + #include <drm/i915_component.h> 10 + 9 11 #include "i915_drv.h" 10 12 #include "intel_pxp.h" 11 13 #include "intel_pxp_session.h"
+1 -2
drivers/gpu/drm/i915/vlv_sideband.c
··· 3 3 * Copyright © 2013-2021 Intel Corporation 4 4 */ 5 5 6 - #include <asm/iosf_mbi.h> 7 - 8 6 #include "i915_drv.h" 7 + #include "i915_iosf_mbi.h" 9 8 #include "vlv_sideband.h" 10 9 11 10 /*
+5 -3
include/drm/intel-gtt.h
··· 4 4 #ifndef _DRM_INTEL_GTT_H 5 5 #define _DRM_INTEL_GTT_H 6 6 7 - #include <linux/agp_backend.h> 8 - #include <linux/intel-iommu.h> 9 - #include <linux/kernel.h> 7 + #include <linux/types.h> 8 + 9 + struct agp_bridge_data; 10 + struct pci_dev; 11 + struct sg_table; 10 12 11 13 void intel_gtt_get(u64 *gtt_total, 12 14 phys_addr_t *mappable_base,