Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915/psr: Nuke PSR support for VLV and CHV

PSR hardware and hence the driver code for VLV and CHV deviates a lot from
their DDI counterparts. While the feature has been disabled for a long time
now, retaining support for these platforms is a maintenance burden. There
have been multiple refactoring commits to just keep the existing code for
these platforms in line with the rest. There are known issues that need to
be fixed to enable PSR on these platforms, and there is no PSR capable
platform in CI to ensure the code does not break again if we get around to
fixing the existing issues. On account of all these reasons, let's nuke
this code for now and bring it back if a need arises in the future.

Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180511230059.19387-1-dhinakaran.pandiyan@intel.com

Authored by: Dhinakaran Pandiyan
Committed by: Jani Nikula
ce3508fd 77312ae8

+27 -270
+5 -37
drivers/gpu/drm/i915/i915_debugfs.c
··· 2643 2643 { 2644 2644 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2645 2645 u32 psrperf = 0; 2646 - u32 stat[3]; 2647 - enum pipe pipe; 2648 2646 bool enabled = false; 2649 2647 bool sink_support; 2650 2648 ··· 2663 2665 seq_printf(m, "Re-enable work scheduled: %s\n", 2664 2666 yesno(work_busy(&dev_priv->psr.work.work))); 2665 2667 2666 - if (HAS_DDI(dev_priv)) { 2667 - if (dev_priv->psr.psr2_enabled) 2668 - enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; 2669 - else 2670 - enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2671 - } else { 2672 - for_each_pipe(dev_priv, pipe) { 2673 - enum transcoder cpu_transcoder = 2674 - intel_pipe_to_cpu_transcoder(dev_priv, pipe); 2675 - enum intel_display_power_domain power_domain; 2676 - 2677 - power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 2678 - if (!intel_display_power_get_if_enabled(dev_priv, 2679 - power_domain)) 2680 - continue; 2681 - 2682 - stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2683 - VLV_EDP_PSR_CURR_STATE_MASK; 2684 - if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2685 - (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2686 - enabled = true; 2687 - 2688 - intel_display_power_put(dev_priv, power_domain); 2689 - } 2690 - } 2668 + if (dev_priv->psr.psr2_enabled) 2669 + enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; 2670 + else 2671 + enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2691 2672 2692 2673 seq_printf(m, "Main link in standby mode: %s\n", 2693 2674 yesno(dev_priv->psr.link_standby)); 2694 2675 2695 - seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); 2696 - 2697 - if (!HAS_DDI(dev_priv)) 2698 - for_each_pipe(dev_priv, pipe) { 2699 - if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2700 - (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2701 - seq_printf(m, " pipe %c", pipe_name(pipe)); 2702 - } 2703 - seq_puts(m, "\n"); 2676 + seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled)); 2704 2677 2705 2678 /* 2706 - * VLV/CHV PSR has no kind of 
performance counter 2707 2679 * SKL+ Perf counter is reset to 0 everytime DC state is entered 2708 2680 */ 2709 2681 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-1
drivers/gpu/drm/i915/i915_drv.h
··· 607 607 bool link_standby; 608 608 bool colorimetry_support; 609 609 bool alpm; 610 - bool has_hw_tracking; 611 610 bool psr2_enabled; 612 611 u8 sink_sync_latency; 613 612 bool debug;
-2
drivers/gpu/drm/i915/i915_pci.c
··· 340 340 GEN(7), 341 341 .is_lp = 1, 342 342 .num_pipes = 2, 343 - .has_psr = 1, 344 343 .has_runtime_pm = 1, 345 344 .has_rc6 = 1, 346 345 .has_gmch_display = 1, ··· 432 433 .is_lp = 1, 433 434 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 434 435 .has_64bit_reloc = 1, 435 - .has_psr = 1, 436 436 .has_runtime_pm = 1, 437 437 .has_resource_streamer = 1, 438 438 .has_rc6 = 1,
-2
drivers/gpu/drm/i915/intel_drv.h
··· 1917 1917 unsigned frontbuffer_bits, 1918 1918 enum fb_op_origin origin); 1919 1919 void intel_psr_init(struct drm_i915_private *dev_priv); 1920 - void intel_psr_single_frame_update(struct drm_i915_private *dev_priv, 1921 - unsigned frontbuffer_bits); 1922 1920 void intel_psr_compute_config(struct intel_dp *intel_dp, 1923 1921 struct intel_crtc_state *crtc_state); 1924 1922 void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
-2
drivers/gpu/drm/i915/intel_frontbuffer.c
··· 153 153 /* Remove stale busy bits due to the old buffer. */ 154 154 dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits; 155 155 spin_unlock(&dev_priv->fb_tracking.lock); 156 - 157 - intel_psr_single_frame_update(dev_priv, frontbuffer_bits); 158 156 } 159 157 160 158 /**
+22 -226
drivers/gpu/drm/i915/intel_psr.c
··· 97 97 { 98 98 u32 debug_mask, mask; 99 99 100 - /* No PSR interrupts on VLV/CHV */ 101 - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 102 - return; 103 - 104 100 mask = EDP_PSR_ERROR(TRANSCODER_EDP); 105 101 debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) | 106 102 EDP_PSR_PRE_ENTRY(TRANSCODER_EDP); ··· 280 284 } 281 285 } 282 286 283 - static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe) 284 - { 285 - struct drm_i915_private *dev_priv = to_i915(dev); 286 - uint32_t val; 287 - 288 - val = I915_READ(VLV_PSRSTAT(pipe)) & 289 - VLV_EDP_PSR_CURR_STATE_MASK; 290 - return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 291 - (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE); 292 - } 293 - 294 - static void vlv_psr_setup_vsc(struct intel_dp *intel_dp, 295 - const struct intel_crtc_state *crtc_state) 296 - { 297 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 298 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 299 - uint32_t val; 300 - 301 - /* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */ 302 - val = I915_READ(VLV_VSCSDP(crtc->pipe)); 303 - val &= ~VLV_EDP_PSR_SDP_FREQ_MASK; 304 - val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME; 305 - I915_WRITE(VLV_VSCSDP(crtc->pipe), val); 306 - } 307 - 308 287 static void hsw_psr_setup_vsc(struct intel_dp *intel_dp, 309 288 const struct intel_crtc_state *crtc_state) 310 289 { ··· 310 339 311 340 intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state, 312 341 DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc)); 313 - } 314 - 315 - static void vlv_psr_enable_sink(struct intel_dp *intel_dp) 316 - { 317 - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 318 - DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); 319 342 } 320 343 321 344 static void hsw_psr_setup_aux(struct intel_dp *intel_dp) ··· 366 401 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); 367 402 368 403 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); 369 - } 370 - 371 - static void 
vlv_psr_enable_source(struct intel_dp *intel_dp, 372 - const struct intel_crtc_state *crtc_state) 373 - { 374 - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 375 - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 376 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 377 - 378 - /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */ 379 - I915_WRITE(VLV_PSRCTL(crtc->pipe), 380 - VLV_EDP_PSR_MODE_SW_TIMER | 381 - VLV_EDP_PSR_SRC_TRANSMITTER_STATE | 382 - VLV_EDP_PSR_ENABLE); 383 - } 384 - 385 - static void vlv_psr_activate(struct intel_dp *intel_dp) 386 - { 387 - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 388 - struct drm_device *dev = dig_port->base.base.dev; 389 - struct drm_i915_private *dev_priv = to_i915(dev); 390 - struct drm_crtc *crtc = dig_port->base.base.crtc; 391 - enum pipe pipe = to_intel_crtc(crtc)->pipe; 392 - 393 - /* 394 - * Let's do the transition from PSR_state 1 (inactive) to 395 - * PSR_state 2 (transition to active - static frame transmission). 396 - * Then Hardware is responsible for the transition to 397 - * PSR_state 3 (active - no Remote Frame Buffer (RFB) update). 398 - */ 399 - I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) | 400 - VLV_EDP_PSR_ACTIVE_ENTRY); 401 404 } 402 405 403 406 static void hsw_activate_psr1(struct intel_dp *intel_dp) ··· 536 603 * ones. Since by Display design transcoder EDP is tied to port A 537 604 * we can safely escape based on the port A. 538 605 */ 539 - if (HAS_DDI(dev_priv) && dig_port->base.port != PORT_A) { 606 + if (dig_port->base.port != PORT_A) { 540 607 DRM_DEBUG_KMS("PSR condition failed: Port not supported\n"); 541 - return; 542 - } 543 - 544 - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 545 - !dev_priv->psr.link_standby) { 546 - DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n"); 547 608 return; 548 609 } 549 610 ··· 688 761 * enabled. 
689 762 * However on some platforms we face issues when first 690 763 * activation follows a modeset so quickly. 691 - * - On VLV/CHV we get bank screen on first activation 692 764 * - On HSW/BDW we get a recoverable frozen screen until 693 765 * next exit-activate sequence. 694 766 */ ··· 697 771 698 772 unlock: 699 773 mutex_unlock(&dev_priv->psr.lock); 700 - } 701 - 702 - static void vlv_psr_disable(struct intel_dp *intel_dp, 703 - const struct intel_crtc_state *old_crtc_state) 704 - { 705 - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 706 - struct drm_device *dev = intel_dig_port->base.base.dev; 707 - struct drm_i915_private *dev_priv = to_i915(dev); 708 - struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 709 - uint32_t val; 710 - 711 - if (dev_priv->psr.active) { 712 - /* Put VLV PSR back to PSR_state 0 (disabled). */ 713 - if (intel_wait_for_register(dev_priv, 714 - VLV_PSRSTAT(crtc->pipe), 715 - VLV_EDP_PSR_IN_TRANS, 716 - 0, 717 - 1)) 718 - WARN(1, "PSR transition took longer than expected\n"); 719 - 720 - val = I915_READ(VLV_PSRCTL(crtc->pipe)); 721 - val &= ~VLV_EDP_PSR_ACTIVE_ENTRY; 722 - val &= ~VLV_EDP_PSR_ENABLE; 723 - val &= ~VLV_EDP_PSR_MODE_MASK; 724 - I915_WRITE(VLV_PSRCTL(crtc->pipe), val); 725 - 726 - dev_priv->psr.active = false; 727 - } else { 728 - WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe)); 729 - } 730 774 } 731 775 732 776 static void hsw_psr_disable(struct intel_dp *intel_dp, ··· 791 895 if (!intel_dp) 792 896 return false; 793 897 794 - if (HAS_DDI(dev_priv)) { 795 - if (dev_priv->psr.psr2_enabled) { 796 - reg = EDP_PSR2_STATUS; 797 - mask = EDP_PSR2_STATUS_STATE_MASK; 798 - } else { 799 - reg = EDP_PSR_STATUS; 800 - mask = EDP_PSR_STATUS_STATE_MASK; 801 - } 898 + if (dev_priv->psr.psr2_enabled) { 899 + reg = EDP_PSR2_STATUS; 900 + mask = EDP_PSR2_STATUS_STATE_MASK; 802 901 } else { 803 - struct drm_crtc *crtc = 804 - dp_to_dig_port(intel_dp)->base.base.crtc; 805 - enum pipe pipe = 
to_intel_crtc(crtc)->pipe; 806 - 807 - reg = VLV_PSRSTAT(pipe); 808 - mask = VLV_EDP_PSR_IN_TRANS; 902 + reg = EDP_PSR_STATUS; 903 + mask = EDP_PSR_STATUS_STATE_MASK; 809 904 } 810 905 811 906 mutex_unlock(&dev_priv->psr.lock); ··· 841 954 842 955 static void intel_psr_exit(struct drm_i915_private *dev_priv) 843 956 { 844 - struct intel_dp *intel_dp = dev_priv->psr.enabled; 845 - struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 846 - enum pipe pipe = to_intel_crtc(crtc)->pipe; 847 957 u32 val; 848 958 849 959 if (!dev_priv->psr.active) 850 960 return; 851 961 852 - if (HAS_DDI(dev_priv)) { 853 - if (dev_priv->psr.psr2_enabled) { 854 - val = I915_READ(EDP_PSR2_CTL); 855 - WARN_ON(!(val & EDP_PSR2_ENABLE)); 856 - I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); 857 - } else { 858 - val = I915_READ(EDP_PSR_CTL); 859 - WARN_ON(!(val & EDP_PSR_ENABLE)); 860 - I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); 861 - } 962 + if (dev_priv->psr.psr2_enabled) { 963 + val = I915_READ(EDP_PSR2_CTL); 964 + WARN_ON(!(val & EDP_PSR2_ENABLE)); 965 + I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); 862 966 } else { 863 - val = I915_READ(VLV_PSRCTL(pipe)); 864 - 865 - /* 866 - * Here we do the transition drirectly from 867 - * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to 868 - * PSR_state 5 (exit). 869 - * PSR State 4 (active with single frame update) can be skipped. 870 - * On PSR_state 5 (exit) Hardware is responsible to transition 871 - * back to PSR_state 1 (inactive). 872 - * Now we are at Same state after vlv_psr_enable_source. 873 - */ 874 - val &= ~VLV_EDP_PSR_ACTIVE_ENTRY; 875 - I915_WRITE(VLV_PSRCTL(pipe), val); 876 - 877 - /* 878 - * Send AUX wake up - Spec says after transitioning to PSR 879 - * active we have to send AUX wake up by writing 01h in DPCD 880 - * 600h of sink device. 
881 - * XXX: This might slow down the transition, but without this 882 - * HW doesn't complete the transition to PSR_state 1 and we 883 - * never get the screen updated. 884 - */ 885 - drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 886 - DP_SET_POWER_D0); 967 + val = I915_READ(EDP_PSR_CTL); 968 + WARN_ON(!(val & EDP_PSR_ENABLE)); 969 + I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); 887 970 } 888 - 889 971 dev_priv->psr.active = false; 890 - } 891 - 892 - /** 893 - * intel_psr_single_frame_update - Single Frame Update 894 - * @dev_priv: i915 device 895 - * @frontbuffer_bits: frontbuffer plane tracking bits 896 - * 897 - * Some platforms support a single frame update feature that is used to 898 - * send and update only one frame on Remote Frame Buffer. 899 - * So far it is only implemented for Valleyview and Cherryview because 900 - * hardware requires this to be done before a page flip. 901 - */ 902 - void intel_psr_single_frame_update(struct drm_i915_private *dev_priv, 903 - unsigned frontbuffer_bits) 904 - { 905 - struct drm_crtc *crtc; 906 - enum pipe pipe; 907 - u32 val; 908 - 909 - if (!CAN_PSR(dev_priv)) 910 - return; 911 - 912 - /* 913 - * Single frame update is already supported on BDW+ but it requires 914 - * many W/A and it isn't really needed. 915 - */ 916 - if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 917 - return; 918 - 919 - mutex_lock(&dev_priv->psr.lock); 920 - if (!dev_priv->psr.enabled) { 921 - mutex_unlock(&dev_priv->psr.lock); 922 - return; 923 - } 924 - 925 - crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc; 926 - pipe = to_intel_crtc(crtc)->pipe; 927 - 928 - if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) { 929 - val = I915_READ(VLV_PSRCTL(pipe)); 930 - 931 - /* 932 - * We need to set this bit before writing registers for a flip. 933 - * This bit will be self-clear when it gets to the PSR active state. 
934 - */ 935 - I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE); 936 - } 937 - mutex_unlock(&dev_priv->psr.lock); 938 972 } 939 973 940 974 /** ··· 880 1072 if (!CAN_PSR(dev_priv)) 881 1073 return; 882 1074 883 - if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP) 1075 + if (origin == ORIGIN_FLIP) 884 1076 return; 885 1077 886 1078 mutex_lock(&dev_priv->psr.lock); ··· 923 1115 if (!CAN_PSR(dev_priv)) 924 1116 return; 925 1117 926 - if (dev_priv->psr.has_hw_tracking && origin == ORIGIN_FLIP) 1118 + if (origin == ORIGIN_FLIP) 927 1119 return; 928 1120 929 1121 mutex_lock(&dev_priv->psr.lock); ··· 940 1132 941 1133 /* By definition flush = invalidate + flush */ 942 1134 if (frontbuffer_bits) { 943 - if (dev_priv->psr.psr2_enabled || 944 - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1135 + if (dev_priv->psr.psr2_enabled) { 945 1136 intel_psr_exit(dev_priv); 946 1137 } else { 947 1138 /* ··· 992 1185 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 993 1186 /* HSW and BDW require workarounds that we don't implement. */ 994 1187 dev_priv->psr.link_standby = false; 995 - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 996 - /* On VLV and CHV only standby mode is supported. 
*/ 997 - dev_priv->psr.link_standby = true; 998 1188 else 999 1189 /* For new platforms let's respect VBT back again */ 1000 1190 dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; ··· 1009 1205 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); 1010 1206 mutex_init(&dev_priv->psr.lock); 1011 1207 1012 - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1013 - dev_priv->psr.enable_source = vlv_psr_enable_source; 1014 - dev_priv->psr.disable_source = vlv_psr_disable; 1015 - dev_priv->psr.enable_sink = vlv_psr_enable_sink; 1016 - dev_priv->psr.activate = vlv_psr_activate; 1017 - dev_priv->psr.setup_vsc = vlv_psr_setup_vsc; 1018 - } else { 1019 - dev_priv->psr.has_hw_tracking = true; 1020 - dev_priv->psr.enable_source = hsw_psr_enable_source; 1021 - dev_priv->psr.disable_source = hsw_psr_disable; 1022 - dev_priv->psr.enable_sink = hsw_psr_enable_sink; 1023 - dev_priv->psr.activate = hsw_psr_activate; 1024 - dev_priv->psr.setup_vsc = hsw_psr_setup_vsc; 1025 - } 1208 + dev_priv->psr.enable_source = hsw_psr_enable_source; 1209 + dev_priv->psr.disable_source = hsw_psr_disable; 1210 + dev_priv->psr.enable_sink = hsw_psr_enable_sink; 1211 + dev_priv->psr.activate = hsw_psr_activate; 1212 + dev_priv->psr.setup_vsc = hsw_psr_setup_vsc; 1213 + 1026 1214 }