Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2023-11-23' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

drm/i915 feature pull for v6.8:

Features and functionality:
- Major DP MST improvements on bandwidth management, DSC (Imre, Stan, Ville)
- DP panel replay enabling (Animesh, Jouni)
- MTL C20 phy state verification (Mika)
- MTL DP DSC fractional bpp support (Ankit, Vandita, Swati, Imre)
- Audio fastset support (Ville)

Refactoring and cleanups:
- Use dma fence interfaces instead of i915_sw_fence (Jouni)
- Separate gem and display code (Jouni, Juha-Pekka)
- AUX register macro refactoring (Jani)
- Separate display module/device parameters from the rest (Jouni)
- Move display capabilities debugfs under display (Vinod)
- Makefile cleanup (Jani)
- Register cleanups (Ville)
- Engine iterator cleanups (Tvrtko)
- Move display lock inits under display/ (Jani)
- VLV/CHV DPIO PHY register and interface refactoring (Jani)
- DSI VBT sequence refactoring (Jani, Andy Shevchenko)
- C10/C20 PHY PLL hardware readout and calculation abstractions (Lucas)
- DPLL code cleanups (Ville)
- Cleanup PXP plane protection checks (Jani)

Fixes:
- Replace VLV/CHV DSI GPIO direct access with proper GPIO API usage (Andy Shevchenko)
- Fix VLV/CHV DSI GPIO wrong initial value (Hans de Goede)
- Fix UHBR data, link M/N/TU and PBN values (Imre)
- Fix HDCP state on an enable/disable cycle (Suraj)
- Fix DP MST modeset sequence to be according to spec (Ville)
- Improved atomicity for multi-pipe commits (Ville)
- Update URLs in i915 MAINTAINERS entry and code (Jani)
- Check for VGA converter presence in eDP probe (Ville)
- Fix surface size checks (Ville)
- Fix LNL port/phy assignment (Lucas)
- Reset C10/C20 message bus harder to avoid sporadic failures (Mika)
- Fix bogus VBT HDMI level shift on BDW (Ville)
- Add workaround for LNL underruns when enabling FBC (Vinod)
- DSB refactoring (Animesh)
- DPT refactoring (Juha-Pekka)
- Disable DSC on DP MST on ICL (Imre)
- Fix PSR VSC packet setup timing (Mika)
- Fix LUT rounding and conversions (Ville)

DRM core display changes:
- DP MST fixes, helpers, refactoring to support bandwidth management (Imre)
- DP MST PBN divider value refactoring and fixes (Imre)
- DPCD register definitions (Ankit, Imre)
- Add helper to get DSC bpp precision (Ankit)
- Fix color LUT rounding (Ville)

From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87v89sl2ao.fsf@intel.com
[sima: Some conflicts in the amdgpu dp mst code]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>

+4400 -1965
+2 -2
MAINTAINERS
··· 10645 10645 M: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com> 10646 10646 L: intel-gfx@lists.freedesktop.org 10647 10647 S: Supported 10648 - W: https://01.org/linuxgraphics/ 10648 + W: https://drm.pages.freedesktop.org/intel-docs/ 10649 10649 Q: http://patchwork.freedesktop.org/project/intel-gfx/ 10650 - B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs 10650 + B: https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html 10651 10651 C: irc://irc.oftc.net/intel-gfx 10652 10652 T: git git://anongit.freedesktop.org/drm-intel 10653 10653 F: Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
+4 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 85 85 #include <drm/drm_atomic_uapi.h> 86 86 #include <drm/drm_atomic_helper.h> 87 87 #include <drm/drm_blend.h> 88 + #include <drm/drm_fixed.h> 88 89 #include <drm/drm_fourcc.h> 89 90 #include <drm/drm_edid.h> 90 91 #include <drm/drm_eld.h> ··· 6911 6910 if (IS_ERR(mst_state)) 6912 6911 return PTR_ERR(mst_state); 6913 6912 6914 - if (!mst_state->pbn_div) 6915 - mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); 6913 + if (!mst_state->pbn_div.full) 6914 + mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link)); 6916 6915 6917 6916 if (!state->duplicated) { 6918 6917 int max_bpc = conn_state->max_requested_bpc; ··· 6924 6923 max_bpc); 6925 6924 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; 6926 6925 clock = adjusted_mode->clock; 6927 - dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); 6926 + dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4); 6928 6927 } 6929 6928 6930 6929 dm_new_connector_state->vcpi_slots =
+2 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 31 31 #include <drm/drm_probe_helper.h> 32 32 #include <drm/amdgpu_drm.h> 33 33 #include <drm/drm_edid.h> 34 + #include <drm/drm_fixed.h> 34 35 35 36 #include "dm_services.h" 36 37 #include "amdgpu.h" ··· 211 210 struct drm_dp_mst_atomic_payload *old_payload) 212 211 { 213 212 struct drm_dp_mst_atomic_payload *pos; 214 - int pbn_per_slot = mst_state->pbn_div; 213 + int pbn_per_slot = dfixed_trunc(mst_state->pbn_div); 215 214 u8 next_payload_vc_start = mgr->next_start_slot; 216 215 u8 payload_vc_start = new_payload->vc_start_slot; 217 216 u8 allocated_time_slots;
+4 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 27 27 #include <drm/display/drm_dp_mst_helper.h> 28 28 #include <drm/drm_atomic.h> 29 29 #include <drm/drm_atomic_helper.h> 30 + #include <drm/drm_fixed.h> 30 31 #include "dm_services.h" 31 32 #include "amdgpu.h" 32 33 #include "amdgpu_dm.h" ··· 942 941 link_timeslots_used = 0; 943 942 944 943 for (i = 0; i < count; i++) 945 - link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div); 944 + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, dfixed_trunc(mst_state->pbn_div)); 946 945 947 946 fair_pbn_alloc = 948 - (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div; 947 + (63 - link_timeslots_used) / remaining_to_increase * dfixed_trunc(mst_state->pbn_div); 949 948 950 949 if (initial_slack[next_index] > fair_pbn_alloc) { 951 950 vars[next_index].pbn += fair_pbn_alloc; ··· 1643 1642 } else { 1644 1643 /* check if mode could be supported within full_pbn */ 1645 1644 bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; 1646 - pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false); 1645 + pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4); 1647 1646 if (pbn > full_pbn) 1648 1647 return DC_FAIL_BANDWIDTH_VALIDATE; 1649 1648 }
+161
drivers/gpu/drm/display/drm_dp_helper.c
··· 2245 2245 { OUI(0x00, 0x00, 0x00), DEVICE_ID('C', 'H', '7', '5', '1', '1'), false, BIT(DP_DPCD_QUIRK_NO_SINK_COUNT) }, 2246 2246 /* Synaptics DP1.4 MST hubs can support DSC without virtual DPCD */ 2247 2247 { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) }, 2248 + /* Synaptics DP1.4 MST hubs require DSC for some modes on which it applies HBLANK expansion. */ 2249 + { OUI(0x90, 0xCC, 0x24), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) }, 2248 2250 /* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */ 2249 2251 { OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) }, 2250 2252 }; ··· 2327 2325 return 0; 2328 2326 } 2329 2327 EXPORT_SYMBOL(drm_dp_read_desc); 2328 + 2329 + /** 2330 + * drm_dp_dsc_sink_bpp_incr() - Get bits per pixel increment 2331 + * @dsc_dpcd: DSC capabilities from DPCD 2332 + * 2333 + * Returns the bpp precision supported by the DP sink. 
2334 + */ 2335 + u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) 2336 + { 2337 + u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT]; 2338 + 2339 + switch (bpp_increment_dpcd) { 2340 + case DP_DSC_BITS_PER_PIXEL_1_16: 2341 + return 16; 2342 + case DP_DSC_BITS_PER_PIXEL_1_8: 2343 + return 8; 2344 + case DP_DSC_BITS_PER_PIXEL_1_4: 2345 + return 4; 2346 + case DP_DSC_BITS_PER_PIXEL_1_2: 2347 + return 2; 2348 + case DP_DSC_BITS_PER_PIXEL_1_1: 2349 + return 1; 2350 + } 2351 + 2352 + return 0; 2353 + } 2354 + EXPORT_SYMBOL(drm_dp_dsc_sink_bpp_incr); 2330 2355 2331 2356 /** 2332 2357 * drm_dp_dsc_sink_max_slice_count() - Get the max slice count ··· 3927 3898 EXPORT_SYMBOL(drm_panel_dp_aux_backlight); 3928 3899 3929 3900 #endif 3901 + 3902 + /* See DP Standard v2.1 2.6.4.4.1.1, 2.8.4.4, 2.8.7 */ 3903 + static int drm_dp_link_symbol_cycles(int lane_count, int pixels, int bpp_x16, 3904 + int symbol_size, bool is_mst) 3905 + { 3906 + int cycles = DIV_ROUND_UP(pixels * bpp_x16, 16 * symbol_size * lane_count); 3907 + int align = is_mst ? 4 / lane_count : 1; 3908 + 3909 + return ALIGN(cycles, align); 3910 + } 3911 + 3912 + static int drm_dp_link_dsc_symbol_cycles(int lane_count, int pixels, int slice_count, 3913 + int bpp_x16, int symbol_size, bool is_mst) 3914 + { 3915 + int slice_pixels = DIV_ROUND_UP(pixels, slice_count); 3916 + int slice_data_cycles = drm_dp_link_symbol_cycles(lane_count, slice_pixels, 3917 + bpp_x16, symbol_size, is_mst); 3918 + int slice_eoc_cycles = is_mst ? 
4 / lane_count : 1; 3919 + 3920 + return slice_count * (slice_data_cycles + slice_eoc_cycles); 3921 + } 3922 + 3923 + /** 3924 + * drm_dp_bw_overhead - Calculate the BW overhead of a DP link stream 3925 + * @lane_count: DP link lane count 3926 + * @hactive: pixel count of the active period in one scanline of the stream 3927 + * @dsc_slice_count: DSC slice count if @flags/DRM_DP_LINK_BW_OVERHEAD_DSC is set 3928 + * @bpp_x16: bits per pixel in .4 binary fixed point 3929 + * @flags: DRM_DP_OVERHEAD_x flags 3930 + * 3931 + * Calculate the BW allocation overhead of a DP link stream, depending 3932 + * on the link's 3933 + * - @lane_count 3934 + * - SST/MST mode (@flags / %DRM_DP_OVERHEAD_MST) 3935 + * - symbol size (@flags / %DRM_DP_OVERHEAD_UHBR) 3936 + * - FEC mode (@flags / %DRM_DP_OVERHEAD_FEC) 3937 + * - SSC/REF_CLK mode (@flags / %DRM_DP_OVERHEAD_SSC_REF_CLK) 3938 + * as well as the stream's 3939 + * - @hactive timing 3940 + * - @bpp_x16 color depth 3941 + * - compression mode (@flags / %DRM_DP_OVERHEAD_DSC). 3942 + * Note that this overhead doesn't account for the 8b/10b, 128b/132b 3943 + * channel coding efficiency, for that see 3944 + * @drm_dp_link_bw_channel_coding_efficiency(). 3945 + * 3946 + * Returns the overhead as 100% + overhead% in 1ppm units. 3947 + */ 3948 + int drm_dp_bw_overhead(int lane_count, int hactive, 3949 + int dsc_slice_count, 3950 + int bpp_x16, unsigned long flags) 3951 + { 3952 + int symbol_size = flags & DRM_DP_BW_OVERHEAD_UHBR ? 
32 : 8; 3953 + bool is_mst = flags & DRM_DP_BW_OVERHEAD_MST; 3954 + u32 overhead = 1000000; 3955 + int symbol_cycles; 3956 + 3957 + /* 3958 + * DP Standard v2.1 2.6.4.1 3959 + * SSC downspread and ref clock variation margin: 3960 + * 5300ppm + 300ppm ~ 0.6% 3961 + */ 3962 + if (flags & DRM_DP_BW_OVERHEAD_SSC_REF_CLK) 3963 + overhead += 6000; 3964 + 3965 + /* 3966 + * DP Standard v2.1 2.6.4.1.1, 3.5.1.5.4: 3967 + * FEC symbol insertions for 8b/10b channel coding: 3968 + * After each 250 data symbols on 2-4 lanes: 3969 + * 250 LL + 5 FEC_PARITY_PH + 1 CD_ADJ (256 byte FEC block) 3970 + * After each 2 x 250 data symbols on 1 lane: 3971 + * 2 * 250 LL + 11 FEC_PARITY_PH + 1 CD_ADJ (512 byte FEC block) 3972 + * After 256 (2-4 lanes) or 128 (1 lane) FEC blocks: 3973 + * 256 * 256 bytes + 1 FEC_PM 3974 + * or 3975 + * 128 * 512 bytes + 1 FEC_PM 3976 + * (256 * 6 + 1) / (256 * 250) = 2.4015625 % 3977 + */ 3978 + if (flags & DRM_DP_BW_OVERHEAD_FEC) 3979 + overhead += 24016; 3980 + 3981 + /* 3982 + * DP Standard v2.1 2.7.9, 5.9.7 3983 + * The FEC overhead for UHBR is accounted for in its 96.71% channel 3984 + * coding efficiency. 
3985 + */ 3986 + WARN_ON((flags & DRM_DP_BW_OVERHEAD_UHBR) && 3987 + (flags & DRM_DP_BW_OVERHEAD_FEC)); 3988 + 3989 + if (flags & DRM_DP_BW_OVERHEAD_DSC) 3990 + symbol_cycles = drm_dp_link_dsc_symbol_cycles(lane_count, hactive, 3991 + dsc_slice_count, 3992 + bpp_x16, symbol_size, 3993 + is_mst); 3994 + else 3995 + symbol_cycles = drm_dp_link_symbol_cycles(lane_count, hactive, 3996 + bpp_x16, symbol_size, 3997 + is_mst); 3998 + 3999 + return DIV_ROUND_UP_ULL(mul_u32_u32(symbol_cycles * symbol_size * lane_count, 4000 + overhead * 16), 4001 + hactive * bpp_x16); 4002 + } 4003 + EXPORT_SYMBOL(drm_dp_bw_overhead); 4004 + 4005 + /** 4006 + * drm_dp_bw_channel_coding_efficiency - Get a DP link's channel coding efficiency 4007 + * @is_uhbr: Whether the link has a 128b/132b channel coding 4008 + * 4009 + * Return the channel coding efficiency of the given DP link type, which is 4010 + * either 8b/10b or 128b/132b (aka UHBR). The corresponding overhead includes 4011 + * the 8b -> 10b, 128b -> 132b pixel data to link symbol conversion overhead 4012 + * and for 128b/132b any link or PHY level control symbol insertion overhead 4013 + * (LLCP, FEC, PHY sync, see DP Standard v2.1 3.5.2.18). For 8b/10b the 4014 + * corresponding FEC overhead is BW allocation specific, included in the value 4015 + * returned by drm_dp_bw_overhead(). 4016 + * 4017 + * Returns the efficiency in the 100%/coding-overhead% ratio in 4018 + * 1ppm units. 4019 + */ 4020 + int drm_dp_bw_channel_coding_efficiency(bool is_uhbr) 4021 + { 4022 + if (is_uhbr) 4023 + return 967100; 4024 + else 4025 + /* 4026 + * Note that on 8b/10b MST the efficiency is only 4027 + * 78.75% due to the 1 out of 64 MTPH packet overhead, 4028 + * not accounted for here. 4029 + */ 4030 + return 800000; 4031 + } 4032 + EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency);
+181 -53
drivers/gpu/drm/display/drm_dp_mst_topology.c
··· 43 43 #include <drm/drm_atomic_helper.h> 44 44 #include <drm/drm_drv.h> 45 45 #include <drm/drm_edid.h> 46 + #include <drm/drm_fixed.h> 46 47 #include <drm/drm_print.h> 47 48 #include <drm/drm_probe_helper.h> 48 49 ··· 3579 3578 * value is in units of PBNs/(timeslots/1 MTP). This value can be used to 3580 3579 * convert the number of PBNs required for a given stream to the number of 3581 3580 * timeslots this stream requires in each MTP. 3581 + * 3582 + * Returns the BW / timeslot value in 20.12 fixed point format. 3582 3583 */ 3583 - int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, 3584 - int link_rate, int link_lane_count) 3584 + fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, 3585 + int link_rate, int link_lane_count) 3585 3586 { 3587 + int ch_coding_efficiency = 3588 + drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate)); 3589 + fixed20_12 ret; 3590 + 3586 3591 if (link_rate == 0 || link_lane_count == 0) 3587 3592 drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n", 3588 3593 link_rate, link_lane_count); 3589 3594 3590 - /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */ 3591 - return link_rate * link_lane_count / 54000; 3595 + /* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */ 3596 + ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count, 3597 + ch_coding_efficiency), 3598 + (1000000ULL * 8 * 5400) >> 12); 3599 + 3600 + return ret; 3592 3601 } 3593 3602 EXPORT_SYMBOL(drm_dp_get_vc_payload_bw); 3594 3603 ··· 4346 4335 } 4347 4336 } 4348 4337 4349 - req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div); 4338 + req_slots = DIV_ROUND_UP(dfixed_const(pbn), topology_state->pbn_div.full); 4350 4339 4351 4340 drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n", 4352 4341 port->connector->base.id, port->connector->name, ··· 4729 4718 4730 4719 /** 4731 4720 * 
drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. 4732 - * @clock: dot clock for the mode 4733 - * @bpp: bpp for the mode. 4734 - * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel 4721 + * @clock: dot clock 4722 + * @bpp: bpp as .4 binary fixed point 4735 4723 * 4736 4724 * This uses the formula in the spec to calculate the PBN value for a mode. 4737 4725 */ 4738 - int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc) 4726 + int drm_dp_calc_pbn_mode(int clock, int bpp) 4739 4727 { 4740 4728 /* 4741 - * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 4742 4729 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on 4743 4730 * common multiplier to render an integer PBN for all link rate/lane 4744 4731 * counts combinations 4745 4732 * calculate 4746 - * peak_kbps *= (1006/1000) 4747 - * peak_kbps *= (64/54) 4748 - * peak_kbps *= 8 convert to bytes 4749 - * 4750 - * If the bpp is in units of 1/16, further divide by 16. Put this 4751 - * factor in the numerator rather than the denominator to avoid 4752 - * integer overflow 4733 + * peak_kbps = clock * bpp / 16 4734 + * peak_kbps *= SSC overhead / 1000000 4735 + * peak_kbps /= 8 convert to Kbytes 4736 + * peak_kBps *= (64/54) / 1000 convert to PBN 4753 4737 */ 4738 + /* 4739 + * TODO: Use the actual link and mode parameters to calculate 4740 + * the overhead. For now it's assumed that these are 4741 + * 4 link lanes, 4096 hactive pixels, which don't add any 4742 + * significant data padding overhead and that there is no DSC 4743 + * or FEC overhead. 
4744 + */ 4745 + int overhead = drm_dp_bw_overhead(4, 4096, 0, bpp, 4746 + DRM_DP_BW_OVERHEAD_MST | 4747 + DRM_DP_BW_OVERHEAD_SSC_REF_CLK); 4754 4748 4755 - if (dsc) 4756 - return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006), 4757 - 8 * 54 * 1000 * 1000); 4758 - 4759 - return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006), 4760 - 8 * 54 * 1000 * 1000); 4749 + return DIV64_U64_ROUND_UP(mul_u32_u32(clock * bpp, 64 * overhead >> 4), 4750 + 1000000ULL * 8 * 54 * 1000); 4761 4751 } 4762 4752 EXPORT_SYMBOL(drm_dp_calc_pbn_mode); 4763 4753 ··· 4883 4871 state = to_drm_dp_mst_topology_state(mgr->base.state); 4884 4872 seq_printf(m, "\n*** Atomic state info ***\n"); 4885 4873 seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n", 4886 - state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div); 4874 + state->payload_mask, mgr->max_payloads, state->start_slot, 4875 + dfixed_trunc(state->pbn_div)); 4887 4876 4888 4877 seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | status | sink name |\n"); 4889 4878 for (i = 0; i < mgr->max_payloads; i++) { ··· 5149 5136 return false; 5150 5137 } 5151 5138 5139 + static bool 5140 + drm_dp_mst_port_downstream_of_parent_locked(struct drm_dp_mst_topology_mgr *mgr, 5141 + struct drm_dp_mst_port *port, 5142 + struct drm_dp_mst_port *parent) 5143 + { 5144 + if (!mgr->mst_primary) 5145 + return false; 5146 + 5147 + port = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary, 5148 + port); 5149 + if (!port) 5150 + return false; 5151 + 5152 + if (!parent) 5153 + return true; 5154 + 5155 + parent = drm_dp_mst_topology_get_port_validated_locked(mgr->mst_primary, 5156 + parent); 5157 + if (!parent) 5158 + return false; 5159 + 5160 + if (!parent->mstb) 5161 + return false; 5162 + 5163 + return drm_dp_mst_port_downstream_of_branch(port, parent->mstb); 5164 + } 5165 + 5166 + /** 5167 + * drm_dp_mst_port_downstream_of_parent - check if a port is downstream of a parent 
port 5168 + * @mgr: MST topology manager 5169 + * @port: the port being looked up 5170 + * @parent: the parent port 5171 + * 5172 + * The function returns %true if @port is downstream of @parent. If @parent is 5173 + * %NULL - denoting the root port - the function returns %true if @port is in 5174 + * @mgr's topology. 5175 + */ 5176 + bool 5177 + drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr, 5178 + struct drm_dp_mst_port *port, 5179 + struct drm_dp_mst_port *parent) 5180 + { 5181 + bool ret; 5182 + 5183 + mutex_lock(&mgr->lock); 5184 + ret = drm_dp_mst_port_downstream_of_parent_locked(mgr, port, parent); 5185 + mutex_unlock(&mgr->lock); 5186 + 5187 + return ret; 5188 + } 5189 + EXPORT_SYMBOL(drm_dp_mst_port_downstream_of_parent); 5190 + 5152 5191 static int 5153 5192 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, 5154 - struct drm_dp_mst_topology_state *state); 5193 + struct drm_dp_mst_topology_state *state, 5194 + struct drm_dp_mst_port **failing_port); 5155 5195 5156 5196 static int 5157 5197 drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb, 5158 - struct drm_dp_mst_topology_state *state) 5198 + struct drm_dp_mst_topology_state *state, 5199 + struct drm_dp_mst_port **failing_port) 5159 5200 { 5160 5201 struct drm_dp_mst_atomic_payload *payload; 5161 5202 struct drm_dp_mst_port *port; ··· 5238 5171 drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb); 5239 5172 5240 5173 list_for_each_entry(port, &mstb->ports, next) { 5241 - ret = drm_dp_mst_atomic_check_port_bw_limit(port, state); 5174 + ret = drm_dp_mst_atomic_check_port_bw_limit(port, state, failing_port); 5242 5175 if (ret < 0) 5243 5176 return ret; 5244 5177 ··· 5250 5183 5251 5184 static int 5252 5185 drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, 5253 - struct drm_dp_mst_topology_state *state) 5186 + struct drm_dp_mst_topology_state *state, 5187 + struct drm_dp_mst_port **failing_port) 5254 
5188 { 5255 5189 struct drm_dp_mst_atomic_payload *payload; 5256 5190 int pbn_used = 0; ··· 5272 5204 drm_dbg_atomic(port->mgr->dev, 5273 5205 "[MSTB:%p] [MST PORT:%p] no BW available for the port\n", 5274 5206 port->parent, port); 5207 + *failing_port = port; 5275 5208 return -EINVAL; 5276 5209 } 5277 5210 5278 5211 pbn_used = payload->pbn; 5279 5212 } else { 5280 5213 pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb, 5281 - state); 5214 + state, 5215 + failing_port); 5282 5216 if (pbn_used <= 0) 5283 5217 return pbn_used; 5284 5218 } ··· 5289 5219 drm_dbg_atomic(port->mgr->dev, 5290 5220 "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n", 5291 5221 port->parent, port, pbn_used, port->full_pbn); 5222 + *failing_port = port; 5292 5223 return -ENOSPC; 5293 5224 } 5294 5225 ··· 5342 5271 } 5343 5272 5344 5273 if (!payload_count) 5345 - mst_state->pbn_div = 0; 5274 + mst_state->pbn_div.full = dfixed_const(0); 5346 5275 5347 5276 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n", 5348 - mgr, mst_state, mst_state->pbn_div, avail_slots, 5277 + mgr, mst_state, dfixed_trunc(mst_state->pbn_div), avail_slots, 5349 5278 mst_state->total_avail_slots - avail_slots); 5350 5279 5351 5280 return 0; ··· 5468 5397 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); 5469 5398 5470 5399 /** 5400 + * drm_dp_mst_atomic_check_mgr - Check the atomic state of an MST topology manager 5401 + * @state: The global atomic state 5402 + * @mgr: Manager to check 5403 + * @mst_state: The MST atomic state for @mgr 5404 + * @failing_port: Returns the port with a BW limitation 5405 + * 5406 + * Checks the given MST manager's topology state for an atomic update to ensure 5407 + * that it's valid. This includes checking whether there's enough bandwidth to 5408 + * support the new timeslot allocations in the atomic update. 
5409 + * 5410 + * Any atomic drivers supporting DP MST must make sure to call this or 5411 + * the drm_dp_mst_atomic_check() function after checking the rest of their state 5412 + * in their &drm_mode_config_funcs.atomic_check() callback. 5413 + * 5414 + * See also: 5415 + * drm_dp_mst_atomic_check() 5416 + * drm_dp_atomic_find_time_slots() 5417 + * drm_dp_atomic_release_time_slots() 5418 + * 5419 + * Returns: 5420 + * - 0 if the new state is valid 5421 + * - %-ENOSPC, if the new state is invalid, because of BW limitation 5422 + * @failing_port is set to: 5423 + * - The non-root port where a BW limit check failed 5424 + * with all the ports downstream of @failing_port passing 5425 + * the BW limit check. 5426 + * The returned port pointer is valid until at least 5427 + * one payload downstream of it exists. 5428 + * - %NULL if the BW limit check failed at the root port 5429 + * with all the ports downstream of the root port passing 5430 + * the BW limit check. 5431 + * - %-EINVAL, if the new state is invalid, because the root port has 5432 + * too many payloads. 
5433 + */ 5434 + int drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state, 5435 + struct drm_dp_mst_topology_mgr *mgr, 5436 + struct drm_dp_mst_topology_state *mst_state, 5437 + struct drm_dp_mst_port **failing_port) 5438 + { 5439 + int ret; 5440 + 5441 + *failing_port = NULL; 5442 + 5443 + if (!mgr->mst_state) 5444 + return 0; 5445 + 5446 + mutex_lock(&mgr->lock); 5447 + ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary, 5448 + mst_state, 5449 + failing_port); 5450 + mutex_unlock(&mgr->lock); 5451 + 5452 + if (ret < 0) 5453 + return ret; 5454 + 5455 + return drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state); 5456 + } 5457 + EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr); 5458 + 5459 + /** 5471 5460 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an 5472 5461 * atomic update is valid 5473 5462 * @state: Pointer to the new &struct drm_dp_mst_topology_state 5474 5463 * 5475 5464 * Checks the given topology state for an atomic update to ensure that it's 5476 - * valid. This includes checking whether there's enough bandwidth to support 5477 - * the new timeslot allocations in the atomic update. 5465 + * valid, calling drm_dp_mst_atomic_check_mgr() for all MST manager in the 5466 + * atomic state. This includes checking whether there's enough bandwidth to 5467 + * support the new timeslot allocations in the atomic update. 5478 5468 * 5479 5469 * Any atomic drivers supporting DP MST must make sure to call this after 5480 5470 * checking the rest of their state in their 5481 5471 * &drm_mode_config_funcs.atomic_check() callback. 
5482 5472 * 5483 5473 * See also: 5474 + * drm_dp_mst_atomic_check_mgr() 5484 5475 * drm_dp_atomic_find_time_slots() 5485 5476 * drm_dp_atomic_release_time_slots() 5486 5477 * ··· 5557 5424 int i, ret = 0; 5558 5425 5559 5426 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { 5560 - if (!mgr->mst_state) 5561 - continue; 5427 + struct drm_dp_mst_port *tmp_port; 5562 5428 5563 - ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state); 5429 + ret = drm_dp_mst_atomic_check_mgr(state, mgr, mst_state, &tmp_port); 5564 5430 if (ret) 5565 5431 break; 5566 - 5567 - mutex_lock(&mgr->lock); 5568 - ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary, 5569 - mst_state); 5570 - mutex_unlock(&mgr->lock); 5571 - if (ret < 0) 5572 - break; 5573 - else 5574 - ret = 0; 5575 5432 } 5576 5433 5577 5434 return ret; ··· 6017 5894 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) 6018 5895 { 6019 5896 struct drm_dp_mst_port *immediate_upstream_port; 5897 + struct drm_dp_aux *immediate_upstream_aux; 6020 5898 struct drm_dp_mst_port *fec_port; 6021 5899 struct drm_dp_desc desc = {}; 6022 5900 u8 endpoint_fec; ··· 6082 5958 * - Port is on primary branch device 6083 5959 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG) 6084 5960 */ 6085 - if (drm_dp_read_desc(port->mgr->aux, &desc, true)) 5961 + if (immediate_upstream_port) 5962 + immediate_upstream_aux = &immediate_upstream_port->aux; 5963 + else 5964 + immediate_upstream_aux = port->mgr->aux; 5965 + 5966 + if (drm_dp_read_desc(immediate_upstream_aux, &desc, true)) 6086 5967 return NULL; 6087 5968 6088 - if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) && 6089 - port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 && 6090 - port->parent == port->mgr->mst_primary) { 5969 + if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) { 6091 5970 u8 dpcd_ext[DP_RECEIVER_CAP_SIZE]; 6092 5971 6093 - if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0) 5972 + if 
(drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0) 6094 5973 return NULL; 6095 5974 6096 - if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) && 5975 + if (dpcd_ext[DP_DPCD_REV] >= DP_DPCD_REV_14 && 5976 + ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) && 6097 5977 ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) 6098 - != DP_DWN_STRM_PORT_TYPE_ANALOG)) 6099 - return port->mgr->aux; 5978 + != DP_DWN_STRM_PORT_TYPE_ANALOG))) 5979 + return immediate_upstream_aux; 6100 5980 } 6101 5981 6102 5982 /*
+1 -1
drivers/gpu/drm/i915/Kconfig
··· 94 94 This option enables capturing the GPU state when a hang is detected. 95 95 This information is vital for triaging hangs and assists in debugging. 96 96 Please report any hang for triaging according to: 97 - https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs 97 + https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html 98 98 99 99 If in doubt, say "Y". 100 100
+97 -83
drivers/gpu/drm/i915/Makefile
··· 47 47 # Please keep these build lists sorted! 48 48 49 49 # core driver code 50 - i915-y += i915_driver.o \ 51 - i915_drm_client.o \ 52 - i915_config.o \ 53 - i915_getparam.o \ 54 - i915_ioctl.o \ 55 - i915_irq.o \ 56 - i915_mitigations.o \ 57 - i915_module.o \ 58 - i915_params.o \ 59 - i915_pci.o \ 60 - i915_scatterlist.o \ 61 - i915_suspend.o \ 62 - i915_switcheroo.o \ 63 - i915_sysfs.o \ 64 - i915_utils.o \ 65 - intel_clock_gating.o \ 66 - intel_device_info.o \ 67 - intel_memory_region.o \ 68 - intel_pcode.o \ 69 - intel_region_ttm.o \ 70 - intel_runtime_pm.o \ 71 - intel_sbi.o \ 72 - intel_step.o \ 73 - intel_uncore.o \ 74 - intel_wakeref.o \ 75 - vlv_sideband.o \ 76 - vlv_suspend.o 50 + i915-y += \ 51 + i915_config.o \ 52 + i915_driver.o \ 53 + i915_drm_client.o \ 54 + i915_getparam.o \ 55 + i915_ioctl.o \ 56 + i915_irq.o \ 57 + i915_mitigations.o \ 58 + i915_module.o \ 59 + i915_params.o \ 60 + i915_pci.o \ 61 + i915_scatterlist.o \ 62 + i915_suspend.o \ 63 + i915_switcheroo.o \ 64 + i915_sysfs.o \ 65 + i915_utils.o \ 66 + intel_clock_gating.o \ 67 + intel_device_info.o \ 68 + intel_memory_region.o \ 69 + intel_pcode.o \ 70 + intel_region_ttm.o \ 71 + intel_runtime_pm.o \ 72 + intel_sbi.o \ 73 + intel_step.o \ 74 + intel_uncore.o \ 75 + intel_wakeref.o \ 76 + vlv_sideband.o \ 77 + vlv_suspend.o 77 78 78 79 # core peripheral code 79 80 i915-y += \ ··· 91 90 i915_syncmap.o \ 92 91 i915_user_extensions.o 93 92 94 - i915-$(CONFIG_COMPAT) += i915_ioc32.o 93 + i915-$(CONFIG_COMPAT) += \ 94 + i915_ioc32.o 95 95 i915-$(CONFIG_DEBUG_FS) += \ 96 96 i915_debugfs.o \ 97 - i915_debugfs_params.o \ 98 - display/intel_display_debugfs.o \ 99 - display/intel_pipe_crc.o 100 - i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o 97 + i915_debugfs_params.o 98 + i915-$(CONFIG_PERF_EVENTS) += \ 99 + i915_pmu.o 101 100 102 101 # "Graphics Technology" (aka we talk to the gpu) 103 102 gt-y += \ ··· 154 153 gt/sysfs_engines.o 155 154 156 155 # x86 intel-gtt module support 157 - 
gt-$(CONFIG_X86) += gt/intel_ggtt_gmch.o 156 + gt-$(CONFIG_X86) += \ 157 + gt/intel_ggtt_gmch.o 158 158 # autogenerated null render state 159 159 gt-y += \ 160 160 gt/gen6_renderstate.o \ ··· 174 172 gem/i915_gem_domain.o \ 175 173 gem/i915_gem_execbuffer.o \ 176 174 gem/i915_gem_internal.o \ 177 - gem/i915_gem_object.o \ 178 175 gem/i915_gem_lmem.o \ 179 176 gem/i915_gem_mman.o \ 177 + gem/i915_gem_object.o \ 180 178 gem/i915_gem_pages.o \ 181 179 gem/i915_gem_phys.o \ 182 180 gem/i915_gem_pm.o \ ··· 193 191 gem/i915_gem_wait.o \ 194 192 gem/i915_gemfs.o 195 193 i915-y += \ 196 - $(gem-y) \ 197 - i915_active.o \ 198 - i915_cmd_parser.o \ 199 - i915_deps.o \ 200 - i915_gem_evict.o \ 201 - i915_gem_gtt.o \ 202 - i915_gem_ww.o \ 203 - i915_gem.o \ 204 - i915_query.o \ 205 - i915_request.o \ 206 - i915_scheduler.o \ 207 - i915_trace_points.o \ 208 - i915_ttm_buddy_manager.o \ 209 - i915_vma.o \ 210 - i915_vma_resource.o 194 + $(gem-y) \ 195 + i915_active.o \ 196 + i915_cmd_parser.o \ 197 + i915_deps.o \ 198 + i915_gem.o \ 199 + i915_gem_evict.o \ 200 + i915_gem_gtt.o \ 201 + i915_gem_ww.o \ 202 + i915_query.o \ 203 + i915_request.o \ 204 + i915_scheduler.o \ 205 + i915_trace_points.o \ 206 + i915_ttm_buddy_manager.o \ 207 + i915_vma.o \ 208 + i915_vma_resource.o 211 209 212 210 # general-purpose microcontroller (GuC) support 213 211 i915-y += \ 214 - gt/uc/intel_gsc_fw.o \ 215 - gt/uc/intel_gsc_proxy.o \ 216 - gt/uc/intel_gsc_uc.o \ 217 - gt/uc/intel_gsc_uc_debugfs.o \ 218 - gt/uc/intel_gsc_uc_heci_cmd_submit.o \ 219 - gt/uc/intel_guc.o \ 220 - gt/uc/intel_guc_ads.o \ 221 - gt/uc/intel_guc_capture.o \ 222 - gt/uc/intel_guc_ct.o \ 223 - gt/uc/intel_guc_debugfs.o \ 224 - gt/uc/intel_guc_fw.o \ 225 - gt/uc/intel_guc_hwconfig.o \ 226 - gt/uc/intel_guc_log.o \ 227 - gt/uc/intel_guc_log_debugfs.o \ 228 - gt/uc/intel_guc_rc.o \ 229 - gt/uc/intel_guc_slpc.o \ 230 - gt/uc/intel_guc_submission.o \ 231 - gt/uc/intel_huc.o \ 232 - gt/uc/intel_huc_debugfs.o \ 233 - 
gt/uc/intel_huc_fw.o \ 234 - gt/uc/intel_uc.o \ 235 - gt/uc/intel_uc_debugfs.o \ 236 - gt/uc/intel_uc_fw.o 212 + gt/uc/intel_gsc_fw.o \ 213 + gt/uc/intel_gsc_proxy.o \ 214 + gt/uc/intel_gsc_uc.o \ 215 + gt/uc/intel_gsc_uc_debugfs.o \ 216 + gt/uc/intel_gsc_uc_heci_cmd_submit.o\ 217 + gt/uc/intel_guc.o \ 218 + gt/uc/intel_guc_ads.o \ 219 + gt/uc/intel_guc_capture.o \ 220 + gt/uc/intel_guc_ct.o \ 221 + gt/uc/intel_guc_debugfs.o \ 222 + gt/uc/intel_guc_fw.o \ 223 + gt/uc/intel_guc_hwconfig.o \ 224 + gt/uc/intel_guc_log.o \ 225 + gt/uc/intel_guc_log_debugfs.o \ 226 + gt/uc/intel_guc_rc.o \ 227 + gt/uc/intel_guc_slpc.o \ 228 + gt/uc/intel_guc_submission.o \ 229 + gt/uc/intel_huc.o \ 230 + gt/uc/intel_huc_debugfs.o \ 231 + gt/uc/intel_huc_fw.o \ 232 + gt/uc/intel_uc.o \ 233 + gt/uc/intel_uc_debugfs.o \ 234 + gt/uc/intel_uc_fw.o 237 235 238 236 # graphics system controller (GSC) support 239 - i915-y += gt/intel_gsc.o 237 + i915-y += \ 238 + gt/intel_gsc.o 240 239 241 240 # graphics hardware monitoring (HWMON) support 242 - i915-$(CONFIG_HWMON) += i915_hwmon.o 241 + i915-$(CONFIG_HWMON) += \ 242 + i915_hwmon.o 243 243 244 244 # modesetting core code 245 245 i915-y += \ 246 246 display/hsw_ips.o \ 247 + display/i9xx_plane.o \ 248 + display/i9xx_wm.o \ 247 249 display/intel_atomic.o \ 248 250 display/intel_atomic_plane.o \ 249 251 display/intel_audio.o \ ··· 263 257 display/intel_display.o \ 264 258 display/intel_display_driver.o \ 265 259 display/intel_display_irq.o \ 260 + display/intel_display_params.o \ 266 261 display/intel_display_power.o \ 267 262 display/intel_display_power_map.o \ 268 263 display/intel_display_power_well.o \ ··· 275 268 display/intel_dpll.o \ 276 269 display/intel_dpll_mgr.o \ 277 270 display/intel_dpt.o \ 271 + display/intel_dpt_common.o \ 278 272 display/intel_drrs.o \ 279 273 display/intel_dsb.o \ 274 + display/intel_dsb_buffer.o \ 280 275 display/intel_fb.o \ 281 276 display/intel_fb_pin.o \ 282 277 display/intel_fbc.o \ ··· 296 287 
display/intel_load_detect.o \ 297 288 display/intel_lpe_audio.o \ 298 289 display/intel_modeset_lock.o \ 299 - display/intel_modeset_verify.o \ 300 290 display/intel_modeset_setup.o \ 291 + display/intel_modeset_verify.o \ 301 292 display/intel_overlay.o \ 302 293 display/intel_pch_display.o \ 303 294 display/intel_pch_refclk.o \ ··· 311 302 display/intel_vblank.o \ 312 303 display/intel_vga.o \ 313 304 display/intel_wm.o \ 314 - display/i9xx_plane.o \ 315 - display/i9xx_wm.o \ 316 305 display/skl_scaler.o \ 317 306 display/skl_universal_plane.o \ 318 307 display/skl_watermark.o ··· 319 312 display/intel_opregion.o 320 313 i915-$(CONFIG_DRM_FBDEV_EMULATION) += \ 321 314 display/intel_fbdev.o 315 + i915-$(CONFIG_DEBUG_FS) += \ 316 + display/intel_display_debugfs.o \ 317 + display/intel_display_debugfs_params.o \ 318 + display/intel_pipe_crc.o 322 319 323 320 # modesetting output/encoder code 324 321 i915-y += \ ··· 368 357 display/vlv_dsi.o \ 369 358 display/vlv_dsi_pll.o 370 359 371 - i915-y += i915_perf.o 360 + i915-y += \ 361 + i915_perf.o 372 362 373 363 # Protected execution platform (PXP) support. 
Base support is required for HuC 374 364 i915-y += \ 375 365 pxp/intel_pxp.o \ 376 - pxp/intel_pxp_tee.o \ 377 - pxp/intel_pxp_huc.o 366 + pxp/intel_pxp_huc.o \ 367 + pxp/intel_pxp_tee.o 378 368 379 369 i915-$(CONFIG_DRM_I915_PXP) += \ 380 370 pxp/intel_pxp_cmd.o \ ··· 386 374 pxp/intel_pxp_session.o 387 375 388 376 # Post-mortem debug and GPU hang state capture 389 - i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o 377 + i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += \ 378 + i915_gpu_error.o 390 379 i915-$(CONFIG_DRM_I915_SELFTEST) += \ 391 380 gem/selftests/i915_gem_client_blt.o \ 392 381 gem/selftests/igt_gem_utils.o \ 393 - selftests/intel_scheduler_helpers.o \ 394 382 selftests/i915_random.o \ 395 383 selftests/i915_selftest.o \ 396 384 selftests/igt_atomic.o \ ··· 399 387 selftests/igt_mmap.o \ 400 388 selftests/igt_reset.o \ 401 389 selftests/igt_spinner.o \ 390 + selftests/intel_scheduler_helpers.o \ 402 391 selftests/librapl.o 403 392 404 393 # virtual gpu code 405 - i915-y += i915_vgpu.o 394 + i915-y += \ 395 + i915_vgpu.o 406 396 407 397 i915-$(CONFIG_DRM_I915_GVT) += \ 408 398 intel_gvt.o \
+39 -7
drivers/gpu/drm/i915/display/g4x_dp.c
··· 432 432 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 433 433 intel_de_posting_read(dev_priv, intel_dp->output_reg); 434 434 435 - intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE); 435 + intel_dp->DP &= ~DP_PORT_EN; 436 436 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 437 437 intel_de_posting_read(dev_priv, intel_dp->output_reg); 438 438 ··· 475 475 } 476 476 } 477 477 478 + static void g4x_dp_audio_enable(struct intel_encoder *encoder, 479 + const struct intel_crtc_state *crtc_state, 480 + const struct drm_connector_state *conn_state) 481 + { 482 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 483 + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 484 + 485 + if (!crtc_state->has_audio) 486 + return; 487 + 488 + /* Enable audio presence detect */ 489 + intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 490 + intel_de_write(i915, intel_dp->output_reg, intel_dp->DP); 491 + 492 + intel_audio_codec_enable(encoder, crtc_state, conn_state); 493 + } 494 + 495 + static void g4x_dp_audio_disable(struct intel_encoder *encoder, 496 + const struct intel_crtc_state *old_crtc_state, 497 + const struct drm_connector_state *old_conn_state) 498 + { 499 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 500 + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 501 + 502 + if (!old_crtc_state->has_audio) 503 + return; 504 + 505 + intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); 506 + 507 + /* Disable audio presence detect */ 508 + intel_dp->DP &= ~DP_AUDIO_OUTPUT_ENABLE; 509 + intel_de_write(i915, intel_dp->output_reg, intel_dp->DP); 510 + } 511 + 478 512 static void intel_disable_dp(struct intel_atomic_state *state, 479 513 struct intel_encoder *encoder, 480 514 const struct intel_crtc_state *old_crtc_state, ··· 517 483 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 518 484 519 485 intel_dp->link_trained = false; 520 - 521 - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); 
522 486 523 487 /* 524 488 * Make sure the panel is off before trying to change the mode. ··· 663 631 * fail when the power sequencer is freshly used for this port. 664 632 */ 665 633 intel_dp->DP |= DP_PORT_EN; 666 - if (crtc_state->has_audio) 667 - intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 668 634 669 635 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP); 670 636 intel_de_posting_read(dev_priv, intel_dp->output_reg); ··· 716 686 const struct drm_connector_state *conn_state) 717 687 { 718 688 intel_enable_dp(state, encoder, pipe_config, conn_state); 719 - intel_audio_codec_enable(encoder, pipe_config, conn_state); 720 689 intel_edp_backlight_on(pipe_config, conn_state); 690 + encoder->audio_enable(encoder, pipe_config, conn_state); 721 691 } 722 692 723 693 static void vlv_enable_dp(struct intel_atomic_state *state, ··· 725 695 const struct intel_crtc_state *pipe_config, 726 696 const struct drm_connector_state *conn_state) 727 697 { 728 - intel_audio_codec_enable(encoder, pipe_config, conn_state); 729 698 intel_edp_backlight_on(pipe_config, conn_state); 699 + encoder->audio_enable(encoder, pipe_config, conn_state); 730 700 } 731 701 732 702 static void g4x_pre_enable_dp(struct intel_atomic_state *state, ··· 1355 1325 intel_encoder->disable = g4x_disable_dp; 1356 1326 intel_encoder->post_disable = g4x_post_disable_dp; 1357 1327 } 1328 + intel_encoder->audio_enable = g4x_dp_audio_enable; 1329 + intel_encoder->audio_disable = g4x_dp_audio_disable; 1358 1330 1359 1331 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || 1360 1332 (HAS_PCH_CPT(dev_priv) && port != PORT_A))
+37 -29
drivers/gpu/drm/i915/display/g4x_hdmi.c
··· 228 228 temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); 229 229 230 230 temp |= SDVO_ENABLE; 231 - if (pipe_config->has_audio) 232 - temp |= HDMI_AUDIO_ENABLE; 233 231 234 232 intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); 235 233 intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); 234 + } 235 + 236 + static void g4x_hdmi_audio_enable(struct intel_encoder *encoder, 237 + const struct intel_crtc_state *crtc_state, 238 + const struct drm_connector_state *conn_state) 239 + { 240 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 241 + struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder); 242 + 243 + if (!crtc_state->has_audio) 244 + return; 245 + 246 + drm_WARN_ON(&i915->drm, !crtc_state->has_hdmi_sink); 247 + 248 + /* Enable audio presence detect */ 249 + intel_de_rmw(i915, hdmi->hdmi_reg, 0, HDMI_AUDIO_ENABLE); 250 + 251 + intel_audio_codec_enable(encoder, crtc_state, conn_state); 252 + } 253 + 254 + static void g4x_hdmi_audio_disable(struct intel_encoder *encoder, 255 + const struct intel_crtc_state *old_crtc_state, 256 + const struct drm_connector_state *old_conn_state) 257 + { 258 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 259 + struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder); 260 + 261 + if (!old_crtc_state->has_audio) 262 + return; 263 + 264 + intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); 265 + 266 + /* Disable audio presence detect */ 267 + intel_de_rmw(i915, hdmi->hdmi_reg, HDMI_AUDIO_ENABLE, 0); 236 268 } 237 269 238 270 static void g4x_enable_hdmi(struct intel_atomic_state *state, ··· 272 240 const struct intel_crtc_state *pipe_config, 273 241 const struct drm_connector_state *conn_state) 274 242 { 275 - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 276 - 277 243 g4x_hdmi_enable_port(encoder, pipe_config); 278 - 279 - drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && 280 - !pipe_config->has_hdmi_sink); 281 - intel_audio_codec_enable(encoder, pipe_config, 
conn_state); 282 244 } 283 245 284 246 static void ibx_enable_hdmi(struct intel_atomic_state *state, ··· 288 262 temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); 289 263 290 264 temp |= SDVO_ENABLE; 291 - if (pipe_config->has_audio) 292 - temp |= HDMI_AUDIO_ENABLE; 293 265 294 266 /* 295 267 * HW workaround, need to write this twice for issue ··· 320 296 intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); 321 297 intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); 322 298 } 323 - 324 - drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && 325 - !pipe_config->has_hdmi_sink); 326 - intel_audio_codec_enable(encoder, pipe_config, conn_state); 327 299 } 328 300 329 301 static void cpt_enable_hdmi(struct intel_atomic_state *state, ··· 337 317 temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); 338 318 339 319 temp |= SDVO_ENABLE; 340 - if (pipe_config->has_audio) 341 - temp |= HDMI_AUDIO_ENABLE; 342 320 343 321 /* 344 322 * WaEnableHDMI8bpcBefore12bpc:snb,ivb ··· 369 351 intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe), 370 352 TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0); 371 353 } 372 - 373 - drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && 374 - !pipe_config->has_hdmi_sink); 375 - intel_audio_codec_enable(encoder, pipe_config, conn_state); 376 354 } 377 355 378 356 static void vlv_enable_hdmi(struct intel_atomic_state *state, ··· 376 362 const struct intel_crtc_state *pipe_config, 377 363 const struct drm_connector_state *conn_state) 378 364 { 379 - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 380 - 381 - drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && 382 - !pipe_config->has_hdmi_sink); 383 - intel_audio_codec_enable(encoder, pipe_config, conn_state); 384 365 } 385 366 386 367 static void intel_disable_hdmi(struct intel_atomic_state *state, ··· 393 384 394 385 temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg); 395 386 396 - temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE); 387 + temp &= ~SDVO_ENABLE; 397 388 intel_de_write(dev_priv, 
intel_hdmi->hdmi_reg, temp); 398 389 intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); 399 390 ··· 442 433 const struct intel_crtc_state *old_crtc_state, 443 434 const struct drm_connector_state *old_conn_state) 444 435 { 445 - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); 446 - 447 436 intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state); 448 437 } 449 438 ··· 450 443 const struct intel_crtc_state *old_crtc_state, 451 444 const struct drm_connector_state *old_conn_state) 452 445 { 453 - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); 454 446 } 455 447 456 448 static void pch_post_disable_hdmi(struct intel_atomic_state *state, ··· 756 750 else 757 751 intel_encoder->enable = g4x_enable_hdmi; 758 752 } 753 + intel_encoder->audio_enable = g4x_hdmi_audio_enable; 754 + intel_encoder->audio_disable = g4x_hdmi_audio_disable; 759 755 intel_encoder->shutdown = intel_hdmi_encoder_shutdown; 760 756 761 757 intel_encoder->type = INTEL_OUTPUT_HDMI;
+2 -2
drivers/gpu/drm/i915/display/hsw_ips.c
··· 193 193 if (!hsw_crtc_supports_ips(crtc)) 194 194 return false; 195 195 196 - if (!i915->params.enable_ips) 196 + if (!i915->display.params.enable_ips) 197 197 return false; 198 198 199 199 if (crtc_state->pipe_bpp > 24) ··· 329 329 wakeref = intel_runtime_pm_get(&i915->runtime_pm); 330 330 331 331 seq_printf(m, "Enabled by kernel parameter: %s\n", 332 - str_yes_no(i915->params.enable_ips)); 332 + str_yes_no(i915->display.params.enable_ips)); 333 333 334 334 if (DISPLAY_VER(i915) >= 8) { 335 335 seq_puts(m, "Currently: unknown\n");
+1 -1
drivers/gpu/drm/i915/display/i9xx_wm.c
··· 2993 2993 2994 2994 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ 2995 2995 if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) && 2996 - dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) { 2996 + dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) { 2997 2997 for (level = 2; level < num_levels; level++) { 2998 2998 struct intel_wm_level *wm = &merged->wm[level]; 2999 2999
+5 -5
drivers/gpu/drm/i915/display/icl_dsi.c
··· 330 330 int bpp; 331 331 332 332 if (crtc_state->dsc.compression_enable) 333 - bpp = crtc_state->dsc.compressed_bpp; 333 + bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16); 334 334 else 335 335 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); 336 336 ··· 860 860 * compressed and non-compressed bpp. 861 861 */ 862 862 if (crtc_state->dsc.compression_enable) { 863 - mul = crtc_state->dsc.compressed_bpp; 863 + mul = to_bpp_int(crtc_state->dsc.compressed_bpp_x16); 864 864 div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); 865 865 } 866 866 ··· 884 884 int bpp, line_time_us, byte_clk_period_ns; 885 885 886 886 if (crtc_state->dsc.compression_enable) 887 - bpp = crtc_state->dsc.compressed_bpp; 887 + bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16); 888 888 else 889 889 bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); 890 890 ··· 1451 1451 struct drm_display_mode *adjusted_mode = 1452 1452 &pipe_config->hw.adjusted_mode; 1453 1453 1454 - if (pipe_config->dsc.compressed_bpp) { 1455 - int div = pipe_config->dsc.compressed_bpp; 1454 + if (pipe_config->dsc.compressed_bpp_x16) { 1455 + int div = to_bpp_int(pipe_config->dsc.compressed_bpp_x16); 1456 1456 int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); 1457 1457 1458 1458 adjusted_mode->crtc_htotal =
-3
drivers/gpu/drm/i915/display/intel_atomic.c
··· 331 331 332 332 drm_atomic_state_default_release(&state->base); 333 333 kfree(state->global_objs); 334 - 335 - i915_sw_fence_fini(&state->commit_ready); 336 - 337 334 kfree(state); 338 335 } 339 336
+47 -36
drivers/gpu/drm/i915/display/intel_atomic_plane.c
··· 31 31 * prepare/check/commit/cleanup steps. 32 32 */ 33 33 34 + #include <linux/dma-fence-chain.h> 35 + 34 36 #include <drm/drm_atomic_helper.h> 37 + #include <drm/drm_gem_atomic_helper.h> 35 38 #include <drm/drm_blend.h> 36 39 #include <drm/drm_fourcc.h> 37 40 ··· 1015 1012 return 0; 1016 1013 } 1017 1014 1015 + static int add_dma_resv_fences(struct dma_resv *resv, 1016 + struct drm_plane_state *new_plane_state) 1017 + { 1018 + struct dma_fence *fence = dma_fence_get(new_plane_state->fence); 1019 + struct dma_fence *new; 1020 + int ret; 1021 + 1022 + ret = dma_resv_get_singleton(resv, dma_resv_usage_rw(false), &new); 1023 + if (ret) 1024 + goto error; 1025 + 1026 + if (new && fence) { 1027 + struct dma_fence_chain *chain = dma_fence_chain_alloc(); 1028 + 1029 + if (!chain) { 1030 + ret = -ENOMEM; 1031 + goto error; 1032 + } 1033 + 1034 + dma_fence_chain_init(chain, fence, new, 1); 1035 + fence = &chain->base; 1036 + 1037 + } else if (new) { 1038 + fence = new; 1039 + } 1040 + 1041 + dma_fence_put(new_plane_state->fence); 1042 + new_plane_state->fence = fence; 1043 + return 0; 1044 + 1045 + error: 1046 + dma_fence_put(fence); 1047 + return ret; 1048 + } 1049 + 1018 1050 /** 1019 1051 * intel_prepare_plane_fb - Prepare fb for usage on plane 1020 1052 * @_plane: drm plane to prepare for ··· 1073 1035 struct intel_atomic_state *state = 1074 1036 to_intel_atomic_state(new_plane_state->uapi.state); 1075 1037 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1076 - const struct intel_plane_state *old_plane_state = 1038 + struct intel_plane_state *old_plane_state = 1077 1039 intel_atomic_get_old_plane_state(state, plane); 1078 1040 struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb); 1079 1041 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb); ··· 1096 1058 * can safely continue. 
1097 1059 */ 1098 1060 if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) { 1099 - ret = i915_sw_fence_await_reservation(&state->commit_ready, 1100 - old_obj->base.resv, 1101 - false, 0, 1102 - GFP_KERNEL); 1061 + ret = add_dma_resv_fences(intel_bo_to_drm_bo(old_obj)->resv, 1062 + &new_plane_state->uapi); 1103 1063 if (ret < 0) 1104 1064 return ret; 1105 1065 } 1106 1066 } 1107 1067 1108 - if (new_plane_state->uapi.fence) { /* explicit fencing */ 1109 - i915_gem_fence_wait_priority(new_plane_state->uapi.fence, 1110 - &attr); 1111 - ret = i915_sw_fence_await_dma_fence(&state->commit_ready, 1112 - new_plane_state->uapi.fence, 1113 - i915_fence_timeout(dev_priv), 1114 - GFP_KERNEL); 1115 - if (ret < 0) 1116 - return ret; 1117 - } 1118 - 1119 1068 if (!obj) 1120 1069 return 0; 1121 - 1122 1070 1123 1071 ret = intel_plane_pin_fb(new_plane_state); 1124 1072 if (ret) 1125 1073 return ret; 1126 1074 1127 - i915_gem_object_wait_priority(obj, 0, &attr); 1075 + ret = drm_gem_plane_helper_prepare_fb(&plane->base, &new_plane_state->uapi); 1076 + if (ret < 0) 1077 + goto unpin_fb; 1128 1078 1129 - if (!new_plane_state->uapi.fence) { /* implicit fencing */ 1130 - struct dma_resv_iter cursor; 1131 - struct dma_fence *fence; 1079 + if (new_plane_state->uapi.fence) { 1080 + i915_gem_fence_wait_priority(new_plane_state->uapi.fence, 1081 + &attr); 1132 1082 1133 - ret = i915_sw_fence_await_reservation(&state->commit_ready, 1134 - obj->base.resv, false, 1135 - i915_fence_timeout(dev_priv), 1136 - GFP_KERNEL); 1137 - if (ret < 0) 1138 - goto unpin_fb; 1139 - 1140 - dma_resv_iter_begin(&cursor, obj->base.resv, 1141 - DMA_RESV_USAGE_WRITE); 1142 - dma_resv_for_each_fence_unlocked(&cursor, fence) { 1143 - intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, 1144 - fence); 1145 - } 1146 - dma_resv_iter_end(&cursor); 1147 - } else { 1148 1083 intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, 1149 1084 new_plane_state->uapi.fence); 1150 1085 }
+8 -8
drivers/gpu/drm/i915/display/intel_audio.c
··· 522 522 unsigned int link_clks_available, link_clks_required; 523 523 unsigned int tu_data, tu_line, link_clks_active; 524 524 unsigned int h_active, h_total, hblank_delta, pixel_clk; 525 - unsigned int fec_coeff, cdclk, vdsc_bpp; 525 + unsigned int fec_coeff, cdclk, vdsc_bppx16; 526 526 unsigned int link_clk, lanes; 527 527 unsigned int hblank_rise; 528 528 529 529 h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay; 530 530 h_total = crtc_state->hw.adjusted_mode.crtc_htotal; 531 531 pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock; 532 - vdsc_bpp = crtc_state->dsc.compressed_bpp; 532 + vdsc_bppx16 = crtc_state->dsc.compressed_bpp_x16; 533 533 cdclk = i915->display.cdclk.hw.cdclk; 534 534 /* fec= 0.972261, using rounding multiplier of 1000000 */ 535 535 fec_coeff = 972261; 536 536 link_clk = crtc_state->port_clock; 537 537 lanes = crtc_state->lane_count; 538 538 539 - drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :" 540 - "lanes = %u vdsc_bpp = %u cdclk = %u\n", 541 - h_active, link_clk, lanes, vdsc_bpp, cdclk); 539 + drm_dbg_kms(&i915->drm, 540 + "h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " BPP_X16_FMT " cdclk = %u\n", 541 + h_active, link_clk, lanes, BPP_X16_ARGS(vdsc_bppx16), cdclk); 542 542 543 - if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bpp || !cdclk)) 543 + if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bppx16 || !cdclk)) 544 544 return 0; 545 545 546 546 link_clks_available = (h_total - h_active) * link_clk / pixel_clk - 28; ··· 552 552 hblank_delta = DIV64_U64_ROUND_UP(mul_u32_u32(5 * (link_clk + cdclk), pixel_clk), 553 553 mul_u32_u32(link_clk, cdclk)); 554 554 555 - tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bpp * 8, 1000000), 556 - mul_u32_u32(link_clk * lanes, fec_coeff)); 555 + tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bppx16 * 8, 1000000), 556 + mul_u32_u32(link_clk * lanes * 16, fec_coeff)); 557 557 tu_line = div64_u64(h_active * mul_u32_u32(link_clk, fec_coeff), 558 558 mul_u32_u32(64 * 
pixel_clk, 1000000)); 559 559 link_clks_active = (tu_line - 1) * 64 + tu_data;
+5 -4
drivers/gpu/drm/i915/display/intel_backlight.c
··· 88 88 89 89 drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0); 90 90 91 - if (i915->params.invert_brightness < 0) 91 + if (i915->display.params.invert_brightness < 0) 92 92 return val; 93 93 94 - if (i915->params.invert_brightness > 0 || 94 + if (i915->display.params.invert_brightness > 0 || 95 95 intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) { 96 96 return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min; 97 97 } ··· 132 132 drm_WARN_ON_ONCE(&i915->drm, 133 133 panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0); 134 134 135 - if (i915->params.invert_brightness > 0 || 136 - (i915->params.invert_brightness == 0 && intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS))) 135 + if (i915->display.params.invert_brightness > 0 || 136 + (i915->display.params.invert_brightness == 0 && 137 + intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS))) 137 138 val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min); 138 139 139 140 return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
+27 -5
drivers/gpu/drm/i915/display/intel_bios.c
··· 1116 1116 struct drm_display_mode *panel_fixed_mode; 1117 1117 int index; 1118 1118 1119 - index = i915->params.vbt_sdvo_panel_type; 1119 + index = i915->display.params.vbt_sdvo_panel_type; 1120 1120 if (index == -2) { 1121 1121 drm_dbg_kms(&i915->drm, 1122 1122 "Ignore SDVO panel mode from BIOS VBT tables.\n"); ··· 1514 1514 u8 vswing; 1515 1515 1516 1516 /* Don't read from VBT if module parameter has valid value*/ 1517 - if (i915->params.edp_vswing) { 1517 + if (i915->display.params.edp_vswing) { 1518 1518 panel->vbt.edp.low_vswing = 1519 - i915->params.edp_vswing == 1; 1519 + i915->display.params.edp_vswing == 1; 1520 1520 } else { 1521 1521 vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; 1522 1522 panel->vbt.edp.low_vswing = vswing == 0; ··· 2473 2473 devdata->child.device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT; 2474 2474 } 2475 2475 2476 + static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata, 2477 + enum port port) 2478 + { 2479 + struct drm_i915_private *i915 = devdata->i915; 2480 + 2481 + if (!intel_bios_encoder_supports_dvi(devdata)) 2482 + return; 2483 + 2484 + /* 2485 + * Some BDW machines (eg. HP Pavilion 15-ab) shipped 2486 + * with a HSW VBT where the level shifter value goes 2487 + * up to 11, whereas the BDW max is 9. 
2488 + */ 2489 + if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) { 2490 + drm_dbg_kms(&i915->drm, "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n", 2491 + port_name(port), devdata->child.hdmi_level_shifter_value, 9); 2492 + 2493 + devdata->child.hdmi_level_shifter_value = 9; 2494 + } 2495 + } 2496 + 2476 2497 static bool 2477 2498 intel_bios_encoder_supports_crt(const struct intel_bios_encoder_data *devdata) 2478 2499 { ··· 2673 2652 } 2674 2653 2675 2654 sanitize_device_type(devdata, port); 2655 + sanitize_hdmi_level_shift(devdata, port); 2676 2656 } 2677 2657 2678 2658 static bool has_ddi_port_info(struct drm_i915_private *i915) ··· 3414 3392 3415 3393 crtc_state->pipe_bpp = bpc * 3; 3416 3394 3417 - crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp, 3418 - VBT_DSC_MAX_BPP(dsc->max_bpp)); 3395 + crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(min(crtc_state->pipe_bpp, 3396 + VBT_DSC_MAX_BPP(dsc->max_bpp))); 3419 3397 3420 3398 /* 3421 3399 * FIXME: This is ugly, and slice count should take DSC engine
+3 -2
drivers/gpu/drm/i915/display/intel_cdclk.c
··· 2598 2598 * => CDCLK >= compressed_bpp * Pixel clock / 2 * Bigjoiner Interface bits 2599 2599 */ 2600 2600 int bigjoiner_interface_bits = DISPLAY_VER(i915) > 13 ? 36 : 24; 2601 - int min_cdclk_bj = (crtc_state->dsc.compressed_bpp * pixel_clock) / 2602 - (2 * bigjoiner_interface_bits); 2601 + int min_cdclk_bj = 2602 + (to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) * 2603 + pixel_clock) / (2 * bigjoiner_interface_bits); 2603 2604 2604 2605 min_cdclk = max(min_cdclk, min_cdclk_bj); 2605 2606 }
+35 -35
drivers/gpu/drm/i915/display/intel_color.c
··· 785 785 /* convert hw value with given bit_precision to lut property val */ 786 786 static u32 intel_color_lut_pack(u32 val, int bit_precision) 787 787 { 788 - u32 max = 0xffff >> (16 - bit_precision); 789 - 790 - val = clamp_val(val, 0, max); 791 - 792 - if (bit_precision < 16) 793 - val <<= 16 - bit_precision; 794 - 795 - return val; 788 + if (bit_precision > 16) 789 + return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(val, (1 << 16) - 1), 790 + (1 << bit_precision) - 1); 791 + else 792 + return DIV_ROUND_CLOSEST(val * ((1 << 16) - 1), 793 + (1 << bit_precision) - 1); 796 794 } 797 795 798 796 static u32 i9xx_lut_8(const struct drm_color_lut *color) ··· 909 911 static u16 i965_lut_11p6_max_pack(u32 val) 910 912 { 911 913 /* PIPEGCMAX is 11.6, clamp to 10.6 */ 912 - return clamp_val(val, 0, 0xffff); 914 + return min(val, 0xffffu); 913 915 } 914 916 915 917 static u32 ilk_lut_10(const struct drm_color_lut *color) ··· 1526 1528 return 35; 1527 1529 } 1528 1530 1529 - /* 1530 - * change_lut_val_precision: helper function to upscale or downscale lut values. 1531 - * Parameters 'to' and 'from' needs to be less than 32. This should be sufficient 1532 - * as currently there are no lut values exceeding 32 bit. 
1533 - */ 1534 - static u32 change_lut_val_precision(u32 lut_val, int to, int from) 1531 + static u32 glk_degamma_lut(const struct drm_color_lut *color) 1535 1532 { 1536 - return mul_u32_u32(lut_val, (1 << to)) / (1 << from); 1533 + return color->green; 1534 + } 1535 + 1536 + static void glk_degamma_lut_pack(struct drm_color_lut *entry, u32 val) 1537 + { 1538 + /* PRE_CSC_GAMC_DATA is 3.16, clamp to 0.16 */ 1539 + entry->red = entry->green = entry->blue = min(val, 0xffffu); 1540 + } 1541 + 1542 + static u32 mtl_degamma_lut(const struct drm_color_lut *color) 1543 + { 1544 + return drm_color_lut_extract(color->green, 24); 1545 + } 1546 + 1547 + static void mtl_degamma_lut_pack(struct drm_color_lut *entry, u32 val) 1548 + { 1549 + /* PRE_CSC_GAMC_DATA is 3.24, clamp to 0.16 */ 1550 + entry->red = entry->green = entry->blue = 1551 + intel_color_lut_pack(min(val, 0xffffffu), 24); 1537 1552 } 1538 1553 1539 1554 static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state, ··· 1583 1572 * ToDo: Extend to max 7.0. Enable 32 bit input value 1584 1573 * as compared to just 16 to achieve this. 1585 1574 */ 1586 - u32 lut_val; 1587 - 1588 - if (DISPLAY_VER(i915) >= 14) 1589 - lut_val = change_lut_val_precision(lut[i].green, 24, 16); 1590 - else 1591 - lut_val = lut[i].green; 1592 - 1593 1575 ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1594 - lut_val); 1576 + DISPLAY_VER(i915) >= 14 ? 1577 + mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i])); 1595 1578 } 1596 1579 1597 1580 /* Clamp values > 1.0. */ 1598 1581 while (i++ < glk_degamma_lut_size(i915)) 1599 - ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1 << 16); 1582 + ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1583 + DISPLAY_VER(i915) >= 14 ? 
1584 + 1 << 24 : 1 << 16); 1600 1585 1601 1586 ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0); 1602 1587 } ··· 3579 3572 for (i = 0; i < lut_size; i++) { 3580 3573 u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe)); 3581 3574 3582 - /* 3583 - * For MTL and beyond, convert back the 24 bit lut values 3584 - * read from HW to 16 bit values to maintain parity with 3585 - * userspace values 3586 - */ 3587 3575 if (DISPLAY_VER(dev_priv) >= 14) 3588 - val = change_lut_val_precision(val, 16, 24); 3589 - 3590 - lut[i].red = val; 3591 - lut[i].green = val; 3592 - lut[i].blue = val; 3576 + mtl_degamma_lut_pack(&lut[i], val); 3577 + else 3578 + glk_degamma_lut_pack(&lut[i], val); 3593 3579 } 3594 3580 3595 3581 intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+2 -2
drivers/gpu/drm/i915/display/intel_crt.c
··· 841 841 if (!intel_display_device_enabled(dev_priv)) 842 842 return connector_status_disconnected; 843 843 844 - if (dev_priv->params.load_detect_test) { 844 + if (dev_priv->display.params.load_detect_test) { 845 845 wakeref = intel_display_power_get(dev_priv, 846 846 intel_encoder->power_domain); 847 847 goto load_detect; ··· 901 901 else if (DISPLAY_VER(dev_priv) < 4) 902 902 status = intel_crt_load_detect(crt, 903 903 to_intel_crtc(connector->state->crtc)->pipe); 904 - else if (dev_priv->params.load_detect_test) 904 + else if (dev_priv->display.params.load_detect_test) 905 905 status = connector_status_disconnected; 906 906 else 907 907 status = connector_status_unknown;
+125 -35
drivers/gpu/drm/i915/display/intel_cx0_phy.c
··· 31 31 32 32 bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy) 33 33 { 34 - if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0) && phy < PHY_C) 34 + if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C) 35 35 return true; 36 36 37 37 return false; ··· 206 206 207 207 intel_clear_response_ready_flag(i915, port, lane); 208 208 209 + /* 210 + * FIXME: Workaround to let HW to settle 211 + * down and let the message bus to end up 212 + * in a known state 213 + */ 214 + intel_cx0_bus_reset(i915, port, lane); 215 + 209 216 return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val); 210 217 } 211 218 ··· 291 284 } 292 285 293 286 intel_clear_response_ready_flag(i915, port, lane); 287 + 288 + /* 289 + * FIXME: Workaround to let HW to settle 290 + * down and let the message bus to end up 291 + * in a known state 292 + */ 293 + intel_cx0_bus_reset(i915, port, lane); 294 294 295 295 return 0; 296 296 } ··· 1864 1850 return -EINVAL; 1865 1851 } 1866 1852 1867 - void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, 1868 - struct intel_c10pll_state *pll_state) 1853 + static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, 1854 + struct intel_c10pll_state *pll_state) 1869 1855 { 1870 1856 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1871 1857 u8 lane = INTEL_CX0_LANE0; ··· 2117 2103 return false; 2118 2104 } 2119 2105 2120 - void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, 2121 - struct intel_c20pll_state *pll_state) 2106 + static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, 2107 + struct intel_c20pll_state *pll_state) 2122 2108 { 2123 2109 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2124 2110 bool cntx; ··· 2392 2378 BIT(0), cntx ? 
0 : 1, MB_WRITE_COMMITTED); 2393 2379 } 2394 2380 2395 - int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, 2396 - const struct intel_c10pll_state *pll_state) 2381 + static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, 2382 + const struct intel_c10pll_state *pll_state) 2397 2383 { 2398 2384 unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1; 2399 2385 unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400; ··· 2419 2405 return tmpclk; 2420 2406 } 2421 2407 2422 - int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, 2423 - const struct intel_c20pll_state *pll_state) 2408 + static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, 2409 + const struct intel_c20pll_state *pll_state) 2424 2410 { 2425 2411 unsigned int frac, frac_en, frac_quot, frac_rem, frac_den; 2426 2412 unsigned int multiplier, refclk = 38400; ··· 3017 3003 return ICL_PORT_DPLL_DEFAULT; 3018 3004 } 3019 3005 3020 - void intel_c10pll_state_verify(struct intel_atomic_state *state, 3006 + static void intel_c10pll_state_verify(const struct intel_crtc_state *state, 3007 + struct intel_crtc *crtc, 3008 + struct intel_encoder *encoder, 3009 + struct intel_c10pll_state *mpllb_hw_state) 3010 + { 3011 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 3012 + const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10; 3013 + int i; 3014 + 3015 + for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) { 3016 + u8 expected = mpllb_sw_state->pll[i]; 3017 + 3018 + I915_STATE_WARN(i915, mpllb_hw_state->pll[i] != expected, 3019 + "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)", 3020 + crtc->base.base.id, crtc->base.name, i, 3021 + expected, mpllb_hw_state->pll[i]); 3022 + } 3023 + 3024 + I915_STATE_WARN(i915, mpllb_hw_state->tx != mpllb_sw_state->tx, 3025 + "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)", 3026 + crtc->base.base.id, crtc->base.name, 3027 + 
mpllb_sw_state->tx, mpllb_hw_state->tx); 3028 + 3029 + I915_STATE_WARN(i915, mpllb_hw_state->cmn != mpllb_sw_state->cmn, 3030 + "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)", 3031 + crtc->base.base.id, crtc->base.name, 3032 + mpllb_sw_state->cmn, mpllb_hw_state->cmn); 3033 + } 3034 + 3035 + void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder, 3036 + struct intel_cx0pll_state *pll_state) 3037 + { 3038 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3039 + enum phy phy = intel_port_to_phy(i915, encoder->port); 3040 + 3041 + if (intel_is_c10phy(i915, phy)) 3042 + intel_c10pll_readout_hw_state(encoder, &pll_state->c10); 3043 + else 3044 + intel_c20pll_readout_hw_state(encoder, &pll_state->c20); 3045 + } 3046 + 3047 + int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder, 3048 + const struct intel_cx0pll_state *pll_state) 3049 + { 3050 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3051 + enum phy phy = intel_port_to_phy(i915, encoder->port); 3052 + 3053 + if (intel_is_c10phy(i915, phy)) 3054 + return intel_c10pll_calc_port_clock(encoder, &pll_state->c10); 3055 + 3056 + return intel_c20pll_calc_port_clock(encoder, &pll_state->c20); 3057 + } 3058 + 3059 + static void intel_c20pll_state_verify(const struct intel_crtc_state *state, 3060 + struct intel_crtc *crtc, 3061 + struct intel_encoder *encoder, 3062 + struct intel_c20pll_state *mpll_hw_state) 3063 + { 3064 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 3065 + const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20; 3066 + bool use_mplla; 3067 + int i; 3068 + 3069 + use_mplla = intel_c20_use_mplla(mpll_hw_state->clock); 3070 + if (use_mplla) { 3071 + for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) { 3072 + I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i], 3073 + "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)", 3074 + crtc->base.base.id, 
crtc->base.name, i, 3075 + mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]); 3076 + } 3077 + } else { 3078 + for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) { 3079 + I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i], 3080 + "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)", 3081 + crtc->base.base.id, crtc->base.name, i, 3082 + mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]); 3083 + } 3084 + } 3085 + 3086 + for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) { 3087 + I915_STATE_WARN(i915, mpll_hw_state->tx[i] != mpll_sw_state->tx[i], 3088 + "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)", 3089 + crtc->base.base.id, crtc->base.name, i, 3090 + mpll_sw_state->tx[i], mpll_hw_state->tx[i]); 3091 + } 3092 + 3093 + for (i = 0; i < ARRAY_SIZE(mpll_sw_state->cmn); i++) { 3094 + I915_STATE_WARN(i915, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i], 3095 + "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)", 3096 + crtc->base.base.id, crtc->base.name, i, 3097 + mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]); 3098 + } 3099 + } 3100 + 3101 + void intel_cx0pll_state_verify(struct intel_atomic_state *state, 3021 3102 struct intel_crtc *crtc) 3022 3103 { 3023 3104 struct drm_i915_private *i915 = to_i915(state->base.dev); 3024 3105 const struct intel_crtc_state *new_crtc_state = 3025 3106 intel_atomic_get_new_crtc_state(state, crtc); 3026 - struct intel_c10pll_state mpllb_hw_state = {}; 3027 - const struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10; 3028 3107 struct intel_encoder *encoder; 3108 + struct intel_cx0pll_state mpll_hw_state = {}; 3029 3109 enum phy phy; 3030 - int i; 3031 3110 3032 3111 if (DISPLAY_VER(i915) < 14) 3033 3112 return; ··· 3136 3029 encoder = intel_get_crtc_new_encoder(state, new_crtc_state); 3137 3030 phy = intel_port_to_phy(i915, encoder->port); 3138 3031 3139 - if (!intel_is_c10phy(i915, phy)) 3140 - 
return; 3032 + intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state); 3141 3033 3142 - intel_c10pll_readout_hw_state(encoder, &mpllb_hw_state); 3143 - 3144 - for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) { 3145 - u8 expected = mpllb_sw_state->pll[i]; 3146 - 3147 - I915_STATE_WARN(i915, mpllb_hw_state.pll[i] != expected, 3148 - "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)", 3149 - crtc->base.base.id, crtc->base.name, i, 3150 - expected, mpllb_hw_state.pll[i]); 3151 - } 3152 - 3153 - I915_STATE_WARN(i915, mpllb_hw_state.tx != mpllb_sw_state->tx, 3154 - "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)", 3155 - crtc->base.base.id, crtc->base.name, 3156 - mpllb_sw_state->tx, mpllb_hw_state.tx); 3157 - 3158 - I915_STATE_WARN(i915, mpllb_hw_state.cmn != mpllb_sw_state->cmn, 3159 - "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)", 3160 - crtc->base.base.id, crtc->base.name, 3161 - mpllb_sw_state->cmn, mpllb_hw_state.cmn); 3034 + if (intel_is_c10phy(i915, phy)) 3035 + intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10); 3036 + else 3037 + intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20); 3162 3038 }
+8 -8
drivers/gpu/drm/i915/display/intel_cx0_phy.h
··· 16 16 struct intel_atomic_state; 17 17 struct intel_c10pll_state; 18 18 struct intel_c20pll_state; 19 + struct intel_cx0pll_state; 19 20 struct intel_crtc; 20 21 struct intel_crtc_state; 21 22 struct intel_encoder; ··· 29 28 enum icl_port_dpll_id 30 29 intel_mtl_port_pll_type(struct intel_encoder *encoder, 31 30 const struct intel_crtc_state *crtc_state); 32 - void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c10pll_state *pll_state); 31 + 33 32 int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder); 33 + void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder, 34 + struct intel_cx0pll_state *pll_state); 35 + int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder, 36 + const struct intel_cx0pll_state *pll_state); 37 + 34 38 void intel_c10pll_dump_hw_state(struct drm_i915_private *dev_priv, 35 39 const struct intel_c10pll_state *hw_state); 36 - int intel_c10pll_calc_port_clock(struct intel_encoder *encoder, 37 - const struct intel_c10pll_state *pll_state); 38 - void intel_c10pll_state_verify(struct intel_atomic_state *state, 40 + void intel_cx0pll_state_verify(struct intel_atomic_state *state, 39 41 struct intel_crtc *crtc); 40 - void intel_c20pll_readout_hw_state(struct intel_encoder *encoder, 41 - struct intel_c20pll_state *pll_state); 42 42 void intel_c20pll_dump_hw_state(struct drm_i915_private *i915, 43 43 const struct intel_c20pll_state *hw_state); 44 - int intel_c20pll_calc_port_clock(struct intel_encoder *encoder, 45 - const struct intel_c20pll_state *pll_state); 46 44 void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder, 47 45 const struct intel_crtc_state *crtc_state); 48 46 int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
+134 -43
drivers/gpu/drm/i915/display/intel_ddi.c
··· 25 25 * 26 26 */ 27 27 28 + #include <linux/iopoll.h> 28 29 #include <linux/string_helpers.h> 29 30 30 31 #include <drm/display/drm_scdc_helper.h> ··· 2211 2210 } 2212 2211 2213 2212 static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, 2214 - const struct intel_crtc_state *crtc_state) 2213 + const struct intel_crtc_state *crtc_state, 2214 + bool enable) 2215 2215 { 2216 2216 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2217 2217 2218 2218 if (!crtc_state->fec_enable) 2219 2219 return; 2220 2220 2221 - if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0) 2222 - drm_dbg_kms(&i915->drm, 2223 - "Failed to set FEC_READY in the sink\n"); 2221 + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, 2222 + enable ? DP_FEC_READY : 0) <= 0) 2223 + drm_dbg_kms(&i915->drm, "Failed to set FEC_READY to %s in the sink\n", 2224 + enable ? "enabled" : "disabled"); 2225 + 2226 + if (enable && 2227 + drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS, 2228 + DP_FEC_DECODE_EN_DETECTED | DP_FEC_DECODE_DIS_DETECTED) <= 0) 2229 + drm_dbg_kms(&i915->drm, "Failed to clear FEC detected flags\n"); 2230 + } 2231 + 2232 + static int read_fec_detected_status(struct drm_dp_aux *aux) 2233 + { 2234 + int ret; 2235 + u8 status; 2236 + 2237 + ret = drm_dp_dpcd_readb(aux, DP_FEC_STATUS, &status); 2238 + if (ret < 0) 2239 + return ret; 2240 + 2241 + return status; 2242 + } 2243 + 2244 + static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled) 2245 + { 2246 + struct drm_i915_private *i915 = to_i915(aux->drm_dev); 2247 + int mask = enabled ? 
DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED; 2248 + int status; 2249 + int err; 2250 + 2251 + err = readx_poll_timeout(read_fec_detected_status, aux, status, 2252 + status & mask || status < 0, 2253 + 10000, 200000); 2254 + 2255 + if (!err && status >= 0) 2256 + return; 2257 + 2258 + if (err == -ETIMEDOUT) 2259 + drm_dbg_kms(&i915->drm, "Timeout waiting for FEC %s to get detected\n", 2260 + str_enabled_disabled(enabled)); 2261 + else 2262 + drm_dbg_kms(&i915->drm, "FEC detected status read error: %d\n", status); 2263 + } 2264 + 2265 + void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, 2266 + const struct intel_crtc_state *crtc_state, 2267 + bool enabled) 2268 + { 2269 + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 2270 + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2271 + int ret; 2272 + 2273 + if (!crtc_state->fec_enable) 2274 + return; 2275 + 2276 + if (enabled) 2277 + ret = intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state), 2278 + DP_TP_STATUS_FEC_ENABLE_LIVE, 1); 2279 + else 2280 + ret = intel_de_wait_for_clear(i915, dp_tp_status_reg(encoder, crtc_state), 2281 + DP_TP_STATUS_FEC_ENABLE_LIVE, 1); 2282 + 2283 + if (ret) 2284 + drm_err(&i915->drm, 2285 + "Timeout waiting for FEC live state to get %s\n", 2286 + str_enabled_disabled(enabled)); 2287 + 2288 + /* 2289 + * At least the Synoptics MST hub doesn't set the detected flag for 2290 + * FEC decoding disabling so skip waiting for that. 
2291 + */ 2292 + if (enabled) 2293 + wait_for_fec_detected(&intel_dp->aux, enabled); 2224 2294 } 2225 2295 2226 2296 static void intel_ddi_enable_fec(struct intel_encoder *encoder, ··· 2306 2234 0, DP_TP_CTL_FEC_ENABLE); 2307 2235 } 2308 2236 2309 - static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, 2310 - const struct intel_crtc_state *crtc_state) 2237 + static void intel_ddi_disable_fec(struct intel_encoder *encoder, 2238 + const struct intel_crtc_state *crtc_state) 2311 2239 { 2312 2240 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2313 2241 ··· 2538 2466 intel_dp_set_power(intel_dp, DP_SET_POWER_D0); 2539 2467 2540 2468 intel_dp_configure_protocol_converter(intel_dp, crtc_state); 2541 - intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); 2469 + if (!is_mst) 2470 + intel_dp_sink_enable_decompression(state, 2471 + to_intel_connector(conn_state->connector), 2472 + crtc_state); 2473 + 2542 2474 /* 2543 2475 * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit 2544 2476 * in the FEC_CONFIGURATION register to 1 before initiating link 2545 2477 * training 2546 2478 */ 2547 - intel_dp_sink_set_fec_ready(intel_dp, crtc_state); 2479 + intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true); 2548 2480 2549 2481 intel_dp_check_frl_training(intel_dp); 2550 2482 intel_dp_pcon_dsc_configure(intel_dp, crtc_state); ··· 2581 2505 /* 6.o Configure and enable FEC if needed */ 2582 2506 intel_ddi_enable_fec(encoder, crtc_state); 2583 2507 2584 - intel_dsc_dp_pps_write(encoder, crtc_state); 2508 + if (!is_mst) 2509 + intel_dsc_dp_pps_write(encoder, crtc_state); 2585 2510 } 2586 2511 2587 2512 static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, ··· 2693 2616 intel_dp_set_power(intel_dp, DP_SET_POWER_D0); 2694 2617 2695 2618 intel_dp_configure_protocol_converter(intel_dp, crtc_state); 2696 - intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); 2619 + if (!is_mst) 2620 + 
intel_dp_sink_enable_decompression(state, 2621 + to_intel_connector(conn_state->connector), 2622 + crtc_state); 2697 2623 /* 2698 2624 * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit 2699 2625 * in the FEC_CONFIGURATION register to 1 before initiating link 2700 2626 * training 2701 2627 */ 2702 - intel_dp_sink_set_fec_ready(intel_dp, crtc_state); 2628 + intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true); 2703 2629 2704 2630 intel_dp_check_frl_training(intel_dp); 2705 2631 intel_dp_pcon_dsc_configure(intel_dp, crtc_state); ··· 2723 2643 /* 7.l Configure and enable FEC if needed */ 2724 2644 intel_ddi_enable_fec(encoder, crtc_state); 2725 2645 2726 - intel_dsc_dp_pps_write(encoder, crtc_state); 2646 + if (!is_mst) 2647 + intel_dsc_dp_pps_write(encoder, crtc_state); 2727 2648 } 2728 2649 2729 2650 static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, ··· 2776 2695 if (!is_mst) 2777 2696 intel_dp_set_power(intel_dp, DP_SET_POWER_D0); 2778 2697 intel_dp_configure_protocol_converter(intel_dp, crtc_state); 2779 - intel_dp_sink_set_decompression_state(intel_dp, crtc_state, 2780 - true); 2781 - intel_dp_sink_set_fec_ready(intel_dp, crtc_state); 2698 + if (!is_mst) 2699 + intel_dp_sink_enable_decompression(state, 2700 + to_intel_connector(conn_state->connector), 2701 + crtc_state); 2702 + intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true); 2782 2703 intel_dp_start_link_train(intel_dp, crtc_state); 2783 2704 if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) && 2784 2705 !is_trans_port_sync_mode(crtc_state)) ··· 2788 2705 2789 2706 intel_ddi_enable_fec(encoder, crtc_state); 2790 2707 2791 - if (!is_mst) 2708 + if (!is_mst) { 2792 2709 intel_ddi_enable_transcoder_clock(encoder, crtc_state); 2793 - 2794 - intel_dsc_dp_pps_write(encoder, crtc_state); 2710 + intel_dsc_dp_pps_write(encoder, crtc_state); 2711 + } 2795 2712 } 2796 2713 2797 2714 static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, ··· 2800 2717 const 
struct drm_connector_state *conn_state) 2801 2718 { 2802 2719 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2720 + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2803 2721 2804 - if (HAS_DP20(dev_priv)) 2722 + if (HAS_DP20(dev_priv)) { 2805 2723 intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder), 2806 2724 crtc_state); 2725 + if (crtc_state->has_panel_replay) 2726 + drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, 2727 + DP_PANEL_REPLAY_ENABLE); 2728 + } 2807 2729 2808 2730 if (DISPLAY_VER(dev_priv) >= 14) 2809 2731 mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); ··· 2954 2866 intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), 2955 2867 DP_TP_CTL_ENABLE, 0); 2956 2868 2957 - /* Disable FEC in DP Sink */ 2958 - intel_ddi_disable_fec_state(encoder, crtc_state); 2869 + intel_ddi_disable_fec(encoder, crtc_state); 2959 2870 2960 2871 if (wait) 2961 2872 intel_wait_ddi_buf_idle(dev_priv, port); ··· 2969 2882 mtl_disable_ddi_buf(encoder, crtc_state); 2970 2883 2971 2884 /* 3.f Disable DP_TP_CTL FEC Enable if it is needed */ 2972 - intel_ddi_disable_fec_state(encoder, crtc_state); 2885 + intel_ddi_disable_fec(encoder, crtc_state); 2973 2886 } else { 2974 2887 disable_ddi_buf(encoder, crtc_state); 2975 2888 } 2889 + 2890 + intel_ddi_wait_for_fec_status(encoder, crtc_state, false); 2976 2891 } 2977 2892 2978 2893 static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, ··· 3013 2924 } 3014 2925 3015 2926 intel_disable_ddi_buf(encoder, old_crtc_state); 2927 + 2928 + intel_dp_sink_set_fec_ready(intel_dp, old_crtc_state, false); 3016 2929 3017 2930 /* 3018 2931 * From TGL spec: "If single stream or multi-stream master transcoder: ··· 3201 3110 if (!dig_port->lspcon.active || intel_dp_has_hdmi_sink(&dig_port->dp)) 3202 3111 intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); 3203 3112 3204 - intel_audio_codec_enable(encoder, crtc_state, conn_state); 3205 - 3206 3113 
trans_port_sync_stop_link_train(state, encoder, crtc_state); 3114 + } 3115 + 3116 + /* FIXME bad home for this function */ 3117 + i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915, 3118 + enum transcoder cpu_transcoder) 3119 + { 3120 + return DISPLAY_VER(i915) >= 14 ? 3121 + MTL_CHICKEN_TRANS(cpu_transcoder) : 3122 + CHICKEN_TRANS(cpu_transcoder); 3207 3123 } 3208 3124 3209 3125 static i915_reg_t ··· 3331 3233 intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl); 3332 3234 3333 3235 intel_wait_ddi_buf_active(dev_priv, port); 3334 - 3335 - intel_audio_codec_enable(encoder, crtc_state, conn_state); 3336 3236 } 3337 3237 3338 3238 static void intel_enable_ddi(struct intel_atomic_state *state, ··· 3348 3252 3349 3253 intel_enable_transcoder(crtc_state); 3350 3254 3255 + intel_ddi_wait_for_fec_status(encoder, crtc_state, true); 3256 + 3351 3257 intel_crtc_vblank_on(crtc_state); 3352 3258 3353 3259 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) ··· 3357 3259 else 3358 3260 intel_enable_ddi_dp(state, encoder, crtc_state, conn_state); 3359 3261 3360 - /* Enable hdcp if it's desired */ 3361 - if (conn_state->content_protection == 3362 - DRM_MODE_CONTENT_PROTECTION_DESIRED) 3363 - intel_hdcp_enable(state, encoder, crtc_state, conn_state); 3262 + intel_hdcp_enable(state, encoder, crtc_state, conn_state); 3263 + 3364 3264 } 3365 3265 3366 3266 static void intel_disable_ddi_dp(struct intel_atomic_state *state, ··· 3367 3271 const struct drm_connector_state *old_conn_state) 3368 3272 { 3369 3273 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3274 + struct intel_connector *connector = 3275 + to_intel_connector(old_conn_state->connector); 3370 3276 3371 3277 intel_dp->link_trained = false; 3372 - 3373 - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); 3374 3278 3375 3279 intel_psr_disable(intel_dp, old_crtc_state); 3376 3280 intel_edp_backlight_off(old_conn_state); 3377 3281 /* Disable the decompression in DP Sink */ 3378 - 
intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state, 3379 - false); 3282 + intel_dp_sink_disable_decompression(state, 3283 + connector, old_crtc_state); 3380 3284 /* Disable Ignore_MSA bit in DP Sink */ 3381 3285 intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state, 3382 3286 false); ··· 3389 3293 { 3390 3294 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3391 3295 struct drm_connector *connector = old_conn_state->connector; 3392 - 3393 - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); 3394 3296 3395 3297 if (!intel_hdmi_handle_sink_scrambling(encoder, connector, 3396 3298 false, false)) ··· 3948 3854 static void mtl_ddi_get_config(struct intel_encoder *encoder, 3949 3855 struct intel_crtc_state *crtc_state) 3950 3856 { 3951 - struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3952 - enum phy phy = intel_port_to_phy(i915, encoder->port); 3953 3857 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 3954 3858 3955 3859 if (intel_tc_port_in_tbt_alt_mode(dig_port)) { 3956 3860 crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder); 3957 - } else if (intel_is_c10phy(i915, phy)) { 3958 - intel_c10pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c10); 3959 - crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10); 3960 3861 } else { 3961 - intel_c20pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c20); 3962 - crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20); 3862 + intel_cx0pll_readout_hw_state(encoder, &crtc_state->cx0pll_state); 3863 + crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state); 3963 3864 } 3964 3865 3965 3866 intel_ddi_get_config(encoder, crtc_state); ··· 4933 4844 encoder->post_pll_disable = intel_ddi_post_pll_disable; 4934 4845 encoder->post_disable = intel_ddi_post_disable; 4935 4846 encoder->update_pipe = intel_ddi_update_pipe; 4847 
+ encoder->audio_enable = intel_audio_codec_enable; 4848 + encoder->audio_disable = intel_audio_codec_disable; 4936 4849 encoder->get_hw_state = intel_ddi_get_hw_state; 4937 4850 encoder->sync_state = intel_ddi_sync_state; 4938 4851 encoder->initial_fastset_check = intel_ddi_initial_fastset_check;
+5
drivers/gpu/drm/i915/display/intel_ddi.h
··· 27 27 const struct intel_crtc_state *crtc_state); 28 28 i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder, 29 29 const struct intel_crtc_state *crtc_state); 30 + i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915, 31 + enum transcoder cpu_transcoder); 30 32 void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, 31 33 struct intel_encoder *intel_encoder, 32 34 const struct intel_crtc_state *old_crtc_state, ··· 62 60 void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, 63 61 const struct intel_crtc_state *crtc_state); 64 62 void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state); 63 + void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder, 64 + const struct intel_crtc_state *crtc_state, 65 + bool enabled); 65 66 void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, 66 67 const struct drm_connector_state *conn_state); 67 68 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+219 -341
drivers/gpu/drm/i915/display/intel_display.c
··· 48 48 #include "g4x_dp.h" 49 49 #include "g4x_hdmi.h" 50 50 #include "hsw_ips.h" 51 + #include "i915_config.h" 51 52 #include "i915_drv.h" 52 53 #include "i915_reg.h" 53 54 #include "i915_utils.h" ··· 73 72 #include "intel_dp.h" 74 73 #include "intel_dp_link_training.h" 75 74 #include "intel_dp_mst.h" 76 - #include "intel_dpio_phy.h" 77 75 #include "intel_dpll.h" 78 76 #include "intel_dpll_mgr.h" 79 77 #include "intel_dpt.h" 78 + #include "intel_dpt_common.h" 80 79 #include "intel_drrs.h" 81 80 #include "intel_dsb.h" 82 81 #include "intel_dsi.h" ··· 194 193 static void 195 194 skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) 196 195 { 197 - if (enable) 198 - intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 199 - 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS); 200 - else 201 - intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 202 - DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0); 196 + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 197 + DUPS1_GATING_DIS | DUPS2_GATING_DIS, 198 + enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0); 203 199 } 204 200 205 201 /* Wa_2006604312:icl,ehl */ ··· 204 206 icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 205 207 bool enable) 206 208 { 207 - if (enable) 208 - intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS); 209 - else 210 - intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0); 209 + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 210 + DPFR_GATING_DIS, 211 + enable ? DPFR_GATING_DIS : 0); 211 212 } 212 213 213 214 /* Wa_1604331009:icl,jsl,ehl */ ··· 214 217 icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, 215 218 bool enable) 216 219 { 217 - intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS, 220 + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 221 + CURSOR_GATING_DIS, 218 222 enable ? 
CURSOR_GATING_DIS : 0); 219 223 } 220 224 ··· 395 397 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 396 398 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; 397 399 enum pipe pipe = crtc->pipe; 398 - i915_reg_t reg; 399 400 u32 val; 400 401 401 402 drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe)); ··· 427 430 intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe), 428 431 0, PIPE_ARB_USE_PROG_SLOTS); 429 432 430 - reg = TRANSCONF(cpu_transcoder); 431 - val = intel_de_read(dev_priv, reg); 433 + val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); 432 434 if (val & TRANSCONF_ENABLE) { 433 435 /* we keep both pipes enabled on 830 */ 434 436 drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv)); 435 437 return; 436 438 } 437 439 438 - intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE); 439 - intel_de_posting_read(dev_priv, reg); 440 + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), 441 + val | TRANSCONF_ENABLE); 442 + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 440 443 441 444 /* 442 445 * Until the pipe starts PIPEDSL reads will return a stale value, ··· 455 458 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 456 459 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; 457 460 enum pipe pipe = crtc->pipe; 458 - i915_reg_t reg; 459 461 u32 val; 460 462 461 463 drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe)); ··· 465 469 */ 466 470 assert_planes_disabled(crtc); 467 471 468 - reg = TRANSCONF(cpu_transcoder); 469 - val = intel_de_read(dev_priv, reg); 472 + val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); 470 473 if ((val & TRANSCONF_ENABLE) == 0) 471 474 return; 472 475 ··· 480 485 if (!IS_I830(dev_priv)) 481 486 val &= ~TRANSCONF_ENABLE; 482 487 483 - if (DISPLAY_VER(dev_priv) >= 14) 484 - intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 485 - FECSTALL_DIS_DPTSTREAM_DPTTG, 0); 486 - else if (DISPLAY_VER(dev_priv) >= 12) 487 - intel_de_rmw(dev_priv, 
CHICKEN_TRANS(cpu_transcoder), 488 + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); 489 + 490 + if (DISPLAY_VER(dev_priv) >= 12) 491 + intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder), 488 492 FECSTALL_DIS_DPTSTREAM_DPTTG, 0); 489 493 490 - intel_de_write(dev_priv, reg, val); 491 494 if ((val & TRANSCONF_ENABLE) == 0) 492 495 intel_wait_for_pipe_off(old_crtc_state); 493 496 } ··· 889 896 (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915)); 890 897 } 891 898 899 + static void intel_encoders_audio_enable(struct intel_atomic_state *state, 900 + struct intel_crtc *crtc) 901 + { 902 + const struct intel_crtc_state *crtc_state = 903 + intel_atomic_get_new_crtc_state(state, crtc); 904 + const struct drm_connector_state *conn_state; 905 + struct drm_connector *conn; 906 + int i; 907 + 908 + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { 909 + struct intel_encoder *encoder = 910 + to_intel_encoder(conn_state->best_encoder); 911 + 912 + if (conn_state->crtc != &crtc->base) 913 + continue; 914 + 915 + if (encoder->audio_enable) 916 + encoder->audio_enable(encoder, crtc_state, conn_state); 917 + } 918 + } 919 + 920 + static void intel_encoders_audio_disable(struct intel_atomic_state *state, 921 + struct intel_crtc *crtc) 922 + { 923 + const struct intel_crtc_state *old_crtc_state = 924 + intel_atomic_get_old_crtc_state(state, crtc); 925 + const struct drm_connector_state *old_conn_state; 926 + struct drm_connector *conn; 927 + int i; 928 + 929 + for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { 930 + struct intel_encoder *encoder = 931 + to_intel_encoder(old_conn_state->best_encoder); 932 + 933 + if (old_conn_state->crtc != &crtc->base) 934 + continue; 935 + 936 + if (encoder->audio_disable) 937 + encoder->audio_disable(encoder, old_crtc_state, old_conn_state); 938 + } 939 + } 940 + 892 941 #define is_enabling(feature, old_crtc_state, new_crtc_state) \ 893 942 
((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \ 894 943 (new_crtc_state)->feature) ··· 941 906 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, 942 907 const struct intel_crtc_state *new_crtc_state) 943 908 { 909 + if (!new_crtc_state->hw.active) 910 + return false; 911 + 944 912 return is_enabling(active_planes, old_crtc_state, new_crtc_state); 945 913 } 946 914 947 915 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, 948 916 const struct intel_crtc_state *new_crtc_state) 949 917 { 918 + if (!old_crtc_state->hw.active) 919 + return false; 920 + 950 921 return is_disabling(active_planes, old_crtc_state, new_crtc_state); 951 922 } 952 923 ··· 969 928 static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state, 970 929 const struct intel_crtc_state *new_crtc_state) 971 930 { 931 + if (!new_crtc_state->hw.active) 932 + return false; 933 + 972 934 return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) || 973 935 (new_crtc_state->vrr.enable && 974 936 (new_crtc_state->update_m_n || new_crtc_state->update_lrr || ··· 981 937 static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state, 982 938 const struct intel_crtc_state *new_crtc_state) 983 939 { 940 + if (!old_crtc_state->hw.active) 941 + return false; 942 + 984 943 return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) || 985 944 (old_crtc_state->vrr.enable && 986 945 (new_crtc_state->update_m_n || new_crtc_state->update_lrr || 987 946 vrr_params_changed(old_crtc_state, new_crtc_state))); 947 + } 948 + 949 + static bool audio_enabling(const struct intel_crtc_state *old_crtc_state, 950 + const struct intel_crtc_state *new_crtc_state) 951 + { 952 + if (!new_crtc_state->hw.active) 953 + return false; 954 + 955 + return is_enabling(has_audio, old_crtc_state, new_crtc_state) || 956 + (new_crtc_state->has_audio && 957 + memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0); 958 + } 959 
+ 960 + static bool audio_disabling(const struct intel_crtc_state *old_crtc_state, 961 + const struct intel_crtc_state *new_crtc_state) 962 + { 963 + if (!old_crtc_state->hw.active) 964 + return false; 965 + 966 + return is_disabling(has_audio, old_crtc_state, new_crtc_state) || 967 + (old_crtc_state->has_audio && 968 + memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0); 988 969 } 989 970 990 971 #undef is_disabling ··· 1052 983 1053 984 if (intel_crtc_needs_color_update(new_crtc_state)) 1054 985 intel_color_post_update(new_crtc_state); 986 + 987 + if (audio_enabling(old_crtc_state, new_crtc_state)) 988 + intel_encoders_audio_enable(state, crtc); 1055 989 } 1056 990 1057 991 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, ··· 1137 1065 intel_vrr_disable(old_crtc_state); 1138 1066 intel_crtc_update_active_timings(old_crtc_state, false); 1139 1067 } 1068 + 1069 + if (audio_disabling(old_crtc_state, new_crtc_state)) 1070 + intel_encoders_audio_disable(state, crtc); 1140 1071 1141 1072 intel_drrs_deactivate(old_crtc_state); 1142 1073 ··· 1576 1501 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) 1577 1502 { 1578 1503 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1579 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1580 - enum transcoder transcoder = crtc_state->cpu_transcoder; 1581 - i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? 
MTL_CHICKEN_TRANS(transcoder) : 1582 - CHICKEN_TRANS(transcoder); 1504 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 1583 1505 1584 - intel_de_rmw(dev_priv, reg, 1506 + intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder), 1585 1507 HSW_FRAME_START_DELAY_MASK, 1586 1508 HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); 1587 1509 } ··· 1856 1784 1857 1785 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) 1858 1786 { 1787 + /* 1788 + * DG2's "TC1", although TC-capable output, doesn't share the same flow 1789 + * as other platforms on the display engine side and rather rely on the 1790 + * SNPS PHY, that is programmed separately 1791 + */ 1859 1792 if (IS_DG2(dev_priv)) 1860 - /* DG2's "TC1" output uses a SNPS PHY */ 1861 1793 return false; 1862 - else if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 0)) 1794 + 1795 + if (DISPLAY_VER(dev_priv) >= 13) 1863 1796 return phy >= PHY_F && phy <= PHY_I; 1864 1797 else if (IS_TIGERLAKE(dev_priv)) 1865 1798 return phy >= PHY_D && phy <= PHY_I; 1866 1799 else if (IS_ICELAKE(dev_priv)) 1867 1800 return phy >= PHY_C && phy <= PHY_F; 1868 - else 1869 - return false; 1801 + 1802 + return false; 1870 1803 } 1871 1804 1872 1805 bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy) 1873 1806 { 1874 - if (phy == PHY_NONE) 1875 - return false; 1876 - else if (IS_DG2(dev_priv)) 1877 - /* 1878 - * All four "combo" ports and the TC1 port (PHY E) use 1879 - * Synopsis PHYs. 1880 - */ 1881 - return phy <= PHY_E; 1882 - 1883 - return false; 1807 + /* 1808 + * For DG2, and for DG2 only, all four "combo" ports and the TC1 port 1809 + * (PHY E) use Synopsis PHYs. See intel_phy_is_tc(). 
1810 + */ 1811 + return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E; 1884 1812 } 1885 1813 1886 1814 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) ··· 2469 2397 } 2470 2398 2471 2399 void 2472 - intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 2400 + intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes, 2473 2401 int pixel_clock, int link_clock, 2474 - struct intel_link_m_n *m_n, 2475 - bool fec_enable) 2402 + int bw_overhead, 2403 + struct intel_link_m_n *m_n) 2476 2404 { 2477 - u32 data_clock = bits_per_pixel * pixel_clock; 2478 - 2479 - if (fec_enable) 2480 - data_clock = intel_dp_mode_to_fec_clock(data_clock); 2405 + u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock); 2406 + u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16, 2407 + bw_overhead); 2408 + u32 data_n = intel_dp_max_data_rate(link_clock, nlanes); 2481 2409 2482 2410 /* 2483 2411 * Windows/BIOS uses fixed M/N values always. Follow suit. ··· 2488 2416 */ 2489 2417 m_n->tu = 64; 2490 2418 compute_m_n(&m_n->data_m, &m_n->data_n, 2491 - data_clock, link_clock * nlanes * 8, 2419 + data_m, data_n, 2492 2420 0x8000000); 2493 2421 2494 2422 compute_m_n(&m_n->link_m, &m_n->link_n, 2495 - pixel_clock, link_clock, 2423 + pixel_clock, link_symbol_clock, 2496 2424 0x80000); 2497 2425 } 2498 2426 ··· 2908 2836 crtc_state->gmch_pfit.control = tmp; 2909 2837 crtc_state->gmch_pfit.pgm_ratios = 2910 2838 intel_de_read(dev_priv, PFIT_PGM_RATIOS); 2911 - } 2912 - 2913 - static void vlv_crtc_clock_get(struct intel_crtc *crtc, 2914 - struct intel_crtc_state *pipe_config) 2915 - { 2916 - struct drm_device *dev = crtc->base.dev; 2917 - struct drm_i915_private *dev_priv = to_i915(dev); 2918 - enum pipe pipe = crtc->pipe; 2919 - struct dpll clock; 2920 - u32 mdiv; 2921 - int refclk = 100000; 2922 - 2923 - /* In case of DSI, DPLL will not be used */ 2924 - if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 2925 - return; 2926 - 2927 - 
vlv_dpio_get(dev_priv); 2928 - mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 2929 - vlv_dpio_put(dev_priv); 2930 - 2931 - clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 2932 - clock.m2 = mdiv & DPIO_M2DIV_MASK; 2933 - clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 2934 - clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 2935 - clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 2936 - 2937 - pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 2938 - } 2939 - 2940 - static void chv_crtc_clock_get(struct intel_crtc *crtc, 2941 - struct intel_crtc_state *pipe_config) 2942 - { 2943 - struct drm_device *dev = crtc->base.dev; 2944 - struct drm_i915_private *dev_priv = to_i915(dev); 2945 - enum pipe pipe = crtc->pipe; 2946 - enum dpio_channel port = vlv_pipe_to_channel(pipe); 2947 - struct dpll clock; 2948 - u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 2949 - int refclk = 100000; 2950 - 2951 - /* In case of DSI, DPLL will not be used */ 2952 - if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 2953 - return; 2954 - 2955 - vlv_dpio_get(dev_priv); 2956 - cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 2957 - pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 2958 - pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 2959 - pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 2960 - pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 2961 - vlv_dpio_put(dev_priv); 2962 - 2963 - clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 
2 : 0; 2964 - clock.m2 = (pll_dw0 & 0xff) << 22; 2965 - if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 2966 - clock.m2 |= pll_dw2 & 0x3fffff; 2967 - clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 2968 - clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 2969 - clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 2970 - 2971 - pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 2972 2839 } 2973 2840 2974 2841 static enum intel_output_format ··· 3801 3790 } 3802 3791 3803 3792 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 3804 - tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ? 3805 - MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) : 3806 - CHICKEN_TRANS(pipe_config->cpu_transcoder)); 3793 + tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder)); 3807 3794 3808 3795 pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1; 3809 3796 } else { ··· 3830 3821 return true; 3831 3822 } 3832 3823 3833 - static int i9xx_pll_refclk(struct drm_device *dev, 3834 - const struct intel_crtc_state *pipe_config) 3835 - { 3836 - struct drm_i915_private *dev_priv = to_i915(dev); 3837 - u32 dpll = pipe_config->dpll_hw_state.dpll; 3838 - 3839 - if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 3840 - return dev_priv->display.vbt.lvds_ssc_freq; 3841 - else if (HAS_PCH_SPLIT(dev_priv)) 3842 - return 120000; 3843 - else if (DISPLAY_VER(dev_priv) != 2) 3844 - return 96000; 3845 - else 3846 - return 48000; 3847 - } 3848 - 3849 - /* Returns the clock of the currently programmed mode of the given pipe. 
*/ 3850 - void i9xx_crtc_clock_get(struct intel_crtc *crtc, 3851 - struct intel_crtc_state *pipe_config) 3852 - { 3853 - struct drm_device *dev = crtc->base.dev; 3854 - struct drm_i915_private *dev_priv = to_i915(dev); 3855 - u32 dpll = pipe_config->dpll_hw_state.dpll; 3856 - u32 fp; 3857 - struct dpll clock; 3858 - int port_clock; 3859 - int refclk = i9xx_pll_refclk(dev, pipe_config); 3860 - 3861 - if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 3862 - fp = pipe_config->dpll_hw_state.fp0; 3863 - else 3864 - fp = pipe_config->dpll_hw_state.fp1; 3865 - 3866 - clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 3867 - if (IS_PINEVIEW(dev_priv)) { 3868 - clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 3869 - clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 3870 - } else { 3871 - clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 3872 - clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 3873 - } 3874 - 3875 - if (DISPLAY_VER(dev_priv) != 2) { 3876 - if (IS_PINEVIEW(dev_priv)) 3877 - clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 3878 - DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 3879 - else 3880 - clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 3881 - DPLL_FPA01_P1_POST_DIV_SHIFT); 3882 - 3883 - switch (dpll & DPLL_MODE_MASK) { 3884 - case DPLLB_MODE_DAC_SERIAL: 3885 - clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 3886 - 5 : 10; 3887 - break; 3888 - case DPLLB_MODE_LVDS: 3889 - clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 
3890 - 7 : 14; 3891 - break; 3892 - default: 3893 - drm_dbg_kms(&dev_priv->drm, 3894 - "Unknown DPLL mode %08x in programmed " 3895 - "mode\n", (int)(dpll & DPLL_MODE_MASK)); 3896 - return; 3897 - } 3898 - 3899 - if (IS_PINEVIEW(dev_priv)) 3900 - port_clock = pnv_calc_dpll_params(refclk, &clock); 3901 - else 3902 - port_clock = i9xx_calc_dpll_params(refclk, &clock); 3903 - } else { 3904 - enum pipe lvds_pipe; 3905 - 3906 - if (IS_I85X(dev_priv) && 3907 - intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) && 3908 - lvds_pipe == crtc->pipe) { 3909 - u32 lvds = intel_de_read(dev_priv, LVDS); 3910 - 3911 - clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 3912 - DPLL_FPA01_P1_POST_DIV_SHIFT); 3913 - 3914 - if (lvds & LVDS_CLKB_POWER_UP) 3915 - clock.p2 = 7; 3916 - else 3917 - clock.p2 = 14; 3918 - } else { 3919 - if (dpll & PLL_P1_DIVIDE_BY_TWO) 3920 - clock.p1 = 2; 3921 - else { 3922 - clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 3923 - DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 3924 - } 3925 - if (dpll & PLL_P2_DIVIDE_BY_4) 3926 - clock.p2 = 4; 3927 - else 3928 - clock.p2 = 2; 3929 - } 3930 - 3931 - port_clock = i9xx_calc_dpll_params(refclk, &clock); 3932 - } 3933 - 3934 - /* 3935 - * This value includes pixel_multiplier. We will use 3936 - * port_clock to compute adjusted_mode.crtc_clock in the 3937 - * encoder's get_config() function. 
3938 - */ 3939 - pipe_config->port_clock = port_clock; 3940 - } 3941 - 3942 3824 int intel_dotclock_calculate(int link_freq, 3943 3825 const struct intel_link_m_n *m_n) 3944 3826 { 3945 3827 /* 3946 - * The calculation for the data clock is: 3828 + * The calculation for the data clock -> pixel clock is: 3947 3829 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 3948 3830 * But we want to avoid losing precison if possible, so: 3949 3831 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 3950 3832 * 3951 - * and the link clock is simpler: 3952 - * link_clock = (m * link_clock) / n 3833 + * and for link freq (10kbs units) -> pixel clock it is: 3834 + * link_symbol_clock = link_freq * 10 / link_symbol_size 3835 + * pixel_clock = (m * link_symbol_clock) / n 3836 + * or for more precision: 3837 + * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size) 3953 3838 */ 3954 3839 3955 3840 if (!m_n->link_n) 3956 3841 return 0; 3957 3842 3958 - return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq), 3959 - m_n->link_n); 3843 + return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10), 3844 + m_n->link_n * intel_dp_link_symbol_size(link_freq)); 3960 3845 } 3961 3846 3962 3847 int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config) ··· 4582 4679 if (ret) 4583 4680 return ret; 4584 4681 4682 + crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe); 4585 4683 crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe]; 4586 4684 4587 4685 if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) { ··· 4961 5057 } \ 4962 5058 } while (0) 4963 5059 4964 - /* 4965 - * Checks state where we only read out the enabling, but not the entire 4966 - * state itself (like full infoframes or ELD for audio). These states 4967 - * require a full modeset on bootup to fix up. 
4968 - */ 4969 - #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \ 4970 - if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ 4971 - PIPE_CONF_CHECK_BOOL(name); \ 4972 - } else { \ 4973 - pipe_config_mismatch(fastset, crtc, __stringify(name), \ 4974 - "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \ 4975 - str_yes_no(current_config->name), \ 4976 - str_yes_no(pipe_config->name)); \ 4977 - ret = false; \ 4978 - } \ 4979 - } while (0) 4980 - 4981 5060 #define PIPE_CONF_CHECK_P(name) do { \ 4982 5061 if (current_config->name != pipe_config->name) { \ 4983 5062 pipe_config_mismatch(fastset, crtc, __stringify(name), \ ··· 5148 5261 PIPE_CONF_CHECK_BOOL(enhanced_framing); 5149 5262 PIPE_CONF_CHECK_BOOL(fec_enable); 5150 5263 5151 - PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); 5152 - PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES); 5264 + if (!fastset) { 5265 + PIPE_CONF_CHECK_BOOL(has_audio); 5266 + PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES); 5267 + } 5153 5268 5154 5269 PIPE_CONF_CHECK_X(gmch_pfit.control); 5155 5270 /* pfit ratios are autocomputed by the hw on gen4+ */ ··· 5303 5414 5304 5415 PIPE_CONF_CHECK_I(dsc.compression_enable); 5305 5416 PIPE_CONF_CHECK_I(dsc.dsc_split); 5306 - PIPE_CONF_CHECK_I(dsc.compressed_bpp); 5417 + PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16); 5307 5418 5308 5419 PIPE_CONF_CHECK_BOOL(splitter.enable); 5309 5420 PIPE_CONF_CHECK_I(splitter.link_count); ··· 5321 5432 #undef PIPE_CONF_CHECK_X 5322 5433 #undef PIPE_CONF_CHECK_I 5323 5434 #undef PIPE_CONF_CHECK_BOOL 5324 - #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE 5325 5435 #undef PIPE_CONF_CHECK_P 5326 5436 #undef PIPE_CONF_CHECK_FLAGS 5327 5437 #undef PIPE_CONF_CHECK_COLOR_LUT ··· 5411 5523 return 0; 5412 5524 } 5413 5525 5526 + static void 5527 + intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state) 5528 + { 5529 + crtc_state->uapi.mode_changed = true; 5530 + 5531 + crtc_state->update_pipe = false; 5532 + 
crtc_state->update_m_n = false; 5533 + crtc_state->update_lrr = false; 5534 + } 5535 + 5414 5536 /** 5415 5537 * intel_modeset_all_pipes_late - force a full modeset on all pipes 5416 5538 * @state: intel atomic state ··· 5454 5556 if (ret) 5455 5557 return ret; 5456 5558 5457 - crtc_state->update_pipe = false; 5458 - crtc_state->update_m_n = false; 5459 - crtc_state->update_lrr = false; 5559 + intel_crtc_flag_modeset(crtc_state); 5560 + 5460 5561 crtc_state->update_planes |= crtc_state->active_planes; 5461 5562 crtc_state->async_flip_planes = 0; 5462 5563 crtc_state->do_async_flip = false; ··· 5568 5671 else 5569 5672 new_crtc_state->uapi.mode_changed = false; 5570 5673 5571 - if (intel_crtc_needs_modeset(new_crtc_state) || 5572 - intel_compare_link_m_n(&old_crtc_state->dp_m_n, 5674 + if (intel_compare_link_m_n(&old_crtc_state->dp_m_n, 5573 5675 &new_crtc_state->dp_m_n)) 5574 5676 new_crtc_state->update_m_n = false; 5575 5677 5576 - if (intel_crtc_needs_modeset(new_crtc_state) || 5577 - (old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal && 5678 + if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal && 5578 5679 old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end)) 5579 5680 new_crtc_state->update_lrr = false; 5580 5681 5581 - if (!intel_crtc_needs_modeset(new_crtc_state)) 5682 + if (intel_crtc_needs_modeset(new_crtc_state)) 5683 + intel_crtc_flag_modeset(new_crtc_state); 5684 + else 5582 5685 new_crtc_state->update_pipe = true; 5583 5686 } 5584 5687 ··· 6350 6453 if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) 6351 6454 continue; 6352 6455 6456 + if (intel_dp_mst_crtc_needs_modeset(state, crtc)) 6457 + intel_crtc_flag_modeset(new_crtc_state); 6458 + 6353 6459 if (intel_dp_mst_is_slave_trans(new_crtc_state)) { 6354 6460 enum transcoder master = new_crtc_state->mst_master_transcoder; 6355 6461 6356 - if 
(intel_cpu_transcoders_need_modeset(state, BIT(master))) { 6357 - new_crtc_state->uapi.mode_changed = true; 6358 - new_crtc_state->update_pipe = false; 6359 - new_crtc_state->update_m_n = false; 6360 - new_crtc_state->update_lrr = false; 6361 - } 6462 + if (intel_cpu_transcoders_need_modeset(state, BIT(master))) 6463 + intel_crtc_flag_modeset(new_crtc_state); 6362 6464 } 6363 6465 6364 6466 if (is_trans_port_sync_mode(new_crtc_state)) { ··· 6366 6470 if (new_crtc_state->master_transcoder != INVALID_TRANSCODER) 6367 6471 trans |= BIT(new_crtc_state->master_transcoder); 6368 6472 6369 - if (intel_cpu_transcoders_need_modeset(state, trans)) { 6370 - new_crtc_state->uapi.mode_changed = true; 6371 - new_crtc_state->update_pipe = false; 6372 - new_crtc_state->update_m_n = false; 6373 - new_crtc_state->update_lrr = false; 6374 - } 6473 + if (intel_cpu_transcoders_need_modeset(state, trans)) 6474 + intel_crtc_flag_modeset(new_crtc_state); 6375 6475 } 6376 6476 6377 6477 if (new_crtc_state->bigjoiner_pipes) { 6378 - if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) { 6379 - new_crtc_state->uapi.mode_changed = true; 6380 - new_crtc_state->update_pipe = false; 6381 - new_crtc_state->update_m_n = false; 6382 - new_crtc_state->update_lrr = false; 6383 - } 6478 + if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) 6479 + intel_crtc_flag_modeset(new_crtc_state); 6384 6480 } 6385 6481 } 6386 6482 ··· 6392 6504 ret = -EINVAL; 6393 6505 goto fail; 6394 6506 } 6395 - 6396 - ret = drm_dp_mst_atomic_check(&state->base); 6397 - if (ret) 6398 - goto fail; 6399 6507 6400 6508 ret = intel_atomic_check_planes(state); 6401 6509 if (ret) ··· 6628 6744 intel_crtc_enable_pipe_crc(crtc); 6629 6745 } 6630 6746 6631 - static void intel_update_crtc(struct intel_atomic_state *state, 6632 - struct intel_crtc *crtc) 6747 + static void intel_pre_update_crtc(struct intel_atomic_state *state, 6748 + struct intel_crtc *crtc) 6633 6749 { 6634 6750 struct 
drm_i915_private *i915 = to_i915(state->base.dev); 6635 6751 const struct intel_crtc_state *old_crtc_state = ··· 6671 6787 intel_color_commit_noarm(new_crtc_state); 6672 6788 6673 6789 intel_crtc_planes_update_noarm(state, crtc); 6790 + } 6791 + 6792 + static void intel_update_crtc(struct intel_atomic_state *state, 6793 + struct intel_crtc *crtc) 6794 + { 6795 + const struct intel_crtc_state *old_crtc_state = 6796 + intel_atomic_get_old_crtc_state(state, crtc); 6797 + struct intel_crtc_state *new_crtc_state = 6798 + intel_atomic_get_new_crtc_state(state, crtc); 6674 6799 6675 6800 /* Perform vblank evasion around commit operation */ 6676 6801 intel_pipe_update_start(state, crtc); ··· 6708 6815 * valid pipe configuration from the BIOS we need to take care 6709 6816 * of enabling them on the CRTC's first fastset. 6710 6817 */ 6711 - if (intel_crtc_needs_fastset(new_crtc_state) && !modeset && 6818 + if (intel_crtc_needs_fastset(new_crtc_state) && 6712 6819 old_crtc_state->inherited) 6713 6820 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); 6714 6821 } ··· 6746 6853 if (!intel_crtc_needs_modeset(new_crtc_state)) 6747 6854 continue; 6748 6855 6856 + intel_pre_plane_update(state, crtc); 6857 + 6749 6858 if (!old_crtc_state->hw.active) 6750 6859 continue; 6751 6860 6752 - intel_pre_plane_update(state, crtc); 6753 6861 intel_crtc_disable_planes(state, crtc); 6754 6862 } 6755 6863 ··· 6804 6910 continue; 6805 6911 6806 6912 intel_enable_crtc(state, crtc); 6913 + intel_pre_update_crtc(state, crtc); 6914 + } 6915 + 6916 + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6917 + if (!new_crtc_state->hw.active) 6918 + continue; 6919 + 6807 6920 intel_update_crtc(state, crtc); 6808 6921 } 6809 6922 } ··· 6848 6947 * So first lets enable all pipes that do not need a fullmodeset as 6849 6948 * those don't have any external dependency. 
6850 6949 */ 6950 + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 6951 + enum pipe pipe = crtc->pipe; 6952 + 6953 + if ((update_pipes & BIT(pipe)) == 0) 6954 + continue; 6955 + 6956 + intel_pre_update_crtc(state, crtc); 6957 + } 6958 + 6851 6959 while (update_pipes) { 6852 6960 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, 6853 6961 new_crtc_state, i) { ··· 6933 7023 if ((update_pipes & BIT(pipe)) == 0) 6934 7024 continue; 6935 7025 7026 + intel_pre_update_crtc(state, crtc); 7027 + } 7028 + 7029 + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { 7030 + enum pipe pipe = crtc->pipe; 7031 + 7032 + if ((update_pipes & BIT(pipe)) == 0) 7033 + continue; 7034 + 6936 7035 drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, 6937 7036 entries, I915_MAX_PIPES, pipe)); 6938 7037 ··· 6975 7056 6976 7057 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 6977 7058 { 6978 - struct wait_queue_entry wait_fence, wait_reset; 6979 - struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); 7059 + struct drm_i915_private *i915 = to_i915(intel_state->base.dev); 7060 + struct drm_plane *plane; 7061 + struct drm_plane_state *new_plane_state; 7062 + int ret, i; 6980 7063 6981 - init_wait_entry(&wait_fence, 0); 6982 - init_wait_entry(&wait_reset, 0); 6983 - for (;;) { 6984 - prepare_to_wait(&intel_state->commit_ready.wait, 6985 - &wait_fence, TASK_UNINTERRUPTIBLE); 6986 - prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, 6987 - I915_RESET_MODESET), 6988 - &wait_reset, TASK_UNINTERRUPTIBLE); 7064 + for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) { 7065 + if (new_plane_state->fence) { 7066 + ret = dma_fence_wait_timeout(new_plane_state->fence, false, 7067 + i915_fence_timeout(i915)); 7068 + if (ret <= 0) 7069 + break; 6989 7070 6990 - 6991 - if (i915_sw_fence_done(&intel_state->commit_ready) || 6992 - 
test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags)) 6993 - break; 6994 - 6995 - schedule(); 7071 + dma_fence_put(new_plane_state->fence); 7072 + new_plane_state->fence = NULL; 7073 + } 6996 7074 } 6997 - finish_wait(&intel_state->commit_ready.wait, &wait_fence); 6998 - finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags, 6999 - I915_RESET_MODESET), 7000 - &wait_reset); 7001 7075 } 7002 7076 7003 7077 static void intel_atomic_cleanup_work(struct work_struct *work) ··· 7282 7370 intel_atomic_commit_tail(state); 7283 7371 } 7284 7372 7285 - static int 7286 - intel_atomic_commit_ready(struct i915_sw_fence *fence, 7287 - enum i915_sw_fence_notify notify) 7288 - { 7289 - struct intel_atomic_state *state = 7290 - container_of(fence, struct intel_atomic_state, commit_ready); 7291 - 7292 - switch (notify) { 7293 - case FENCE_COMPLETE: 7294 - /* we do blocking waits in the worker, nothing to do here */ 7295 - break; 7296 - case FENCE_FREE: 7297 - { 7298 - struct drm_i915_private *i915 = to_i915(state->base.dev); 7299 - struct intel_atomic_helper *helper = 7300 - &i915->display.atomic_helper; 7301 - 7302 - if (llist_add(&state->freed, &helper->free_list)) 7303 - queue_work(i915->unordered_wq, &helper->free_work); 7304 - break; 7305 - } 7306 - } 7307 - 7308 - return NOTIFY_DONE; 7309 - } 7310 - 7311 7373 static void intel_atomic_track_fbs(struct intel_atomic_state *state) 7312 7374 { 7313 7375 struct intel_plane_state *old_plane_state, *new_plane_state; ··· 7303 7417 int ret = 0; 7304 7418 7305 7419 state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 7306 - 7307 - drm_atomic_state_get(&state->base); 7308 - i915_sw_fence_init(&state->commit_ready, 7309 - intel_atomic_commit_ready); 7310 7420 7311 7421 /* 7312 7422 * The intel_legacy_cursor_update() fast path takes care ··· 7336 7454 if (ret) { 7337 7455 drm_dbg_atomic(&dev_priv->drm, 7338 7456 "Preparing state failed with %i\n", ret); 7339 - i915_sw_fence_commit(&state->commit_ready); 7340 7457 
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7341 7458 return ret; 7342 7459 } ··· 7351 7470 struct intel_crtc *crtc; 7352 7471 int i; 7353 7472 7354 - i915_sw_fence_commit(&state->commit_ready); 7355 - 7356 7473 for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) 7357 7474 intel_color_cleanup_commit(new_crtc_state); 7358 7475 ··· 7364 7485 drm_atomic_state_get(&state->base); 7365 7486 INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); 7366 7487 7367 - i915_sw_fence_commit(&state->commit_ready); 7368 7488 if (nonblock && state->modeset) { 7369 7489 queue_work(dev_priv->display.wq.modeset, &state->base.commit_work); 7370 7490 } else if (nonblock) {
+3 -6
drivers/gpu/drm/i915/display/intel_display.h
··· 105 105 }; 106 106 107 107 #define plane_name(p) ((p) + 'A') 108 - #define sprite_name(p, s) ((p) * DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A') 109 108 110 109 #define for_each_plane_id_on_crtc(__crtc, __p) \ 111 110 for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ ··· 394 395 u8 active_pipes); 395 396 void intel_link_compute_m_n(u16 bpp, int nlanes, 396 397 int pixel_clock, int link_clock, 397 - struct intel_link_m_n *m_n, 398 - bool fec_enable); 398 + int bw_overhead, 399 + struct intel_link_m_n *m_n); 399 400 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, 400 401 u32 pixel_format, u64 modifier); 401 402 enum drm_mode_status ··· 481 482 void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, 482 483 enum transcoder cpu_transcoder, 483 484 struct intel_link_m_n *m_n); 484 - void i9xx_crtc_clock_get(struct intel_crtc *crtc, 485 - struct intel_crtc_state *pipe_config); 486 485 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); 487 486 int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config); 488 487 enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port); ··· 549 552 struct drm_device *drm = &(__i915)->drm; \ 550 553 int __ret_warn_on = !!(condition); \ 551 554 if (unlikely(__ret_warn_on)) \ 552 - if (!drm_WARN(drm, i915_modparams.verbose_state_checks, format)) \ 555 + if (!drm_WARN(drm, __i915->display.params.verbose_state_checks, format)) \ 553 556 drm_err(drm, format); \ 554 557 unlikely(__ret_warn_on); \ 555 558 })
+11 -9
drivers/gpu/drm/i915/display/intel_display_core.h
··· 19 19 #include "intel_cdclk.h" 20 20 #include "intel_display_device.h" 21 21 #include "intel_display_limits.h" 22 + #include "intel_display_params.h" 22 23 #include "intel_display_power.h" 23 24 #include "intel_dpll_mgr.h" 24 25 #include "intel_fbc.h" ··· 349 348 } dbuf; 350 349 351 350 struct { 352 - wait_queue_head_t waitqueue; 353 - 354 - /* mutex to protect pmdemand programming sequence */ 355 - struct mutex lock; 356 - 357 - struct intel_global_obj obj; 358 - } pmdemand; 359 - 360 - struct { 361 351 /* 362 352 * dkl.phy_lock protects against concurrent access of the 363 353 * Dekel TypeC PHYs. ··· 436 444 } ips; 437 445 438 446 struct { 447 + wait_queue_head_t waitqueue; 448 + 449 + /* mutex to protect pmdemand programming sequence */ 450 + struct mutex lock; 451 + 452 + struct intel_global_obj obj; 453 + } pmdemand; 454 + 455 + struct { 439 456 struct i915_power_domains domains; 440 457 441 458 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ ··· 521 520 struct intel_hotplug hotplug; 522 521 struct intel_opregion opregion; 523 522 struct intel_overlay *overlay; 523 + struct intel_display_params params; 524 524 struct intel_vbt_data vbt; 525 525 struct intel_wm wm; 526 526 };
+98
drivers/gpu/drm/i915/display/intel_display_debugfs.c
··· 17 17 #include "intel_de.h" 18 18 #include "intel_crtc_state_dump.h" 19 19 #include "intel_display_debugfs.h" 20 + #include "intel_display_debugfs_params.h" 20 21 #include "intel_display_power.h" 21 22 #include "intel_display_power_well.h" 22 23 #include "intel_display_types.h" ··· 642 641 return 0; 643 642 } 644 643 644 + static int i915_display_capabilities(struct seq_file *m, void *unused) 645 + { 646 + struct drm_i915_private *i915 = node_to_i915(m->private); 647 + struct drm_printer p = drm_seq_file_printer(m); 648 + 649 + intel_display_device_info_print(DISPLAY_INFO(i915), 650 + DISPLAY_RUNTIME_INFO(i915), &p); 651 + 652 + return 0; 653 + } 654 + 645 655 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 646 656 { 647 657 struct drm_i915_private *dev_priv = node_to_i915(m->private); ··· 1071 1059 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 1072 1060 {"i915_power_domain_info", i915_power_domain_info, 0}, 1073 1061 {"i915_display_info", i915_display_info, 0}, 1062 + {"i915_display_capabilities", i915_display_capabilities, 0}, 1074 1063 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 1075 1064 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 1076 1065 {"i915_ddb_info", i915_ddb_info, 0}, ··· 1111 1098 intel_hpd_debugfs_register(i915); 1112 1099 intel_psr_debugfs_register(i915); 1113 1100 intel_wm_debugfs_register(i915); 1101 + intel_display_debugfs_params(i915); 1114 1102 } 1115 1103 1116 1104 static int i915_panel_show(struct seq_file *m, void *data) ··· 1256 1242 DP_DSC_YCbCr420_Native)), 1257 1243 str_yes_no(drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, 1258 1244 DP_DSC_YCbCr444))); 1245 + seq_printf(m, "DSC_Sink_BPP_Precision: %d\n", 1246 + drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd)); 1259 1247 seq_printf(m, "Force_DSC_Enable: %s\n", 1260 1248 str_yes_no(intel_dp->force_dsc_en)); 1261 1249 if (!intel_dp_is_edp(intel_dp)) ··· 1450 1434 .write = i915_dsc_output_format_write 1451 1435 }; 1452 1436 1437 + static 
int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data) 1438 + { 1439 + struct drm_connector *connector = m->private; 1440 + struct drm_device *dev = connector->dev; 1441 + struct drm_crtc *crtc; 1442 + struct intel_dp *intel_dp; 1443 + struct intel_connector *intel_connector = to_intel_connector(connector); 1444 + struct intel_encoder *encoder = intel_attached_encoder(intel_connector); 1445 + int ret; 1446 + 1447 + if (!encoder) 1448 + return -ENODEV; 1449 + 1450 + ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex); 1451 + if (ret) 1452 + return ret; 1453 + 1454 + crtc = connector->state->crtc; 1455 + if (connector->status != connector_status_connected || !crtc) { 1456 + ret = -ENODEV; 1457 + goto out; 1458 + } 1459 + 1460 + intel_dp = intel_attached_dp(intel_connector); 1461 + seq_printf(m, "Force_DSC_Fractional_BPP_Enable: %s\n", 1462 + str_yes_no(intel_dp->force_dsc_fractional_bpp_en)); 1463 + 1464 + out: 1465 + drm_modeset_unlock(&dev->mode_config.connection_mutex); 1466 + 1467 + return ret; 1468 + } 1469 + 1470 + static ssize_t i915_dsc_fractional_bpp_write(struct file *file, 1471 + const char __user *ubuf, 1472 + size_t len, loff_t *offp) 1473 + { 1474 + struct drm_connector *connector = 1475 + ((struct seq_file *)file->private_data)->private; 1476 + struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); 1477 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1478 + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1479 + bool dsc_fractional_bpp_enable = false; 1480 + int ret; 1481 + 1482 + if (len == 0) 1483 + return 0; 1484 + 1485 + drm_dbg(&i915->drm, 1486 + "Copied %zu bytes from user to force fractional bpp for DSC\n", len); 1487 + 1488 + ret = kstrtobool_from_user(ubuf, len, &dsc_fractional_bpp_enable); 1489 + if (ret < 0) 1490 + return ret; 1491 + 1492 + drm_dbg(&i915->drm, "Got %s for DSC Fractional BPP Enable\n", 1493 + (dsc_fractional_bpp_enable) ? 
"true" : "false"); 1494 + intel_dp->force_dsc_fractional_bpp_en = dsc_fractional_bpp_enable; 1495 + 1496 + *offp += len; 1497 + 1498 + return len; 1499 + } 1500 + 1501 + static int i915_dsc_fractional_bpp_open(struct inode *inode, 1502 + struct file *file) 1503 + { 1504 + return single_open(file, i915_dsc_fractional_bpp_show, inode->i_private); 1505 + } 1506 + 1507 + static const struct file_operations i915_dsc_fractional_bpp_fops = { 1508 + .owner = THIS_MODULE, 1509 + .open = i915_dsc_fractional_bpp_open, 1510 + .read = seq_read, 1511 + .llseek = seq_lseek, 1512 + .release = single_release, 1513 + .write = i915_dsc_fractional_bpp_write 1514 + }; 1515 + 1453 1516 /* 1454 1517 * Returns the Current CRTC's bpc. 1455 1518 * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc ··· 1606 1511 1607 1512 debugfs_create_file("i915_dsc_output_format", 0644, root, 1608 1513 connector, &i915_dsc_output_format_fops); 1514 + 1515 + debugfs_create_file("i915_dsc_fractional_bpp", 0644, root, 1516 + connector, &i915_dsc_fractional_bpp_fops); 1609 1517 } 1610 1518 1611 1519 if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+176
drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include <linux/kernel.h> 7 + 8 + #include <drm/drm_drv.h> 9 + 10 + #include "intel_display_debugfs_params.h" 11 + #include "i915_drv.h" 12 + #include "intel_display_params.h" 13 + 14 + /* int param */ 15 + static int intel_display_param_int_show(struct seq_file *m, void *data) 16 + { 17 + int *value = m->private; 18 + 19 + seq_printf(m, "%d\n", *value); 20 + 21 + return 0; 22 + } 23 + 24 + static int intel_display_param_int_open(struct inode *inode, struct file *file) 25 + { 26 + return single_open(file, intel_display_param_int_show, inode->i_private); 27 + } 28 + 29 + static ssize_t intel_display_param_int_write(struct file *file, 30 + const char __user *ubuf, size_t len, 31 + loff_t *offp) 32 + { 33 + struct seq_file *m = file->private_data; 34 + int *value = m->private; 35 + int ret; 36 + 37 + ret = kstrtoint_from_user(ubuf, len, 0, value); 38 + if (ret) { 39 + /* support boolean values too */ 40 + bool b; 41 + 42 + ret = kstrtobool_from_user(ubuf, len, &b); 43 + if (!ret) 44 + *value = b; 45 + } 46 + 47 + return ret ?: len; 48 + } 49 + 50 + static const struct file_operations intel_display_param_int_fops = { 51 + .owner = THIS_MODULE, 52 + .open = intel_display_param_int_open, 53 + .read = seq_read, 54 + .write = intel_display_param_int_write, 55 + .llseek = default_llseek, 56 + .release = single_release, 57 + }; 58 + 59 + static const struct file_operations intel_display_param_int_fops_ro = { 60 + .owner = THIS_MODULE, 61 + .open = intel_display_param_int_open, 62 + .read = seq_read, 63 + .llseek = default_llseek, 64 + .release = single_release, 65 + }; 66 + 67 + /* unsigned int param */ 68 + static int intel_display_param_uint_show(struct seq_file *m, void *data) 69 + { 70 + unsigned int *value = m->private; 71 + 72 + seq_printf(m, "%u\n", *value); 73 + 74 + return 0; 75 + } 76 + 77 + static int intel_display_param_uint_open(struct inode *inode, struct file 
*file) 78 + { 79 + return single_open(file, intel_display_param_uint_show, inode->i_private); 80 + } 81 + 82 + static ssize_t intel_display_param_uint_write(struct file *file, 83 + const char __user *ubuf, size_t len, 84 + loff_t *offp) 85 + { 86 + struct seq_file *m = file->private_data; 87 + unsigned int *value = m->private; 88 + int ret; 89 + 90 + ret = kstrtouint_from_user(ubuf, len, 0, value); 91 + if (ret) { 92 + /* support boolean values too */ 93 + bool b; 94 + 95 + ret = kstrtobool_from_user(ubuf, len, &b); 96 + if (!ret) 97 + *value = b; 98 + } 99 + 100 + return ret ?: len; 101 + } 102 + 103 + static const struct file_operations intel_display_param_uint_fops = { 104 + .owner = THIS_MODULE, 105 + .open = intel_display_param_uint_open, 106 + .read = seq_read, 107 + .write = intel_display_param_uint_write, 108 + .llseek = default_llseek, 109 + .release = single_release, 110 + }; 111 + 112 + static const struct file_operations intel_display_param_uint_fops_ro = { 113 + .owner = THIS_MODULE, 114 + .open = intel_display_param_uint_open, 115 + .read = seq_read, 116 + .llseek = default_llseek, 117 + .release = single_release, 118 + }; 119 + 120 + #define RO(mode) (((mode) & 0222) == 0) 121 + 122 + __maybe_unused static struct dentry * 123 + intel_display_debugfs_create_int(const char *name, umode_t mode, 124 + struct dentry *parent, int *value) 125 + { 126 + return debugfs_create_file_unsafe(name, mode, parent, value, 127 + RO(mode) ? &intel_display_param_int_fops_ro : 128 + &intel_display_param_int_fops); 129 + } 130 + 131 + __maybe_unused static struct dentry * 132 + intel_display_debugfs_create_uint(const char *name, umode_t mode, 133 + struct dentry *parent, unsigned int *value) 134 + { 135 + return debugfs_create_file_unsafe(name, mode, parent, value, 136 + RO(mode) ? 
&intel_display_param_uint_fops_ro : 137 + &intel_display_param_uint_fops); 138 + } 139 + 140 + #define _intel_display_param_create_file(parent, name, mode, valp) \ 141 + do { \ 142 + if (mode) \ 143 + _Generic(valp, \ 144 + bool * : debugfs_create_bool, \ 145 + int * : intel_display_debugfs_create_int, \ 146 + unsigned int * : intel_display_debugfs_create_uint, \ 147 + unsigned long * : debugfs_create_ulong, \ 148 + char ** : debugfs_create_str) \ 149 + (name, mode, parent, valp); \ 150 + } while (0) 151 + 152 + /* add a subdirectory with files for each intel display param */ 153 + void intel_display_debugfs_params(struct drm_i915_private *i915) 154 + { 155 + struct drm_minor *minor = i915->drm.primary; 156 + struct dentry *dir; 157 + char dirname[16]; 158 + 159 + snprintf(dirname, sizeof(dirname), "%s_params", i915->drm.driver->name); 160 + dir = debugfs_lookup(dirname, minor->debugfs_root); 161 + if (!dir) 162 + dir = debugfs_create_dir(dirname, minor->debugfs_root); 163 + if (IS_ERR(dir)) 164 + return; 165 + 166 + /* 167 + * Note: We could create files for params needing special handling 168 + * here. Set mode in params to 0 to skip the generic create file, or 169 + * just let the generic create file fail silently with -EEXIST. 170 + */ 171 + 172 + #define REGISTER(T, x, unused, mode, ...) _intel_display_param_create_file( \ 173 + dir, #x, mode, &i915->display.params.x); 174 + INTEL_DISPLAY_PARAMS_FOR_EACH(REGISTER); 175 + #undef REGISTER 176 + }
+13
drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_DISPLAY_DEBUGFS_PARAMS__ 7 + #define __INTEL_DISPLAY_DEBUGFS_PARAMS__ 8 + 9 + struct drm_i915_private; 10 + 11 + void intel_display_debugfs_params(struct drm_i915_private *i915); 12 + 13 + #endif /* __INTEL_DISPLAY_DEBUGFS_PARAMS__ */
+11 -2
drivers/gpu/drm/i915/display/intel_display_device.c
··· 12 12 #include "intel_de.h" 13 13 #include "intel_display.h" 14 14 #include "intel_display_device.h" 15 + #include "intel_display_params.h" 15 16 #include "intel_display_power.h" 16 17 #include "intel_display_reg_defs.h" 17 18 #include "intel_fbc.h" ··· 938 937 DISPLAY_RUNTIME_INFO(i915)->ip.rel = rel; 939 938 DISPLAY_RUNTIME_INFO(i915)->ip.step = step; 940 939 } 940 + 941 + intel_display_params_copy(&i915->display.params); 942 + } 943 + 944 + void intel_display_device_remove(struct drm_i915_private *i915) 945 + { 946 + intel_display_params_free(&i915->display.params); 941 947 } 942 948 943 949 static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915) ··· 1113 1105 } 1114 1106 1115 1107 /* Disable nuclear pageflip by default on pre-g4x */ 1116 - if (!i915->params.nuclear_pageflip && 1108 + if (!i915->display.params.nuclear_pageflip && 1117 1109 DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) 1118 1110 i915->drm.driver_features &= ~DRIVER_ATOMIC; 1119 1111 } ··· 1153 1145 /* Only valid when HAS_DISPLAY() is true */ 1154 1146 drm_WARN_ON(&i915->drm, !HAS_DISPLAY(i915)); 1155 1147 1156 - return !i915->params.disable_display && !intel_opregion_headless_sku(i915); 1148 + return !i915->display.params.disable_display && 1149 + !intel_opregion_headless_sku(i915); 1157 1150 }
+1
drivers/gpu/drm/i915/display/intel_display_device.h
··· 161 161 162 162 bool intel_display_device_enabled(struct drm_i915_private *i915); 163 163 void intel_display_device_probe(struct drm_i915_private *i915); 164 + void intel_display_device_remove(struct drm_i915_private *i915); 164 165 void intel_display_device_info_runtime_init(struct drm_i915_private *i915); 165 166 166 167 void intel_display_device_info_print(const struct intel_display_device_info *info,
+7
drivers/gpu/drm/i915/display/intel_display_driver.c
··· 181 181 if (!HAS_DISPLAY(i915)) 182 182 return; 183 183 184 + spin_lock_init(&i915->display.fb_tracking.lock); 185 + mutex_init(&i915->display.backlight.lock); 186 + mutex_init(&i915->display.audio.mutex); 187 + mutex_init(&i915->display.wm.wm_mutex); 188 + mutex_init(&i915->display.pps.mutex); 189 + mutex_init(&i915->display.hdcp.hdcp_mutex); 190 + 184 191 intel_display_irq_init(i915); 185 192 intel_dkl_phy_init(i915); 186 193 intel_color_init_hooks(i915);
+217
drivers/gpu/drm/i915/display/intel_display_params.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "intel_display_params.h" 7 + #include "i915_drv.h" 8 + 9 + #define intel_display_param_named(name, T, perm, desc) \ 10 + module_param_named(name, intel_display_modparams.name, T, perm); \ 11 + MODULE_PARM_DESC(name, desc) 12 + #define intel_display_param_named_unsafe(name, T, perm, desc) \ 13 + module_param_named_unsafe(name, intel_display_modparams.name, T, perm); \ 14 + MODULE_PARM_DESC(name, desc) 15 + 16 + static struct intel_display_params intel_display_modparams __read_mostly = { 17 + #define MEMBER(T, member, value, ...) .member = (value), 18 + INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER) 19 + #undef MEMBER 20 + }; 21 + /* 22 + * Note: As a rule, keep module parameter sysfs permissions read-only 23 + * 0400. Runtime changes are only supported through i915 debugfs. 24 + * 25 + * For any exceptions requiring write access and runtime changes through module 26 + * parameter sysfs, prevent debugfs file creation by setting the parameter's 27 + * debugfs mode to 0. 28 + */ 29 + 30 + intel_display_param_named_unsafe(vbt_firmware, charp, 0400, 31 + "Load VBT from specified file under /lib/firmware"); 32 + 33 + intel_display_param_named_unsafe(lvds_channel_mode, int, 0400, 34 + "Specify LVDS channel mode " 35 + "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); 36 + 37 + intel_display_param_named_unsafe(panel_use_ssc, int, 0400, 38 + "Use Spread Spectrum Clock with panels [LVDS/eDP] " 39 + "(default: auto from VBT)"); 40 + 41 + intel_display_param_named_unsafe(vbt_sdvo_panel_type, int, 0400, 42 + "Override/Ignore selection of SDVO panel mode in the VBT " 43 + "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); 44 + 45 + intel_display_param_named_unsafe(enable_dc, int, 0400, 46 + "Enable power-saving display C-states. 
" 47 + "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; " 48 + "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)"); 49 + 50 + intel_display_param_named_unsafe(enable_dpt, bool, 0400, 51 + "Enable display page table (DPT) (default: true)"); 52 + 53 + intel_display_param_named_unsafe(enable_sagv, bool, 0400, 54 + "Enable system agent voltage/frequency scaling (SAGV) (default: true)"); 55 + 56 + intel_display_param_named_unsafe(disable_power_well, int, 0400, 57 + "Disable display power wells when possible " 58 + "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); 59 + 60 + intel_display_param_named_unsafe(enable_ips, bool, 0400, "Enable IPS (default: true)"); 61 + 62 + intel_display_param_named_unsafe(invert_brightness, int, 0400, 63 + "Invert backlight brightness " 64 + "(-1 force normal, 0 machine defaults, 1 force inversion), please " 65 + "report PCI device ID, subsystem vendor and subsystem device ID " 66 + "to dri-devel@lists.freedesktop.org, if your machine needs it. " 67 + "It will then be included in an upcoming module version."); 68 + 69 + /* WA to get away with the default setting in VBT for early platforms.Will be removed */ 70 + intel_display_param_named_unsafe(edp_vswing, int, 0400, 71 + "Ignore/Override vswing pre-emph table selection from VBT " 72 + "(0=use value from vbt [default], 1=low power swing(200mV)," 73 + "2=default swing(400mV))"); 74 + 75 + intel_display_param_named(enable_dpcd_backlight, int, 0400, 76 + "Enable support for DPCD backlight control" 77 + "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)"); 78 + 79 + intel_display_param_named_unsafe(load_detect_test, bool, 0400, 80 + "Force-enable the VGA load detect code for testing (default:false). " 81 + "For developers only."); 82 + 83 + intel_display_param_named_unsafe(force_reset_modeset_test, bool, 0400, 84 + "Force a modeset during gpu reset for testing (default:false). 
" 85 + "For developers only."); 86 + 87 + intel_display_param_named(disable_display, bool, 0400, 88 + "Disable display (default: false)"); 89 + 90 + intel_display_param_named(verbose_state_checks, bool, 0400, 91 + "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); 92 + 93 + intel_display_param_named_unsafe(nuclear_pageflip, bool, 0400, 94 + "Force enable atomic functionality on platforms that don't have full support yet."); 95 + 96 + intel_display_param_named_unsafe(enable_dp_mst, bool, 0400, 97 + "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)"); 98 + 99 + intel_display_param_named_unsafe(enable_fbc, int, 0400, 100 + "Enable frame buffer compression for power savings " 101 + "(default: -1 (use per-chip default))"); 102 + 103 + intel_display_param_named_unsafe(enable_psr, int, 0400, 104 + "Enable PSR " 105 + "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) " 106 + "Default: -1 (use per-chip default)"); 107 + 108 + intel_display_param_named(psr_safest_params, bool, 0400, 109 + "Replace PSR VBT parameters by the safest and not optimal ones. This " 110 + "is helpful to detect if PSR issues are related to bad values set in " 111 + " VBT. 
(0=use VBT parameters, 1=use safest parameters)" 112 + "Default: 0"); 113 + 114 + intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400, 115 + "Enable PSR2 selective fetch " 116 + "(0=disabled, 1=enabled) " 117 + "Default: 1"); 118 + 119 + __maybe_unused 120 + static void _param_print_bool(struct drm_printer *p, const char *driver_name, 121 + const char *name, bool val) 122 + { 123 + drm_printf(p, "%s.%s=%s\n", driver_name, name, str_yes_no(val)); 124 + } 125 + 126 + __maybe_unused 127 + static void _param_print_int(struct drm_printer *p, const char *driver_name, 128 + const char *name, int val) 129 + { 130 + drm_printf(p, "%s.%s=%d\n", driver_name, name, val); 131 + } 132 + 133 + __maybe_unused 134 + static void _param_print_uint(struct drm_printer *p, const char *driver_name, 135 + const char *name, unsigned int val) 136 + { 137 + drm_printf(p, "%s.%s=%u\n", driver_name, name, val); 138 + } 139 + 140 + __maybe_unused 141 + static void _param_print_ulong(struct drm_printer *p, const char *driver_name, 142 + const char *name, unsigned long val) 143 + { 144 + drm_printf(p, "%s.%s=%lu\n", driver_name, name, val); 145 + } 146 + 147 + __maybe_unused 148 + static void _param_print_charp(struct drm_printer *p, const char *driver_name, 149 + const char *name, const char *val) 150 + { 151 + drm_printf(p, "%s.%s=%s\n", driver_name, name, val); 152 + } 153 + 154 + #define _param_print(p, driver_name, name, val) \ 155 + _Generic(val, \ 156 + bool : _param_print_bool, \ 157 + int : _param_print_int, \ 158 + unsigned int : _param_print_uint, \ 159 + unsigned long : _param_print_ulong, \ 160 + char * : _param_print_charp)(p, driver_name, name, val) 161 + 162 + /** 163 + * intel_display_params_dump - dump intel display modparams 164 + * @i915: i915 device 165 + * @p: the &drm_printer 166 + * 167 + * Pretty printer for i915 modparams. 
168 + */ 169 + void intel_display_params_dump(struct drm_i915_private *i915, struct drm_printer *p) 170 + { 171 + #define PRINT(T, x, ...) _param_print(p, i915->drm.driver->name, #x, i915->display.params.x); 172 + INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT); 173 + #undef PRINT 174 + } 175 + 176 + __maybe_unused static void _param_dup_charp(char **valp) 177 + { 178 + *valp = kstrdup(*valp ? *valp : "", GFP_ATOMIC); 179 + } 180 + 181 + __maybe_unused static void _param_nop(void *valp) 182 + { 183 + } 184 + 185 + #define _param_dup(valp) \ 186 + _Generic(valp, \ 187 + char ** : _param_dup_charp, \ 188 + default : _param_nop) \ 189 + (valp) 190 + 191 + void intel_display_params_copy(struct intel_display_params *dest) 192 + { 193 + *dest = intel_display_modparams; 194 + #define DUP(T, x, ...) _param_dup(&dest->x); 195 + INTEL_DISPLAY_PARAMS_FOR_EACH(DUP); 196 + #undef DUP 197 + } 198 + 199 + __maybe_unused static void _param_free_charp(char **valp) 200 + { 201 + kfree(*valp); 202 + *valp = NULL; 203 + } 204 + 205 + #define _param_free(valp) \ 206 + _Generic(valp, \ 207 + char ** : _param_free_charp, \ 208 + default : _param_nop) \ 209 + (valp) 210 + 211 + /* free the allocated members, *not* the passed in params itself */ 212 + void intel_display_params_free(struct intel_display_params *params) 213 + { 214 + #define FREE(T, x, ...) _param_free(&params->x); 215 + INTEL_DISPLAY_PARAMS_FOR_EACH(FREE); 216 + #undef FREE 217 + }
+61
drivers/gpu/drm/i915/display/intel_display_params.h
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef _INTEL_DISPLAY_PARAMS_H_ 7 + #define _INTEL_DISPLAY_PARAMS_H_ 8 + 9 + #include <linux/types.h> 10 + 11 + struct drm_printer; 12 + struct drm_i915_private; 13 + 14 + /* 15 + * Invoke param, a function-like macro, for each intel display param, with 16 + * arguments: 17 + * 18 + * param(type, name, value, mode) 19 + * 20 + * type: parameter type, one of {bool, int, unsigned int, unsigned long, char *} 21 + * name: name of the parameter 22 + * value: initial/default value of the parameter 23 + * mode: debugfs file permissions, one of {0400, 0600, 0}, use 0 to not create 24 + * debugfs file 25 + */ 26 + #define INTEL_DISPLAY_PARAMS_FOR_EACH(param) \ 27 + param(char *, vbt_firmware, NULL, 0400) \ 28 + param(int, lvds_channel_mode, 0, 0400) \ 29 + param(int, panel_use_ssc, -1, 0600) \ 30 + param(int, vbt_sdvo_panel_type, -1, 0400) \ 31 + param(int, enable_dc, -1, 0400) \ 32 + param(bool, enable_dpt, true, 0400) \ 33 + param(bool, enable_sagv, true, 0600) \ 34 + param(int, disable_power_well, -1, 0400) \ 35 + param(bool, enable_ips, true, 0600) \ 36 + param(int, invert_brightness, 0, 0600) \ 37 + param(int, edp_vswing, 0, 0400) \ 38 + param(int, enable_dpcd_backlight, -1, 0600) \ 39 + param(bool, load_detect_test, false, 0600) \ 40 + param(bool, force_reset_modeset_test, false, 0600) \ 41 + param(bool, disable_display, false, 0400) \ 42 + param(bool, verbose_state_checks, true, 0400) \ 43 + param(bool, nuclear_pageflip, false, 0400) \ 44 + param(bool, enable_dp_mst, true, 0600) \ 45 + param(int, enable_fbc, -1, 0600) \ 46 + param(int, enable_psr, -1, 0600) \ 47 + param(bool, psr_safest_params, false, 0400) \ 48 + param(bool, enable_psr2_sel_fetch, true, 0400) \ 49 + 50 + #define MEMBER(T, member, ...) 
T member; 51 + struct intel_display_params { 52 + INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER); 53 + }; 54 + #undef MEMBER 55 + 56 + void intel_display_params_dump(struct drm_i915_private *i915, 57 + struct drm_printer *p); 58 + void intel_display_params_copy(struct intel_display_params *dest); 59 + void intel_display_params_free(struct intel_display_params *params); 60 + 61 + #endif
+7 -7
drivers/gpu/drm/i915/display/intel_display_power.c
··· 967 967 DISPLAY_VER(dev_priv) >= 11 ? 968 968 DC_STATE_EN_DC9 : 0; 969 969 970 - if (!dev_priv->params.disable_power_well) 970 + if (!dev_priv->display.params.disable_power_well) 971 971 max_dc = 0; 972 972 973 973 if (enable_dc >= 0 && enable_dc <= max_dc) { ··· 1016 1016 { 1017 1017 struct i915_power_domains *power_domains = &dev_priv->display.power.domains; 1018 1018 1019 - dev_priv->params.disable_power_well = 1019 + dev_priv->display.params.disable_power_well = 1020 1020 sanitize_disable_power_well_option(dev_priv, 1021 - dev_priv->params.disable_power_well); 1021 + dev_priv->display.params.disable_power_well); 1022 1022 power_domains->allowed_dc_mask = 1023 - get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); 1023 + get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc); 1024 1024 1025 1025 power_domains->target_dc_state = 1026 1026 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); ··· 1950 1950 intel_display_power_get(i915, POWER_DOMAIN_INIT); 1951 1951 1952 1952 /* Disable power support if the user asked so. */ 1953 - if (!i915->params.disable_power_well) { 1953 + if (!i915->display.params.disable_power_well) { 1954 1954 drm_WARN_ON(&i915->drm, power_domains->disable_wakeref); 1955 1955 i915->display.power.domains.disable_wakeref = intel_display_power_get(i915, 1956 1956 POWER_DOMAIN_INIT); ··· 1977 1977 fetch_and_zero(&i915->display.power.domains.init_wakeref); 1978 1978 1979 1979 /* Remove the refcount we took to keep power well support disabled. */ 1980 - if (!i915->params.disable_power_well) 1980 + if (!i915->display.params.disable_power_well) 1981 1981 intel_display_power_put(i915, POWER_DOMAIN_INIT, 1982 1982 fetch_and_zero(&i915->display.power.domains.disable_wakeref)); 1983 1983 ··· 2096 2096 * Even if power well support was disabled we still want to disable 2097 2097 * power wells if power domains must be deinitialized for suspend. 
2098 2098 */ 2099 - if (!i915->params.disable_power_well) 2099 + if (!i915->display.params.disable_power_well) 2100 2100 intel_display_power_put(i915, POWER_DOMAIN_INIT, 2101 2101 fetch_and_zero(&i915->display.power.domains.disable_wakeref)); 2102 2102
+9 -14
drivers/gpu/drm/i915/display/intel_display_power_well.c
··· 1400 1400 { 1401 1401 enum i915_power_well_id id = i915_power_well_instance(power_well)->id; 1402 1402 enum dpio_phy phy; 1403 - enum pipe pipe; 1404 1403 u32 tmp; 1405 1404 1406 1405 drm_WARN_ON_ONCE(&dev_priv->drm, 1407 1406 id != VLV_DISP_PW_DPIO_CMN_BC && 1408 1407 id != CHV_DISP_PW_DPIO_CMN_D); 1409 1408 1410 - if (id == VLV_DISP_PW_DPIO_CMN_BC) { 1411 - pipe = PIPE_A; 1409 + if (id == VLV_DISP_PW_DPIO_CMN_BC) 1412 1410 phy = DPIO_PHY0; 1413 - } else { 1414 - pipe = PIPE_C; 1411 + else 1415 1412 phy = DPIO_PHY1; 1416 - } 1417 1413 1418 1414 /* since ref/cri clock was enabled */ 1419 1415 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */ ··· 1424 1428 vlv_dpio_get(dev_priv); 1425 1429 1426 1430 /* Enable dynamic power down */ 1427 - tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28); 1431 + tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28); 1428 1432 tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN | 1429 1433 DPIO_SUS_CLK_CONFIG_GATE_CLKREQ; 1430 - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp); 1434 + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp); 1431 1435 1432 1436 if (id == VLV_DISP_PW_DPIO_CMN_BC) { 1433 - tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1); 1437 + tmp = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW6_CH1); 1434 1438 tmp |= DPIO_DYNPWRDOWNEN_CH1; 1435 - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp); 1439 + vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW6_CH1, tmp); 1436 1440 } else { 1437 1441 /* 1438 1442 * Force the non-existing CL2 off. BXT does this 1439 1443 * too, so maybe it saves some power even though 1440 1444 * CL2 doesn't exist? 
1441 1445 */ 1442 - tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); 1446 + tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30); 1443 1447 tmp |= DPIO_CL2_LDOFUSE_PWRENB; 1444 - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp); 1448 + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp); 1445 1449 } 1446 1450 1447 1451 vlv_dpio_put(dev_priv); ··· 1495 1499 static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1496 1500 enum dpio_channel ch, bool override, unsigned int mask) 1497 1501 { 1498 - enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; 1499 1502 u32 reg, val, expected, actual; 1500 1503 1501 1504 /* ··· 1513 1518 reg = _CHV_CMN_DW6_CH1; 1514 1519 1515 1520 vlv_dpio_get(dev_priv); 1516 - val = vlv_dpio_read(dev_priv, pipe, reg); 1521 + val = vlv_dpio_read(dev_priv, phy, reg); 1517 1522 vlv_dpio_put(dev_priv); 1518 1523 1519 1524 /*
+1 -1
drivers/gpu/drm/i915/display/intel_display_reset.c
··· 29 29 return; 30 30 31 31 /* reset doesn't touch the display */ 32 - if (!dev_priv->params.force_reset_modeset_test && 32 + if (!dev_priv->display.params.force_reset_modeset_test && 33 33 !gpu_reset_clobbers_display(dev_priv)) 34 34 return; 35 35
+17 -14
drivers/gpu/drm/i915/display/intel_display_types.h
··· 198 198 struct intel_encoder *, 199 199 const struct intel_crtc_state *, 200 200 const struct drm_connector_state *); 201 + void (*audio_enable)(struct intel_encoder *encoder, 202 + const struct intel_crtc_state *crtc_state, 203 + const struct drm_connector_state *conn_state); 204 + void (*audio_disable)(struct intel_encoder *encoder, 205 + const struct intel_crtc_state *old_crtc_state, 206 + const struct drm_connector_state *old_conn_state); 201 207 /* Read out the current hw state of this connector, returning true if 202 208 * the encoder is active. If the encoder is enabled it also set the pipe 203 209 * it is connected to in the pipe parameter. */ ··· 630 624 struct drm_dp_aux *dsc_decompression_aux; 631 625 u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]; 632 626 u8 fec_capability; 627 + 628 + u8 dsc_hblank_expansion_quirk:1; 629 + u8 dsc_decompression_enabled:1; 633 630 } dp; 634 631 635 632 /* Work struct to schedule a uevent on link train failure */ ··· 684 675 bool skip_intermediate_wm; 685 676 686 677 bool rps_interactive; 687 - 688 - struct i915_sw_fence commit_ready; 689 678 690 679 struct llist_node freed; 691 680 }; ··· 1217 1210 bool has_psr2; 1218 1211 bool enable_psr2_sel_fetch; 1219 1212 bool req_psr2_sdp_prior_scanline; 1213 + bool has_panel_replay; 1220 1214 bool wm_level_disabled; 1221 1215 u32 dc3co_exitline; 1222 1216 u16 su_y_granularity; ··· 1369 1361 struct { 1370 1362 bool compression_enable; 1371 1363 bool dsc_split; 1372 - u16 compressed_bpp; 1364 + /* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */ 1365 + u16 compressed_bpp_x16; 1373 1366 u8 slice_count; 1374 1367 struct drm_dsc_config config; 1375 1368 } dsc; ··· 1716 1707 bool irq_aux_error; 1717 1708 u16 su_w_granularity; 1718 1709 u16 su_y_granularity; 1710 + bool source_panel_replay_support; 1711 + bool sink_panel_replay_support; 1712 + bool panel_replay_enabled; 1719 1713 u32 dc3co_exitline; 1720 1714 u32 dc3co_exit_delay; 1721 1715 struct delayed_work dc3co_work; 
1716 + u8 entry_setup_frames; 1722 1717 }; 1723 1718 1724 1719 struct intel_dp { ··· 1821 1808 /* Display stream compression testing */ 1822 1809 bool force_dsc_en; 1823 1810 int force_dsc_output_format; 1811 + bool force_dsc_fractional_bpp_en; 1824 1812 int force_dsc_bpc; 1825 1813 1826 1814 bool hobl_failed; ··· 2005 1991 } 2006 1992 2007 1993 #define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev) 2008 - 2009 - #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \ 2010 - (intel_dp)->psr.source_support) 2011 - 2012 - static inline bool intel_encoder_can_psr(struct intel_encoder *encoder) 2013 - { 2014 - if (!intel_encoder_is_dp(encoder)) 2015 - return false; 2016 - 2017 - return CAN_PSR(enc_to_intel_dp(encoder)); 2018 - } 2019 1994 2020 1995 static inline struct intel_digital_port * 2021 1996 hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+392 -110
drivers/gpu/drm/i915/display/intel_dp.c
··· 85 85 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 86 86 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 87 87 88 - /* DP DSC FEC Overhead factor = 1/(0.972261) */ 89 - #define DP_DSC_FEC_OVERHEAD_FACTOR 972261 88 + /* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */ 89 + #define DP_DSC_FEC_OVERHEAD_FACTOR 1028530 90 90 91 91 /* Compliance test status bits */ 92 92 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 ··· 124 124 /* Is link rate UHBR and thus 128b/132b? */ 125 125 bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state) 126 126 { 127 - return crtc_state->port_clock >= 1000000; 127 + return drm_dp_is_uhbr_rate(crtc_state->port_clock); 128 + } 129 + 130 + /** 131 + * intel_dp_link_symbol_size - get the link symbol size for a given link rate 132 + * @rate: link rate in 10kbit/s units 133 + * 134 + * Returns the link symbol size in bits/symbol units depending on the link 135 + * rate -> channel coding. 136 + */ 137 + int intel_dp_link_symbol_size(int rate) 138 + { 139 + return drm_dp_is_uhbr_rate(rate) ? 32 : 10; 140 + } 141 + 142 + /** 143 + * intel_dp_link_symbol_clock - convert link rate to link symbol clock 144 + * @rate: link rate in 10kbit/s units 145 + * 146 + * Returns the link symbol clock frequency in kHz units depending on the 147 + * link rate and channel coding. 148 + */ 149 + int intel_dp_link_symbol_clock(int rate) 150 + { 151 + return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate)); 128 152 } 129 153 130 154 static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) ··· 355 331 /* 356 332 * The required data bandwidth for a mode with given pixel clock and bpp. This 357 333 * is the required net bandwidth independent of the data bandwidth efficiency. 334 + * 335 + * TODO: check if callers of this functions should use 336 + * intel_dp_effective_data_rate() instead. 
358 337 */ 359 338 int 360 339 intel_dp_link_required(int pixel_clock, int bpp) 361 340 { 362 341 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ 363 342 return DIV_ROUND_UP(pixel_clock * bpp, 8); 343 + } 344 + 345 + /** 346 + * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead 347 + * @pixel_clock: pixel clock in kHz 348 + * @bpp_x16: bits per pixel .4 fixed point format 349 + * @bw_overhead: BW allocation overhead in 1ppm units 350 + * 351 + * Return the effective pixel data rate in kB/sec units taking into account 352 + * the provided SSC, FEC, DSC BW allocation overhead. 353 + */ 354 + int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, 355 + int bw_overhead) 356 + { 357 + return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead), 358 + 1000000 * 16 * 8); 364 359 } 365 360 366 361 /* ··· 405 362 int 406 363 intel_dp_max_data_rate(int max_link_rate, int max_lanes) 407 364 { 408 - if (max_link_rate >= 1000000) { 409 - /* 410 - * UHBR rates always use 128b/132b channel encoding, and have 411 - * 97.71% data bandwidth efficiency. Consider max_link_rate the 412 - * link bit rate in units of 10000 bps. 413 - */ 414 - int max_link_rate_kbps = max_link_rate * 10; 365 + int ch_coding_efficiency = 366 + drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate)); 367 + int max_link_rate_kbps = max_link_rate * 10; 415 368 416 - max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000); 417 - max_link_rate = max_link_rate_kbps / 8; 418 - } 419 - 369 + /* 370 + * UHBR rates always use 128b/132b channel encoding, and have 371 + * 97.71% data bandwidth efficiency. Consider max_link_rate the 372 + * link bit rate in units of 10000 bps. 373 + */ 420 374 /* 421 375 * Lower than UHBR rates always use 8b/10b channel encoding, and have 422 376 * 80% data bandwidth efficiency for SST non-FEC. 
However, this turns 423 - * out to be a nop by coincidence, and can be skipped: 377 + * out to be a nop by coincidence: 424 378 * 425 379 * int max_link_rate_kbps = max_link_rate * 10; 426 - * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10); 380 + * max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10); 427 381 * max_link_rate = max_link_rate_kbps / 8; 428 382 */ 429 - 430 - return max_link_rate * max_lanes; 383 + return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes, 384 + ch_coding_efficiency), 385 + 1000000 * 8); 431 386 } 432 387 433 388 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) ··· 721 680 722 681 u32 intel_dp_mode_to_fec_clock(u32 mode_clock) 723 682 { 724 - return div_u64(mul_u32_u32(mode_clock, 1000000U), 725 - DP_DSC_FEC_OVERHEAD_FACTOR); 683 + return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR), 684 + 1000000U); 685 + } 686 + 687 + int intel_dp_bw_fec_overhead(bool fec_enabled) 688 + { 689 + /* 690 + * TODO: Calculate the actual overhead for a given mode. 691 + * The hard-coded 1/0.972261=2.853% overhead factor 692 + * corresponds (for instance) to the 8b/10b DP FEC 2.4% + 693 + * 0.453% DSC overhead. This is enough for a 3840 width mode, 694 + * which has a DSC overhead of up to ~0.2%, but may not be 695 + * enough for a 1024 width mode where this is ~0.8% (on a 4 696 + * lane DP link, with 2 DSC slices and 8 bpp color depth). 697 + */ 698 + return fec_enabled ? 
DP_DSC_FEC_OVERHEAD_FACTOR : 1000000; 726 699 } 727 700 728 701 static int ··· 1424 1369 return false; 1425 1370 } 1426 1371 1427 - static bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1428 - const struct intel_connector *connector, 1429 - const struct intel_crtc_state *pipe_config) 1372 + bool intel_dp_supports_fec(struct intel_dp *intel_dp, 1373 + const struct intel_connector *connector, 1374 + const struct intel_crtc_state *pipe_config) 1430 1375 { 1431 1376 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 1432 1377 drm_dp_sink_supports_fec(connector->dp.fec_capability); ··· 1439 1384 return false; 1440 1385 1441 1386 return intel_dsc_source_support(crtc_state) && 1387 + connector->dp.dsc_decompression_aux && 1442 1388 drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd); 1443 1389 } 1444 1390 ··· 1773 1717 return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format); 1774 1718 } 1775 1719 1776 - static bool is_bw_sufficient_for_dsc_config(u16 compressed_bpp, u32 link_clock, 1720 + static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock, 1777 1721 u32 lane_count, u32 mode_clock, 1778 1722 enum intel_output_format output_format, 1779 1723 int timeslots) 1780 1724 { 1781 1725 u32 available_bw, required_bw; 1782 1726 1783 - available_bw = (link_clock * lane_count * timeslots) / 8; 1784 - required_bw = compressed_bpp * (intel_dp_mode_to_fec_clock(mode_clock)); 1727 + available_bw = (link_clock * lane_count * timeslots * 16) / 8; 1728 + required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock)); 1785 1729 1786 1730 return available_bw > required_bw; 1787 1731 } ··· 1789 1733 static int dsc_compute_link_config(struct intel_dp *intel_dp, 1790 1734 struct intel_crtc_state *pipe_config, 1791 1735 struct link_config_limits *limits, 1792 - u16 compressed_bpp, 1736 + u16 compressed_bppx16, 1793 1737 int timeslots) 1794 1738 { 1795 1739 const struct drm_display_mode *adjusted_mode = 
&pipe_config->hw.adjusted_mode; ··· 1804 1748 for (lane_count = limits->min_lane_count; 1805 1749 lane_count <= limits->max_lane_count; 1806 1750 lane_count <<= 1) { 1807 - if (!is_bw_sufficient_for_dsc_config(compressed_bpp, link_rate, lane_count, 1808 - adjusted_mode->clock, 1751 + if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate, 1752 + lane_count, adjusted_mode->clock, 1809 1753 pipe_config->output_format, 1810 1754 timeslots)) 1811 1755 continue; ··· 1847 1791 return 0; 1848 1792 } 1849 1793 1850 - static int dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config) 1794 + int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config) 1851 1795 { 1852 1796 /* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */ 1853 1797 switch (pipe_config->output_format) { ··· 1864 1808 return 0; 1865 1809 } 1866 1810 1867 - static int dsc_sink_max_compressed_bpp(const struct intel_connector *connector, 1868 - struct intel_crtc_state *pipe_config, 1869 - int bpc) 1811 + int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector, 1812 + struct intel_crtc_state *pipe_config, 1813 + int bpc) 1870 1814 { 1871 1815 return intel_dp_dsc_max_sink_compressed_bppx16(connector, 1872 1816 pipe_config, bpc) >> 4; ··· 1918 1862 ret = dsc_compute_link_config(intel_dp, 1919 1863 pipe_config, 1920 1864 limits, 1921 - valid_dsc_bpp[i], 1865 + valid_dsc_bpp[i] << 4, 1922 1866 timeslots); 1923 1867 if (ret == 0) { 1924 - pipe_config->dsc.compressed_bpp = valid_dsc_bpp[i]; 1868 + pipe_config->dsc.compressed_bpp_x16 = 1869 + to_bpp_x16(valid_dsc_bpp[i]); 1925 1870 return 0; 1926 1871 } 1927 1872 } ··· 1938 1881 */ 1939 1882 static int 1940 1883 xelpd_dsc_compute_link_config(struct intel_dp *intel_dp, 1884 + const struct intel_connector *connector, 1941 1885 struct intel_crtc_state *pipe_config, 1942 1886 struct link_config_limits *limits, 1943 1887 int dsc_max_bpp, ··· 1946 1888 int pipe_bpp, 1947 1889 int timeslots) 1948 
1890 { 1949 - u16 compressed_bpp; 1891 + u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd); 1892 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1893 + u16 compressed_bppx16; 1894 + u8 bppx16_step; 1950 1895 int ret; 1951 1896 1952 - /* Compressed BPP should be less than the Input DSC bpp */ 1953 - dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1); 1897 + if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1) 1898 + bppx16_step = 16; 1899 + else 1900 + bppx16_step = 16 / bppx16_incr; 1954 1901 1955 - for (compressed_bpp = dsc_max_bpp; 1956 - compressed_bpp >= dsc_min_bpp; 1957 - compressed_bpp--) { 1902 + /* Compressed BPP should be less than the Input DSC bpp */ 1903 + dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step); 1904 + dsc_min_bpp = dsc_min_bpp << 4; 1905 + 1906 + for (compressed_bppx16 = dsc_max_bpp; 1907 + compressed_bppx16 >= dsc_min_bpp; 1908 + compressed_bppx16 -= bppx16_step) { 1909 + if (intel_dp->force_dsc_fractional_bpp_en && 1910 + !to_bpp_frac(compressed_bppx16)) 1911 + continue; 1958 1912 ret = dsc_compute_link_config(intel_dp, 1959 1913 pipe_config, 1960 1914 limits, 1961 - compressed_bpp, 1915 + compressed_bppx16, 1962 1916 timeslots); 1963 1917 if (ret == 0) { 1964 - pipe_config->dsc.compressed_bpp = compressed_bpp; 1918 + pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16; 1919 + if (intel_dp->force_dsc_fractional_bpp_en && 1920 + to_bpp_frac(compressed_bppx16)) 1921 + drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n"); 1922 + 1965 1923 return 0; 1966 1924 } 1967 1925 } ··· 1998 1924 int dsc_joiner_max_bpp; 1999 1925 2000 1926 dsc_src_min_bpp = dsc_src_min_compressed_bpp(); 2001 - dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config); 1927 + dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config); 2002 1928 dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp); 2003 1929 dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16)); 2004 1930 2005 1931 dsc_src_max_bpp = 
dsc_src_max_compressed_bpp(intel_dp); 2006 - dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3); 1932 + dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector, 1933 + pipe_config, 1934 + pipe_bpp / 3); 2007 1935 dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp; 2008 1936 2009 1937 dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock, ··· 2015 1939 dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16)); 2016 1940 2017 1941 if (DISPLAY_VER(i915) >= 13) 2018 - return xelpd_dsc_compute_link_config(intel_dp, pipe_config, limits, 1942 + return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits, 2019 1943 dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots); 2020 1944 return icl_dsc_compute_link_config(intel_dp, pipe_config, limits, 2021 1945 dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots); ··· 2160 2084 pipe_config->lane_count = limits->max_lane_count; 2161 2085 2162 2086 dsc_src_min_bpp = dsc_src_min_compressed_bpp(); 2163 - dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config); 2087 + dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config); 2164 2088 dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp); 2165 2089 dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16)); 2166 2090 2167 2091 dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp); 2168 - dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3); 2092 + dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector, 2093 + pipe_config, 2094 + pipe_bpp / 3); 2169 2095 dsc_max_bpp = dsc_sink_max_bpp ? 
min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp; 2170 2096 dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16)); 2171 2097 2172 2098 /* Compressed BPP should be less than the Input DSC bpp */ 2173 2099 dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1); 2174 2100 2175 - pipe_config->dsc.compressed_bpp = max(dsc_min_bpp, dsc_max_bpp); 2101 + pipe_config->dsc.compressed_bpp_x16 = 2102 + to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp)); 2176 2103 2177 2104 pipe_config->pipe_bpp = pipe_bpp; 2178 2105 ··· 2197 2118 &pipe_config->hw.adjusted_mode; 2198 2119 int ret; 2199 2120 2200 - pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 2201 - intel_dp_supports_fec(intel_dp, connector, pipe_config); 2121 + pipe_config->fec_enable = pipe_config->fec_enable || 2122 + (!intel_dp_is_edp(intel_dp) && 2123 + intel_dp_supports_fec(intel_dp, connector, pipe_config)); 2202 2124 2203 2125 if (!intel_dp_supports_dsc(connector, pipe_config)) 2204 2126 return -EINVAL; ··· 2264 2184 ret = intel_dp_dsc_compute_params(connector, pipe_config); 2265 2185 if (ret < 0) { 2266 2186 drm_dbg_kms(&dev_priv->drm, 2267 - "Cannot compute valid DSC parameters for Input Bpp = %d " 2268 - "Compressed BPP = %d\n", 2187 + "Cannot compute valid DSC parameters for Input Bpp = %d" 2188 + "Compressed BPP = " BPP_X16_FMT "\n", 2269 2189 pipe_config->pipe_bpp, 2270 - pipe_config->dsc.compressed_bpp); 2190 + BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16)); 2271 2191 return ret; 2272 2192 } 2273 2193 2274 2194 pipe_config->dsc.compression_enable = true; 2275 2195 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 2276 - "Compressed Bpp = %d Slice Count = %d\n", 2196 + "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n", 2277 2197 pipe_config->pipe_bpp, 2278 - pipe_config->dsc.compressed_bpp, 2198 + BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16), 2279 2199 pipe_config->dsc.slice_count); 2280 2200 2281 2201 return 0; ··· 2387 2307 { 2388 2308 struct drm_i915_private *i915 = 
to_i915(encoder->base.dev); 2389 2309 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 2310 + const struct intel_connector *connector = 2311 + to_intel_connector(conn_state->connector); 2390 2312 const struct drm_display_mode *adjusted_mode = 2391 2313 &pipe_config->hw.adjusted_mode; 2392 2314 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); ··· 2396 2314 bool joiner_needs_dsc = false; 2397 2315 bool dsc_needed; 2398 2316 int ret = 0; 2317 + 2318 + if (pipe_config->fec_enable && 2319 + !intel_dp_supports_fec(intel_dp, connector, pipe_config)) 2320 + return -EINVAL; 2399 2321 2400 2322 if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay, 2401 2323 adjusted_mode->crtc_clock)) ··· 2448 2362 2449 2363 if (pipe_config->dsc.compression_enable) { 2450 2364 drm_dbg_kms(&i915->drm, 2451 - "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 2365 + "DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n", 2452 2366 pipe_config->lane_count, pipe_config->port_clock, 2453 2367 pipe_config->pipe_bpp, 2454 - pipe_config->dsc.compressed_bpp); 2368 + BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16)); 2455 2369 2456 2370 drm_dbg_kms(&i915->drm, 2457 2371 "DP link rate required %i available %i\n", 2458 2372 intel_dp_link_required(adjusted_mode->crtc_clock, 2459 - pipe_config->dsc.compressed_bpp), 2373 + to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)), 2460 2374 intel_dp_max_data_rate(pipe_config->port_clock, 2461 2375 pipe_config->lane_count)); 2462 2376 } else { ··· 2525 2439 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2526 2440 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2527 2441 2528 - /* 2529 - * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2530 - * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 2531 - * Colorimetry Format indication. 
2532 - */ 2533 - vsc->revision = 0x5; 2442 + if (crtc_state->has_panel_replay) { 2443 + /* 2444 + * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223 2445 + * VSC SDP supporting 3D stereo, Panel Replay, and Pixel 2446 + * Encoding/Colorimetry Format indication. 2447 + */ 2448 + vsc->revision = 0x7; 2449 + } else { 2450 + /* 2451 + * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 2452 + * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 2453 + * Colorimetry Format indication. 2454 + */ 2455 + vsc->revision = 0x5; 2456 + } 2457 + 2534 2458 vsc->length = 0x13; 2535 2459 2536 2460 /* DP 1.4a spec, Table 2-120 */ ··· 2649 2553 vsc->revision = 0x4; 2650 2554 vsc->length = 0xe; 2651 2555 } 2556 + } else if (crtc_state->has_panel_replay) { 2557 + if (intel_dp->psr.colorimetry_support && 2558 + intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 2559 + /* [Panel Replay with colorimetry info] */ 2560 + intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 2561 + vsc); 2562 + } else { 2563 + /* 2564 + * [Panel Replay without colorimetry info] 2565 + * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223 2566 + * VSC SDP supporting 3D stereo + Panel Replay. 
2567 + */ 2568 + vsc->revision = 0x6; 2569 + vsc->length = 0x10; 2570 + } 2652 2571 } else { 2653 2572 /* 2654 2573 * [PSR1] ··· 2740 2629 static void 2741 2630 intel_dp_drrs_compute_config(struct intel_connector *connector, 2742 2631 struct intel_crtc_state *pipe_config, 2743 - int link_bpp) 2632 + int link_bpp_x16) 2744 2633 { 2745 2634 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2746 2635 const struct drm_display_mode *downclock_mode = ··· 2765 2654 if (pipe_config->splitter.enable) 2766 2655 pixel_clock /= pipe_config->splitter.link_count; 2767 2656 2768 - intel_link_compute_m_n(link_bpp, pipe_config->lane_count, pixel_clock, 2769 - pipe_config->port_clock, &pipe_config->dp_m2_n2, 2770 - pipe_config->fec_enable); 2657 + intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock, 2658 + pipe_config->port_clock, 2659 + intel_dp_bw_fec_overhead(pipe_config->fec_enable), 2660 + &pipe_config->dp_m2_n2); 2771 2661 2772 2662 /* FIXME: abstract this better */ 2773 2663 if (pipe_config->splitter.enable) ··· 2869 2757 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2870 2758 const struct drm_display_mode *fixed_mode; 2871 2759 struct intel_connector *connector = intel_dp->attached_connector; 2872 - int ret = 0, link_bpp; 2760 + int ret = 0, link_bpp_x16; 2873 2761 2874 2762 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A) 2875 2763 pipe_config->has_pch_encoder = true; ··· 2918 2806 drm_dp_enhanced_frame_cap(intel_dp->dpcd); 2919 2807 2920 2808 if (pipe_config->dsc.compression_enable) 2921 - link_bpp = pipe_config->dsc.compressed_bpp; 2809 + link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16; 2922 2810 else 2923 - link_bpp = intel_dp_output_bpp(pipe_config->output_format, 2924 - pipe_config->pipe_bpp); 2811 + link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format, 2812 + pipe_config->pipe_bpp)); 2925 2813 2926 2814 if (intel_dp->mso_link_count) { 2927 2815 int n = 
intel_dp->mso_link_count; ··· 2945 2833 2946 2834 intel_dp_audio_compute_config(encoder, pipe_config, conn_state); 2947 2835 2948 - intel_link_compute_m_n(link_bpp, 2836 + intel_link_compute_m_n(link_bpp_x16, 2949 2837 pipe_config->lane_count, 2950 2838 adjusted_mode->crtc_clock, 2951 2839 pipe_config->port_clock, 2952 - &pipe_config->dp_m_n, 2953 - pipe_config->fec_enable); 2840 + intel_dp_bw_fec_overhead(pipe_config->fec_enable), 2841 + &pipe_config->dp_m_n); 2954 2842 2955 2843 /* FIXME: abstract this better */ 2956 2844 if (pipe_config->splitter.enable) ··· 2961 2849 2962 2850 intel_vrr_compute_config(pipe_config, conn_state); 2963 2851 intel_psr_compute_config(intel_dp, pipe_config, conn_state); 2964 - intel_dp_drrs_compute_config(connector, pipe_config, link_bpp); 2852 + intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16); 2965 2853 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 2966 2854 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 2967 2855 ··· 3029 2917 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 3030 2918 } 3031 2919 3032 - void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 3033 - const struct intel_crtc_state *crtc_state, 3034 - bool enable) 2920 + static int 2921 + write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set) 3035 2922 { 3036 - struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3037 - int ret; 2923 + int err; 2924 + u8 val; 3038 2925 3039 - if (!crtc_state->dsc.compression_enable) 3040 - return; 2926 + err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val); 2927 + if (err < 0) 2928 + return err; 3041 2929 3042 - ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 3043 - enable ? 
DP_DECOMPRESSION_EN : 0); 3044 - if (ret < 0) 2930 + if (set) 2931 + val |= flag; 2932 + else 2933 + val &= ~flag; 2934 + 2935 + return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val); 2936 + } 2937 + 2938 + static void 2939 + intel_dp_sink_set_dsc_decompression(struct intel_connector *connector, 2940 + bool enable) 2941 + { 2942 + struct drm_i915_private *i915 = to_i915(connector->base.dev); 2943 + 2944 + if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux, 2945 + DP_DECOMPRESSION_EN, enable) < 0) 3045 2946 drm_dbg_kms(&i915->drm, 3046 2947 "Failed to %s sink decompression state\n", 3047 2948 str_enable_disable(enable)); 2949 + } 2950 + 2951 + static void 2952 + intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector, 2953 + bool enable) 2954 + { 2955 + struct drm_i915_private *i915 = to_i915(connector->base.dev); 2956 + struct drm_dp_aux *aux = connector->port ? 2957 + connector->port->passthrough_aux : NULL; 2958 + 2959 + if (!aux) 2960 + return; 2961 + 2962 + if (write_dsc_decompression_flag(aux, 2963 + DP_DSC_PASSTHROUGH_EN, enable) < 0) 2964 + drm_dbg_kms(&i915->drm, 2965 + "Failed to %s sink compression passthrough state\n", 2966 + str_enable_disable(enable)); 2967 + } 2968 + 2969 + static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state, 2970 + const struct intel_connector *connector, 2971 + bool for_get_ref) 2972 + { 2973 + struct drm_i915_private *i915 = to_i915(state->base.dev); 2974 + struct drm_connector *_connector_iter; 2975 + struct drm_connector_state *old_conn_state; 2976 + struct drm_connector_state *new_conn_state; 2977 + int ref_count = 0; 2978 + int i; 2979 + 2980 + /* 2981 + * On SST the decompression AUX device won't be shared, each connector 2982 + * uses for this its own AUX targeting the sink device. 2983 + */ 2984 + if (!connector->mst_port) 2985 + return connector->dp.dsc_decompression_enabled ? 
1 : 0; 2986 + 2987 + for_each_oldnew_connector_in_state(&state->base, _connector_iter, 2988 + old_conn_state, new_conn_state, i) { 2989 + const struct intel_connector * 2990 + connector_iter = to_intel_connector(_connector_iter); 2991 + 2992 + if (connector_iter->mst_port != connector->mst_port) 2993 + continue; 2994 + 2995 + if (!connector_iter->dp.dsc_decompression_enabled) 2996 + continue; 2997 + 2998 + drm_WARN_ON(&i915->drm, 2999 + (for_get_ref && !new_conn_state->crtc) || 3000 + (!for_get_ref && !old_conn_state->crtc)); 3001 + 3002 + if (connector_iter->dp.dsc_decompression_aux == 3003 + connector->dp.dsc_decompression_aux) 3004 + ref_count++; 3005 + } 3006 + 3007 + return ref_count; 3008 + } 3009 + 3010 + static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state, 3011 + struct intel_connector *connector) 3012 + { 3013 + bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0; 3014 + 3015 + connector->dp.dsc_decompression_enabled = true; 3016 + 3017 + return ret; 3018 + } 3019 + 3020 + static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state, 3021 + struct intel_connector *connector) 3022 + { 3023 + connector->dp.dsc_decompression_enabled = false; 3024 + 3025 + return intel_dp_dsc_aux_ref_count(state, connector, false) == 0; 3026 + } 3027 + 3028 + /** 3029 + * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device 3030 + * @state: atomic state 3031 + * @connector: connector to enable the decompression for 3032 + * @new_crtc_state: new state for the CRTC driving @connector 3033 + * 3034 + * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD 3035 + * register of the appropriate sink/branch device. 
On SST this is always the 3036 + * sink device, whereas on MST based on each device's DSC capabilities it's 3037 + * either the last branch device (enabling decompression in it) or both the 3038 + * last branch device (enabling passthrough in it) and the sink device 3039 + * (enabling decompression in it). 3040 + */ 3041 + void intel_dp_sink_enable_decompression(struct intel_atomic_state *state, 3042 + struct intel_connector *connector, 3043 + const struct intel_crtc_state *new_crtc_state) 3044 + { 3045 + struct drm_i915_private *i915 = to_i915(state->base.dev); 3046 + 3047 + if (!new_crtc_state->dsc.compression_enable) 3048 + return; 3049 + 3050 + if (drm_WARN_ON(&i915->drm, 3051 + !connector->dp.dsc_decompression_aux || 3052 + connector->dp.dsc_decompression_enabled)) 3053 + return; 3054 + 3055 + if (!intel_dp_dsc_aux_get_ref(state, connector)) 3056 + return; 3057 + 3058 + intel_dp_sink_set_dsc_passthrough(connector, true); 3059 + intel_dp_sink_set_dsc_decompression(connector, true); 3060 + } 3061 + 3062 + /** 3063 + * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device 3064 + * @state: atomic state 3065 + * @connector: connector to disable the decompression for 3066 + * @old_crtc_state: old state for the CRTC driving @connector 3067 + * 3068 + * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD 3069 + * register of the appropriate sink/branch device, corresponding to the 3070 + * sequence in intel_dp_sink_enable_decompression(). 
3071 + */ 3072 + void intel_dp_sink_disable_decompression(struct intel_atomic_state *state, 3073 + struct intel_connector *connector, 3074 + const struct intel_crtc_state *old_crtc_state) 3075 + { 3076 + struct drm_i915_private *i915 = to_i915(state->base.dev); 3077 + 3078 + if (!old_crtc_state->dsc.compression_enable) 3079 + return; 3080 + 3081 + if (drm_WARN_ON(&i915->drm, 3082 + !connector->dp.dsc_decompression_aux || 3083 + !connector->dp.dsc_decompression_enabled)) 3084 + return; 3085 + 3086 + if (!intel_dp_dsc_aux_put_ref(state, connector)) 3087 + return; 3088 + 3089 + intel_dp_sink_set_dsc_decompression(connector, false); 3090 + intel_dp_sink_set_dsc_passthrough(connector, false); 3048 3091 } 3049 3092 3050 3093 static void ··· 4038 3771 { 4039 3772 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4040 3773 4041 - return i915->params.enable_dp_mst && 3774 + return i915->display.params.enable_dp_mst && 4042 3775 intel_dp_mst_source_support(intel_dp) && 4043 3776 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 4044 3777 } ··· 4056 3789 encoder->base.base.id, encoder->base.name, 4057 3790 str_yes_no(intel_dp_mst_source_support(intel_dp)), 4058 3791 str_yes_no(sink_can_mst), 4059 - str_yes_no(i915->params.enable_dp_mst)); 3792 + str_yes_no(i915->display.params.enable_dp_mst)); 4060 3793 4061 3794 if (!intel_dp_mst_source_support(intel_dp)) 4062 3795 return; 4063 3796 4064 3797 intel_dp->is_mst = sink_can_mst && 4065 - i915->params.enable_dp_mst; 3798 + i915->display.params.enable_dp_mst; 4066 3799 4067 3800 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4068 3801 intel_dp->is_mst); ··· 4132 3865 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ 4133 3866 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ 4134 3867 3868 + if (vsc->revision == 0x6) { 3869 + sdp->db[0] = 1; 3870 + sdp->db[3] = 1; 3871 + } 3872 + 4135 3873 /* 4136 - * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as 4137 - * per DP 1.4a spec. 
3874 + * Revision 0x5 and revision 0x7 supports Pixel Encoding/Colorimetry 3875 + * Format as per DP 1.4a spec and DP 2.0 respectively. 4138 3876 */ 4139 - if (vsc->revision != 0x5) 3877 + if (!(vsc->revision == 0x5 || vsc->revision == 0x7)) 4140 3878 goto out; 4141 3879 4142 3880 /* VSC SDP Payload for DB16 through DB18 */ ··· 4321 4049 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 4322 4050 u32 val = intel_de_read(dev_priv, reg) & ~dip_enable; 4323 4051 4324 - /* TODO: Add DSC case (DIP_ENABLE_PPS) */ 4052 + /* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */ 4053 + if (!enable && HAS_DSC(dev_priv)) 4054 + val &= ~VDIP_ENABLE_PPS; 4055 + 4325 4056 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 4326 4057 if (!crtc_state->has_psr) 4327 4058 val &= ~VIDEO_DIP_ENABLE_VSC_HSW; ··· 5684 5409 if (status == connector_status_disconnected) { 5685 5410 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 5686 5411 memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd)); 5412 + intel_dp->psr.sink_panel_replay_support = false; 5687 5413 5688 5414 if (intel_dp->is_mst) { 5689 5415 drm_dbg_kms(&dev_priv->drm, ··· 6313 6037 * (eg. Acer Chromebook C710), so we'll check it only if multiple 6314 6038 * ports are attempting to use the same AUX CH, according to VBT. 6315 6039 */ 6316 - if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) && 6317 - !intel_digital_port_connected(encoder)) { 6040 + if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) { 6318 6041 /* 6319 6042 * If this fails, presume the DPCD answer came 6320 6043 * from some other port using the same AUX CH. ··· 6321 6046 * FIXME maybe cleaner to check this before the 6322 6047 * DPCD read? Would need sort out the VDD handling... 
6323 6048 */ 6324 - drm_info(&dev_priv->drm, 6325 - "[ENCODER:%d:%s] HPD is down, disabling eDP\n", 6326 - encoder->base.base.id, encoder->base.name); 6327 - goto out_vdd_off; 6049 + if (!intel_digital_port_connected(encoder)) { 6050 + drm_info(&dev_priv->drm, 6051 + "[ENCODER:%d:%s] HPD is down, disabling eDP\n", 6052 + encoder->base.base.id, encoder->base.name); 6053 + goto out_vdd_off; 6054 + } 6055 + 6056 + /* 6057 + * Unfortunately even the HPD based detection fails on 6058 + * eg. Asus B360M-A (CFL+CNP), so as a last resort fall 6059 + * back to checking for a VGA branch device. Only do this 6060 + * on known affected platforms to minimize false positives. 6061 + */ 6062 + if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) && 6063 + (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) == 6064 + DP_DWN_STRM_PORT_TYPE_ANALOG) { 6065 + drm_info(&dev_priv->drm, 6066 + "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n", 6067 + encoder->base.base.id, encoder->base.name); 6068 + goto out_vdd_off; 6069 + } 6328 6070 } 6329 6071 6330 6072 mutex_lock(&dev_priv->drm.mode_config.mutex); ··· 6528 6236 if (ret) 6529 6237 drm_dbg_kms(&dev_priv->drm, 6530 6238 "HDCP init failed, skipping.\n"); 6531 - } 6532 - 6533 - /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 6534 - * 0xd. Failure to do so will result in spurious interrupts being 6535 - * generated on the port when a cable is not attached. 6536 - */ 6537 - if (IS_G45(dev_priv)) { 6538 - u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 6539 - intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 6540 - (temp & ~0xf) | 0xd); 6541 6239 } 6542 6240 6543 6241 intel_dp->frl.is_trained = false;
+23 -3
drivers/gpu/drm/i915/display/intel_dp.h
··· 57 57 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode); 58 58 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, 59 59 const struct intel_crtc_state *crtc_state); 60 - void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 61 - const struct intel_crtc_state *crtc_state, 62 - bool enable); 60 + void intel_dp_sink_enable_decompression(struct intel_atomic_state *state, 61 + struct intel_connector *connector, 62 + const struct intel_crtc_state *new_crtc_state); 63 + void intel_dp_sink_disable_decompression(struct intel_atomic_state *state, 64 + struct intel_connector *connector, 65 + const struct intel_crtc_state *old_crtc_state); 63 66 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); 64 67 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder); 65 68 void intel_dp_encoder_flush_work(struct drm_encoder *encoder); ··· 81 78 bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp); 82 79 bool intel_dp_is_edp(struct intel_dp *intel_dp); 83 80 bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state); 81 + int intel_dp_link_symbol_size(int rate); 82 + int intel_dp_link_symbol_clock(int rate); 84 83 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 85 84 enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port, 86 85 bool long_hpd); ··· 103 98 104 99 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); 105 100 int intel_dp_link_required(int pixel_clock, int bpp); 101 + int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16, 102 + int bw_overhead); 106 103 int intel_dp_max_data_rate(int max_link_rate, int max_lanes); 107 104 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp); 108 105 bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, ··· 132 125 enum intel_output_format output_format, 133 126 u32 pipe_bpp, 134 127 u32 timeslots); 128 + int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state 
*pipe_config); 129 + int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector, 130 + struct intel_crtc_state *pipe_config, 131 + int bpc); 135 132 u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector, 136 133 int mode_clock, int mode_hdisplay, 137 134 bool bigjoiner); ··· 147 136 return ~((1 << lane_count) - 1) & 0xf; 148 137 } 149 138 139 + bool intel_dp_supports_fec(struct intel_dp *intel_dp, 140 + const struct intel_connector *connector, 141 + const struct intel_crtc_state *pipe_config); 150 142 u32 intel_dp_mode_to_fec_clock(u32 mode_clock); 143 + int intel_dp_bw_fec_overhead(bool fec_enabled); 144 + 145 + bool intel_dp_supports_fec(struct intel_dp *intel_dp, 146 + const struct intel_connector *connector, 147 + const struct intel_crtc_state *pipe_config); 148 + 151 149 u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp); 152 150 153 151 void intel_ddi_update_pipe(struct intel_atomic_state *state,
+62 -37
drivers/gpu/drm/i915/display/intel_dp_aux.c
··· 74 74 75 75 static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 76 76 { 77 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 77 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 78 78 79 79 if (index) 80 80 return 0; ··· 83 83 * The clock divider is based off the hrawclk, and would like to run at 84 84 * 2MHz. So, take the hrawclk value and divide by 2000 and use that 85 85 */ 86 - return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000); 86 + return DIV_ROUND_CLOSEST(RUNTIME_INFO(i915)->rawclk_freq, 2000); 87 87 } 88 88 89 89 static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 90 90 { 91 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 91 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 92 92 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 93 93 u32 freq; 94 94 ··· 101 101 * divide by 2000 and use that 102 102 */ 103 103 if (dig_port->aux_ch == AUX_CH_A) 104 - freq = dev_priv->display.cdclk.hw.cdclk; 104 + freq = i915->display.cdclk.hw.cdclk; 105 105 else 106 - freq = RUNTIME_INFO(dev_priv)->rawclk_freq; 106 + freq = RUNTIME_INFO(i915)->rawclk_freq; 107 107 return DIV_ROUND_CLOSEST(freq, 2000); 108 108 } 109 109 110 110 static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) 111 111 { 112 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 112 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 113 113 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 114 114 115 - if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { 115 + if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) { 116 116 /* Workaround for non-ULT HSW */ 117 117 switch (index) { 118 118 case 0: return 63; ··· 165 165 u32 aux_clock_divider) 166 166 { 167 167 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 168 - struct drm_i915_private *dev_priv = 169 - to_i915(dig_port->base.base.dev); 168 + struct drm_i915_private 
*i915 = to_i915(dig_port->base.base.dev); 170 169 u32 timeout; 171 170 172 171 /* Max timeout value on G4x-BDW: 1.6ms */ 173 - if (IS_BROADWELL(dev_priv)) 172 + if (IS_BROADWELL(i915)) 174 173 timeout = DP_AUX_CH_CTL_TIME_OUT_600us; 175 174 else 176 175 timeout = DP_AUX_CH_CTL_TIME_OUT_400us; ··· 228 229 u32 aux_send_ctl_flags) 229 230 { 230 231 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 231 - struct drm_i915_private *i915 = 232 - to_i915(dig_port->base.base.dev); 232 + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 233 233 enum phy phy = intel_port_to_phy(i915, dig_port->base.port); 234 234 bool is_tc_port = intel_phy_is_tc(i915, phy); 235 235 i915_reg_t ch_ctl, ch_data[5]; ··· 529 531 return ret; 530 532 } 531 533 534 + static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp) 535 + { 536 + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 537 + enum aux_ch aux_ch = dig_port->aux_ch; 538 + 539 + switch (aux_ch) { 540 + case AUX_CH_B: 541 + case AUX_CH_C: 542 + case AUX_CH_D: 543 + return VLV_DP_AUX_CH_CTL(aux_ch); 544 + default: 545 + MISSING_CASE(aux_ch); 546 + return VLV_DP_AUX_CH_CTL(AUX_CH_B); 547 + } 548 + } 549 + 550 + static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index) 551 + { 552 + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 553 + enum aux_ch aux_ch = dig_port->aux_ch; 554 + 555 + switch (aux_ch) { 556 + case AUX_CH_B: 557 + case AUX_CH_C: 558 + case AUX_CH_D: 559 + return VLV_DP_AUX_CH_DATA(aux_ch, index); 560 + default: 561 + MISSING_CASE(aux_ch); 562 + return VLV_DP_AUX_CH_DATA(AUX_CH_B, index); 563 + } 564 + } 565 + 532 566 static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) 533 567 { 534 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 535 568 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 536 569 enum aux_ch aux_ch = dig_port->aux_ch; 537 570 ··· 579 550 580 551 static i915_reg_t g4x_aux_data_reg(struct intel_dp 
*intel_dp, int index) 581 552 { 582 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 583 553 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 584 554 enum aux_ch aux_ch = dig_port->aux_ch; 585 555 ··· 595 567 596 568 static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) 597 569 { 598 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 599 570 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 600 571 enum aux_ch aux_ch = dig_port->aux_ch; 601 572 ··· 613 586 614 587 static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) 615 588 { 616 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 617 589 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 618 590 enum aux_ch aux_ch = dig_port->aux_ch; 619 591 ··· 631 605 632 606 static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) 633 607 { 634 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 635 608 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 636 609 enum aux_ch aux_ch = dig_port->aux_ch; 637 610 ··· 650 625 651 626 static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) 652 627 { 653 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 654 628 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 655 629 enum aux_ch aux_ch = dig_port->aux_ch; 656 630 ··· 669 645 670 646 static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp) 671 647 { 672 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 673 648 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 674 649 enum aux_ch aux_ch = dig_port->aux_ch; 675 650 ··· 691 668 692 669 static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index) 693 670 { 694 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 695 671 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 696 672 enum aux_ch aux_ch = dig_port->aux_ch; 697 673 ··· 713 691 714 692 static i915_reg_t 
xelpdp_aux_ctl_reg(struct intel_dp *intel_dp) 715 693 { 716 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 694 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 717 695 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 718 696 enum aux_ch aux_ch = dig_port->aux_ch; 719 697 ··· 724 702 case AUX_CH_USBC2: 725 703 case AUX_CH_USBC3: 726 704 case AUX_CH_USBC4: 727 - return XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch); 705 + return XELPDP_DP_AUX_CH_CTL(i915, aux_ch); 728 706 default: 729 707 MISSING_CASE(aux_ch); 730 - return XELPDP_DP_AUX_CH_CTL(dev_priv, AUX_CH_A); 708 + return XELPDP_DP_AUX_CH_CTL(i915, AUX_CH_A); 731 709 } 732 710 } 733 711 734 712 static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index) 735 713 { 736 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 714 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 737 715 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 738 716 enum aux_ch aux_ch = dig_port->aux_ch; 739 717 ··· 744 722 case AUX_CH_USBC2: 745 723 case AUX_CH_USBC3: 746 724 case AUX_CH_USBC4: 747 - return XELPDP_DP_AUX_CH_DATA(dev_priv, aux_ch, index); 725 + return XELPDP_DP_AUX_CH_DATA(i915, aux_ch, index); 748 726 default: 749 727 MISSING_CASE(aux_ch); 750 - return XELPDP_DP_AUX_CH_DATA(dev_priv, AUX_CH_A, index); 728 + return XELPDP_DP_AUX_CH_DATA(i915, AUX_CH_A, index); 751 729 } 752 730 } 753 731 ··· 761 739 762 740 void intel_dp_aux_init(struct intel_dp *intel_dp) 763 741 { 764 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 742 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 765 743 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 766 744 struct intel_encoder *encoder = &dig_port->base; 767 745 enum aux_ch aux_ch = dig_port->aux_ch; 768 746 char buf[AUX_CH_NAME_BUFSIZE]; 769 747 770 - if (DISPLAY_VER(dev_priv) >= 14) { 748 + if (DISPLAY_VER(i915) >= 14) { 771 749 intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg; 772 750 
intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg; 773 - } else if (DISPLAY_VER(dev_priv) >= 12) { 751 + } else if (DISPLAY_VER(i915) >= 12) { 774 752 intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg; 775 753 intel_dp->aux_ch_data_reg = tgl_aux_data_reg; 776 - } else if (DISPLAY_VER(dev_priv) >= 9) { 754 + } else if (DISPLAY_VER(i915) >= 9) { 777 755 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; 778 756 intel_dp->aux_ch_data_reg = skl_aux_data_reg; 779 - } else if (HAS_PCH_SPLIT(dev_priv)) { 757 + } else if (HAS_PCH_SPLIT(i915)) { 780 758 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg; 781 759 intel_dp->aux_ch_data_reg = ilk_aux_data_reg; 760 + } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { 761 + intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg; 762 + intel_dp->aux_ch_data_reg = vlv_aux_data_reg; 782 763 } else { 783 764 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg; 784 765 intel_dp->aux_ch_data_reg = g4x_aux_data_reg; 785 766 } 786 767 787 - if (DISPLAY_VER(dev_priv) >= 9) 768 + if (DISPLAY_VER(i915) >= 9) 788 769 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; 789 - else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) 770 + else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) 790 771 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; 791 - else if (HAS_PCH_SPLIT(dev_priv)) 772 + else if (HAS_PCH_SPLIT(i915)) 792 773 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; 793 774 else 794 775 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider; 795 776 796 - if (DISPLAY_VER(dev_priv) >= 9) 777 + if (DISPLAY_VER(i915) >= 9) 797 778 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl; 798 779 else 799 780 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl; 800 781 801 - intel_dp->aux.drm_dev = &dev_priv->drm; 782 + intel_dp->aux.drm_dev = &i915->drm; 802 783 drm_dp_aux_init(&intel_dp->aux); 803 784 804 785 /* Failure to allocate our preferred name is not critical */ 805 786 intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s", 806 - 
aux_ch_name(dev_priv, buf, sizeof(buf), aux_ch), 787 + aux_ch_name(i915, buf, sizeof(buf), aux_ch), 807 788 encoder->base.name); 808 789 809 790 intel_dp->aux.transfer = intel_dp_aux_transfer;
+2 -2
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
··· 146 146 * HDR static metadata we need to start maintaining table of 147 147 * ranges for such panels. 148 148 */ 149 - if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL && 149 + if (i915->display.params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL && 150 150 !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type & 151 151 BIT(HDMI_STATIC_METADATA_TYPE1))) { 152 152 drm_info(&i915->drm, ··· 489 489 /* Check the VBT and user's module parameters to figure out which 490 490 * interfaces to probe 491 491 */ 492 - switch (i915->params.enable_dpcd_backlight) { 492 + switch (i915->display.params.enable_dpcd_backlight) { 493 493 case INTEL_DP_AUX_BACKLIGHT_OFF: 494 494 return -ENODEV; 495 495 case INTEL_DP_AUX_BACKLIGHT_AUTO:
+8 -6
drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
··· 21 21 #define __xe2lpd_aux_ch_idx(aux_ch) \ 22 22 (aux_ch >= AUX_CH_USBC1 ? aux_ch : AUX_CH_USBC4 + 1 + (aux_ch) - AUX_CH_A) 23 23 24 - /* TODO: Remove implicit dev_priv */ 25 - #define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010) 26 - #define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110) 24 + #define _DPA_AUX_CH_CTL 0x64010 25 + #define _DPB_AUX_CH_CTL 0x64110 27 26 #define _XELPDP_USBC1_AUX_CH_CTL 0x16f210 28 27 #define _XELPDP_USBC2_AUX_CH_CTL 0x16f410 29 28 #define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, \ 30 29 _DPB_AUX_CH_CTL) 30 + #define VLV_DP_AUX_CH_CTL(aux_ch) _MMIO(VLV_DISPLAY_BASE + \ 31 + _PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)) 31 32 #define _XELPDP_DP_AUX_CH_CTL(aux_ch) \ 32 33 _MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \ 33 34 _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL, \ ··· 70 69 #define DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK REG_GENMASK(4, 0) /* skl+ */ 71 70 #define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) REG_FIELD_PREP(DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK, (c) - 1) 72 71 73 - /* TODO: Remove implicit dev_priv */ 74 - #define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014) 75 - #define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114) 72 + #define _DPA_AUX_CH_DATA1 0x64014 73 + #define _DPB_AUX_CH_DATA1 0x64114 76 74 #define _XELPDP_USBC1_AUX_CH_DATA1 0x16f214 77 75 #define _XELPDP_USBC2_AUX_CH_DATA1 0x16f414 78 76 #define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, \ 79 77 _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ 78 + #define VLV_DP_AUX_CH_DATA(aux_ch, i) _MMIO(VLV_DISPLAY_BASE + _PORT(aux_ch, _DPA_AUX_CH_DATA1, \ 79 + _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ 80 80 #define _XELPDP_DP_AUX_CH_DATA(aux_ch, i) \ 81 81 _MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \ 82 82 _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1, \
+558 -106
drivers/gpu/drm/i915/display/intel_dp_mst.c
··· 26 26 #include <drm/drm_atomic.h> 27 27 #include <drm/drm_atomic_helper.h> 28 28 #include <drm/drm_edid.h> 29 + #include <drm/drm_fixed.h> 29 30 #include <drm/drm_probe_helper.h> 30 31 31 32 #include "i915_drv.h" ··· 44 43 #include "intel_dpio_phy.h" 45 44 #include "intel_hdcp.h" 46 45 #include "intel_hotplug.h" 46 + #include "intel_link_bw.h" 47 + #include "intel_psr.h" 48 + #include "intel_vdsc.h" 47 49 #include "skl_scaler.h" 48 50 49 51 static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp, ··· 68 64 } 69 65 70 66 return 0; 67 + } 68 + 69 + static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state, 70 + const struct intel_connector *connector, 71 + bool ssc, bool dsc, int bpp_x16) 72 + { 73 + const struct drm_display_mode *adjusted_mode = 74 + &crtc_state->hw.adjusted_mode; 75 + unsigned long flags = DRM_DP_BW_OVERHEAD_MST; 76 + int dsc_slice_count = 0; 77 + int overhead; 78 + 79 + flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0; 80 + flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0; 81 + flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0; 82 + 83 + if (dsc) { 84 + flags |= DRM_DP_BW_OVERHEAD_DSC; 85 + /* TODO: add support for bigjoiner */ 86 + dsc_slice_count = intel_dp_dsc_get_slice_count(connector, 87 + adjusted_mode->clock, 88 + adjusted_mode->hdisplay, 89 + false); 90 + } 91 + 92 + overhead = drm_dp_bw_overhead(crtc_state->lane_count, 93 + adjusted_mode->hdisplay, 94 + dsc_slice_count, 95 + bpp_x16, 96 + flags); 97 + 98 + /* 99 + * TODO: clarify whether a minimum required by the fixed FEC overhead 100 + * in the bspec audio programming sequence is required here. 
101 + */ 102 + return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable)); 103 + } 104 + 105 + static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state, 106 + const struct intel_connector *connector, 107 + int overhead, 108 + int bpp_x16, 109 + struct intel_link_m_n *m_n) 110 + { 111 + const struct drm_display_mode *adjusted_mode = 112 + &crtc_state->hw.adjusted_mode; 113 + 114 + /* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */ 115 + intel_link_compute_m_n(bpp_x16, crtc_state->lane_count, 116 + adjusted_mode->crtc_clock, 117 + crtc_state->port_clock, 118 + overhead, 119 + m_n); 120 + 121 + m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n); 122 + } 123 + 124 + static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead) 125 + { 126 + int effective_data_rate = 127 + intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead); 128 + 129 + /* 130 + * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted 131 + * to calculate PBN with the BW overhead passed to it. 
132 + */ 133 + return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000); 71 134 } 72 135 73 136 static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, ··· 165 94 crtc_state->lane_count = limits->max_lane_count; 166 95 crtc_state->port_clock = limits->max_rate; 167 96 97 + if (dsc) { 98 + if (!intel_dp_supports_fec(intel_dp, connector, crtc_state)) 99 + return -EINVAL; 100 + 101 + crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state); 102 + } 103 + 168 104 mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr, 169 105 crtc_state->port_clock, 170 106 crtc_state->lane_count); 171 107 108 + drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n", 109 + min_bpp, max_bpp); 110 + 172 111 for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) { 112 + int local_bw_overhead; 113 + int remote_bw_overhead; 114 + int link_bpp_x16; 115 + int remote_tu; 116 + 173 117 drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp); 174 118 175 119 ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc); 176 120 if (ret) 177 121 continue; 178 122 179 - crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, 180 - dsc ? bpp << 4 : bpp, 181 - dsc); 123 + link_bpp_x16 = to_bpp_x16(dsc ? bpp : 124 + intel_dp_output_bpp(crtc_state->output_format, bpp)); 125 + 126 + local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector, 127 + false, dsc, link_bpp_x16); 128 + remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector, 129 + true, dsc, link_bpp_x16); 130 + 131 + intel_dp_mst_compute_m_n(crtc_state, connector, 132 + local_bw_overhead, 133 + link_bpp_x16, 134 + &crtc_state->dp_m_n); 135 + 136 + /* 137 + * The TU size programmed to the HW determines which slots in 138 + * an MTP frame are used for this stream, which needs to match 139 + * the payload size programmed to the first downstream branch 140 + * device's payload table. 
141 + * 142 + * Note that atm the payload's PBN value DRM core sends via 143 + * the ALLOCATE_PAYLOAD side-band message matches the payload 144 + * size (which it calculates from the PBN value) it programs 145 + * to the first branch device's payload table. The allocation 146 + * in the payload table could be reduced though (to 147 + * crtc_state->dp_m_n.tu), provided that the driver doesn't 148 + * enable SSC on the corresponding link. 149 + */ 150 + crtc_state->pbn = intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock, 151 + link_bpp_x16, 152 + remote_bw_overhead); 153 + 154 + remote_tu = DIV_ROUND_UP(dfixed_const(crtc_state->pbn), mst_state->pbn_div.full); 155 + 156 + drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu); 157 + crtc_state->dp_m_n.tu = remote_tu; 182 158 183 159 slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr, 184 160 connector->port, ··· 234 116 return slots; 235 117 236 118 if (slots >= 0) { 237 - ret = drm_dp_mst_atomic_check(state); 238 - /* 239 - * If we got slots >= 0 and we can fit those based on check 240 - * then we can exit the loop. Otherwise keep trying. 
241 - */ 242 - if (!ret) 243 - break; 119 + drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu); 120 + 121 + break; 244 122 } 245 123 } 246 124 ··· 251 137 if (!dsc) 252 138 crtc_state->pipe_bpp = bpp; 253 139 else 254 - crtc_state->dsc.compressed_bpp = bpp; 140 + crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(bpp); 255 141 drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc); 256 142 } 257 143 ··· 263 149 struct drm_connector_state *conn_state, 264 150 struct link_config_limits *limits) 265 151 { 266 - const struct drm_display_mode *adjusted_mode = 267 - &crtc_state->hw.adjusted_mode; 268 152 int slots = -EINVAL; 269 - int link_bpp; 270 153 271 154 /* 272 155 * FIXME: allocate the BW according to link_bpp, which in the case of ··· 278 167 if (slots < 0) 279 168 return slots; 280 169 281 - link_bpp = intel_dp_output_bpp(crtc_state->output_format, crtc_state->pipe_bpp); 282 - 283 - intel_link_compute_m_n(link_bpp, 284 - crtc_state->lane_count, 285 - adjusted_mode->crtc_clock, 286 - crtc_state->port_clock, 287 - &crtc_state->dp_m_n, 288 - crtc_state->fec_enable); 289 - crtc_state->dp_m_n.tu = slots; 290 - 291 170 return 0; 292 171 } 293 172 ··· 289 188 struct intel_connector *connector = 290 189 to_intel_connector(conn_state->connector); 291 190 struct drm_i915_private *i915 = to_i915(connector->base.dev); 292 - const struct drm_display_mode *adjusted_mode = 293 - &crtc_state->hw.adjusted_mode; 294 191 int slots = -EINVAL; 295 192 int i, num_bpc; 296 193 u8 dsc_bpc[3] = {}; 297 194 int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp; 298 195 u8 dsc_max_bpc; 299 - bool need_timeslot_recalc = false; 300 - u32 last_compressed_bpp; 196 + int min_compressed_bpp, max_compressed_bpp; 301 197 302 198 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 303 199 if (DISPLAY_VER(i915) >= 12) ··· 330 232 if (max_bpp > sink_max_bpp) 331 233 max_bpp = sink_max_bpp; 332 234 333 - min_bpp = max(min_bpp, 
to_bpp_int_roundup(limits->link.min_bpp_x16)); 334 - max_bpp = min(max_bpp, to_bpp_int(limits->link.max_bpp_x16)); 235 + max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector, 236 + crtc_state, 237 + max_bpp / 3); 238 + max_compressed_bpp = min(max_compressed_bpp, 239 + to_bpp_int(limits->link.max_bpp_x16)); 335 240 336 - slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_bpp, 337 - min_bpp, limits, 338 - conn_state, 2 * 3, true); 241 + min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state); 242 + min_compressed_bpp = max(min_compressed_bpp, 243 + to_bpp_int_roundup(limits->link.min_bpp_x16)); 244 + 245 + drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n", 246 + min_compressed_bpp, max_compressed_bpp); 247 + 248 + /* Align compressed bpps according to our own constraints */ 249 + max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp, 250 + crtc_state->pipe_bpp); 251 + min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp, 252 + crtc_state->pipe_bpp); 253 + 254 + slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp, 255 + min_compressed_bpp, limits, 256 + conn_state, 1, true); 339 257 340 258 if (slots < 0) 341 259 return slots; 342 - 343 - last_compressed_bpp = crtc_state->dsc.compressed_bpp; 344 - 345 - crtc_state->dsc.compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, 346 - last_compressed_bpp, 347 - crtc_state->pipe_bpp); 348 - 349 - if (crtc_state->dsc.compressed_bpp != last_compressed_bpp) 350 - need_timeslot_recalc = true; 351 - 352 - /* 353 - * Apparently some MST hubs dislike if vcpi slots are not matching precisely 354 - * the actual compressed bpp we use. 
355 - */ 356 - if (need_timeslot_recalc) { 357 - slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, 358 - crtc_state->dsc.compressed_bpp, 359 - crtc_state->dsc.compressed_bpp, 360 - limits, conn_state, 2 * 3, true); 361 - if (slots < 0) 362 - return slots; 363 - } 364 - 365 - intel_link_compute_m_n(crtc_state->dsc.compressed_bpp, 366 - crtc_state->lane_count, 367 - adjusted_mode->crtc_clock, 368 - crtc_state->port_clock, 369 - &crtc_state->dp_m_n, 370 - crtc_state->fec_enable); 371 - crtc_state->dp_m_n.tu = slots; 372 260 373 261 return 0; 374 262 } ··· 382 298 } 383 299 384 300 static bool 301 + intel_dp_mst_dsc_source_support(const struct intel_crtc_state *crtc_state) 302 + { 303 + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 304 + 305 + /* 306 + * FIXME: Enabling DSC on ICL results in blank screen and FIFO pipe / 307 + * transcoder underruns, re-enable DSC after fixing this issue. 308 + */ 309 + return DISPLAY_VER(i915) >= 12 && intel_dsc_source_support(crtc_state); 310 + } 311 + 312 + static int mode_hblank_period_ns(const struct drm_display_mode *mode) 313 + { 314 + return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay, 315 + NSEC_PER_SEC / 1000), 316 + mode->crtc_clock); 317 + } 318 + 319 + static bool 320 + hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector, 321 + const struct intel_crtc_state *crtc_state) 322 + { 323 + const struct drm_display_mode *adjusted_mode = 324 + &crtc_state->hw.adjusted_mode; 325 + 326 + if (!connector->dp.dsc_hblank_expansion_quirk) 327 + return false; 328 + 329 + if (mode_hblank_period_ns(adjusted_mode) > 300) 330 + return false; 331 + 332 + return true; 333 + } 334 + 335 + static bool 336 + adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector, 337 + const struct intel_crtc_state *crtc_state, 338 + struct link_config_limits *limits, 339 + bool dsc) 340 + { 341 + struct drm_i915_private *i915 = 
to_i915(connector->base.dev); 342 + const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 343 + int min_bpp_x16 = limits->link.min_bpp_x16; 344 + 345 + if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state)) 346 + return true; 347 + 348 + if (!dsc) { 349 + if (intel_dp_mst_dsc_source_support(crtc_state)) { 350 + drm_dbg_kms(&i915->drm, 351 + "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n", 352 + crtc->base.base.id, crtc->base.name, 353 + connector->base.base.id, connector->base.name); 354 + return false; 355 + } 356 + 357 + drm_dbg_kms(&i915->drm, 358 + "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n", 359 + crtc->base.base.id, crtc->base.name, 360 + connector->base.base.id, connector->base.name); 361 + 362 + if (limits->link.max_bpp_x16 < to_bpp_x16(24)) 363 + return false; 364 + 365 + limits->link.min_bpp_x16 = to_bpp_x16(24); 366 + 367 + return true; 368 + } 369 + 370 + drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate); 371 + 372 + if (limits->max_rate < 540000) 373 + min_bpp_x16 = to_bpp_x16(13); 374 + else if (limits->max_rate < 810000) 375 + min_bpp_x16 = to_bpp_x16(10); 376 + 377 + if (limits->link.min_bpp_x16 >= min_bpp_x16) 378 + return true; 379 + 380 + drm_dbg_kms(&i915->drm, 381 + "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " BPP_X16_FMT " in DSC mode due to hblank expansion quirk\n", 382 + crtc->base.base.id, crtc->base.name, 383 + connector->base.base.id, connector->base.name, 384 + BPP_X16_ARGS(min_bpp_x16)); 385 + 386 + if (limits->link.max_bpp_x16 < min_bpp_x16) 387 + return false; 388 + 389 + limits->link.min_bpp_x16 = min_bpp_x16; 390 + 391 + return true; 392 + } 393 + 394 + static bool 385 395 intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp, 396 + const struct intel_connector *connector, 386 397 struct intel_crtc_state *crtc_state, 387 398 bool dsc, 388 399 struct link_config_limits *limits) ··· 505 326 506 327 
intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits); 507 328 508 - return intel_dp_compute_config_link_bpp_limits(intel_dp, 509 - crtc_state, 510 - dsc, 511 - limits); 329 + if (!intel_dp_compute_config_link_bpp_limits(intel_dp, 330 + crtc_state, 331 + dsc, 332 + limits)) 333 + return false; 334 + 335 + return adjust_limits_for_dsc_hblank_expansion_quirk(connector, 336 + crtc_state, 337 + limits, 338 + dsc); 512 339 } 513 340 514 341 static int intel_dp_mst_compute_config(struct intel_encoder *encoder, ··· 524 339 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 525 340 struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); 526 341 struct intel_dp *intel_dp = &intel_mst->primary->dp; 342 + const struct intel_connector *connector = 343 + to_intel_connector(conn_state->connector); 527 344 const struct drm_display_mode *adjusted_mode = 528 345 &pipe_config->hw.adjusted_mode; 529 346 struct link_config_limits limits; 530 347 bool dsc_needed; 531 348 int ret = 0; 349 + 350 + if (pipe_config->fec_enable && 351 + !intel_dp_supports_fec(intel_dp, connector, pipe_config)) 352 + return -EINVAL; 532 353 533 354 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 534 355 return -EINVAL; ··· 545 354 546 355 dsc_needed = intel_dp->force_dsc_en || 547 356 !intel_dp_mst_compute_config_limits(intel_dp, 357 + connector, 548 358 pipe_config, 549 359 false, 550 360 &limits); ··· 567 375 str_yes_no(ret), 568 376 str_yes_no(intel_dp->force_dsc_en)); 569 377 378 + if (!intel_dp_mst_dsc_source_support(pipe_config)) 379 + return -EINVAL; 380 + 570 381 if (!intel_dp_mst_compute_config_limits(intel_dp, 382 + connector, 571 383 pipe_config, 572 384 true, 573 385 &limits)) ··· 616 420 617 421 intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); 618 422 423 + intel_psr_compute_config(intel_dp, pipe_config, conn_state); 424 + 619 425 return 0; 620 426 } 621 427 ··· 657 459 return transcoders; 658 460 } 659 461 462 + static u8 
get_pipes_downstream_of_mst_port(struct intel_atomic_state *state, 463 + struct drm_dp_mst_topology_mgr *mst_mgr, 464 + struct drm_dp_mst_port *parent_port) 465 + { 466 + const struct intel_digital_connector_state *conn_state; 467 + struct intel_connector *connector; 468 + u8 mask = 0; 469 + int i; 470 + 471 + for_each_new_intel_connector_in_state(state, connector, conn_state, i) { 472 + if (!conn_state->base.crtc) 473 + continue; 474 + 475 + if (&connector->mst_port->mst_mgr != mst_mgr) 476 + continue; 477 + 478 + if (connector->port != parent_port && 479 + !drm_dp_mst_port_downstream_of_parent(mst_mgr, 480 + connector->port, 481 + parent_port)) 482 + continue; 483 + 484 + mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe); 485 + } 486 + 487 + return mask; 488 + } 489 + 490 + static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state, 491 + struct drm_dp_mst_topology_mgr *mst_mgr, 492 + struct intel_link_bw_limits *limits) 493 + { 494 + struct drm_i915_private *i915 = to_i915(state->base.dev); 495 + struct intel_crtc *crtc; 496 + u8 mst_pipe_mask; 497 + u8 fec_pipe_mask = 0; 498 + int ret; 499 + 500 + mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL); 501 + 502 + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) { 503 + struct intel_crtc_state *crtc_state = 504 + intel_atomic_get_new_crtc_state(state, crtc); 505 + 506 + /* Atomic connector check should've added all the MST CRTCs. */ 507 + if (drm_WARN_ON(&i915->drm, !crtc_state)) 508 + return -EINVAL; 509 + 510 + if (crtc_state->fec_enable) 511 + fec_pipe_mask |= BIT(crtc->pipe); 512 + } 513 + 514 + if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask) 515 + return 0; 516 + 517 + limits->force_fec_pipes |= mst_pipe_mask; 518 + 519 + ret = intel_modeset_pipes_in_mask_early(state, "MST FEC", 520 + mst_pipe_mask); 521 + 522 + return ret ? 
: -EAGAIN; 523 + } 524 + 525 + static int intel_dp_mst_check_bw(struct intel_atomic_state *state, 526 + struct drm_dp_mst_topology_mgr *mst_mgr, 527 + struct drm_dp_mst_topology_state *mst_state, 528 + struct intel_link_bw_limits *limits) 529 + { 530 + struct drm_dp_mst_port *mst_port; 531 + u8 mst_port_pipes; 532 + int ret; 533 + 534 + ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port); 535 + if (ret != -ENOSPC) 536 + return ret; 537 + 538 + mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port); 539 + 540 + ret = intel_link_bw_reduce_bpp(state, limits, 541 + mst_port_pipes, "MST link BW"); 542 + 543 + return ret ? : -EAGAIN; 544 + } 545 + 546 + /** 547 + * intel_dp_mst_atomic_check_link - check all modeset MST link configuration 548 + * @state: intel atomic state 549 + * @limits: link BW limits 550 + * 551 + * Check the link configuration for all modeset MST outputs. If the 552 + * configuration is invalid @limits will be updated if possible to 553 + * reduce the total BW, after which the configuration for all CRTCs in 554 + * @state must be recomputed with the updated @limits. 
555 + * 556 + * Returns: 557 + * - 0 if the confugration is valid 558 + * - %-EAGAIN, if the configuration is invalid and @limits got updated 559 + * with fallback values with which the configuration of all CRTCs in 560 + * @state must be recomputed 561 + * - Other negative error, if the configuration is invalid without a 562 + * fallback possibility, or the check failed for another reason 563 + */ 564 + int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state, 565 + struct intel_link_bw_limits *limits) 566 + { 567 + struct drm_dp_mst_topology_mgr *mgr; 568 + struct drm_dp_mst_topology_state *mst_state; 569 + int ret; 570 + int i; 571 + 572 + for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) { 573 + ret = intel_dp_mst_check_fec_change(state, mgr, limits); 574 + if (ret) 575 + return ret; 576 + 577 + ret = intel_dp_mst_check_bw(state, mgr, mst_state, 578 + limits); 579 + if (ret) 580 + return ret; 581 + } 582 + 583 + return 0; 584 + } 585 + 660 586 static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder, 661 587 struct intel_crtc_state *crtc_state, 662 588 struct drm_connector_state *conn_state) ··· 801 479 * that shares the same MST stream as mode changed, 802 480 * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do 803 481 * a fastset when possible. 482 + * 483 + * On TGL+ this is required since each stream go through a master transcoder, 484 + * so if the master transcoder needs modeset, all other streams in the 485 + * topology need a modeset. All platforms need to add the atomic state 486 + * for all streams in the topology, since a modeset on one may require 487 + * changing the MST link BW usage of the others, which in turn needs a 488 + * recomputation of the corresponding CRTC states. 
804 489 */ 805 490 static int 806 - intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, 807 - struct intel_atomic_state *state) 491 + intel_dp_mst_atomic_topology_check(struct intel_connector *connector, 492 + struct intel_atomic_state *state) 808 493 { 809 494 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 810 495 struct drm_connector_list_iter connector_list_iter; 811 496 struct intel_connector *connector_iter; 812 497 int ret = 0; 813 - 814 - if (DISPLAY_VER(dev_priv) < 12) 815 - return 0; 816 498 817 499 if (!intel_connector_needs_modeset(state, &connector->base)) 818 500 return 0; ··· 871 545 if (ret) 872 546 return ret; 873 547 874 - ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state); 548 + ret = intel_dp_mst_atomic_topology_check(intel_connector, state); 875 549 if (ret) 876 550 return ret; 877 551 ··· 913 587 struct intel_dp *intel_dp = &dig_port->dp; 914 588 struct intel_connector *connector = 915 589 to_intel_connector(old_conn_state->connector); 916 - struct drm_dp_mst_topology_state *new_mst_state = 917 - drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr); 918 - struct drm_dp_mst_atomic_payload *new_payload = 919 - drm_atomic_get_mst_payload_state(new_mst_state, connector->port); 920 590 struct drm_i915_private *i915 = to_i915(connector->base.dev); 921 591 922 592 drm_dbg_kms(&i915->drm, "active links %d\n", ··· 920 598 921 599 intel_hdcp_disable(intel_mst->connector); 922 600 923 - drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload); 924 - 925 - intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); 601 + intel_dp_sink_disable_decompression(state, connector, old_crtc_state); 926 602 } 927 603 928 604 static void intel_mst_post_disable_dp(struct intel_atomic_state *state, ··· 954 634 955 635 intel_disable_transcoder(old_crtc_state); 956 636 637 + drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload); 638 + 957 639 
clear_act_sent(encoder, old_crtc_state); 958 640 959 641 intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), ··· 967 645 old_payload, new_payload); 968 646 969 647 intel_ddi_disable_transcoder_func(old_crtc_state); 648 + 649 + intel_dsc_disable(old_crtc_state); 970 650 971 651 if (DISPLAY_VER(dev_priv) >= 9) 972 652 skl_scaler_disable(old_crtc_state); ··· 986 662 * BSpec 4287: disable DIP after the transcoder is disabled and before 987 663 * the transcoder clock select is set to none. 988 664 */ 989 - if (last_mst_stream) 990 - intel_dp_set_infoframes(&dig_port->base, false, 991 - old_crtc_state, NULL); 665 + intel_dp_set_infoframes(&dig_port->base, false, 666 + old_crtc_state, NULL); 992 667 /* 993 668 * From TGL spec: "If multi-stream slave transcoder: Configure 994 669 * Transcoder Clock Select to direct no clock to the transcoder" ··· 1077 754 1078 755 drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true); 1079 756 757 + intel_dp_sink_enable_decompression(state, connector, pipe_config); 758 + 1080 759 if (first_mst_stream) 1081 760 dig_port->base.pre_enable(state, &dig_port->base, 1082 761 pipe_config, NULL); ··· 1101 776 if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream) 1102 777 intel_ddi_enable_transcoder_clock(encoder, pipe_config); 1103 778 779 + intel_dsc_dp_pps_write(&dig_port->base, pipe_config); 1104 780 intel_ddi_set_dp_msa(pipe_config, conn_state); 1105 781 } 1106 782 ··· 1118 792 struct drm_dp_mst_topology_state *mst_state = 1119 793 drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr); 1120 794 enum transcoder trans = pipe_config->cpu_transcoder; 795 + bool first_mst_stream = intel_dp->active_mst_links == 1; 1121 796 1122 797 drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); 1123 - 1124 - clear_act_sent(encoder, pipe_config); 1125 798 1126 799 if (intel_dp_is_uhbr(pipe_config)) { 1127 800 const struct drm_display_mode *adjusted_mode = ··· 1135 810 1136 811 
intel_ddi_enable_transcoder_func(encoder, pipe_config); 1137 812 813 + clear_act_sent(encoder, pipe_config); 814 + 1138 815 intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0, 1139 816 TRANS_DDI_DP_VC_PAYLOAD_ALLOC); 1140 817 ··· 1145 818 1146 819 wait_for_act_sent(encoder, pipe_config); 1147 820 821 + if (first_mst_stream) 822 + intel_ddi_wait_for_fec_status(encoder, pipe_config, true); 823 + 1148 824 drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base, 1149 825 drm_atomic_get_mst_payload_state(mst_state, connector->port)); 1150 826 1151 - if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable) 1152 - intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0, 1153 - FECSTALL_DIS_DPTSTREAM_DPTTG); 1154 - else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable) 1155 - intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0, 1156 - FECSTALL_DIS_DPTSTREAM_DPTTG); 827 + if (DISPLAY_VER(dev_priv) >= 12) 828 + intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans), 829 + FECSTALL_DIS_DPTSTREAM_DPTTG, 830 + pipe_config->fec_enable ? 
FECSTALL_DIS_DPTSTREAM_DPTTG : 0); 1157 831 1158 832 intel_audio_sdp_split_update(pipe_config); 1159 833 ··· 1162 834 1163 835 intel_crtc_vblank_on(pipe_config); 1164 836 1165 - intel_audio_codec_enable(encoder, pipe_config, conn_state); 1166 - 1167 - /* Enable hdcp if it's desired */ 1168 - if (conn_state->content_protection == 1169 - DRM_MODE_CONTENT_PROTECTION_DESIRED) 1170 - intel_hdcp_enable(state, encoder, pipe_config, conn_state); 837 + intel_hdcp_enable(state, encoder, pipe_config, conn_state); 1171 838 } 1172 839 1173 840 static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder, ··· 1297 974 if (ret) 1298 975 return ret; 1299 976 977 + /* 978 + * TODO: 979 + * - Also check if compression would allow for the mode 980 + * - Calculate the overhead using drm_dp_bw_overhead() / 981 + * drm_dp_bw_channel_coding_efficiency(), similarly to the 982 + * compute config code, as drm_dp_calc_pbn_mode() doesn't 983 + * account with all the overheads. 984 + * - Check here and during compute config the BW reported by 985 + * DFP_Link_Available_Payload_Bandwidth_Number (or the 986 + * corresponding link capabilities of the sink) in case the 987 + * stream is uncompressed for it by the last branch device. 
988 + */ 1300 989 if (mode_rate > max_rate || mode->clock > max_dotclk || 1301 - drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) { 990 + drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) { 1302 991 *status = MODE_CLOCK_HIGH; 1303 992 return 0; 1304 993 } ··· 1474 1139 intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector); 1475 1140 } 1476 1141 1142 + static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector) 1143 + { 1144 + struct drm_i915_private *i915 = to_i915(connector->base.dev); 1145 + struct drm_dp_desc desc; 1146 + u8 dpcd[DP_RECEIVER_CAP_SIZE]; 1147 + 1148 + if (!connector->dp.dsc_decompression_aux) 1149 + return false; 1150 + 1151 + if (drm_dp_read_desc(connector->dp.dsc_decompression_aux, 1152 + &desc, true) < 0) 1153 + return false; 1154 + 1155 + if (!drm_dp_has_quirk(&desc, 1156 + DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC)) 1157 + return false; 1158 + 1159 + if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd) < 0) 1160 + return false; 1161 + 1162 + if (!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE)) 1163 + return false; 1164 + 1165 + drm_dbg_kms(&i915->drm, 1166 + "[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n", 1167 + connector->base.base.id, connector->base.name); 1168 + 1169 + return true; 1170 + } 1171 + 1477 1172 static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, 1478 1173 struct drm_dp_mst_port *port, 1479 1174 const char *pathprop) ··· 1526 1161 intel_connector->port = port; 1527 1162 drm_dp_mst_get_port_malloc(port); 1528 1163 1164 + intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port); 1165 + intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector); 1166 + intel_connector->dp.dsc_hblank_expansion_quirk = 1167 + detect_dsc_hblank_expansion_quirk(intel_connector); 1168 + 1529 1169 connector = &intel_connector->base; 1530 1170 ret = 
drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, 1531 1171 DRM_MODE_CONNECTOR_DisplayPort); ··· 1541 1171 } 1542 1172 1543 1173 drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); 1544 - 1545 - /* 1546 - * TODO: set the AUX for the actual MST port decompressing the stream. 1547 - * At the moment the driver only supports enabling this globally in the 1548 - * first downstream MST branch, via intel_dp's (root port) AUX. 1549 - */ 1550 - intel_connector->dp.dsc_decompression_aux = &intel_dp->aux; 1551 - intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector); 1552 1174 1553 1175 for_each_pipe(dev_priv, pipe) { 1554 1176 struct drm_encoder *enc = ··· 1622 1260 intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; 1623 1261 intel_encoder->pre_enable = intel_mst_pre_enable_dp; 1624 1262 intel_encoder->enable = intel_mst_enable_dp; 1263 + intel_encoder->audio_enable = intel_audio_codec_enable; 1264 + intel_encoder->audio_disable = intel_audio_codec_disable; 1625 1265 intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; 1626 1266 intel_encoder->get_config = intel_dp_mst_enc_get_config; 1627 1267 intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check; ··· 1770 1406 } 1771 1407 1772 1408 return 0; 1409 + } 1410 + 1411 + static struct intel_connector * 1412 + get_connector_in_state_for_crtc(struct intel_atomic_state *state, 1413 + const struct intel_crtc *crtc) 1414 + { 1415 + struct drm_connector_state *old_conn_state; 1416 + struct drm_connector_state *new_conn_state; 1417 + struct drm_connector *_connector; 1418 + int i; 1419 + 1420 + for_each_oldnew_connector_in_state(&state->base, _connector, 1421 + old_conn_state, new_conn_state, i) { 1422 + struct intel_connector *connector = 1423 + to_intel_connector(_connector); 1424 + 1425 + if (old_conn_state->crtc == &crtc->base || 1426 + new_conn_state->crtc == &crtc->base) 1427 + return connector; 1428 + } 1429 + 1430 + return NULL; 1431 + 
} 1432 + 1433 + /** 1434 + * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC 1435 + * @state: atomic state 1436 + * @crtc: CRTC for which to check the modeset requirement 1437 + * 1438 + * Check if any change in a MST topology requires a forced modeset on @crtc in 1439 + * this topology. One such change is enabling/disabling the DSC decompression 1440 + * state in the first branch device's UFP DPCD as required by one CRTC, while 1441 + * the other @crtc in the same topology is still active, requiring a full modeset 1442 + * on @crtc. 1443 + */ 1444 + bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state, 1445 + struct intel_crtc *crtc) 1446 + { 1447 + const struct intel_connector *crtc_connector; 1448 + const struct drm_connector_state *conn_state; 1449 + const struct drm_connector *_connector; 1450 + int i; 1451 + 1452 + if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc), 1453 + INTEL_OUTPUT_DP_MST)) 1454 + return false; 1455 + 1456 + crtc_connector = get_connector_in_state_for_crtc(state, crtc); 1457 + 1458 + if (!crtc_connector) 1459 + /* None of the connectors in the topology needs modeset */ 1460 + return false; 1461 + 1462 + for_each_new_connector_in_state(&state->base, _connector, conn_state, i) { 1463 + const struct intel_connector *connector = 1464 + to_intel_connector(_connector); 1465 + const struct intel_crtc_state *new_crtc_state; 1466 + const struct intel_crtc_state *old_crtc_state; 1467 + struct intel_crtc *crtc_iter; 1468 + 1469 + if (connector->mst_port != crtc_connector->mst_port || 1470 + !conn_state->crtc) 1471 + continue; 1472 + 1473 + crtc_iter = to_intel_crtc(conn_state->crtc); 1474 + 1475 + new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter); 1476 + old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter); 1477 + 1478 + if (!intel_crtc_needs_modeset(new_crtc_state)) 1479 + continue; 1480 + 1481 + if 
(old_crtc_state->dsc.compression_enable == 1482 + new_crtc_state->dsc.compression_enable) 1483 + continue; 1484 + /* 1485 + * Toggling the decompression flag because of this stream in 1486 + * the first downstream branch device's UFP DPCD may reset the 1487 + * whole branch device. To avoid the reset while other streams 1488 + * are also active modeset the whole MST topology in this 1489 + * case. 1490 + */ 1491 + if (connector->dp.dsc_decompression_aux == 1492 + &connector->mst_port->aux) 1493 + return true; 1494 + } 1495 + 1496 + return false; 1773 1497 }
+5
drivers/gpu/drm/i915/display/intel_dp_mst.h
··· 13 13 struct intel_crtc_state; 14 14 struct intel_digital_port; 15 15 struct intel_dp; 16 + struct intel_link_bw_limits; 16 17 17 18 int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id); 18 19 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port); ··· 23 22 bool intel_dp_mst_source_support(struct intel_dp *intel_dp); 24 23 int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state, 25 24 struct intel_crtc *crtc); 25 + int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state, 26 + struct intel_link_bw_limits *limits); 27 + bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state, 28 + struct intel_crtc *crtc); 26 29 27 30 #endif /* __INTEL_DP_MST_H__ */
+94 -77
drivers/gpu/drm/i915/display/intel_dpio_phy.c
··· 666 666 } 667 667 } 668 668 669 + enum dpio_phy vlv_pipe_to_phy(enum pipe pipe) 670 + { 671 + switch (pipe) { 672 + default: 673 + MISSING_CASE(pipe); 674 + fallthrough; 675 + case PIPE_A: 676 + case PIPE_B: 677 + return DPIO_PHY0; 678 + case PIPE_C: 679 + return DPIO_PHY1; 680 + } 681 + } 682 + 669 683 enum dpio_channel vlv_pipe_to_channel(enum pipe pipe) 670 684 { 671 685 switch (pipe) { ··· 703 689 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 704 690 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 705 691 enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); 706 - enum pipe pipe = crtc->pipe; 692 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 707 693 u32 val; 708 694 int i; 709 695 710 696 vlv_dpio_get(dev_priv); 711 697 712 698 /* Clear calc init */ 713 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); 699 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch)); 714 700 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); 715 701 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); 716 702 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; 717 - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); 703 + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val); 718 704 719 705 if (crtc_state->lane_count > 2) { 720 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); 706 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch)); 721 707 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); 722 708 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); 723 709 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; 724 - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); 710 + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val); 725 711 } 726 712 727 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); 713 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch)); 728 714 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); 729 715 val |= 
DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; 730 - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); 716 + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val); 731 717 732 718 if (crtc_state->lane_count > 2) { 733 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); 719 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch)); 734 720 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); 735 721 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; 736 - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); 722 + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val); 737 723 } 738 724 739 725 /* Program swing deemph */ 740 726 for (i = 0; i < crtc_state->lane_count; i++) { 741 - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); 727 + val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i)); 742 728 val &= ~DPIO_SWING_DEEMPH9P5_MASK; 743 729 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; 744 - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); 730 + vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val); 745 731 } 746 732 747 733 /* Program swing margin */ 748 734 for (i = 0; i < crtc_state->lane_count; i++) { 749 - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); 735 + val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i)); 750 736 751 737 val &= ~DPIO_SWING_MARGIN000_MASK; 752 738 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; ··· 759 745 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); 760 746 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; 761 747 762 - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); 748 + vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val); 763 749 } 764 750 765 751 /* ··· 769 755 * 27 for ch0 and ch1. 
770 756 */ 771 757 for (i = 0; i < crtc_state->lane_count; i++) { 772 - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); 758 + val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i)); 773 759 if (uniq_trans_scale) 774 760 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; 775 761 else 776 762 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; 777 - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); 763 + vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val); 778 764 } 779 765 780 766 /* Start swing calculation */ 781 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); 767 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch)); 782 768 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; 783 - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); 769 + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val); 784 770 785 771 if (crtc_state->lane_count > 2) { 786 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); 772 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch)); 787 773 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; 788 - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); 774 + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val); 789 775 } 790 776 791 777 vlv_dpio_put(dev_priv); ··· 796 782 bool reset) 797 783 { 798 784 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 799 - enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); 800 785 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 801 - enum pipe pipe = crtc->pipe; 786 + enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); 787 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 802 788 u32 val; 803 789 804 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); 790 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch)); 805 791 if (reset) 806 792 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); 807 793 else 808 794 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; 809 - 
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); 795 + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val); 810 796 811 797 if (crtc_state->lane_count > 2) { 812 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); 798 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch)); 813 799 if (reset) 814 800 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); 815 801 else 816 802 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; 817 - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); 803 + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val); 818 804 } 819 805 820 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); 806 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch)); 821 807 val |= CHV_PCS_REQ_SOFTRESET_EN; 822 808 if (reset) 823 809 val &= ~DPIO_PCS_CLK_SOFT_RESET; 824 810 else 825 811 val |= DPIO_PCS_CLK_SOFT_RESET; 826 - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); 812 + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val); 827 813 828 814 if (crtc_state->lane_count > 2) { 829 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); 815 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch)); 830 816 val |= CHV_PCS_REQ_SOFTRESET_EN; 831 817 if (reset) 832 818 val &= ~DPIO_PCS_CLK_SOFT_RESET; 833 819 else 834 820 val |= DPIO_PCS_CLK_SOFT_RESET; 835 - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); 821 + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val); 836 822 } 837 823 } 838 824 ··· 843 829 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 844 830 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 845 831 enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); 832 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 846 833 enum pipe pipe = crtc->pipe; 847 834 unsigned int lane_mask = 848 835 intel_dp_unused_lane_mask(crtc_state->lane_count); ··· 866 851 867 852 /* program left/right clock distribution */ 868 853 if (pipe != PIPE_B) { 869 - val = 
vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); 854 + val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0); 870 855 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); 871 856 if (ch == DPIO_CH0) 872 857 val |= CHV_BUFLEFTENA1_FORCE; 873 858 if (ch == DPIO_CH1) 874 859 val |= CHV_BUFRIGHTENA1_FORCE; 875 - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); 860 + vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val); 876 861 } else { 877 - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); 862 + val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1); 878 863 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); 879 864 if (ch == DPIO_CH0) 880 865 val |= CHV_BUFLEFTENA2_FORCE; 881 866 if (ch == DPIO_CH1) 882 867 val |= CHV_BUFRIGHTENA2_FORCE; 883 - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); 868 + vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val); 884 869 } 885 870 886 871 /* program clock channel usage */ 887 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); 872 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch)); 888 873 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; 889 874 if (pipe != PIPE_B) 890 875 val &= ~CHV_PCS_USEDCLKCHANNEL; 891 876 else 892 877 val |= CHV_PCS_USEDCLKCHANNEL; 893 - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); 878 + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val); 894 879 895 880 if (crtc_state->lane_count > 2) { 896 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); 881 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch)); 897 882 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; 898 883 if (pipe != PIPE_B) 899 884 val &= ~CHV_PCS_USEDCLKCHANNEL; 900 885 else 901 886 val |= CHV_PCS_USEDCLKCHANNEL; 902 - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); 887 + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val); 903 888 } 904 889 905 890 /* ··· 907 892 * matches the pipe, but here we need to 908 893 * pick the CL based on the port. 
909 894 */ 910 - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); 895 + val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch)); 911 896 if (pipe != PIPE_B) 912 897 val &= ~CHV_CMN_USEDCLKCHANNEL; 913 898 else 914 899 val |= CHV_CMN_USEDCLKCHANNEL; 915 - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); 900 + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val); 916 901 917 902 vlv_dpio_put(dev_priv); 918 903 } ··· 925 910 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 926 911 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 927 912 enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); 928 - enum pipe pipe = crtc->pipe; 913 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 929 914 int data, i, stagger; 930 915 u32 val; 931 916 932 917 vlv_dpio_get(dev_priv); 933 918 934 919 /* allow hardware to manage TX FIFO reset source */ 935 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); 920 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch)); 936 921 val &= ~DPIO_LANEDESKEW_STRAP_OVRD; 937 - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); 922 + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val); 938 923 939 924 if (crtc_state->lane_count > 2) { 940 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); 925 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch)); 941 926 val &= ~DPIO_LANEDESKEW_STRAP_OVRD; 942 - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); 927 + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val); 943 928 } 944 929 945 930 /* Program Tx lane latency optimal setting*/ ··· 949 934 data = 0x0; 950 935 else 951 936 data = (i == 1) ? 
0x0 : 0x1; 952 - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), 937 + vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i), 953 938 data << DPIO_UPAR_SHIFT); 954 939 } 955 940 ··· 965 950 else 966 951 stagger = 0x2; 967 952 968 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); 953 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch)); 969 954 val |= DPIO_TX2_STAGGER_MASK(0x1f); 970 - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); 955 + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val); 971 956 972 957 if (crtc_state->lane_count > 2) { 973 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); 958 + val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch)); 974 959 val |= DPIO_TX2_STAGGER_MASK(0x1f); 975 - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); 960 + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val); 976 961 } 977 962 978 - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), 963 + vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch), 979 964 DPIO_LANESTAGGER_STRAP(stagger) | 980 965 DPIO_LANESTAGGER_STRAP_OVRD | 981 966 DPIO_TX1_STAGGER_MASK(0x1f) | ··· 983 968 DPIO_TX2_STAGGER_MULT(0)); 984 969 985 970 if (crtc_state->lane_count > 2) { 986 - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), 971 + vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch), 987 972 DPIO_LANESTAGGER_STRAP(stagger) | 988 973 DPIO_LANESTAGGER_STRAP_OVRD | 989 974 DPIO_TX1_STAGGER_MASK(0x1f) | ··· 1013 998 { 1014 999 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1015 1000 enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe; 1001 + enum dpio_phy phy = vlv_pipe_to_phy(pipe); 1016 1002 u32 val; 1017 1003 1018 1004 vlv_dpio_get(dev_priv); 1019 1005 1020 1006 /* disable left/right clock distribution */ 1021 1007 if (pipe != PIPE_B) { 1022 - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); 1008 + val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0); 1023 1009 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); 1024 - 
vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); 1010 + vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val); 1025 1011 } else { 1026 - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); 1012 + val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1); 1027 1013 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); 1028 - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); 1014 + vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val); 1029 1015 } 1030 1016 1031 1017 vlv_dpio_put(dev_priv); ··· 1052 1036 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 1053 1037 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1054 1038 enum dpio_channel port = vlv_dig_port_to_channel(dig_port); 1055 - enum pipe pipe = crtc->pipe; 1039 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 1056 1040 1057 1041 vlv_dpio_get(dev_priv); 1058 1042 1059 - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); 1060 - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value); 1061 - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 1043 + vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), 0x00000000); 1044 + vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(port), demph_reg_value); 1045 + vlv_dpio_write(dev_priv, phy, VLV_TX_DW2(port), 1062 1046 uniqtranscale_reg_value); 1063 - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040); 1047 + vlv_dpio_write(dev_priv, phy, VLV_TX_DW3(port), 0x0C782040); 1064 1048 1065 1049 if (tx3_demph) 1066 - vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph); 1050 + vlv_dpio_write(dev_priv, phy, VLV_TX3_DW4(port), tx3_demph); 1067 1051 1068 - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); 1069 - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value); 1070 - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); 1052 + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11(port), 0x00030000); 1053 + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9(port), preemph_reg_value); 1054 + 
vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); 1071 1055 1072 1056 vlv_dpio_put(dev_priv); 1073 1057 } ··· 1079 1063 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1080 1064 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1081 1065 enum dpio_channel port = vlv_dig_port_to_channel(dig_port); 1082 - enum pipe pipe = crtc->pipe; 1066 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 1083 1067 1084 1068 /* Program Tx lane resets to default */ 1085 1069 vlv_dpio_get(dev_priv); 1086 1070 1087 - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 1071 + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), 1088 1072 DPIO_PCS_TX_LANE2_RESET | 1089 1073 DPIO_PCS_TX_LANE1_RESET); 1090 - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 1074 + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), 1091 1075 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | 1092 1076 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | 1093 1077 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | 1094 1078 DPIO_PCS_CLK_SOFT_RESET); 1095 1079 1096 1080 /* Fix up inter-pair skew failure */ 1097 - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); 1098 - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); 1099 - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); 1081 + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12(port), 0x00750f00); 1082 + vlv_dpio_write(dev_priv, phy, VLV_TX_DW11(port), 0x00001500); 1083 + vlv_dpio_write(dev_priv, phy, VLV_TX_DW14(port), 0x40400000); 1100 1084 1101 1085 vlv_dpio_put(dev_priv); 1102 1086 } ··· 1110 1094 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1111 1095 enum dpio_channel port = vlv_dig_port_to_channel(dig_port); 1112 1096 enum pipe pipe = crtc->pipe; 1097 + enum dpio_phy phy = vlv_pipe_to_phy(pipe); 1113 1098 u32 val; 1114 1099 1115 1100 vlv_dpio_get(dev_priv); 1116 1101 1117 1102 /* Enable clock channels for this port */ 1118 - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); 1103 + val = 
vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(port)); 1119 1104 val = 0; 1120 1105 if (pipe) 1121 1106 val |= (1<<21); 1122 1107 else 1123 1108 val &= ~(1<<21); 1124 1109 val |= 0x001000c4; 1125 - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); 1110 + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8(port), val); 1126 1111 1127 1112 /* Program lane clock */ 1128 - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); 1129 - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); 1113 + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14(port), 0x00760018); 1114 + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23(port), 0x00400888); 1130 1115 1131 1116 vlv_dpio_put(dev_priv); 1132 1117 } ··· 1139 1122 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1140 1123 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 1141 1124 enum dpio_channel port = vlv_dig_port_to_channel(dig_port); 1142 - enum pipe pipe = crtc->pipe; 1125 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 1143 1126 1144 1127 vlv_dpio_get(dev_priv); 1145 - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); 1146 - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); 1128 + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), 0x00000000); 1129 + vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), 0x00e00060); 1147 1130 vlv_dpio_put(dev_priv); 1148 1131 }
+5
drivers/gpu/drm/i915/display/intel_dpio_phy.h
··· 44 44 45 45 enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port); 46 46 enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port); 47 + enum dpio_phy vlv_pipe_to_phy(enum pipe pipe); 47 48 enum dpio_channel vlv_pipe_to_channel(enum pipe pipe); 48 49 49 50 void chv_set_phy_signal_level(struct intel_encoder *encoder, ··· 114 113 return DPIO_CH0; 115 114 } 116 115 static inline enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port) 116 + { 117 + return DPIO_PHY0; 118 + } 119 + static inline enum dpio_phy vlv_pipe_to_phy(enum pipe pipe) 117 120 { 118 121 return DPIO_PHY0; 119 122 }
+220 -50
drivers/gpu/drm/i915/display/intel_dpll.c
··· 16 16 #include "intel_dpio_phy.h" 17 17 #include "intel_dpll.h" 18 18 #include "intel_lvds.h" 19 + #include "intel_lvds_regs.h" 19 20 #include "intel_panel.h" 20 21 #include "intel_pps.h" 21 22 #include "intel_snps_phy.h" ··· 312 311 * divided-down version of it. 313 312 */ 314 313 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 315 - int pnv_calc_dpll_params(int refclk, struct dpll *clock) 314 + static int pnv_calc_dpll_params(int refclk, struct dpll *clock) 316 315 { 317 316 clock->m = clock->m2 + 2; 318 317 clock->p = clock->p1 * clock->p2; ··· 343 342 return clock->dot; 344 343 } 345 344 346 - int vlv_calc_dpll_params(int refclk, struct dpll *clock) 345 + static int vlv_calc_dpll_params(int refclk, struct dpll *clock) 347 346 { 348 347 clock->m = clock->m1 * clock->m2; 349 348 clock->p = clock->p1 * clock->p2 * 5; ··· 367 366 DIV_ROUND_CLOSEST(clock->vco, clock->p); 368 367 369 368 return clock->dot; 369 + } 370 + 371 + static int i9xx_pll_refclk(struct drm_device *dev, 372 + const struct intel_crtc_state *pipe_config) 373 + { 374 + struct drm_i915_private *dev_priv = to_i915(dev); 375 + u32 dpll = pipe_config->dpll_hw_state.dpll; 376 + 377 + if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 378 + return dev_priv->display.vbt.lvds_ssc_freq; 379 + else if (HAS_PCH_SPLIT(dev_priv)) 380 + return 120000; 381 + else if (DISPLAY_VER(dev_priv) != 2) 382 + return 96000; 383 + else 384 + return 48000; 385 + } 386 + 387 + /* Returns the clock of the currently programmed mode of the given pipe. 
*/ 388 + void i9xx_crtc_clock_get(struct intel_crtc *crtc, 389 + struct intel_crtc_state *pipe_config) 390 + { 391 + struct drm_device *dev = crtc->base.dev; 392 + struct drm_i915_private *dev_priv = to_i915(dev); 393 + u32 dpll = pipe_config->dpll_hw_state.dpll; 394 + u32 fp; 395 + struct dpll clock; 396 + int port_clock; 397 + int refclk = i9xx_pll_refclk(dev, pipe_config); 398 + 399 + if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 400 + fp = pipe_config->dpll_hw_state.fp0; 401 + else 402 + fp = pipe_config->dpll_hw_state.fp1; 403 + 404 + clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 405 + if (IS_PINEVIEW(dev_priv)) { 406 + clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 407 + clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 408 + } else { 409 + clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 410 + clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 411 + } 412 + 413 + if (DISPLAY_VER(dev_priv) != 2) { 414 + if (IS_PINEVIEW(dev_priv)) 415 + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 416 + DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 417 + else 418 + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 419 + DPLL_FPA01_P1_POST_DIV_SHIFT); 420 + 421 + switch (dpll & DPLL_MODE_MASK) { 422 + case DPLLB_MODE_DAC_SERIAL: 423 + clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 424 + 5 : 10; 425 + break; 426 + case DPLLB_MODE_LVDS: 427 + clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 
428 + 7 : 14; 429 + break; 430 + default: 431 + drm_dbg_kms(&dev_priv->drm, 432 + "Unknown DPLL mode %08x in programmed " 433 + "mode\n", (int)(dpll & DPLL_MODE_MASK)); 434 + return; 435 + } 436 + 437 + if (IS_PINEVIEW(dev_priv)) 438 + port_clock = pnv_calc_dpll_params(refclk, &clock); 439 + else 440 + port_clock = i9xx_calc_dpll_params(refclk, &clock); 441 + } else { 442 + enum pipe lvds_pipe; 443 + 444 + if (IS_I85X(dev_priv) && 445 + intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) && 446 + lvds_pipe == crtc->pipe) { 447 + u32 lvds = intel_de_read(dev_priv, LVDS); 448 + 449 + clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 450 + DPLL_FPA01_P1_POST_DIV_SHIFT); 451 + 452 + if (lvds & LVDS_CLKB_POWER_UP) 453 + clock.p2 = 7; 454 + else 455 + clock.p2 = 14; 456 + } else { 457 + if (dpll & PLL_P1_DIVIDE_BY_TWO) 458 + clock.p1 = 2; 459 + else { 460 + clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 461 + DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 462 + } 463 + if (dpll & PLL_P2_DIVIDE_BY_4) 464 + clock.p2 = 4; 465 + else 466 + clock.p2 = 2; 467 + } 468 + 469 + port_clock = i9xx_calc_dpll_params(refclk, &clock); 470 + } 471 + 472 + /* 473 + * This value includes pixel_multiplier. We will use 474 + * port_clock to compute adjusted_mode.crtc_clock in the 475 + * encoder's get_config() function. 
476 + */ 477 + pipe_config->port_clock = port_clock; 478 + } 479 + 480 + void vlv_crtc_clock_get(struct intel_crtc *crtc, 481 + struct intel_crtc_state *pipe_config) 482 + { 483 + struct drm_device *dev = crtc->base.dev; 484 + struct drm_i915_private *dev_priv = to_i915(dev); 485 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 486 + struct dpll clock; 487 + u32 mdiv; 488 + int refclk = 100000; 489 + 490 + /* In case of DSI, DPLL will not be used */ 491 + if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 492 + return; 493 + 494 + vlv_dpio_get(dev_priv); 495 + mdiv = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(crtc->pipe)); 496 + vlv_dpio_put(dev_priv); 497 + 498 + clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 499 + clock.m2 = mdiv & DPIO_M2DIV_MASK; 500 + clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 501 + clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 502 + clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 503 + 504 + pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 505 + } 506 + 507 + void chv_crtc_clock_get(struct intel_crtc *crtc, 508 + struct intel_crtc_state *pipe_config) 509 + { 510 + struct drm_device *dev = crtc->base.dev; 511 + struct drm_i915_private *dev_priv = to_i915(dev); 512 + enum dpio_channel port = vlv_pipe_to_channel(crtc->pipe); 513 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 514 + struct dpll clock; 515 + u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 516 + int refclk = 100000; 517 + 518 + /* In case of DSI, DPLL will not be used */ 519 + if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 520 + return; 521 + 522 + vlv_dpio_get(dev_priv); 523 + cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(port)); 524 + pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(port)); 525 + pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(port)); 526 + pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(port)); 527 + pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port)); 528 + vlv_dpio_put(dev_priv); 529 + 530 + clock.m1 = (pll_dw1 
& 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 531 + clock.m2 = (pll_dw0 & 0xff) << 22; 532 + if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 533 + clock.m2 |= pll_dw2 & 0x3fffff; 534 + clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 535 + clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 536 + clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 537 + 538 + pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 370 539 } 371 540 372 541 /* ··· 1174 1003 static int mtl_crtc_compute_clock(struct intel_atomic_state *state, 1175 1004 struct intel_crtc *crtc) 1176 1005 { 1177 - struct drm_i915_private *i915 = to_i915(state->base.dev); 1178 1006 struct intel_crtc_state *crtc_state = 1179 1007 intel_atomic_get_new_crtc_state(state, crtc); 1180 1008 struct intel_encoder *encoder = 1181 1009 intel_get_crtc_new_encoder(state, crtc_state); 1182 - enum phy phy = intel_port_to_phy(i915, encoder->port); 1183 1010 int ret; 1184 1011 1185 1012 ret = intel_cx0pll_calc_state(crtc_state, encoder); ··· 1185 1016 return ret; 1186 1017 1187 1018 /* TODO: Do the readback via intel_compute_shared_dplls() */ 1188 - if (intel_is_c10phy(i915, phy)) 1189 - crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10); 1190 - else 1191 - crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20); 1019 + crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state); 1192 1020 1193 1021 crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state); 1194 1022 ··· 1811 1645 } 1812 1646 1813 1647 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, 1814 - enum pipe pipe) 1648 + enum dpio_phy phy) 1815 1649 { 1816 1650 u32 reg_val; 1817 1651 ··· 1819 1653 * PLLB opamp always calibrates to max value of 0x3f, force enable it 1820 1654 * and set it to a reasonable value instead. 
1821 1655 */ 1822 - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 1656 + reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1)); 1823 1657 reg_val &= 0xffffff00; 1824 1658 reg_val |= 0x00000030; 1825 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 1659 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val); 1826 1660 1827 - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 1661 + reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13); 1828 1662 reg_val &= 0x00ffffff; 1829 1663 reg_val |= 0x8c000000; 1830 - vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 1664 + vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val); 1831 1665 1832 - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 1666 + reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1)); 1833 1667 reg_val &= 0xffffff00; 1834 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 1668 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val); 1835 1669 1836 - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 1670 + reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13); 1837 1671 reg_val &= 0x00ffffff; 1838 1672 reg_val |= 0xb0000000; 1839 - vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 1673 + vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val); 1840 1674 } 1841 1675 1842 1676 static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state) 1843 1677 { 1844 1678 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1845 1679 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1680 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 1846 1681 enum pipe pipe = crtc->pipe; 1847 1682 u32 mdiv; 1848 1683 u32 bestn, bestm1, bestm2, bestp1, bestp2; ··· 1861 1694 1862 1695 /* PLL B needs special handling */ 1863 1696 if (pipe == PIPE_B) 1864 - vlv_pllb_recal_opamp(dev_priv, pipe); 1697 + vlv_pllb_recal_opamp(dev_priv, phy); 1865 1698 1866 1699 /* Set up Tx target for periodic Rcomp update */ 1867 - vlv_dpio_write(dev_priv, pipe, 
VLV_PLL_DW9_BCAST, 0x0100000f); 1700 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9_BCAST, 0x0100000f); 1868 1701 1869 1702 /* Disable target IRef on PLL */ 1870 - reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 1703 + reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW8(pipe)); 1871 1704 reg_val &= 0x00ffffff; 1872 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 1705 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW8(pipe), reg_val); 1873 1706 1874 1707 /* Disable fast lock */ 1875 - vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 1708 + vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610); 1876 1709 1877 1710 /* Set idtafcrecal before PLL is enabled */ 1878 1711 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); ··· 1886 1719 * Note: don't use the DAC post divider as it seems unstable. 1887 1720 */ 1888 1721 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 1889 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 1722 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv); 1890 1723 1891 1724 mdiv |= DPIO_ENABLE_CALIBRATION; 1892 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 1725 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv); 1893 1726 1894 1727 /* Set HBR and RBR LPF coefficients */ 1895 1728 if (crtc_state->port_clock == 162000 || 1896 1729 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) || 1897 1730 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 1898 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 1731 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe), 1899 1732 0x009f0003); 1900 1733 else 1901 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 1734 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe), 1902 1735 0x00d0000f); 1903 1736 1904 1737 if (intel_crtc_has_dp_encoder(crtc_state)) { 1905 1738 /* Use SSC source */ 1906 1739 if (pipe == PIPE_A) 1907 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 1740 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), 1908 1741 
0x0df40000); 1909 1742 else 1910 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 1743 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), 1911 1744 0x0df70000); 1912 1745 } else { /* HDMI or VGA */ 1913 1746 /* Use bend source */ 1914 1747 if (pipe == PIPE_A) 1915 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 1748 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), 1916 1749 0x0df70000); 1917 1750 else 1918 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 1751 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe), 1919 1752 0x0df40000); 1920 1753 } 1921 1754 1922 - coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 1755 + coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(pipe)); 1923 1756 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 1924 1757 if (intel_crtc_has_dp_encoder(crtc_state)) 1925 1758 coreclk |= 0x01000000; 1926 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 1759 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(pipe), coreclk); 1927 1760 1928 - vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 1761 + vlv_dpio_write(dev_priv, phy, VLV_PLL_DW11(pipe), 0x87871000); 1929 1762 1930 1763 vlv_dpio_put(dev_priv); 1931 1764 } ··· 1976 1809 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1977 1810 enum pipe pipe = crtc->pipe; 1978 1811 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1812 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 1979 1813 u32 loopfilter, tribuf_calcntr; 1980 1814 u32 bestm2, bestp1, bestp2, bestm2_frac; 1981 1815 u32 dpio_val; ··· 1993 1825 vlv_dpio_get(dev_priv); 1994 1826 1995 1827 /* p1 and p2 divider */ 1996 - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 1828 + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(port), 1997 1829 5 << DPIO_CHV_S1_DIV_SHIFT | 1998 1830 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 1999 1831 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 2000 1832 1 << DPIO_CHV_K_DIV_SHIFT); 2001 1833 2002 1834 /* Feedback post-divider - m2 */ 2003 - vlv_dpio_write(dev_priv, pipe, 
CHV_PLL_DW0(port), bestm2); 1835 + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(port), bestm2); 2004 1836 2005 1837 /* Feedback refclk divider - n and m1 */ 2006 - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 1838 + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(port), 2007 1839 DPIO_CHV_M1_DIV_BY_2 | 2008 1840 1 << DPIO_CHV_N_DIV_SHIFT); 2009 1841 2010 1842 /* M2 fraction division */ 2011 - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 1843 + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(port), bestm2_frac); 2012 1844 2013 1845 /* M2 fraction division enable */ 2014 - dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 1846 + dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port)); 2015 1847 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 2016 1848 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 2017 1849 if (bestm2_frac) 2018 1850 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 2019 - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 1851 + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(port), dpio_val); 2020 1852 2021 1853 /* Program digital lock detect threshold */ 2022 - dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 1854 + dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(port)); 2023 1855 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 2024 1856 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 2025 1857 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 2026 1858 if (!bestm2_frac) 2027 1859 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 2028 - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 1860 + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(port), dpio_val); 2029 1861 2030 1862 /* Loop filter */ 2031 1863 if (vco == 5400000) { ··· 2050 1882 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 2051 1883 tribuf_calcntr = 0; 2052 1884 } 2053 - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 1885 + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(port), loopfilter); 2054 1886 2055 - dpio_val = 
vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 1887 + dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(port)); 2056 1888 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 2057 1889 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 2058 - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 1890 + vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(port), dpio_val); 2059 1891 2060 1892 /* AFC Recal */ 2061 - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 2062 - vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 1893 + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), 1894 + vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)) | 2063 1895 DPIO_AFC_RECAL); 2064 1896 2065 1897 vlv_dpio_put(dev_priv); ··· 2071 1903 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2072 1904 enum pipe pipe = crtc->pipe; 2073 1905 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1906 + enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe); 2074 1907 u32 tmp; 2075 1908 2076 1909 vlv_dpio_get(dev_priv); 2077 1910 2078 1911 /* Enable back the 10bit clock to display controller */ 2079 - tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1912 + tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)); 2080 1913 tmp |= DPIO_DCLKP_EN; 2081 - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); 1914 + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), tmp); 2082 1915 2083 1916 vlv_dpio_put(dev_priv); 2084 1917 ··· 2200 2031 void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 2201 2032 { 2202 2033 enum dpio_channel port = vlv_pipe_to_channel(pipe); 2034 + enum dpio_phy phy = vlv_pipe_to_phy(pipe); 2203 2035 u32 val; 2204 2036 2205 2037 /* Make sure the pipe isn't still relying on us */ ··· 2217 2047 vlv_dpio_get(dev_priv); 2218 2048 2219 2049 /* Disable 10bit clock to display controller */ 2220 - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 2050 + val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)); 2221 2051 val &= ~DPIO_DCLKP_EN; 2222 
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); 2052 + vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), val); 2223 2053 2224 2054 vlv_dpio_put(dev_priv); 2225 2055 }
+7 -2
drivers/gpu/drm/i915/display/intel_dpll.h
··· 20 20 struct intel_crtc *crtc); 21 21 int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state, 22 22 struct intel_crtc *crtc); 23 - int vlv_calc_dpll_params(int refclk, struct dpll *clock); 24 - int pnv_calc_dpll_params(int refclk, struct dpll *clock); 25 23 int i9xx_calc_dpll_params(int refclk, struct dpll *clock); 26 24 u32 i9xx_dpll_compute_fp(const struct dpll *dpll); 27 25 void vlv_compute_dpll(struct intel_crtc_state *crtc_state); ··· 38 40 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, 39 41 struct dpll *best_clock); 40 42 int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); 43 + 44 + void i9xx_crtc_clock_get(struct intel_crtc *crtc, 45 + struct intel_crtc_state *pipe_config); 46 + void vlv_crtc_clock_get(struct intel_crtc *crtc, 47 + struct intel_crtc_state *pipe_config); 48 + void chv_crtc_clock_get(struct intel_crtc *crtc, 49 + struct intel_crtc_state *pipe_config); 41 50 42 51 void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe); 43 52 void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
+95 -92
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
··· 219 219 return MG_PLL_ENABLE(tc_port); 220 220 } 221 221 222 + static void _intel_enable_shared_dpll(struct drm_i915_private *i915, 223 + struct intel_shared_dpll *pll) 224 + { 225 + if (pll->info->power_domain) 226 + pll->wakeref = intel_display_power_get(i915, pll->info->power_domain); 227 + 228 + pll->info->funcs->enable(i915, pll); 229 + pll->on = true; 230 + } 231 + 232 + static void _intel_disable_shared_dpll(struct drm_i915_private *i915, 233 + struct intel_shared_dpll *pll) 234 + { 235 + pll->info->funcs->disable(i915, pll); 236 + pll->on = false; 237 + 238 + if (pll->info->power_domain) 239 + intel_display_power_put(i915, pll->info->power_domain, pll->wakeref); 240 + } 241 + 222 242 /** 223 243 * intel_enable_shared_dpll - enable a CRTC's shared DPLL 224 244 * @crtc_state: CRTC, and its state, which has a shared DPLL ··· 278 258 drm_WARN_ON(&i915->drm, pll->on); 279 259 280 260 drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name); 281 - pll->info->funcs->enable(i915, pll); 282 - pll->on = true; 261 + 262 + _intel_enable_shared_dpll(i915, pll); 283 263 284 264 out: 285 265 mutex_unlock(&i915->display.dpll.lock); ··· 324 304 goto out; 325 305 326 306 drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name); 327 - pll->info->funcs->disable(i915, pll); 328 - pll->on = false; 307 + 308 + _intel_disable_shared_dpll(i915, pll); 329 309 330 310 out: 331 311 mutex_unlock(&i915->display.dpll.lock); ··· 651 631 }; 652 632 653 633 static const struct dpll_info pch_plls[] = { 654 - { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 }, 655 - { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 }, 656 - { }, 634 + { .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, }, 635 + { .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, }, 636 + {} 657 637 }; 658 638 659 639 static const struct intel_dpll_mgr pch_pll_mgr = { ··· 1259 1239 }; 1260 1240 1261 1241 static const struct dpll_info hsw_plls[] = { 
1262 - { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 }, 1263 - { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 }, 1264 - { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 }, 1265 - { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON }, 1266 - { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON }, 1267 - { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON }, 1268 - { }, 1242 + { .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, }, 1243 + { .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, }, 1244 + { .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, }, 1245 + { .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810, 1246 + .flags = INTEL_DPLL_ALWAYS_ON, }, 1247 + { .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350, 1248 + .flags = INTEL_DPLL_ALWAYS_ON, }, 1249 + { .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700, 1250 + .flags = INTEL_DPLL_ALWAYS_ON, }, 1251 + {} 1269 1252 }; 1270 1253 1271 1254 static const struct intel_dpll_mgr hsw_pll_mgr = { ··· 1944 1921 }; 1945 1922 1946 1923 static const struct dpll_info skl_plls[] = { 1947 - { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON }, 1948 - { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 }, 1949 - { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 }, 1950 - { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 }, 1951 - { }, 1924 + { .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0, 1925 + .flags = INTEL_DPLL_ALWAYS_ON, }, 1926 + { .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, }, 1927 + { .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, }, 1928 + { .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, }, 1929 + {} 1952 1930 }; 1953 1931 1954 1932 static const struct 
intel_dpll_mgr skl_pll_mgr = { ··· 2400 2376 }; 2401 2377 2402 2378 static const struct dpll_info bxt_plls[] = { 2403 - { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 }, 2404 - { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 }, 2405 - { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 }, 2406 - { }, 2379 + { .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, }, 2380 + { .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, }, 2381 + { .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, }, 2382 + {} 2407 2383 }; 2408 2384 2409 2385 static const struct intel_dpll_mgr bxt_pll_mgr = { ··· 3858 3834 { 3859 3835 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll); 3860 3836 3861 - if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && 3862 - pll->info->id == DPLL_ID_EHL_DPLL4) { 3863 - 3864 - /* 3865 - * We need to disable DC states when this DPLL is enabled. 3866 - * This can be done by taking a reference on DPLL4 power 3867 - * domain. 
3868 - */ 3869 - pll->wakeref = intel_display_power_get(i915, 3870 - POWER_DOMAIN_DC_OFF); 3871 - } 3872 - 3873 3837 icl_pll_power_enable(i915, pll, enable_reg); 3874 3838 3875 3839 icl_dpll_write(i915, pll); ··· 3953 3941 i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll); 3954 3942 3955 3943 icl_pll_disable(i915, pll, enable_reg); 3956 - 3957 - if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && 3958 - pll->info->id == DPLL_ID_EHL_DPLL4) 3959 - intel_display_power_put(i915, POWER_DOMAIN_DC_OFF, 3960 - pll->wakeref); 3961 3944 } 3962 3945 3963 3946 static void tbt_pll_disable(struct drm_i915_private *i915, ··· 4021 4014 }; 4022 4015 4023 4016 static const struct dpll_info icl_plls[] = { 4024 - { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, 4025 - { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, 4026 - { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, 4027 - { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, 4028 - { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, 4029 - { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, 4030 - { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, 4031 - { }, 4017 + { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, }, 4018 + { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, }, 4019 + { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, }, 4020 + { .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, }, 4021 + { .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, }, 4022 + { .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, }, 4023 + { .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, }, 4024 + {} 4032 4025 }; 4033 4026 4034 4027 static const struct intel_dpll_mgr icl_pll_mgr = { ··· 4042 4035 }; 4043 4036 4044 4037 static const struct dpll_info ehl_plls[] = { 4045 - { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, 4046 - { "DPLL 1", &combo_pll_funcs, 
DPLL_ID_ICL_DPLL1, 0 }, 4047 - { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 }, 4048 - { }, 4038 + { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, }, 4039 + { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, }, 4040 + { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, 4041 + .power_domain = POWER_DOMAIN_DC_OFF, }, 4042 + {} 4049 4043 }; 4050 4044 4051 4045 static const struct intel_dpll_mgr ehl_pll_mgr = { ··· 4066 4058 }; 4067 4059 4068 4060 static const struct dpll_info tgl_plls[] = { 4069 - { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, 4070 - { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, 4071 - { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, 4072 - { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, 4073 - { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, 4074 - { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, 4075 - { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, 4076 - { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 }, 4077 - { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 }, 4078 - { }, 4061 + { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, }, 4062 + { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, }, 4063 + { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, }, 4064 + { .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, }, 4065 + { .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, }, 4066 + { .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, }, 4067 + { .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, }, 4068 + { .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, }, 4069 + { .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, }, 4070 + {} 4079 4071 }; 4080 4072 4081 4073 static const struct intel_dpll_mgr tgl_pll_mgr = { ··· 4089 4081 }; 4090 4082 4091 
4083 static const struct dpll_info rkl_plls[] = { 4092 - { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, 4093 - { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, 4094 - { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 }, 4095 - { }, 4084 + { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, }, 4085 + { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, }, 4086 + { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, }, 4087 + {} 4096 4088 }; 4097 4089 4098 4090 static const struct intel_dpll_mgr rkl_pll_mgr = { ··· 4105 4097 }; 4106 4098 4107 4099 static const struct dpll_info dg1_plls[] = { 4108 - { "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 }, 4109 - { "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 }, 4110 - { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 }, 4111 - { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 }, 4112 - { }, 4100 + { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, }, 4101 + { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, }, 4102 + { .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, }, 4103 + { .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, }, 4104 + {} 4113 4105 }; 4114 4106 4115 4107 static const struct intel_dpll_mgr dg1_pll_mgr = { ··· 4122 4114 }; 4123 4115 4124 4116 static const struct dpll_info adls_plls[] = { 4125 - { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, 4126 - { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, 4127 - { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 }, 4128 - { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 }, 4129 - { }, 4117 + { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, }, 4118 + { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, }, 4119 + { .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, }, 4120 + { .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = 
DPLL_ID_DG1_DPLL3, }, 4121 + {} 4130 4122 }; 4131 4123 4132 4124 static const struct intel_dpll_mgr adls_pll_mgr = { ··· 4139 4131 }; 4140 4132 4141 4133 static const struct dpll_info adlp_plls[] = { 4142 - { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, 4143 - { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, 4144 - { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, 4145 - { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, 4146 - { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, 4147 - { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, 4148 - { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, 4149 - { }, 4134 + { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, }, 4135 + { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, }, 4136 + { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL, }, 4137 + { .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, }, 4138 + { .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, }, 4139 + { .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, }, 4140 + { .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, }, 4141 + {} 4150 4142 }; 4151 4143 4152 4144 static const struct intel_dpll_mgr adlp_pll_mgr = { ··· 4373 4365 4374 4366 pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state); 4375 4367 4376 - if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && 4377 - pll->on && 4378 - pll->info->id == DPLL_ID_EHL_DPLL4) { 4379 - pll->wakeref = intel_display_power_get(i915, 4380 - POWER_DOMAIN_DC_OFF); 4381 - } 4368 + if (pll->on && pll->info->power_domain) 4369 + pll->wakeref = intel_display_power_get(i915, pll->info->power_domain); 4382 4370 4383 4371 pll->state.pipe_mask = 0; 4384 4372 for_each_intel_crtc(&i915->drm, crtc) { ··· 4421 4417 "%s enabled but not in use, disabling\n", 4422 4418 pll->info->name); 4423 4419 4424 - pll->info->funcs->disable(i915, pll); 4425 - 
pll->on = false; 4420 + _intel_disable_shared_dpll(i915, pll); 4426 4421 } 4427 4422 4428 4423 void intel_dpll_sanitize_state(struct drm_i915_private *i915)
+6
drivers/gpu/drm/i915/display/intel_dpll_mgr.h
··· 27 27 28 28 #include <linux/types.h> 29 29 30 + #include "intel_display_power.h" 30 31 #include "intel_wakeref.h" 31 32 32 33 #define for_each_shared_dpll(__i915, __pll, __i) \ ··· 270 269 * @id: unique indentifier for this DPLL 271 270 */ 272 271 enum intel_dpll_id id; 272 + 273 + /** 274 + * @power_domain: extra power domain required by the DPLL 275 + */ 276 + enum intel_display_power_domain power_domain; 273 277 274 278 #define INTEL_DPLL_ALWAYS_ON (1 << 0) 275 279 /**
-24
drivers/gpu/drm/i915/display/intel_dpt.c
··· 9 9 #include "gt/gen8_ppgtt.h" 10 10 11 11 #include "i915_drv.h" 12 - #include "i915_reg.h" 13 - #include "intel_de.h" 14 12 #include "intel_display_types.h" 15 13 #include "intel_dpt.h" 16 14 #include "intel_fb.h" ··· 316 318 i915_vm_put(&dpt->vm); 317 319 } 318 320 319 - void intel_dpt_configure(struct intel_crtc *crtc) 320 - { 321 - struct drm_i915_private *i915 = to_i915(crtc->base.dev); 322 - 323 - if (DISPLAY_VER(i915) == 14) { 324 - enum pipe pipe = crtc->pipe; 325 - enum plane_id plane_id; 326 - 327 - for_each_plane_id_on_crtc(crtc, plane_id) { 328 - if (plane_id == PLANE_CURSOR) 329 - continue; 330 - 331 - intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id), 332 - PLANE_CHICKEN_DISABLE_DPT, 333 - i915->params.enable_dpt ? 0 : PLANE_CHICKEN_DISABLE_DPT); 334 - } 335 - } else if (DISPLAY_VER(i915) == 13) { 336 - intel_de_rmw(i915, CHICKEN_MISC_2, 337 - CHICKEN_MISC_DISABLE_DPT, 338 - i915->params.enable_dpt ? 0 : CHICKEN_MISC_DISABLE_DPT); 339 - } 340 - }
-2
drivers/gpu/drm/i915/display/intel_dpt.h
··· 10 10 11 11 struct i915_address_space; 12 12 struct i915_vma; 13 - struct intel_crtc; 14 13 struct intel_framebuffer; 15 14 16 15 void intel_dpt_destroy(struct i915_address_space *vm); ··· 19 20 void intel_dpt_resume(struct drm_i915_private *i915); 20 21 struct i915_address_space * 21 22 intel_dpt_create(struct intel_framebuffer *fb); 22 - void intel_dpt_configure(struct intel_crtc *crtc); 23 23 24 24 #endif /* __INTEL_DPT_H__ */
+34
drivers/gpu/drm/i915/display/intel_dpt_common.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "i915_reg.h" 7 + #include "intel_de.h" 8 + #include "intel_display_types.h" 9 + #include "intel_dpt_common.h" 10 + 11 + void intel_dpt_configure(struct intel_crtc *crtc) 12 + { 13 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 14 + 15 + if (DISPLAY_VER(i915) == 14) { 16 + enum pipe pipe = crtc->pipe; 17 + enum plane_id plane_id; 18 + 19 + for_each_plane_id_on_crtc(crtc, plane_id) { 20 + if (plane_id == PLANE_CURSOR) 21 + continue; 22 + 23 + intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id), 24 + PLANE_CHICKEN_DISABLE_DPT, 25 + i915->display.params.enable_dpt ? 0 : 26 + PLANE_CHICKEN_DISABLE_DPT); 27 + } 28 + } else if (DISPLAY_VER(i915) == 13) { 29 + intel_de_rmw(i915, CHICKEN_MISC_2, 30 + CHICKEN_MISC_DISABLE_DPT, 31 + i915->display.params.enable_dpt ? 0 : 32 + CHICKEN_MISC_DISABLE_DPT); 33 + } 34 + }
+13
drivers/gpu/drm/i915/display/intel_dpt_common.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_DPT_COMMON_H__ 7 + #define __INTEL_DPT_COMMON_H__ 8 + 9 + struct intel_crtc; 10 + 11 + void intel_dpt_configure(struct intel_crtc *crtc); 12 + 13 + #endif /* __INTEL_DPT_COMMON_H__ */
+36 -62
drivers/gpu/drm/i915/display/intel_dsb.c
··· 4 4 * 5 5 */ 6 6 7 - #include "gem/i915_gem_internal.h" 8 - #include "gem/i915_gem_lmem.h" 9 - 10 7 #include "i915_drv.h" 11 8 #include "i915_irq.h" 12 9 #include "i915_reg.h" ··· 11 14 #include "intel_de.h" 12 15 #include "intel_display_types.h" 13 16 #include "intel_dsb.h" 17 + #include "intel_dsb_buffer.h" 14 18 #include "intel_dsb_regs.h" 15 19 #include "intel_vblank.h" 16 20 #include "intel_vrr.h" 17 21 #include "skl_watermark.h" 18 22 19 - struct i915_vma; 23 + #define CACHELINE_BYTES 64 20 24 21 25 enum dsb_id { 22 26 INVALID_DSB = -1, ··· 30 32 struct intel_dsb { 31 33 enum dsb_id id; 32 34 33 - u32 *cmd_buf; 34 - struct i915_vma *vma; 35 + struct intel_dsb_buffer dsb_buf; 35 36 struct intel_crtc *crtc; 36 37 37 38 /* ··· 106 109 { 107 110 struct intel_crtc *crtc = dsb->crtc; 108 111 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 109 - const u32 *buf = dsb->cmd_buf; 110 112 int i; 111 113 112 114 drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] DSB %d commands {\n", 113 115 crtc->base.base.id, crtc->base.name, dsb->id); 114 116 for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4) 115 117 drm_dbg_kms(&i915->drm, 116 - " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", 117 - i * 4, buf[i], buf[i+1], buf[i+2], buf[i+3]); 118 + " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4, 119 + intel_dsb_buffer_read(&dsb->dsb_buf, i), 120 + intel_dsb_buffer_read(&dsb->dsb_buf, i + 1), 121 + intel_dsb_buffer_read(&dsb->dsb_buf, i + 2), 122 + intel_dsb_buffer_read(&dsb->dsb_buf, i + 3)); 118 123 drm_dbg_kms(&i915->drm, "}\n"); 119 124 } 120 125 ··· 128 129 129 130 static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw) 130 131 { 131 - u32 *buf = dsb->cmd_buf; 132 - 133 132 if (!assert_dsb_has_room(dsb)) 134 133 return; 135 134 ··· 136 139 137 140 dsb->ins_start_offset = dsb->free_pos; 138 141 139 - buf[dsb->free_pos++] = ldw; 140 - buf[dsb->free_pos++] = udw; 142 + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, ldw); 143 + intel_dsb_buffer_write(&dsb->dsb_buf, 
dsb->free_pos++, udw); 141 144 } 142 145 143 146 static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb, 144 147 u32 opcode, i915_reg_t reg) 145 148 { 146 - const u32 *buf = dsb->cmd_buf; 147 149 u32 prev_opcode, prev_reg; 148 150 149 151 /* ··· 153 157 if (dsb->free_pos == 0) 154 158 return false; 155 159 156 - prev_opcode = buf[dsb->ins_start_offset + 1] & ~DSB_REG_VALUE_MASK; 157 - prev_reg = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK; 160 + prev_opcode = intel_dsb_buffer_read(&dsb->dsb_buf, 161 + dsb->ins_start_offset + 1) & ~DSB_REG_VALUE_MASK; 162 + prev_reg = intel_dsb_buffer_read(&dsb->dsb_buf, 163 + dsb->ins_start_offset + 1) & DSB_REG_VALUE_MASK; 158 164 159 165 return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg); 160 166 } ··· 189 191 void intel_dsb_reg_write(struct intel_dsb *dsb, 190 192 i915_reg_t reg, u32 val) 191 193 { 194 + u32 old_val; 195 + 192 196 /* 193 197 * For example the buffer will look like below for 3 dwords for auto 194 198 * increment register: ··· 214 214 (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) | 215 215 i915_mmio_reg_offset(reg)); 216 216 } else { 217 - u32 *buf = dsb->cmd_buf; 218 - 219 217 if (!assert_dsb_has_room(dsb)) 220 218 return; 221 219 222 220 /* convert to indexed write? 
*/ 223 221 if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) { 224 - u32 prev_val = buf[dsb->ins_start_offset + 0]; 222 + u32 prev_val = intel_dsb_buffer_read(&dsb->dsb_buf, 223 + dsb->ins_start_offset + 0); 225 224 226 - buf[dsb->ins_start_offset + 0] = 1; /* count */ 227 - buf[dsb->ins_start_offset + 1] = 228 - (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) | 229 - i915_mmio_reg_offset(reg); 230 - buf[dsb->ins_start_offset + 2] = prev_val; 225 + intel_dsb_buffer_write(&dsb->dsb_buf, 226 + dsb->ins_start_offset + 0, 1); /* count */ 227 + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1, 228 + (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) | 229 + i915_mmio_reg_offset(reg)); 230 + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, prev_val); 231 231 232 232 dsb->free_pos++; 233 233 } 234 234 235 - buf[dsb->free_pos++] = val; 235 + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val); 236 236 /* Update the count */ 237 - buf[dsb->ins_start_offset]++; 237 + old_val = intel_dsb_buffer_read(&dsb->dsb_buf, dsb->ins_start_offset); 238 + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset, old_val + 1); 238 239 239 240 /* if number of data words is odd, then the last dword should be 0.*/ 240 241 if (dsb->free_pos & 0x1) 241 - buf[dsb->free_pos] = 0; 242 + intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0); 242 243 } 243 244 } 244 245 ··· 298 297 aligned_tail = ALIGN(tail, CACHELINE_BYTES); 299 298 300 299 if (aligned_tail > tail) 301 - memset(&dsb->cmd_buf[dsb->free_pos], 0, 302 - aligned_tail - tail); 300 + intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0, 301 + aligned_tail - tail); 303 302 304 303 dsb->free_pos = aligned_tail / 4; 305 304 } ··· 318 317 319 318 intel_dsb_align_tail(dsb); 320 319 321 - i915_gem_object_flush_map(dsb->vma->obj); 320 + intel_dsb_buffer_flush_map(&dsb->dsb_buf); 322 321 } 323 322 324 323 static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state) ··· 362 361 ctrl | 
DSB_ENABLE); 363 362 364 363 intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id), 365 - i915_ggtt_offset(dsb->vma)); 364 + intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf)); 366 365 367 366 if (dewake_scanline >= 0) { 368 367 int diff, hw_dewake_scanline; ··· 384 383 } 385 384 386 385 intel_de_write_fw(dev_priv, DSB_TAIL(pipe, dsb->id), 387 - i915_ggtt_offset(dsb->vma) + tail); 386 + intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail); 388 387 } 389 388 390 389 /** ··· 409 408 enum pipe pipe = crtc->pipe; 410 409 411 410 if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) { 412 - u32 offset = i915_ggtt_offset(dsb->vma); 411 + u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf); 413 412 414 413 intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id), 415 414 DSB_ENABLE | DSB_HALT); ··· 446 445 { 447 446 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 448 447 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 449 - struct drm_i915_gem_object *obj; 450 448 intel_wakeref_t wakeref; 451 449 struct intel_dsb *dsb; 452 - struct i915_vma *vma; 453 450 unsigned int size; 454 - u32 *buf; 455 451 456 452 if (!HAS_DSB(i915)) 457 453 return NULL; ··· 462 464 /* ~1 qword per instruction, full cachelines */ 463 465 size = ALIGN(max_cmds * 8, CACHELINE_BYTES); 464 466 465 - if (HAS_LMEM(i915)) { 466 - obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size), 467 - I915_BO_ALLOC_CONTIGUOUS); 468 - if (IS_ERR(obj)) 469 - goto out_put_rpm; 470 - } else { 471 - obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size)); 472 - if (IS_ERR(obj)) 473 - goto out_put_rpm; 474 - 475 - i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); 476 - } 477 - 478 - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); 479 - if (IS_ERR(vma)) { 480 - i915_gem_object_put(obj); 467 + if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size)) 481 468 goto out_put_rpm; 482 - } 483 - 484 - buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC); 485 - if (IS_ERR(buf)) { 486 
- i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP); 487 - goto out_put_rpm; 488 - } 489 469 490 470 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 491 471 492 472 dsb->id = DSB1; 493 - dsb->vma = vma; 494 473 dsb->crtc = crtc; 495 - dsb->cmd_buf = buf; 496 474 dsb->size = size / 4; /* in dwords */ 497 475 dsb->free_pos = 0; 498 476 dsb->ins_start_offset = 0; ··· 496 522 */ 497 523 void intel_dsb_cleanup(struct intel_dsb *dsb) 498 524 { 499 - i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP); 525 + intel_dsb_buffer_cleanup(&dsb->dsb_buf); 500 526 kfree(dsb); 501 527 }
+82
drivers/gpu/drm/i915/display/intel_dsb_buffer.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2023, Intel Corporation. 4 + */ 5 + 6 + #include "gem/i915_gem_internal.h" 7 + #include "gem/i915_gem_lmem.h" 8 + #include "i915_drv.h" 9 + #include "i915_vma.h" 10 + #include "intel_display_types.h" 11 + #include "intel_dsb_buffer.h" 12 + 13 + u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf) 14 + { 15 + return i915_ggtt_offset(dsb_buf->vma); 16 + } 17 + 18 + void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val) 19 + { 20 + dsb_buf->cmd_buf[idx] = val; 21 + } 22 + 23 + u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) 24 + { 25 + return dsb_buf->cmd_buf[idx]; 26 + } 27 + 28 + void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size) 29 + { 30 + WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf)); 31 + 32 + memset(&dsb_buf->cmd_buf[idx], val, size); 33 + } 34 + 35 + bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size) 36 + { 37 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 38 + struct drm_i915_gem_object *obj; 39 + struct i915_vma *vma; 40 + u32 *buf; 41 + 42 + if (HAS_LMEM(i915)) { 43 + obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size), 44 + I915_BO_ALLOC_CONTIGUOUS); 45 + if (IS_ERR(obj)) 46 + return false; 47 + } else { 48 + obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size)); 49 + if (IS_ERR(obj)) 50 + return false; 51 + 52 + i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); 53 + } 54 + 55 + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); 56 + if (IS_ERR(vma)) { 57 + i915_gem_object_put(obj); 58 + return false; 59 + } 60 + 61 + buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC); 62 + if (IS_ERR(buf)) { 63 + i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP); 64 + return false; 65 + } 66 + 67 + dsb_buf->vma = vma; 68 + dsb_buf->cmd_buf = buf; 69 + dsb_buf->buf_size = size; 70 + 71 + 
return true; 72 + } 73 + 74 + void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf) 75 + { 76 + i915_vma_unpin_and_release(&dsb_buf->vma, I915_VMA_RELEASE_MAP); 77 + } 78 + 79 + void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf) 80 + { 81 + i915_gem_object_flush_map(dsb_buf->vma->obj); 82 + }
+29
drivers/gpu/drm/i915/display/intel_dsb_buffer.h
··· 1 + /* SPDX-License-Identifier: MIT 2 + * 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef _INTEL_DSB_BUFFER_H 7 + #define _INTEL_DSB_BUFFER_H 8 + 9 + #include <linux/types.h> 10 + 11 + struct intel_crtc; 12 + struct i915_vma; 13 + 14 + struct intel_dsb_buffer { 15 + u32 *cmd_buf; 16 + struct i915_vma *vma; 17 + size_t buf_size; 18 + }; 19 + 20 + u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf); 21 + void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val); 22 + u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx); 23 + void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size); 24 + bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, 25 + size_t size); 26 + void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf); 27 + void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf); 28 + 29 + #endif
+136 -219
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
··· 55 55 #define MIPI_VIRTUAL_CHANNEL_SHIFT 1 56 56 #define MIPI_PORT_SHIFT 3 57 57 58 - /* base offsets for gpio pads */ 59 - #define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130 60 - #define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120 61 - #define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110 62 - #define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140 63 - #define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150 64 - #define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160 65 - #define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180 66 - #define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190 67 - #define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170 68 - #define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100 69 - #define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0 70 - #define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0 71 - 72 - #define VLV_GPIO_PCONF0(base_offset) (base_offset) 73 - #define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8) 74 - 75 - struct gpio_map { 76 - u16 base_offset; 77 - bool init; 78 - }; 79 - 80 - static struct gpio_map vlv_gpio_table[] = { 81 - { VLV_GPIO_NC_0_HV_DDI0_HPD }, 82 - { VLV_GPIO_NC_1_HV_DDI0_DDC_SDA }, 83 - { VLV_GPIO_NC_2_HV_DDI0_DDC_SCL }, 84 - { VLV_GPIO_NC_3_PANEL0_VDDEN }, 85 - { VLV_GPIO_NC_4_PANEL0_BKLTEN }, 86 - { VLV_GPIO_NC_5_PANEL0_BKLTCTL }, 87 - { VLV_GPIO_NC_6_HV_DDI1_HPD }, 88 - { VLV_GPIO_NC_7_HV_DDI1_DDC_SDA }, 89 - { VLV_GPIO_NC_8_HV_DDI1_DDC_SCL }, 90 - { VLV_GPIO_NC_9_PANEL1_VDDEN }, 91 - { VLV_GPIO_NC_10_PANEL1_BKLTEN }, 92 - { VLV_GPIO_NC_11_PANEL1_BKLTCTL }, 93 - }; 94 - 95 58 struct i2c_adapter_lookup { 96 59 u16 slave_addr; 97 60 struct intel_dsi *intel_dsi; ··· 65 102 #define CHV_GPIO_IDX_START_E 73 66 103 #define CHV_GPIO_IDX_START_SW 100 67 104 #define CHV_GPIO_IDX_START_SE 198 68 - 69 - #define CHV_VBT_MAX_PINS_PER_FMLY 15 70 - 71 - #define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8) 72 - #define CHV_GPIO_GPIOEN (1 << 15) 73 - #define CHV_GPIO_GPIOCFG_GPIO (0 << 8) 74 - #define CHV_GPIO_GPIOCFG_GPO (1 << 8) 75 - #define CHV_GPIO_GPIOCFG_GPI (2 << 8) 76 - #define CHV_GPIO_GPIOCFG_HIZ (3 << 8) 77 - #define 
CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1) 78 - 79 - #define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4) 80 - #define CHV_GPIO_CFGLOCK (1 << 31) 81 105 82 106 /* ICL DSI Display GPIO Pins */ 83 107 #define ICL_GPIO_DDSP_HPD_A 0 ··· 92 142 if (seq_port) { 93 143 if (intel_dsi->ports & BIT(PORT_B)) 94 144 return PORT_B; 95 - else if (intel_dsi->ports & BIT(PORT_C)) 145 + if (intel_dsi->ports & BIT(PORT_C)) 96 146 return PORT_C; 97 147 } 98 148 ··· 193 243 return data; 194 244 } 195 245 196 - static void vlv_exec_gpio(struct intel_connector *connector, 197 - u8 gpio_source, u8 gpio_index, bool value) 246 + static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index, 247 + const char *con_id, u8 idx, bool value) 198 248 { 199 249 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 200 - struct gpio_map *map; 201 - u16 pconf0, padval; 202 - u32 tmp; 203 - u8 port; 250 + /* XXX: this table is a quick ugly hack. */ 251 + static struct gpio_desc *soc_gpio_table[U8_MAX + 1]; 252 + struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index]; 204 253 205 - if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) { 206 - drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n", 207 - gpio_index); 208 - return; 209 - } 210 - 211 - map = &vlv_gpio_table[gpio_index]; 212 - 213 - if (connector->panel.vbt.dsi.seq_version >= 3) { 214 - /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */ 215 - port = IOSF_PORT_GPIO_NC; 254 + if (gpio_desc) { 255 + gpiod_set_value(gpio_desc, value); 216 256 } else { 217 - if (gpio_source == 0) { 218 - port = IOSF_PORT_GPIO_NC; 219 - } else if (gpio_source == 1) { 257 + gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx, 258 + value ? 
GPIOD_OUT_HIGH : GPIOD_OUT_LOW); 259 + if (IS_ERR(gpio_desc)) { 260 + drm_err(&dev_priv->drm, 261 + "GPIO index %u request failed (%pe)\n", 262 + gpio_index, gpio_desc); 263 + return; 264 + } 265 + 266 + soc_gpio_table[gpio_index] = gpio_desc; 267 + } 268 + } 269 + 270 + static void soc_opaque_gpio_set_value(struct intel_connector *connector, 271 + u8 gpio_index, const char *chip, 272 + const char *con_id, u8 idx, bool value) 273 + { 274 + struct gpiod_lookup_table *lookup; 275 + 276 + lookup = kzalloc(struct_size(lookup, table, 2), GFP_KERNEL); 277 + if (!lookup) 278 + return; 279 + 280 + lookup->dev_id = "0000:00:02.0"; 281 + lookup->table[0] = 282 + GPIO_LOOKUP_IDX(chip, idx, con_id, idx, GPIO_ACTIVE_HIGH); 283 + 284 + gpiod_add_lookup_table(lookup); 285 + 286 + soc_gpio_set_value(connector, gpio_index, con_id, idx, value); 287 + 288 + gpiod_remove_lookup_table(lookup); 289 + kfree(lookup); 290 + } 291 + 292 + static void vlv_gpio_set_value(struct intel_connector *connector, 293 + u8 gpio_source, u8 gpio_index, bool value) 294 + { 295 + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 296 + 297 + /* XXX: this assumes vlv_gpio_table only has NC GPIOs. 
*/ 298 + if (connector->panel.vbt.dsi.seq_version < 3) { 299 + if (gpio_source == 1) { 220 300 drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n"); 221 301 return; 222 - } else { 302 + } 303 + if (gpio_source > 1) { 223 304 drm_dbg_kms(&dev_priv->drm, 224 305 "unknown gpio source %u\n", gpio_source); 225 306 return; 226 307 } 227 308 } 228 309 229 - pconf0 = VLV_GPIO_PCONF0(map->base_offset); 230 - padval = VLV_GPIO_PAD_VAL(map->base_offset); 231 - 232 - vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO)); 233 - if (!map->init) { 234 - /* FIXME: remove constant below */ 235 - vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00); 236 - map->init = true; 237 - } 238 - 239 - tmp = 0x4 | value; 240 - vlv_iosf_sb_write(dev_priv, port, padval, tmp); 241 - vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO)); 310 + soc_opaque_gpio_set_value(connector, gpio_index, 311 + "INT33FC:01", "Panel N", gpio_index, value); 242 312 } 243 313 244 - static void chv_exec_gpio(struct intel_connector *connector, 245 - u8 gpio_source, u8 gpio_index, bool value) 314 + static void chv_gpio_set_value(struct intel_connector *connector, 315 + u8 gpio_source, u8 gpio_index, bool value) 246 316 { 247 317 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 248 - u16 cfg0, cfg1; 249 - u16 family_num; 250 - u8 port; 251 318 252 319 if (connector->panel.vbt.dsi.seq_version >= 3) { 253 320 if (gpio_index >= CHV_GPIO_IDX_START_SE) { 254 321 /* XXX: it's unclear whether 255->57 is part of SE. 
*/ 255 - gpio_index -= CHV_GPIO_IDX_START_SE; 256 - port = CHV_IOSF_PORT_GPIO_SE; 322 + soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:03", "Panel SE", 323 + gpio_index - CHV_GPIO_IDX_START_SE, value); 257 324 } else if (gpio_index >= CHV_GPIO_IDX_START_SW) { 258 - gpio_index -= CHV_GPIO_IDX_START_SW; 259 - port = CHV_IOSF_PORT_GPIO_SW; 325 + soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:00", "Panel SW", 326 + gpio_index - CHV_GPIO_IDX_START_SW, value); 260 327 } else if (gpio_index >= CHV_GPIO_IDX_START_E) { 261 - gpio_index -= CHV_GPIO_IDX_START_E; 262 - port = CHV_IOSF_PORT_GPIO_E; 328 + soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:02", "Panel E", 329 + gpio_index - CHV_GPIO_IDX_START_E, value); 263 330 } else { 264 - port = CHV_IOSF_PORT_GPIO_N; 331 + soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N", 332 + gpio_index - CHV_GPIO_IDX_START_N, value); 265 333 } 266 334 } else { 267 335 /* XXX: The spec is unclear about CHV GPIO on seq v2 */ ··· 296 328 return; 297 329 } 298 330 299 - port = CHV_IOSF_PORT_GPIO_N; 331 + soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N", 332 + gpio_index - CHV_GPIO_IDX_START_N, value); 300 333 } 301 - 302 - family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY; 303 - gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY; 304 - 305 - cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index); 306 - cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index); 307 - 308 - vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO)); 309 - vlv_iosf_sb_write(dev_priv, port, cfg1, 0); 310 - vlv_iosf_sb_write(dev_priv, port, cfg0, 311 - CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO | 312 - CHV_GPIO_GPIOTXSTATE(value)); 313 - vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO)); 314 334 } 315 335 316 - static void bxt_exec_gpio(struct intel_connector *connector, 317 - u8 gpio_source, u8 gpio_index, bool value) 336 + static void bxt_gpio_set_value(struct intel_connector *connector, 337 + u8 gpio_index, bool 
value) 318 338 { 319 - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 320 - /* XXX: this table is a quick ugly hack. */ 321 - static struct gpio_desc *bxt_gpio_table[U8_MAX + 1]; 322 - struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index]; 323 - 324 - if (!gpio_desc) { 325 - gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, 326 - NULL, gpio_index, 327 - value ? GPIOD_OUT_LOW : 328 - GPIOD_OUT_HIGH); 329 - 330 - if (IS_ERR_OR_NULL(gpio_desc)) { 331 - drm_err(&dev_priv->drm, 332 - "GPIO index %u request failed (%ld)\n", 333 - gpio_index, PTR_ERR(gpio_desc)); 334 - return; 335 - } 336 - 337 - bxt_gpio_table[gpio_index] = gpio_desc; 338 - } 339 - 340 - gpiod_set_value(gpio_desc, value); 341 - } 342 - 343 - static void icl_exec_gpio(struct intel_connector *connector, 344 - u8 gpio_source, u8 gpio_index, bool value) 345 - { 346 - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 347 - 348 - drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n"); 339 + soc_gpio_set_value(connector, gpio_index, NULL, gpio_index, value); 349 340 } 350 341 351 342 enum { ··· 389 462 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) 390 463 { 391 464 struct drm_device *dev = intel_dsi->base.base.dev; 392 - struct drm_i915_private *dev_priv = to_i915(dev); 465 + struct drm_i915_private *i915 = to_i915(dev); 393 466 struct intel_connector *connector = intel_dsi->attached_connector; 394 - u8 gpio_source, gpio_index = 0, gpio_number; 467 + u8 gpio_source = 0, gpio_index = 0, gpio_number; 395 468 bool value; 396 - bool native = DISPLAY_VER(dev_priv) >= 11; 469 + int size; 470 + bool native = DISPLAY_VER(i915) >= 11; 397 471 398 - if (connector->panel.vbt.dsi.seq_version >= 3) 399 - gpio_index = *data++; 472 + if (connector->panel.vbt.dsi.seq_version >= 3) { 473 + size = 3; 400 474 401 - gpio_number = *data++; 475 + gpio_index = data[0]; 476 + gpio_number = data[1]; 477 + value = data[2] & BIT(0); 402 478 403 - /* gpio 
source in sequence v2 only */ 404 - if (connector->panel.vbt.dsi.seq_version == 2) 405 - gpio_source = (*data >> 1) & 3; 406 - else 407 - gpio_source = 0; 479 + if (connector->panel.vbt.dsi.seq_version >= 4 && data[2] & BIT(1)) 480 + native = false; 481 + } else { 482 + size = 2; 408 483 409 - if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1)) 410 - native = false; 484 + gpio_number = data[0]; 485 + value = data[1] & BIT(0); 411 486 412 - /* pull up/down */ 413 - value = *data++ & 1; 487 + if (connector->panel.vbt.dsi.seq_version == 2) 488 + gpio_source = (data[1] >> 1) & 3; 489 + } 414 490 415 - drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n", 491 + drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n", 416 492 gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value)); 417 493 418 494 if (native) 419 - icl_native_gpio_set_value(dev_priv, gpio_number, value); 420 - else if (DISPLAY_VER(dev_priv) >= 11) 421 - icl_exec_gpio(connector, gpio_source, gpio_index, value); 422 - else if (IS_VALLEYVIEW(dev_priv)) 423 - vlv_exec_gpio(connector, gpio_source, gpio_number, value); 424 - else if (IS_CHERRYVIEW(dev_priv)) 425 - chv_exec_gpio(connector, gpio_source, gpio_number, value); 426 - else 427 - bxt_exec_gpio(connector, gpio_source, gpio_index, value); 495 + icl_native_gpio_set_value(i915, gpio_number, value); 496 + else if (DISPLAY_VER(i915) >= 9) 497 + bxt_gpio_set_value(connector, gpio_index, value); 498 + else if (IS_VALLEYVIEW(i915)) 499 + vlv_gpio_set_value(connector, gpio_source, gpio_number, value); 500 + else if (IS_CHERRYVIEW(i915)) 501 + chv_gpio_set_value(connector, gpio_source, gpio_number, value); 428 502 429 - return data; 503 + return data + size; 430 504 } 431 505 432 506 #ifdef CONFIG_ACPI ··· 586 658 */ 587 659 588 660 static const char * const seq_name[] = { 661 + [MIPI_SEQ_END] = "MIPI_SEQ_END", 589 662 [MIPI_SEQ_DEASSERT_RESET] = 
"MIPI_SEQ_DEASSERT_RESET", 590 663 [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP", 591 664 [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON", ··· 602 673 603 674 static const char *sequence_name(enum mipi_seq seq_id) 604 675 { 605 - if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id]) 676 + if (seq_id < ARRAY_SIZE(seq_name)) 606 677 return seq_name[seq_id]; 607 - else 608 - return "(unknown)"; 678 + 679 + return "(unknown)"; 609 680 } 610 681 611 682 static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, ··· 636 707 if (connector->panel.vbt.dsi.seq_version >= 3) 637 708 data += 4; 638 709 639 - while (1) { 710 + while (*data != MIPI_SEQ_ELEM_END) { 640 711 u8 operation_byte = *data++; 641 712 u8 operation_size = 0; 642 - 643 - if (operation_byte == MIPI_SEQ_ELEM_END) 644 - break; 645 713 646 714 if (operation_byte < ARRAY_SIZE(exec_elem)) 647 715 mipi_elem_exec = exec_elem[operation_byte]; ··· 799 873 * multiply by 100 to preserve remainder 800 874 */ 801 875 if (intel_dsi->video_mode == BURST_MODE) { 802 - if (mipi_config->target_burst_mode_freq) { 803 - u32 bitrate = intel_dsi_bitrate(intel_dsi); 876 + u32 bitrate; 804 877 805 - /* 806 - * Sometimes the VBT contains a slightly lower clock, 807 - * then the bitrate we have calculated, in this case 808 - * just replace it with the calculated bitrate. 
809 - */ 810 - if (mipi_config->target_burst_mode_freq < bitrate && 811 - intel_fuzzy_clock_check( 812 - mipi_config->target_burst_mode_freq, 813 - bitrate)) 814 - mipi_config->target_burst_mode_freq = bitrate; 815 - 816 - if (mipi_config->target_burst_mode_freq < bitrate) { 817 - drm_err(&dev_priv->drm, 818 - "Burst mode freq is less than computed\n"); 819 - return false; 820 - } 821 - 822 - burst_mode_ratio = DIV_ROUND_UP( 823 - mipi_config->target_burst_mode_freq * 100, 824 - bitrate); 825 - 826 - intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100); 827 - } else { 828 - drm_err(&dev_priv->drm, 829 - "Burst mode target is not set\n"); 878 + if (mipi_config->target_burst_mode_freq == 0) { 879 + drm_err(&dev_priv->drm, "Burst mode target is not set\n"); 830 880 return false; 831 881 } 882 + 883 + bitrate = intel_dsi_bitrate(intel_dsi); 884 + 885 + /* 886 + * Sometimes the VBT contains a slightly lower clock, then 887 + * the bitrate we have calculated, in this case just replace it 888 + * with the calculated bitrate. 889 + */ 890 + if (mipi_config->target_burst_mode_freq < bitrate && 891 + intel_fuzzy_clock_check(mipi_config->target_burst_mode_freq, 892 + bitrate)) 893 + mipi_config->target_burst_mode_freq = bitrate; 894 + 895 + if (mipi_config->target_burst_mode_freq < bitrate) { 896 + drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n"); 897 + return false; 898 + } 899 + 900 + burst_mode_ratio = 901 + DIV_ROUND_UP(mipi_config->target_burst_mode_freq * 100, bitrate); 902 + 903 + intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100); 832 904 } else 833 905 burst_mode_ratio = 100; 834 906 ··· 888 964 struct intel_connector *connector = intel_dsi->attached_connector; 889 965 struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; 890 966 enum gpiod_flags flags = panel_is_on ? 
GPIOD_OUT_HIGH : GPIOD_OUT_LOW; 967 + struct gpiod_lookup_table *gpiod_lookup_table = NULL; 891 968 bool want_backlight_gpio = false; 892 969 bool want_panel_gpio = false; 893 970 struct pinctrl *pinctrl; ··· 896 971 897 972 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 898 973 mipi_config->pwm_blc == PPS_BLC_PMIC) { 899 - gpiod_add_lookup_table(&pmic_panel_gpio_table); 974 + gpiod_lookup_table = &pmic_panel_gpio_table; 900 975 want_panel_gpio = true; 901 976 } 902 977 903 978 if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) { 904 - gpiod_add_lookup_table(&soc_panel_gpio_table); 979 + gpiod_lookup_table = &soc_panel_gpio_table; 905 980 want_panel_gpio = true; 906 981 want_backlight_gpio = true; 907 982 ··· 917 992 drm_err(&dev_priv->drm, 918 993 "Failed to set pinmux to PWM\n"); 919 994 } 995 + 996 + if (gpiod_lookup_table) 997 + gpiod_add_lookup_table(gpiod_lookup_table); 920 998 921 999 if (want_panel_gpio) { 922 1000 intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags); ··· 939 1011 intel_dsi->gpio_backlight = NULL; 940 1012 } 941 1013 } 1014 + 1015 + if (gpiod_lookup_table) 1016 + gpiod_remove_lookup_table(gpiod_lookup_table); 942 1017 } 943 1018 944 1019 void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi) 945 1020 { 946 - struct drm_device *dev = intel_dsi->base.base.dev; 947 - struct drm_i915_private *dev_priv = to_i915(dev); 948 - struct intel_connector *connector = intel_dsi->attached_connector; 949 - struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; 950 - 951 1021 if (intel_dsi->gpio_panel) { 952 1022 gpiod_put(intel_dsi->gpio_panel); 953 1023 intel_dsi->gpio_panel = NULL; ··· 954 1028 if (intel_dsi->gpio_backlight) { 955 1029 gpiod_put(intel_dsi->gpio_backlight); 956 1030 intel_dsi->gpio_backlight = NULL; 957 - } 958 - 959 - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 960 - mipi_config->pwm_blc == PPS_BLC_PMIC) 961 - gpiod_remove_lookup_table(&pmic_panel_gpio_table); 962 - 
963 - if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) { 964 - pinctrl_unregister_mappings(soc_pwm_pinctrl_map); 965 - gpiod_remove_lookup_table(&soc_panel_gpio_table); 966 1031 } 967 1032 }
+4 -4
drivers/gpu/drm/i915/display/intel_fb.c
··· 764 764 765 765 bool intel_fb_uses_dpt(const struct drm_framebuffer *fb) 766 766 { 767 - return fb && to_i915(fb->dev)->params.enable_dpt && 767 + return fb && to_i915(fb->dev)->display.params.enable_dpt && 768 768 intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier); 769 769 } 770 770 ··· 1930 1930 if (!atomic_read(&front->bits)) 1931 1931 return 0; 1932 1932 1933 - if (dma_resv_test_signaled(obj->base.resv, dma_resv_usage_rw(false))) 1933 + if (dma_resv_test_signaled(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false))) 1934 1934 goto flush; 1935 1935 1936 - ret = dma_resv_get_singleton(obj->base.resv, dma_resv_usage_rw(false), 1936 + ret = dma_resv_get_singleton(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false), 1937 1937 &fence); 1938 1938 if (ret || !fence) 1939 1939 goto flush; ··· 2093 2093 } 2094 2094 } 2095 2095 2096 - fb->obj[i] = &obj->base; 2096 + fb->obj[i] = intel_bo_to_drm_bo(obj); 2097 2097 } 2098 2098 2099 2099 ret = intel_fill_fb_info(dev_priv, intel_fb);
+48 -9
drivers/gpu/drm/i915/display/intel_fbc.c
··· 608 608 static void ivb_fbc_activate(struct intel_fbc *fbc) 609 609 { 610 610 struct drm_i915_private *i915 = fbc->i915; 611 + u32 dpfc_ctl; 611 612 612 613 if (DISPLAY_VER(i915) >= 10) 613 614 glk_fbc_program_cfb_stride(fbc); ··· 618 617 if (intel_gt_support_legacy_fencing(to_gt(i915))) 619 618 snb_fbc_program_fence(fbc); 620 619 620 + /* wa_14019417088 Alternative WA*/ 621 + dpfc_ctl = ivb_dpfc_ctl(fbc); 622 + if (DISPLAY_VER(i915) >= 20) 623 + intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); 624 + 621 625 intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), 622 - DPFC_CTL_EN | ivb_dpfc_ctl(fbc)); 626 + DPFC_CTL_EN | dpfc_ctl); 623 627 } 624 628 625 629 static bool ivb_fbc_is_compressing(struct intel_fbc *fbc) ··· 1028 1022 struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 1029 1023 unsigned int effective_w, effective_h, max_w, max_h; 1030 1024 1031 - if (DISPLAY_VER(i915) >= 10) { 1025 + if (DISPLAY_VER(i915) >= 11) { 1026 + max_w = 8192; 1027 + max_h = 4096; 1028 + } else if (DISPLAY_VER(i915) >= 10) { 1032 1029 max_w = 5120; 1033 1030 max_h = 4096; 1034 - } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) { 1031 + } else if (DISPLAY_VER(i915) >= 7) { 1035 1032 max_w = 4096; 1036 1033 max_h = 4096; 1037 1034 } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) { ··· 1051 1042 (drm_rect_height(&plane_state->uapi.src) >> 16); 1052 1043 1053 1044 return effective_w <= max_w && effective_h <= max_h; 1045 + } 1046 + 1047 + static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state) 1048 + { 1049 + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 1050 + unsigned int w, h, max_w, max_h; 1051 + 1052 + if (DISPLAY_VER(i915) >= 10) { 1053 + max_w = 5120; 1054 + max_h = 4096; 1055 + } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) { 1056 + max_w = 4096; 1057 + max_h = 4096; 1058 + } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) { 1059 + max_w = 4096; 1060 + max_h = 2048; 1061 + } else { 
1062 + max_w = 2048; 1063 + max_h = 1536; 1064 + } 1065 + 1066 + w = drm_rect_width(&plane_state->uapi.src) >> 16; 1067 + h = drm_rect_height(&plane_state->uapi.src) >> 16; 1068 + 1069 + return w <= max_w && h <= max_h; 1054 1070 } 1055 1071 1056 1072 static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state) ··· 1208 1174 return 0; 1209 1175 } 1210 1176 1211 - if (!i915->params.enable_fbc) { 1177 + if (!i915->display.params.enable_fbc) { 1212 1178 plane_state->no_fbc_reason = "disabled per module param or by default"; 1213 1179 return 0; 1214 1180 } ··· 1275 1241 return 0; 1276 1242 } 1277 1243 1278 - if (!intel_fbc_hw_tracking_covers_screen(plane_state)) { 1244 + if (!intel_fbc_plane_size_valid(plane_state)) { 1279 1245 plane_state->no_fbc_reason = "plane size too big"; 1246 + return 0; 1247 + } 1248 + 1249 + if (!intel_fbc_hw_tracking_covers_screen(plane_state)) { 1250 + plane_state->no_fbc_reason = "surface size too big"; 1280 1251 return 0; 1281 1252 } 1282 1253 ··· 1790 1751 */ 1791 1752 static int intel_sanitize_fbc_option(struct drm_i915_private *i915) 1792 1753 { 1793 - if (i915->params.enable_fbc >= 0) 1794 - return !!i915->params.enable_fbc; 1754 + if (i915->display.params.enable_fbc >= 0) 1755 + return !!i915->display.params.enable_fbc; 1795 1756 1796 1757 if (!HAS_FBC(i915)) 1797 1758 return 0; ··· 1863 1824 if (need_fbc_vtd_wa(i915)) 1864 1825 DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0; 1865 1826 1866 - i915->params.enable_fbc = intel_sanitize_fbc_option(i915); 1827 + i915->display.params.enable_fbc = intel_sanitize_fbc_option(i915); 1867 1828 drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", 1868 - i915->params.enable_fbc); 1829 + i915->display.params.enable_fbc); 1869 1830 1870 1831 for_each_fbc_id(i915, fbc_id) 1871 1832 i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
+6 -2
drivers/gpu/drm/i915/display/intel_fdi.c
··· 10 10 #include "intel_crtc.h" 11 11 #include "intel_ddi.h" 12 12 #include "intel_de.h" 13 + #include "intel_dp.h" 13 14 #include "intel_display_types.h" 14 15 #include "intel_fdi.h" 15 16 #include "intel_fdi_regs.h" ··· 339 338 340 339 pipe_config->fdi_lanes = lane; 341 340 342 - intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 343 - link_bw, &pipe_config->fdi_m_n, false); 341 + intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp), 342 + lane, fdi_dotclock, 343 + link_bw, 344 + intel_dp_bw_fec_overhead(false), 345 + &pipe_config->fdi_m_n); 344 346 345 347 return 0; 346 348 }
-2
drivers/gpu/drm/i915/display/intel_frontbuffer.c
··· 265 265 spin_unlock(&intel_bo_to_i915(obj)->display.fb_tracking.lock); 266 266 267 267 i915_active_fini(&front->write); 268 - 269 - i915_gem_object_put(obj); 270 268 kfree_rcu(front, rcu); 271 269 } 272 270
+29 -8
drivers/gpu/drm/i915/display/intel_hdcp.c
··· 923 923 return 0; 924 924 } 925 925 926 - static int _intel_hdcp_enable(struct intel_connector *connector) 926 + static int intel_hdcp1_enable(struct intel_connector *connector) 927 927 { 928 928 struct drm_i915_private *i915 = to_i915(connector->base.dev); 929 929 struct intel_hdcp *hdcp = &connector->hdcp; ··· 1058 1058 goto out; 1059 1059 } 1060 1060 1061 - ret = _intel_hdcp_enable(connector); 1061 + ret = intel_hdcp1_enable(connector); 1062 1062 if (ret) { 1063 1063 drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret); 1064 1064 intel_hdcp_update_value(connector, ··· 2324 2324 return 0; 2325 2325 } 2326 2326 2327 - int intel_hdcp_enable(struct intel_atomic_state *state, 2328 - struct intel_encoder *encoder, 2329 - const struct intel_crtc_state *pipe_config, 2330 - const struct drm_connector_state *conn_state) 2327 + static int _intel_hdcp_enable(struct intel_atomic_state *state, 2328 + struct intel_encoder *encoder, 2329 + const struct intel_crtc_state *pipe_config, 2330 + const struct drm_connector_state *conn_state) 2331 2331 { 2332 2332 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2333 2333 struct intel_connector *connector = ··· 2388 2388 */ 2389 2389 if (ret && intel_hdcp_capable(connector) && 2390 2390 hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) { 2391 - ret = _intel_hdcp_enable(connector); 2391 + ret = intel_hdcp1_enable(connector); 2392 2392 } 2393 2393 2394 2394 if (!ret) { ··· 2402 2402 mutex_unlock(&dig_port->hdcp_mutex); 2403 2403 mutex_unlock(&hdcp->mutex); 2404 2404 return ret; 2405 + } 2406 + 2407 + void intel_hdcp_enable(struct intel_atomic_state *state, 2408 + struct intel_encoder *encoder, 2409 + const struct intel_crtc_state *crtc_state, 2410 + const struct drm_connector_state *conn_state) 2411 + { 2412 + struct intel_connector *connector = 2413 + to_intel_connector(conn_state->connector); 2414 + struct intel_hdcp *hdcp = &connector->hdcp; 2415 + 2416 + /* 2417 + * Enable hdcp if it's desired or if userspace is 
enabled and 2418 + * driver set its state to undesired 2419 + */ 2420 + if (conn_state->content_protection == 2421 + DRM_MODE_CONTENT_PROTECTION_DESIRED || 2422 + (conn_state->content_protection == 2423 + DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value == 2424 + DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) 2425 + _intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2405 2426 } 2406 2427 2407 2428 int intel_hdcp_disable(struct intel_connector *connector) ··· 2512 2491 } 2513 2492 2514 2493 if (desired_and_not_enabled || content_protection_type_changed) 2515 - intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2494 + _intel_hdcp_enable(state, encoder, crtc_state, conn_state); 2516 2495 } 2517 2496 2518 2497 void intel_hdcp_component_fini(struct drm_i915_private *i915)
+4 -4
drivers/gpu/drm/i915/display/intel_hdcp.h
··· 28 28 int intel_hdcp_init(struct intel_connector *connector, 29 29 struct intel_digital_port *dig_port, 30 30 const struct intel_hdcp_shim *hdcp_shim); 31 - int intel_hdcp_enable(struct intel_atomic_state *state, 32 - struct intel_encoder *encoder, 33 - const struct intel_crtc_state *pipe_config, 34 - const struct drm_connector_state *conn_state); 31 + void intel_hdcp_enable(struct intel_atomic_state *state, 32 + struct intel_encoder *encoder, 33 + const struct intel_crtc_state *pipe_config, 34 + const struct drm_connector_state *conn_state); 35 35 int intel_hdcp_disable(struct intel_connector *connector); 36 36 void intel_hdcp_update_pipe(struct intel_atomic_state *state, 37 37 struct intel_encoder *encoder,
-10
drivers/gpu/drm/i915/display/intel_hdmi.c
··· 3030 3030 "HDCP init failed, skipping.\n"); 3031 3031 } 3032 3032 3033 - /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 3034 - * 0xd. Failure to do so will result in spurious interrupts being 3035 - * generated on the port when a cable is not attached. 3036 - */ 3037 - if (IS_G45(dev_priv)) { 3038 - u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 3039 - intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 3040 - (temp & ~0xf) | 0xd); 3041 - } 3042 - 3043 3033 cec_fill_conn_info_from_drm(&conn_info, connector); 3044 3034 3045 3035 intel_hdmi->cec_notifier =
+16
drivers/gpu/drm/i915/display/intel_hotplug_irq.c
··· 1361 1361 bxt_hpd_detection_setup(dev_priv); 1362 1362 } 1363 1363 1364 + static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915) 1365 + { 1366 + /* 1367 + * For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 1368 + * 0xd. Failure to do so will result in spurious interrupts being 1369 + * generated on the port when a cable is not attached. 1370 + */ 1371 + intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd); 1372 + } 1373 + 1364 1374 static void i915_hpd_enable_detection(struct intel_encoder *encoder) 1365 1375 { 1366 1376 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1367 1377 u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin]; 1378 + 1379 + if (IS_G45(i915)) 1380 + g45_hpd_peg_band_gap_wa(i915); 1368 1381 1369 1382 /* HPD sense and interrupt enable are one and the same */ 1370 1383 i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en); ··· 1401 1388 if (IS_G4X(dev_priv)) 1402 1389 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 1403 1390 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 1391 + 1392 + if (IS_G45(dev_priv)) 1393 + g45_hpd_peg_band_gap_wa(dev_priv); 1404 1394 1405 1395 /* Ignore TV since it's buggy */ 1406 1396 i915_hotplug_interrupt_update_locked(dev_priv,
+2 -2
drivers/gpu/drm/i915/display/intel_lvds.c
··· 794 794 unsigned int val; 795 795 796 796 /* use the module option value if specified */ 797 - if (i915->params.lvds_channel_mode > 0) 798 - return i915->params.lvds_channel_mode == 2; 797 + if (i915->display.params.lvds_channel_mode > 0) 798 + return i915->display.params.lvds_channel_mode == 2; 799 799 800 800 /* single channel LVDS is limited to 112 MHz */ 801 801 if (fixed_mode->clock > 112999)
+6
drivers/gpu/drm/i915/display/intel_modeset_setup.c
··· 318 318 const struct intel_crtc_state *crtc_state = 319 319 to_intel_crtc_state(crtc->base.state); 320 320 321 + if (crtc_state->dsc.compression_enable) { 322 + drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux); 323 + connector->dp.dsc_decompression_enabled = true; 324 + } else { 325 + connector->dp.dsc_decompression_enabled = false; 326 + } 321 327 conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3; 322 328 } 323 329 }
+1 -1
drivers/gpu/drm/i915/display/intel_modeset_verify.c
··· 244 244 verify_crtc_state(state, crtc); 245 245 intel_shared_dpll_state_verify(state, crtc); 246 246 intel_mpllb_state_verify(state, crtc); 247 - intel_c10pll_state_verify(state, crtc); 247 + intel_cx0pll_state_verify(state, crtc); 248 248 } 249 249 250 250 void intel_modeset_verify_disabled(struct intel_atomic_state *state)
+1 -1
drivers/gpu/drm/i915/display/intel_opregion.c
··· 841 841 { 842 842 struct intel_opregion *opregion = &dev_priv->display.opregion; 843 843 const struct firmware *fw = NULL; 844 - const char *name = dev_priv->params.vbt_firmware; 844 + const char *name = dev_priv->display.params.vbt_firmware; 845 845 int ret; 846 846 847 847 if (!name || !*name)
+2 -2
drivers/gpu/drm/i915/display/intel_panel.c
··· 46 46 47 47 bool intel_panel_use_ssc(struct drm_i915_private *i915) 48 48 { 49 - if (i915->params.panel_use_ssc >= 0) 50 - return i915->params.panel_use_ssc != 0; 49 + if (i915->display.params.panel_use_ssc >= 0) 50 + return i915->display.params.panel_use_ssc != 0; 51 51 return i915->display.vbt.lvds_use_ssc && 52 52 !intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE); 53 53 }
+1
drivers/gpu/drm/i915/display/intel_pch_display.c
··· 8 8 #include "intel_crt.h" 9 9 #include "intel_de.h" 10 10 #include "intel_display_types.h" 11 + #include "intel_dpll.h" 11 12 #include "intel_fdi.h" 12 13 #include "intel_fdi_regs.h" 13 14 #include "intel_lvds.h"
+1 -1
drivers/gpu/drm/i915/display/intel_pps.c
··· 90 90 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 91 91 enum pipe pipe = intel_dp->pps.pps_pipe; 92 92 bool pll_enabled, release_cl_override = false; 93 - enum dpio_phy phy = DPIO_PHY(pipe); 93 + enum dpio_phy phy = vlv_pipe_to_phy(pipe); 94 94 enum dpio_channel ch = vlv_pipe_to_channel(pipe); 95 95 u32 DP; 96 96
+275 -96
drivers/gpu/drm/i915/display/intel_psr.c
··· 29 29 #include "i915_reg.h" 30 30 #include "intel_atomic.h" 31 31 #include "intel_crtc.h" 32 + #include "intel_ddi.h" 32 33 #include "intel_de.h" 33 34 #include "intel_display_types.h" 34 35 #include "intel_dp.h" ··· 173 172 * irrelevant for normal operation. 174 173 */ 175 174 175 + bool intel_encoder_can_psr(struct intel_encoder *encoder) 176 + { 177 + if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST) 178 + return CAN_PSR(enc_to_intel_dp(encoder)) || 179 + CAN_PANEL_REPLAY(enc_to_intel_dp(encoder)); 180 + else 181 + return false; 182 + } 183 + 176 184 static bool psr_global_enabled(struct intel_dp *intel_dp) 177 185 { 178 186 struct intel_connector *connector = intel_dp->attached_connector; ··· 189 179 190 180 switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { 191 181 case I915_PSR_DEBUG_DEFAULT: 192 - if (i915->params.enable_psr == -1) 182 + if (i915->display.params.enable_psr == -1) 193 183 return connector->panel.vbt.psr.enable; 194 - return i915->params.enable_psr; 184 + return i915->display.params.enable_psr; 195 185 case I915_PSR_DEBUG_DISABLE: 196 186 return false; 197 187 default: ··· 208 198 case I915_PSR_DEBUG_FORCE_PSR1: 209 199 return false; 210 200 default: 211 - if (i915->params.enable_psr == 1) 201 + if (i915->display.params.enable_psr == 1) 212 202 return false; 213 203 return true; 214 204 } ··· 484 474 intel_dp->psr.su_y_granularity = y; 485 475 } 486 476 487 - void intel_psr_init_dpcd(struct intel_dp *intel_dp) 477 + static void _panel_replay_init_dpcd(struct intel_dp *intel_dp) 488 478 { 489 - struct drm_i915_private *dev_priv = 479 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 480 + u8 pr_dpcd = 0; 481 + 482 + intel_dp->psr.sink_panel_replay_support = false; 483 + drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd); 484 + 485 + if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) { 486 + drm_dbg_kms(&i915->drm, 487 + "Panel replay is not supported by panel\n"); 488 + return; 489 + } 490 + 491 + 
drm_dbg_kms(&i915->drm, 492 + "Panel replay is supported by panel\n"); 493 + intel_dp->psr.sink_panel_replay_support = true; 494 + } 495 + 496 + static void _psr_init_dpcd(struct intel_dp *intel_dp) 497 + { 498 + struct drm_i915_private *i915 = 490 499 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 491 500 492 - drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, 493 - sizeof(intel_dp->psr_dpcd)); 494 - 495 - if (!intel_dp->psr_dpcd[0]) 496 - return; 497 - drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n", 501 + drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n", 498 502 intel_dp->psr_dpcd[0]); 499 503 500 504 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { 501 - drm_dbg_kms(&dev_priv->drm, 505 + drm_dbg_kms(&i915->drm, 502 506 "PSR support not currently available for this panel\n"); 503 507 return; 504 508 } 505 509 506 510 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { 507 - drm_dbg_kms(&dev_priv->drm, 511 + drm_dbg_kms(&i915->drm, 508 512 "Panel lacks power state control, PSR cannot be enabled\n"); 509 513 return; 510 514 } ··· 527 503 intel_dp->psr.sink_sync_latency = 528 504 intel_dp_get_sink_sync_latency(intel_dp); 529 505 530 - if (DISPLAY_VER(dev_priv) >= 9 && 531 - (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) { 506 + if (DISPLAY_VER(i915) >= 9 && 507 + intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) { 532 508 bool y_req = intel_dp->psr_dpcd[1] & 533 509 DP_PSR2_SU_Y_COORDINATE_REQUIRED; 534 510 bool alpm = intel_dp_get_alpm_status(intel_dp); ··· 545 521 * GTC first. 546 522 */ 547 523 intel_dp->psr.sink_psr2_support = y_req && alpm; 548 - drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n", 524 + drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n", 549 525 intel_dp->psr.sink_psr2_support ? 
"" : "not "); 526 + } 527 + } 550 528 551 - if (intel_dp->psr.sink_psr2_support) { 552 - intel_dp->psr.colorimetry_support = 553 - intel_dp_get_colorimetry_status(intel_dp); 554 - intel_dp_get_su_granularity(intel_dp); 555 - } 529 + void intel_psr_init_dpcd(struct intel_dp *intel_dp) 530 + { 531 + _panel_replay_init_dpcd(intel_dp); 532 + 533 + drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, 534 + sizeof(intel_dp->psr_dpcd)); 535 + 536 + if (intel_dp->psr_dpcd[0]) 537 + _psr_init_dpcd(intel_dp); 538 + 539 + if (intel_dp->psr.sink_psr2_support) { 540 + intel_dp->psr.colorimetry_support = 541 + intel_dp_get_colorimetry_status(intel_dp); 542 + intel_dp_get_su_granularity(intel_dp); 556 543 } 557 544 } 558 545 ··· 609 574 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 610 575 u8 dpcd_val = DP_PSR_ENABLE; 611 576 612 - /* Enable ALPM at sink for psr2 */ 577 + if (intel_dp->psr.panel_replay_enabled) 578 + return; 579 + 613 580 if (intel_dp->psr.psr2_enabled) { 581 + /* Enable ALPM at sink for psr2 */ 614 582 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 615 583 DP_ALPM_ENABLE | 616 584 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE); ··· 630 592 if (intel_dp->psr.req_psr2_sdp_prior_scanline) 631 593 dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE; 632 594 595 + if (intel_dp->psr.entry_setup_frames > 0) 596 + dpcd_val |= DP_PSR_FRAME_CAPTURE; 597 + 633 598 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val); 634 599 635 600 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); ··· 647 606 if (DISPLAY_VER(dev_priv) >= 11) 648 607 val |= EDP_PSR_TP4_TIME_0us; 649 608 650 - if (dev_priv->params.psr_safest_params) { 609 + if (dev_priv->display.params.psr_safest_params) { 651 610 val |= EDP_PSR_TP1_TIME_2500us; 652 611 val |= EDP_PSR_TP2_TP3_TIME_2500us; 653 612 goto check_tp3_sel; ··· 731 690 if (DISPLAY_VER(dev_priv) >= 8) 732 691 val |= EDP_PSR_CRC_ENABLE; 733 692 693 + if (DISPLAY_VER(dev_priv) >= 20) 694 + val |= 
LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames); 695 + 734 696 intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 735 697 ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val); 736 698 } ··· 744 700 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 745 701 u32 val = 0; 746 702 747 - if (dev_priv->params.psr_safest_params) 703 + if (dev_priv->display.params.psr_safest_params) 748 704 return EDP_PSR2_TP2_TIME_2500us; 749 705 750 706 if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && ··· 771 727 return psr2_block_count_lines(intel_dp) / 4; 772 728 } 773 729 730 + static u8 frames_before_su_entry(struct intel_dp *intel_dp) 731 + { 732 + u8 frames_before_su_entry; 733 + 734 + frames_before_su_entry = max_t(u8, 735 + intel_dp->psr.sink_sync_latency + 1, 736 + 2); 737 + 738 + /* Entry setup frames must be at least 1 less than frames before SU entry */ 739 + if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry) 740 + frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1; 741 + 742 + return frames_before_su_entry; 743 + } 744 + 745 + static void dg2_activate_panel_replay(struct intel_dp *intel_dp) 746 + { 747 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 748 + 749 + intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 750 + 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME); 751 + 752 + intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0, 753 + TRANS_DP2_PANEL_REPLAY_ENABLE); 754 + } 755 + 774 756 static void hsw_activate_psr2(struct intel_dp *intel_dp) 775 757 { 776 758 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 777 759 enum transcoder cpu_transcoder = intel_dp->psr.transcoder; 778 760 u32 val = EDP_PSR2_ENABLE; 761 + u32 psr_val = 0; 779 762 780 763 val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp)); 781 764 ··· 812 741 if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12) 813 742 val |= EDP_Y_COORDINATE_ENABLE; 814 743 815 - val |= 
EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2)); 744 + val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp)); 745 + 816 746 val |= intel_psr2_get_tp_time(intel_dp); 817 747 818 748 if (DISPLAY_VER(dev_priv) >= 12) { ··· 857 785 if (intel_dp->psr.req_psr2_sdp_prior_scanline) 858 786 val |= EDP_PSR2_SU_SDP_SCANLINE; 859 787 788 + if (DISPLAY_VER(dev_priv) >= 20) 789 + psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames); 790 + 860 791 if (intel_dp->psr.psr2_sel_fetch_enabled) { 861 792 u32 tmp; 862 793 ··· 873 798 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is 874 799 * recommending keep this bit unset while PSR2 is enabled. 875 800 */ 876 - intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0); 801 + intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val); 877 802 878 803 intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val); 879 804 } ··· 1018 943 { 1019 944 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1020 945 1021 - if (!dev_priv->params.enable_psr2_sel_fetch && 946 + if (!dev_priv->display.params.enable_psr2_sel_fetch && 1022 947 intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) { 1023 948 drm_dbg_kms(&dev_priv->drm, 1024 949 "PSR2 sel fetch not enabled, disabled by parameter\n"); ··· 1131 1056 fast_wake_lines > max_wake_lines) 1132 1057 return false; 1133 1058 1134 - if (i915->params.psr_safest_params) 1059 + if (i915->display.params.psr_safest_params) 1135 1060 io_wake_lines = fast_wake_lines = max_wake_lines; 1136 1061 1137 1062 /* According to Bspec lower limit should be set as 7 lines. 
*/ ··· 1139 1064 intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7); 1140 1065 1141 1066 return true; 1067 + } 1068 + 1069 + static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp, 1070 + const struct drm_display_mode *adjusted_mode) 1071 + { 1072 + struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1073 + int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); 1074 + int entry_setup_frames = 0; 1075 + 1076 + if (psr_setup_time < 0) { 1077 + drm_dbg_kms(&i915->drm, 1078 + "PSR condition failed: Invalid PSR setup time (0x%02x)\n", 1079 + intel_dp->psr_dpcd[1]); 1080 + return -ETIME; 1081 + } 1082 + 1083 + if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > 1084 + adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { 1085 + if (DISPLAY_VER(i915) >= 20) { 1086 + /* setup entry frames can be up to 3 frames */ 1087 + entry_setup_frames = 1; 1088 + drm_dbg_kms(&i915->drm, 1089 + "PSR setup entry frames %d\n", 1090 + entry_setup_frames); 1091 + } else { 1092 + drm_dbg_kms(&i915->drm, 1093 + "PSR condition failed: PSR setup time (%d us) too long\n", 1094 + psr_setup_time); 1095 + return -ETIME; 1096 + } 1097 + } 1098 + 1099 + return entry_setup_frames; 1142 1100 } 1143 1101 1144 1102 static bool intel_psr2_config_valid(struct intel_dp *intel_dp, ··· 1314 1206 return false; 1315 1207 } 1316 1208 1317 - void intel_psr_compute_config(struct intel_dp *intel_dp, 1318 - struct intel_crtc_state *crtc_state, 1319 - struct drm_connector_state *conn_state) 1209 + static bool _psr_compute_config(struct intel_dp *intel_dp, 1210 + struct intel_crtc_state *crtc_state) 1320 1211 { 1321 1212 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1322 - const struct drm_display_mode *adjusted_mode = 1323 - &crtc_state->hw.adjusted_mode; 1324 - int psr_setup_time; 1213 + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 1214 + int entry_setup_frames; 1325 1215 1326 1216 /* 1327 1217 * Current PSR panels don't 
work reliably with VRR enabled 1328 1218 * So if VRR is enabled, do not enable PSR. 1329 1219 */ 1330 1220 if (crtc_state->vrr.enable) 1331 - return; 1221 + return false; 1332 1222 1333 1223 if (!CAN_PSR(intel_dp)) 1334 - return; 1224 + return false; 1225 + 1226 + entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode); 1227 + 1228 + if (entry_setup_frames >= 0) { 1229 + intel_dp->psr.entry_setup_frames = entry_setup_frames; 1230 + } else { 1231 + drm_dbg_kms(&dev_priv->drm, 1232 + "PSR condition failed: PSR setup timing not met\n"); 1233 + return false; 1234 + } 1235 + 1236 + return true; 1237 + } 1238 + 1239 + void intel_psr_compute_config(struct intel_dp *intel_dp, 1240 + struct intel_crtc_state *crtc_state, 1241 + struct drm_connector_state *conn_state) 1242 + { 1243 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1244 + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 1335 1245 1336 1246 if (!psr_global_enabled(intel_dp)) { 1337 1247 drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n"); ··· 1368 1242 return; 1369 1243 } 1370 1244 1371 - psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); 1372 - if (psr_setup_time < 0) { 1373 - drm_dbg_kms(&dev_priv->drm, 1374 - "PSR condition failed: Invalid PSR setup time (0x%02x)\n", 1375 - intel_dp->psr_dpcd[1]); 1376 - return; 1377 - } 1245 + if (CAN_PANEL_REPLAY(intel_dp)) 1246 + crtc_state->has_panel_replay = true; 1247 + else 1248 + crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state); 1378 1249 1379 - if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > 1380 - adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { 1381 - drm_dbg_kms(&dev_priv->drm, 1382 - "PSR condition failed: PSR setup time (%d us) too long\n", 1383 - psr_setup_time); 1250 + if (!(crtc_state->has_panel_replay || crtc_state->has_psr)) 1384 1251 return; 1385 - } 1386 1252 1387 - crtc_state->has_psr = true; 1388 1253 crtc_state->has_psr2 = 
intel_psr2_config_valid(intel_dp, crtc_state); 1389 1254 1390 1255 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); ··· 1396 1279 return; 1397 1280 1398 1281 intel_dp = &dig_port->dp; 1399 - if (!CAN_PSR(intel_dp)) 1282 + if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) 1400 1283 return; 1401 1284 1402 1285 mutex_lock(&intel_dp->psr.lock); 1403 1286 if (!intel_dp->psr.enabled) 1404 1287 goto unlock; 1405 1288 1406 - /* 1407 - * Not possible to read EDP_PSR/PSR2_CTL registers as it is 1408 - * enabled/disabled because of frontbuffer tracking and others. 1409 - */ 1410 - pipe_config->has_psr = true; 1289 + if (intel_dp->psr.panel_replay_enabled) { 1290 + pipe_config->has_panel_replay = true; 1291 + } else { 1292 + /* 1293 + * Not possible to read EDP_PSR/PSR2_CTL registers as it is 1294 + * enabled/disabled because of frontbuffer tracking and others. 1295 + */ 1296 + pipe_config->has_psr = true; 1297 + } 1298 + 1411 1299 pipe_config->has_psr2 = intel_dp->psr.psr2_enabled; 1412 1300 pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 1413 1301 ··· 1449 1327 1450 1328 lockdep_assert_held(&intel_dp->psr.lock); 1451 1329 1452 - /* psr1 and psr2 are mutually exclusive.*/ 1453 - if (intel_dp->psr.psr2_enabled) 1330 + /* psr1, psr2 and panel-replay are mutually exclusive.*/ 1331 + if (intel_dp->psr.panel_replay_enabled) 1332 + dg2_activate_panel_replay(intel_dp); 1333 + else if (intel_dp->psr.psr2_enabled) 1454 1334 hsw_activate_psr2(intel_dp); 1455 1335 else 1456 1336 hsw_activate_psr1(intel_dp); ··· 1576 1452 * All supported adlp panels have 1-based X granularity, this may 1577 1453 * cause issues if non-supported panels are used. 
1578 1454 */ 1579 - if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0)) 1580 - intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0, 1581 - ADLP_1_BASED_X_GRANULARITY); 1582 - else if (IS_ALDERLAKE_P(dev_priv)) 1583 - intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, 1584 - ADLP_1_BASED_X_GRANULARITY); 1455 + if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) || 1456 + IS_ALDERLAKE_P(dev_priv)) 1457 + intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder), 1458 + 0, ADLP_1_BASED_X_GRANULARITY); 1585 1459 1586 1460 /* Wa_16012604467:adlp,mtl[a0,b0] */ 1587 1461 if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0)) ··· 1630 1508 drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled); 1631 1509 1632 1510 intel_dp->psr.psr2_enabled = crtc_state->has_psr2; 1511 + intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay; 1633 1512 intel_dp->psr.busy_frontbuffer_bits = 0; 1634 1513 intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe; 1635 1514 intel_dp->psr.transcoder = crtc_state->cpu_transcoder; ··· 1646 1523 if (!psr_interrupt_error_check(intel_dp)) 1647 1524 return; 1648 1525 1649 - drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", 1650 - intel_dp->psr.psr2_enabled ? "2" : "1"); 1526 + if (intel_dp->psr.panel_replay_enabled) 1527 + drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n"); 1528 + else 1529 + drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n", 1530 + intel_dp->psr.psr2_enabled ? 
"2" : "1"); 1531 + 1651 1532 intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc); 1652 1533 intel_snps_phy_update_psr_power_state(dev_priv, phy, true); 1653 1534 intel_psr_enable_sink(intel_dp); ··· 1680 1553 return; 1681 1554 } 1682 1555 1683 - if (intel_dp->psr.psr2_enabled) { 1556 + if (intel_dp->psr.panel_replay_enabled) { 1557 + intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 1558 + TRANS_DP2_PANEL_REPLAY_ENABLE, 0); 1559 + } else if (intel_dp->psr.psr2_enabled) { 1684 1560 tgl_disallow_dc3co_on_psr2_exit(intel_dp); 1685 1561 1686 1562 val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder), ··· 1732 1602 if (!intel_dp->psr.enabled) 1733 1603 return; 1734 1604 1735 - drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n", 1736 - intel_dp->psr.psr2_enabled ? "2" : "1"); 1605 + if (intel_dp->psr.panel_replay_enabled) 1606 + drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n"); 1607 + else 1608 + drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n", 1609 + intel_dp->psr.psr2_enabled ? 
"2" : "1"); 1737 1610 1738 1611 intel_psr_exit(intel_dp); 1739 1612 intel_psr_wait_exit_locked(intel_dp); ··· 1769 1636 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0); 1770 1637 1771 1638 intel_dp->psr.enabled = false; 1639 + intel_dp->psr.panel_replay_enabled = false; 1772 1640 intel_dp->psr.psr2_enabled = false; 1773 1641 intel_dp->psr.psr2_sel_fetch_enabled = false; 1774 1642 intel_dp->psr.psr2_sel_fetch_cff_enabled = false; ··· 2341 2207 intel_atomic_get_new_crtc_state(state, crtc); 2342 2208 struct intel_encoder *encoder; 2343 2209 2344 - if (!crtc_state->has_psr) 2210 + if (!(crtc_state->has_psr || crtc_state->has_panel_replay)) 2345 2211 return; 2346 2212 2347 2213 for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, ··· 2827 2693 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 2828 2694 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2829 2695 2830 - if (!HAS_PSR(dev_priv)) 2696 + if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv))) 2831 2697 return; 2698 + 2699 + if (!intel_dp_is_edp(intel_dp)) 2700 + intel_psr_init_dpcd(intel_dp); 2832 2701 2833 2702 /* 2834 2703 * HSW spec explicitly says PSR is tied to port A. ··· 2848 2711 return; 2849 2712 } 2850 2713 2851 - intel_dp->psr.source_support = true; 2714 + if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) 2715 + intel_dp->psr.source_panel_replay_support = true; 2716 + else 2717 + intel_dp->psr.source_support = true; 2852 2718 2853 2719 /* Set link_standby x link_off defaults */ 2854 2720 if (DISPLAY_VER(dev_priv) < 12) ··· 2868 2728 { 2869 2729 struct drm_dp_aux *aux = &intel_dp->aux; 2870 2730 int ret; 2731 + unsigned int offset; 2871 2732 2872 - ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status); 2733 + offset = intel_dp->psr.panel_replay_enabled ? 
2734 + DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS; 2735 + 2736 + ret = drm_dp_dpcd_readb(aux, offset, status); 2873 2737 if (ret != 1) 2874 2738 return ret; 2875 2739 2876 - ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status); 2740 + offset = intel_dp->psr.panel_replay_enabled ? 2741 + DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS; 2742 + 2743 + ret = drm_dp_dpcd_readb(aux, offset, error_status); 2877 2744 if (ret != 1) 2878 2745 return ret; 2879 2746 ··· 3101 2954 status = live_status[status_val]; 3102 2955 } 3103 2956 3104 - seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); 2957 + seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val); 3105 2958 } 3106 2959 3107 2960 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) ··· 3114 2967 bool enabled; 3115 2968 u32 val; 3116 2969 3117 - seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support)); 2970 + seq_printf(m, "Sink support: PSR = %s", 2971 + str_yes_no(psr->sink_support)); 2972 + 3118 2973 if (psr->sink_support) 3119 2974 seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]); 3120 - seq_puts(m, "\n"); 2975 + seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support)); 3121 2976 3122 - if (!psr->sink_support) 2977 + if (!(psr->sink_support || psr->sink_panel_replay_support)) 3123 2978 return 0; 3124 2979 3125 2980 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 3126 2981 mutex_lock(&psr->lock); 3127 2982 3128 - if (psr->enabled) 2983 + if (psr->panel_replay_enabled) 2984 + status = "Panel Replay Enabled"; 2985 + else if (psr->enabled) 3129 2986 status = psr->psr2_enabled ? 
"PSR2 enabled" : "PSR1 enabled"; 3130 2987 else 3131 2988 status = "disabled"; ··· 3142 2991 goto unlock; 3143 2992 } 3144 2993 3145 - if (psr->psr2_enabled) { 2994 + if (psr->panel_replay_enabled) { 2995 + val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder)); 2996 + enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE; 2997 + } else if (psr->psr2_enabled) { 3146 2998 val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)); 3147 2999 enabled = val & EDP_PSR2_ENABLE; 3148 3000 } else { 3149 3001 val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)); 3150 3002 enabled = val & EDP_PSR_ENABLE; 3151 3003 } 3152 - seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", 3004 + seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n", 3153 3005 str_enabled_disabled(enabled), val); 3154 3006 psr_source_status(intel_dp, m); 3155 3007 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", ··· 3290 3136 i915, &i915_edp_psr_status_fops); 3291 3137 } 3292 3138 3139 + static const char *psr_mode_str(struct intel_dp *intel_dp) 3140 + { 3141 + if (intel_dp->psr.panel_replay_enabled) 3142 + return "PANEL-REPLAY"; 3143 + else if (intel_dp->psr.enabled) 3144 + return "PSR"; 3145 + 3146 + return "unknown"; 3147 + } 3148 + 3293 3149 static int i915_psr_sink_status_show(struct seq_file *m, void *data) 3294 3150 { 3295 3151 struct intel_connector *connector = m->private; ··· 3314 3150 "reserved", 3315 3151 "sink internal error", 3316 3152 }; 3153 + static const char * const panel_replay_status[] = { 3154 + "Sink device frame is locked to the Source device", 3155 + "Sink device is coasting, using the VTotal target", 3156 + "Sink device is governing the frame rate (frame rate unlock is granted)", 3157 + "Sink device in the process of re-locking with the Source device", 3158 + }; 3317 3159 const char *str; 3318 3160 int ret; 3319 3161 u8 status, error_status; 3162 + u32 idx; 3320 3163 3321 - if (!CAN_PSR(intel_dp)) { 3322 - seq_puts(m, "PSR Unsupported\n"); 3164 + if 
(!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) { 3165 + seq_puts(m, "PSR/Panel-Replay Unsupported\n"); 3323 3166 return -ENODEV; 3324 3167 } 3325 3168 ··· 3337 3166 if (ret) 3338 3167 return ret; 3339 3168 3340 - status &= DP_PSR_SINK_STATE_MASK; 3341 - if (status < ARRAY_SIZE(sink_status)) 3342 - str = sink_status[status]; 3343 - else 3344 - str = "unknown"; 3169 + str = "unknown"; 3170 + if (intel_dp->psr.panel_replay_enabled) { 3171 + idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT; 3172 + if (idx < ARRAY_SIZE(panel_replay_status)) 3173 + str = panel_replay_status[idx]; 3174 + } else if (intel_dp->psr.enabled) { 3175 + idx = status & DP_PSR_SINK_STATE_MASK; 3176 + if (idx < ARRAY_SIZE(sink_status)) 3177 + str = sink_status[idx]; 3178 + } 3345 3179 3346 - seq_printf(m, "Sink PSR status: 0x%x [%s]\n", status, str); 3180 + seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str); 3347 3181 3348 - seq_printf(m, "Sink PSR error status: 0x%x", error_status); 3182 + seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status); 3349 3183 3350 3184 if (error_status & (DP_PSR_RFB_STORAGE_ERROR | 3351 3185 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR | ··· 3359 3183 else 3360 3184 seq_puts(m, "\n"); 3361 3185 if (error_status & DP_PSR_RFB_STORAGE_ERROR) 3362 - seq_puts(m, "\tPSR RFB storage error\n"); 3186 + seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp)); 3363 3187 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) 3364 - seq_puts(m, "\tPSR VSC SDP uncorrectable error\n"); 3188 + seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp)); 3365 3189 if (error_status & DP_PSR_LINK_CRC_ERROR) 3366 - seq_puts(m, "\tPSR Link CRC error\n"); 3190 + seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp)); 3367 3191 3368 3192 return ret; 3369 3193 } ··· 3383 3207 struct drm_i915_private *i915 = to_i915(connector->base.dev); 3384 3208 struct dentry *root = 
connector->base.debugfs_entry; 3385 3209 3386 - if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 3387 - return; 3210 + if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) { 3211 + if (!(HAS_DP20(i915) && 3212 + connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort)) 3213 + return; 3214 + } 3388 3215 3389 3216 debugfs_create_file("i915_psr_sink_status", 0444, root, 3390 3217 connector, &i915_psr_sink_status_fops); 3391 3218 3392 - if (HAS_PSR(i915)) 3219 + if (HAS_PSR(i915) || HAS_DP20(i915)) 3393 3220 debugfs_create_file("i915_psr_status", 0444, root, 3394 3221 connector, &i915_psr_status_fops); 3395 3222 }
+7
drivers/gpu/drm/i915/display/intel_psr.h
··· 21 21 struct intel_plane; 22 22 struct intel_plane_state; 23 23 24 + #define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \ 25 + (intel_dp)->psr.source_support) 26 + 27 + #define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \ 28 + (intel_dp)->psr.source_panel_replay_support) 29 + 30 + bool intel_encoder_can_psr(struct intel_encoder *encoder); 24 31 void intel_psr_init_dpcd(struct intel_dp *intel_dp); 25 32 void intel_psr_pre_plane_update(struct intel_atomic_state *state, 26 33 struct intel_crtc *crtc);
+2
drivers/gpu/drm/i915/display/intel_psr_regs.h
··· 35 35 #define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 3) 36 36 #define EDP_PSR_MAX_SLEEP_TIME_MASK REG_GENMASK(24, 20) 37 37 #define EDP_PSR_MAX_SLEEP_TIME(x) REG_FIELD_PREP(EDP_PSR_MAX_SLEEP_TIME_MASK, (x)) 38 + #define LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK REG_GENMASK(17, 16) 39 + #define LNL_EDP_PSR_ENTRY_SETUP_FRAMES(x) REG_FIELD_PREP(LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK, (x)) 38 40 #define EDP_PSR_SKIP_AUX_EXIT REG_BIT(12) 39 41 #define EDP_PSR_TP_MASK REG_BIT(11) 40 42 #define EDP_PSR_TP_TP1_TP2 REG_FIELD_PREP(EDP_PSR_TP_MASK, 0)
-3
drivers/gpu/drm/i915/display/intel_qp_tables.c
··· 34 34 * These qp tables are as per the C model 35 35 * and it has the rows pointing to bpps which increment 36 36 * in steps of 0.5 37 - * We do not support fractional bpps as of today, 38 - * hence we would skip the fractional bpps during 39 - * our references for qp calclulations. 40 37 */ 41 38 static const u8 rc_range_minqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = { 42 39 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+17 -6
drivers/gpu/drm/i915/display/intel_sdvo.c
··· 1788 1788 intel_sdvo_get_eld(intel_sdvo, pipe_config); 1789 1789 } 1790 1790 1791 - static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo) 1791 + static void intel_sdvo_disable_audio(struct intel_encoder *encoder, 1792 + const struct intel_crtc_state *old_crtc_state, 1793 + const struct drm_connector_state *old_conn_state) 1792 1794 { 1795 + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1796 + 1797 + if (!old_crtc_state->has_audio) 1798 + return; 1799 + 1793 1800 intel_sdvo_set_audio_state(intel_sdvo, 0); 1794 1801 } 1795 1802 1796 - static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo, 1803 + static void intel_sdvo_enable_audio(struct intel_encoder *encoder, 1797 1804 const struct intel_crtc_state *crtc_state, 1798 1805 const struct drm_connector_state *conn_state) 1799 1806 { 1807 + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1800 1808 const u8 *eld = crtc_state->eld; 1809 + 1810 + if (!crtc_state->has_audio) 1811 + return; 1801 1812 1802 1813 intel_sdvo_set_audio_state(intel_sdvo, 0); 1803 1814 ··· 1830 1819 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); 1831 1820 u32 temp; 1832 1821 1833 - if (old_crtc_state->has_audio) 1834 - intel_sdvo_disable_audio(intel_sdvo); 1822 + encoder->audio_disable(encoder, old_crtc_state, conn_state); 1835 1823 1836 1824 intel_sdvo_set_active_outputs(intel_sdvo, 0); 1837 1825 if (0) ··· 1924 1914 DRM_MODE_DPMS_ON); 1925 1915 intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag); 1926 1916 1927 - if (pipe_config->has_audio) 1928 - intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state); 1917 + encoder->audio_enable(encoder, pipe_config, conn_state); 1929 1918 } 1930 1919 1931 1920 static enum drm_mode_status ··· 3400 3391 } 3401 3392 intel_encoder->pre_enable = intel_sdvo_pre_enable; 3402 3393 intel_encoder->enable = intel_enable_sdvo; 3394 + intel_encoder->audio_enable = intel_sdvo_enable_audio; 3395 + intel_encoder->audio_disable = 
intel_sdvo_disable_audio; 3403 3396 intel_encoder->get_hw_state = intel_sdvo_get_hw_state; 3404 3397 intel_encoder->get_config = intel_sdvo_get_config; 3405 3398
+6 -1
drivers/gpu/drm/i915/display/intel_sprite.c
··· 48 48 #include "intel_frontbuffer.h" 49 49 #include "intel_sprite.h" 50 50 51 + static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite) 52 + { 53 + return pipe * DISPLAY_RUNTIME_INFO(i915)->num_sprites[pipe] + sprite + 'A'; 54 + } 55 + 51 56 static void i9xx_plane_linear_gamma(u16 gamma[8]) 52 57 { 53 58 /* The points are not evenly spaced. */ ··· 1641 1636 0, plane_funcs, 1642 1637 formats, num_formats, modifiers, 1643 1638 DRM_PLANE_TYPE_OVERLAY, 1644 - "sprite %c", sprite_name(pipe, sprite)); 1639 + "sprite %c", sprite_name(dev_priv, pipe, sprite)); 1645 1640 kfree(modifiers); 1646 1641 1647 1642 if (ret)
+22 -7
drivers/gpu/drm/i915/display/intel_vdsc.c
··· 77 77 static void 78 78 calculate_rc_params(struct drm_dsc_config *vdsc_cfg) 79 79 { 80 + int bpp = to_bpp_int(vdsc_cfg->bits_per_pixel); 80 81 int bpc = vdsc_cfg->bits_per_component; 81 - int bpp = vdsc_cfg->bits_per_pixel >> 4; 82 82 int qp_bpc_modifier = (bpc - 8) * 2; 83 83 int uncompressed_bpg_rate; 84 84 int first_line_bpg_offset; ··· 148 148 static const s8 ofs_und8[] = { 149 149 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 150 150 }; 151 - 151 + /* 152 + * For 420 format since bits_per_pixel (bpp) is set to target bpp * 2, 153 + * QP table values for target bpp 4.0 to 4.4375 (rounded to 4.0) are 154 + * actually for bpp 8 to 8.875 (rounded to 4.0 * 2 i.e 8). 155 + * Similarly values for target bpp 4.5 to 4.8375 (rounded to 4.5) 156 + * are for bpp 9 to 9.875 (rounded to 4.5 * 2 i.e 9), and so on. 157 + */ 152 158 bpp_i = bpp - 8; 153 159 for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) { 154 160 u8 range_bpg_offset; ··· 184 178 range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK; 185 179 } 186 180 } else { 181 + /* fractional bpp part * 10000 (for precision up to 4 decimal places) */ 182 + int fractional_bits = to_bpp_frac(vdsc_cfg->bits_per_pixel); 183 + 187 184 static const s8 ofs_und6[] = { 188 185 0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12 189 186 }; ··· 200 191 10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12 201 192 }; 202 193 203 - bpp_i = (2 * (bpp - 6)); 194 + /* 195 + * QP table rows have values in increment of 0.5. 196 + * So 6.0 bpp to 6.4375 will have index 0, 6.5 to 6.9375 will have index 1, 197 + * and so on. 198 + * 0.5 fractional part with 4 decimal precision becomes 5000 199 + */ 200 + bpp_i = ((bpp - 6) + (fractional_bits < 5000 ? 
0 : 1)); 201 + 204 202 for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) { 205 203 u8 range_bpg_offset; 206 204 ··· 264 248 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); 265 249 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 266 250 struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config; 267 - u16 compressed_bpp = pipe_config->dsc.compressed_bpp; 251 + u16 compressed_bpp = to_bpp_int(pipe_config->dsc.compressed_bpp_x16); 268 252 int err; 269 253 int ret; 270 254 ··· 295 279 /* Gen 11 does not support VBR */ 296 280 vdsc_cfg->vbr_enable = false; 297 281 298 - /* Gen 11 only supports integral values of bpp */ 299 - vdsc_cfg->bits_per_pixel = compressed_bpp << 4; 282 + vdsc_cfg->bits_per_pixel = pipe_config->dsc.compressed_bpp_x16; 300 283 301 284 /* 302 285 * According to DSC 1.2 specs in Section 4.1 if native_420 is set ··· 889 874 if (vdsc_cfg->native_420) 890 875 vdsc_cfg->bits_per_pixel >>= 1; 891 876 892 - crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4; 877 + crtc_state->dsc.compressed_bpp_x16 = vdsc_cfg->bits_per_pixel; 893 878 894 879 /* PPS 2 */ 895 880 pps_temp = intel_dsc_pps_read_and_verify(crtc_state, 2);
+14 -14
drivers/gpu/drm/i915/display/skl_universal_plane.c
··· 21 21 #include "skl_scaler.h" 22 22 #include "skl_universal_plane.h" 23 23 #include "skl_watermark.h" 24 - #include "gt/intel_gt.h" 25 24 #include "pxp/intel_pxp.h" 26 25 27 26 static const u32 skl_plane_formats[] = { ··· 1006 1007 * The DPT object contains only one vma, so the VMA's offset 1007 1008 * within the DPT is always 0. 1008 1009 */ 1009 - drm_WARN_ON(&i915->drm, plane_state->dpt_vma->node.start); 1010 + drm_WARN_ON(&i915->drm, plane_state->dpt_vma && 1011 + plane_state->dpt_vma->node.start); 1010 1012 drm_WARN_ON(&i915->drm, offset & 0x1fffff); 1011 1013 return offset >> 9; 1012 1014 } else { ··· 1855 1855 } 1856 1856 } 1857 1857 1858 - static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj) 1858 + static void check_protection(struct intel_plane_state *plane_state) 1859 1859 { 1860 - struct drm_i915_private *i915 = to_i915(obj->base.dev); 1860 + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 1861 + struct drm_i915_private *i915 = to_i915(plane->base.dev); 1862 + const struct drm_framebuffer *fb = plane_state->hw.fb; 1863 + struct drm_i915_gem_object *obj = intel_fb_obj(fb); 1861 1864 1862 - return intel_pxp_key_check(i915->pxp, obj, false) == 0; 1863 - } 1865 + if (DISPLAY_VER(i915) < 11) 1866 + return; 1864 1867 1865 - static bool pxp_is_borked(struct drm_i915_gem_object *obj) 1866 - { 1867 - return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj); 1868 + plane_state->decrypt = intel_pxp_key_check(i915->pxp, obj, false) == 0; 1869 + plane_state->force_black = i915_gem_object_is_protected(obj) && 1870 + !plane_state->decrypt; 1868 1871 } 1869 1872 1870 1873 static int skl_plane_check(struct intel_crtc_state *crtc_state, ··· 1914 1911 if (ret) 1915 1912 return ret; 1916 1913 1917 - if (DISPLAY_VER(dev_priv) >= 11) { 1918 - plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb)); 1919 - plane_state->force_black = pxp_is_borked(intel_fb_obj(fb)); 1920 - } 1914 + check_protection(plane_state); 
1921 1915 1922 1916 /* HW only has 8 bits pixel precision, disable plane if invisible */ 1923 1917 if (!(plane_state->hw.alpha >> 8)) ··· 2489 2489 goto error; 2490 2490 } 2491 2491 2492 - if (!dev_priv->params.enable_dpt && 2492 + if (!dev_priv->display.params.enable_dpt && 2493 2493 intel_fb_modifier_uses_dpt(dev_priv, fb->modifier)) { 2494 2494 drm_dbg_kms(&dev_priv->drm, "DPT disabled, skipping initial FB\n"); 2495 2495 goto error;
+3 -2
drivers/gpu/drm/i915/display/skl_watermark.c
··· 412 412 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 413 413 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 414 414 415 - if (!i915->params.enable_sagv) 415 + if (!i915->display.params.enable_sagv) 416 416 return false; 417 417 418 418 if (DISPLAY_VER(i915) >= 12) ··· 3702 3702 }; 3703 3703 3704 3704 seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915))); 3705 - seq_printf(m, "SAGV modparam: %s\n", str_enabled_disabled(i915->params.enable_sagv)); 3705 + seq_printf(m, "SAGV modparam: %s\n", 3706 + str_enabled_disabled(i915->display.params.enable_sagv)); 3706 3707 seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]); 3707 3708 seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us); 3708 3709
+12 -9
drivers/gpu/drm/i915/display/vlv_dsi.c
··· 561 561 glk_dsi_disable_mipi_io(encoder); 562 562 } 563 563 564 + static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port) 565 + { 566 + return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ? 567 + BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); 568 + } 569 + 564 570 static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) 565 571 { 566 572 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); ··· 576 570 drm_dbg_kms(&dev_priv->drm, "\n"); 577 571 for_each_dsi_port(port, intel_dsi->ports) { 578 572 /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ 579 - i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? 573 + i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? 580 574 BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); 581 575 582 576 intel_de_write(dev_priv, MIPI_DEVICE_READY(port), ··· 595 589 * On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI 596 590 * Port A only. MIPI Port C has no similar bit for checking. 597 591 */ 598 - if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || port == PORT_A) && 592 + if ((IS_BROXTON(dev_priv) || port == PORT_A) && 599 593 intel_de_wait_for_clear(dev_priv, port_ctrl, 600 594 AFE_LATCHOUT, 30)) 601 595 drm_err(&dev_priv->drm, "DSI LP not going Low\n"); ··· 633 627 } 634 628 635 629 for_each_dsi_port(port, intel_dsi->ports) { 636 - i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? 637 - BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); 630 + i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); 638 631 u32 temp; 639 632 640 633 temp = intel_de_read(dev_priv, port_ctrl); ··· 669 664 enum port port; 670 665 671 666 for_each_dsi_port(port, intel_dsi->ports) { 672 - i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? 
673 - BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); 667 + i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); 674 668 675 669 /* de-assert ip_tg_enable signal */ 676 670 intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0); ··· 959 955 960 956 /* XXX: this only works for one DSI output */ 961 957 for_each_dsi_port(port, intel_dsi->ports) { 962 - i915_reg_t ctrl_reg = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? 963 - BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); 964 - bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE; 958 + i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); 959 + bool enabled = intel_de_read(dev_priv, port_ctrl) & DPI_ENABLE; 965 960 966 961 /* 967 962 * Due to some hardware limitations on VLV/CHV, the DPI enable
+1
drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
··· 89 89 90 90 if (!front) { 91 91 RCU_INIT_POINTER(obj->frontbuffer, NULL); 92 + drm_gem_object_put(intel_bo_to_drm_bo(obj)); 92 93 } else if (rcu_access_pointer(obj->frontbuffer)) { 93 94 cur = rcu_dereference_protected(obj->frontbuffer, true); 94 95 kref_get(&cur->ref);
+1
drivers/gpu/drm/i915/gt/intel_engine_pm.h
··· 10 10 #include "i915_request.h" 11 11 #include "intel_engine_types.h" 12 12 #include "intel_wakeref.h" 13 + #include "intel_gt.h" 13 14 #include "intel_gt_pm.h" 14 15 15 16 static inline bool
+14
drivers/gpu/drm/i915/gt/intel_gt.h
··· 167 167 (id__)++) \ 168 168 for_each_if(((gt__) = (i915__)->gt[(id__)])) 169 169 170 + /* Simple iterator over all initialised engines */ 171 + #define for_each_engine(engine__, gt__, id__) \ 172 + for ((id__) = 0; \ 173 + (id__) < I915_NUM_ENGINES; \ 174 + (id__)++) \ 175 + for_each_if ((engine__) = (gt__)->engine[(id__)]) 176 + 177 + /* Iterator over subset of engines selected by mask */ 178 + #define for_each_engine_masked(engine__, gt__, mask__, tmp__) \ 179 + for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \ 180 + (tmp__) ? \ 181 + ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \ 182 + 0;) 183 + 170 184 void intel_gt_info_print(const struct intel_gt_info *info, 171 185 struct drm_printer *p); 172 186
+1 -1
drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
··· 6 6 7 7 #include <drm/drm_print.h> 8 8 9 - #include "i915_drv.h" /* for_each_engine! */ 10 9 #include "intel_engine.h" 10 + #include "intel_gt.h" 11 11 #include "intel_gt_debugfs.h" 12 12 #include "intel_gt_engines_debugfs.h" 13 13
+1 -1
drivers/gpu/drm/i915/gvt/cmd_parser.c
··· 3047 3047 3048 3048 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 3049 3049 { 3050 - u32 per_ctx_start[CACHELINE_DWORDS] = {0}; 3050 + u32 per_ctx_start[CACHELINE_DWORDS] = {}; 3051 3051 unsigned char *bb_start_sva; 3052 3052 3053 3053 if (!wa_ctx->per_ctx.valid)
+3 -3
drivers/gpu/drm/i915/gvt/fb_decoder.c
··· 56 56 {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"}, 57 57 58 58 /* non-supported format has bpp default to 0 */ 59 - {0, 0, NULL}, 59 + {} 60 60 }; 61 61 62 62 static const struct pixel_format skl_pixel_formats[] = { ··· 76 76 {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"}, 77 77 78 78 /* non-supported format has bpp default to 0 */ 79 - {0, 0, NULL}, 79 + {} 80 80 }; 81 81 82 82 static int bdw_format_to_drm(int format) ··· 293 293 {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"}, 294 294 295 295 /* non-supported format has bpp default to 0 */ 296 - {0, 0, 0, 0, NULL}, 296 + {} 297 297 }; 298 298 299 299 static int cursor_mode_to_drm(int mode)
+1 -2
drivers/gpu/drm/i915/gvt/handlers.c
··· 538 538 int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc; 539 539 enum dpio_phy phy = DPIO_PHY0; 540 540 enum dpio_channel ch = DPIO_CH0; 541 - struct dpll clock = {0}; 541 + struct dpll clock = {}; 542 542 u32 temp; 543 543 544 544 /* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */ ··· 2576 2576 2577 2577 static int init_skl_mmio_info(struct intel_gvt *gvt) 2578 2578 { 2579 - struct drm_i915_private *dev_priv = gvt->gt->i915; 2580 2579 int ret; 2581 2580 2582 2581 MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
+3 -1
drivers/gpu/drm/i915/i915_debugfs.c
··· 32 32 33 33 #include <drm/drm_debugfs.h> 34 34 35 + #include "display/intel_display_params.h" 36 + 35 37 #include "gem/i915_gem_context.h" 36 38 #include "gt/intel_gt.h" 37 39 #include "gt/intel_gt_buffer_pool.h" ··· 69 67 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); 70 68 71 69 intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p); 72 - intel_display_device_info_print(DISPLAY_INFO(i915), DISPLAY_RUNTIME_INFO(i915), &p); 73 70 i915_print_iommu_status(i915, &p); 74 71 intel_gt_info_print(&to_gt(i915)->info, &p); 75 72 intel_driver_caps_print(&i915->caps, &p); 76 73 77 74 kernel_param_lock(THIS_MODULE); 78 75 i915_params_dump(&i915->params, &p); 76 + intel_display_params_dump(i915, &p); 79 77 kernel_param_unlock(THIS_MODULE); 80 78 81 79 return 0;
+2 -6
drivers/gpu/drm/i915/i915_driver.c
··· 231 231 232 232 spin_lock_init(&dev_priv->irq_lock); 233 233 spin_lock_init(&dev_priv->gpu_error.lock); 234 - mutex_init(&dev_priv->display.backlight.lock); 235 234 236 235 mutex_init(&dev_priv->sb_lock); 237 236 cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); 238 - 239 - mutex_init(&dev_priv->display.audio.mutex); 240 - mutex_init(&dev_priv->display.wm.wm_mutex); 241 - mutex_init(&dev_priv->display.pps.mutex); 242 - mutex_init(&dev_priv->display.hdcp.hdcp_mutex); 243 237 244 238 i915_memcpy_init_early(dev_priv); 245 239 intel_runtime_pm_init_early(&dev_priv->runtime_pm); ··· 903 909 intel_runtime_pm_driver_release(rpm); 904 910 905 911 i915_driver_late_release(dev_priv); 912 + 913 + intel_display_device_remove(dev_priv); 906 914 } 907 915 908 916 static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+1 -19
drivers/gpu/drm/i915/i915_drv.h
··· 396 396 return i915->gt[0]; 397 397 } 398 398 399 - /* Simple iterator over all initialised engines */ 400 - #define for_each_engine(engine__, gt__, id__) \ 401 - for ((id__) = 0; \ 402 - (id__) < I915_NUM_ENGINES; \ 403 - (id__)++) \ 404 - for_each_if ((engine__) = (gt__)->engine[(id__)]) 405 - 406 - /* Iterator over subset of engines selected by mask */ 407 - #define for_each_engine_masked(engine__, gt__, mask__, tmp__) \ 408 - for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \ 409 - (tmp__) ? \ 410 - ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \ 411 - 0;) 412 - 413 399 #define rb_to_uabi_engine(rb) \ 414 400 rb_entry_safe(rb, struct intel_engine_cs, uabi_node) 415 401 416 402 #define for_each_uabi_engine(engine__, i915__) \ 417 403 for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\ 418 404 (engine__); \ 419 - (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) 420 - 421 - #define for_each_uabi_class_engine(engine__, class__, i915__) \ 422 - for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \ 423 - (engine__) && (engine__)->uabi_class == (class__); \ 424 405 (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) 425 406 426 407 #define INTEL_INFO(i915) ((i915)->__info) ··· 556 575 #define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2) 557 576 #define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO) 558 577 #define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE) 578 + #define IS_LUNARLAKE(i915) 0 559 579 560 580 #define IS_DG2_G10(i915) \ 561 581 IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
-2
drivers/gpu/drm/i915/i915_gem.c
··· 1306 1306 { 1307 1307 i915_gem_init__mm(dev_priv); 1308 1308 i915_gem_init__contexts(dev_priv); 1309 - 1310 - spin_lock_init(&dev_priv->display.fb_tracking.lock); 1311 1309 } 1312 1310 1313 1311 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
+4 -1
drivers/gpu/drm/i915/i915_gpu_error.c
··· 660 660 struct drm_printer p = i915_error_printer(m); 661 661 662 662 i915_params_dump(params, &p); 663 + intel_display_params_dump(m->i915, &p); 663 664 } 664 665 665 666 static void err_print_pciid(struct drm_i915_error_state_buf *m, ··· 1028 1027 static void cleanup_params(struct i915_gpu_coredump *error) 1029 1028 { 1030 1029 i915_params_free(&error->params); 1030 + intel_display_params_free(&error->display_params); 1031 1031 } 1032 1032 1033 1033 static void cleanup_uc(struct intel_uc_coredump *uc) ··· 1990 1988 error->suspend_count = i915->suspend_count; 1991 1989 1992 1990 i915_params_copy(&error->params, &i915->params); 1991 + intel_display_params_copy(&error->display_params); 1993 1992 memcpy(&error->device_info, 1994 1993 INTEL_INFO(i915), 1995 1994 sizeof(error->device_info)); ··· 2177 2174 ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) { 2178 2175 pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); 2179 2176 pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n"); 2180 - pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n"); 2177 + pr_info("Please see https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html for details.\n"); 2181 2178 pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 2182 2179 pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n"); 2183 2180 pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+2
drivers/gpu/drm/i915/i915_gpu_error.h
··· 15 15 #include <drm/drm_mm.h> 16 16 17 17 #include "display/intel_display_device.h" 18 + #include "display/intel_display_params.h" 18 19 #include "gt/intel_engine.h" 19 20 #include "gt/intel_gt_types.h" 20 21 #include "gt/uc/intel_uc_fw.h" ··· 215 214 struct intel_display_runtime_info display_runtime_info; 216 215 struct intel_driver_caps driver_caps; 217 216 struct i915_params params; 217 + struct intel_display_params display_params; 218 218 219 219 struct intel_overlay_error_state *overlay; 220 220
-89
drivers/gpu/drm/i915/i915_params.c
··· 67 67 "Use kernel modesetting [KMS] (0=disable, " 68 68 "1=on, -1=force vga console preference [default])"); 69 69 70 - i915_param_named_unsafe(enable_dc, int, 0400, 71 - "Enable power-saving display C-states. " 72 - "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; " 73 - "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)"); 74 - 75 - i915_param_named_unsafe(enable_fbc, int, 0400, 76 - "Enable frame buffer compression for power savings " 77 - "(default: -1 (use per-chip default))"); 78 - 79 - i915_param_named_unsafe(lvds_channel_mode, int, 0400, 80 - "Specify LVDS channel mode " 81 - "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); 82 - 83 - i915_param_named_unsafe(panel_use_ssc, int, 0400, 84 - "Use Spread Spectrum Clock with panels [LVDS/eDP] " 85 - "(default: auto from VBT)"); 86 - 87 - i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400, 88 - "Override/Ignore selection of SDVO panel mode in the VBT " 89 - "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); 90 - 91 70 i915_param_named_unsafe(reset, uint, 0400, 92 71 "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); 93 - 94 - i915_param_named_unsafe(vbt_firmware, charp, 0400, 95 - "Load VBT from specified file under /lib/firmware"); 96 72 97 73 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 98 74 i915_param_named(error_capture, bool, 0400, ··· 82 106 "WARNING: Disabling this can cause system wide hangs. " 83 107 "(default: true)"); 84 108 85 - i915_param_named_unsafe(enable_psr, int, 0400, 86 - "Enable PSR " 87 - "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) " 88 - "Default: -1 (use per-chip default)"); 89 - 90 - i915_param_named(psr_safest_params, bool, 0400, 91 - "Replace PSR VBT parameters by the safest and not optimal ones. This " 92 - "is helpful to detect if PSR issues are related to bad values set in " 93 - " VBT. 
(0=use VBT parameters, 1=use safest parameters)"); 94 - 95 - i915_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400, 96 - "Enable PSR2 selective fetch " 97 - "(0=disabled, 1=enabled) " 98 - "Default: 0"); 99 - 100 - i915_param_named_unsafe(enable_sagv, bool, 0600, 101 - "Enable system agent voltage/frequency scaling (SAGV) (default: true)"); 102 - 103 109 i915_param_named_unsafe(force_probe, charp, 0400, 104 110 "Force probe options for specified supported devices. " 105 111 "See CONFIG_DRM_I915_FORCE_PROBE for details."); 106 - 107 - i915_param_named_unsafe(disable_power_well, int, 0400, 108 - "Disable display power wells when possible " 109 - "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); 110 - 111 - i915_param_named_unsafe(enable_ips, int, 0400, "Enable IPS (default: true)"); 112 - 113 - i915_param_named_unsafe(enable_dpt, bool, 0400, 114 - "Enable display page table (DPT) (default: true)"); 115 - 116 - i915_param_named_unsafe(load_detect_test, bool, 0400, 117 - "Force-enable the VGA load detect code for testing (default:false). " 118 - "For developers only."); 119 - 120 - i915_param_named_unsafe(force_reset_modeset_test, bool, 0400, 121 - "Force a modeset during gpu reset for testing (default:false). " 122 - "For developers only."); 123 - 124 - i915_param_named_unsafe(invert_brightness, int, 0400, 125 - "Invert backlight brightness " 126 - "(-1 force normal, 0 machine defaults, 1 force inversion), please " 127 - "report PCI device ID, subsystem vendor and subsystem device ID " 128 - "to dri-devel@lists.freedesktop.org, if your machine needs it. 
" 129 - "It will then be included in an upcoming module version."); 130 - 131 - i915_param_named(disable_display, bool, 0400, 132 - "Disable display (default: false)"); 133 112 134 113 i915_param_named(memtest, bool, 0400, 135 114 "Perform a read/write test of all device memory on module load (default: off)"); ··· 92 161 i915_param_named(mmio_debug, int, 0400, 93 162 "Enable the MMIO debug code for the first N failures (default: off). " 94 163 "This may negatively affect performance."); 95 - 96 - /* Special case writable file */ 97 - i915_param_named(verbose_state_checks, bool, 0600, 98 - "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); 99 - 100 - i915_param_named_unsafe(nuclear_pageflip, bool, 0400, 101 - "Force enable atomic functionality on platforms that don't have full support yet."); 102 - 103 - /* WA to get away with the default setting in VBT for early platforms.Will be removed */ 104 - i915_param_named_unsafe(edp_vswing, int, 0400, 105 - "Ignore/Override vswing pre-emph table selection from VBT " 106 - "(0=use value from vbt [default], 1=low power swing(200mV)," 107 - "2=default swing(400mV))"); 108 164 109 165 i915_param_named_unsafe(enable_guc, int, 0400, 110 166 "Enable GuC load for GuC submission and/or HuC load. " ··· 114 196 i915_param_named_unsafe(gsc_firmware_path, charp, 0400, 115 197 "GSC firmware path to use instead of the default one"); 116 198 117 - i915_param_named_unsafe(enable_dp_mst, bool, 0400, 118 - "Enable multi-stream transport (MST) for new DisplayPort sinks. 
(default: true)"); 119 - 120 199 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) 121 200 i915_param_named_unsafe(inject_probe_failure, uint, 0400, 122 201 "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); 123 202 #endif 124 - 125 - i915_param_named(enable_dpcd_backlight, int, 0400, 126 - "Enable support for DPCD backlight control" 127 - "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)"); 128 203 129 204 #if IS_ENABLED(CONFIG_DRM_I915_GVT) 130 205 i915_param_named(enable_gvt, bool, 0400,
-22
drivers/gpu/drm/i915/i915_params.h
··· 46 46 * debugfs file 47 47 */ 48 48 #define I915_PARAMS_FOR_EACH(param) \ 49 - param(char *, vbt_firmware, NULL, 0400) \ 50 49 param(int, modeset, -1, 0400) \ 51 - param(int, lvds_channel_mode, 0, 0400) \ 52 - param(int, panel_use_ssc, -1, 0600) \ 53 - param(int, vbt_sdvo_panel_type, -1, 0400) \ 54 - param(int, enable_dc, -1, 0400) \ 55 - param(int, enable_fbc, -1, 0600) \ 56 - param(int, enable_psr, -1, 0600) \ 57 - param(bool, enable_dpt, true, 0400) \ 58 - param(bool, psr_safest_params, false, 0400) \ 59 - param(bool, enable_psr2_sel_fetch, true, 0400) \ 60 - param(bool, enable_sagv, true, 0600) \ 61 - param(int, disable_power_well, -1, 0400) \ 62 - param(int, enable_ips, 1, 0600) \ 63 - param(int, invert_brightness, 0, 0600) \ 64 50 param(int, enable_guc, -1, 0400) \ 65 51 param(int, guc_log_level, -1, 0400) \ 66 52 param(char *, guc_firmware_path, NULL, 0400) \ ··· 55 69 param(char *, gsc_firmware_path, NULL, 0400) \ 56 70 param(bool, memtest, false, 0400) \ 57 71 param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \ 58 - param(int, edp_vswing, 0, 0400) \ 59 72 param(unsigned int, reset, 3, 0600) \ 60 73 param(unsigned int, inject_probe_failure, 0, 0) \ 61 - param(int, enable_dpcd_backlight, -1, 0600) \ 62 74 param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \ 63 75 param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \ 64 76 param(unsigned int, lmem_size, 0, 0400) \ 65 77 param(unsigned int, lmem_bar_size, 0, 0400) \ 66 78 /* leave bools at the end to not create holes */ \ 67 79 param(bool, enable_hangcheck, true, 0600) \ 68 - param(bool, load_detect_test, false, 0600) \ 69 - param(bool, force_reset_modeset_test, false, 0600) \ 70 80 param(bool, error_capture, true, IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) ? 
0600 : 0) \ 71 - param(bool, disable_display, false, 0400) \ 72 - param(bool, verbose_state_checks, true, 0) \ 73 - param(bool, nuclear_pageflip, false, 0400) \ 74 - param(bool, enable_dp_mst, true, 0600) \ 75 81 param(bool, enable_gvt, false, IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0) 76 82 77 83 #define MEMBER(T, member, ...) T member;
-2
drivers/gpu/drm/i915/i915_reg.h
··· 195 195 #define DPIO_SFR_BYPASS (1 << 1) 196 196 #define DPIO_CMNRST (1 << 0) 197 197 198 - #define DPIO_PHY(pipe) ((pipe) >> 1) 199 - 200 198 /* 201 199 * Per pipe/PLL DPIO regs 202 200 */
+1 -1
drivers/gpu/drm/i915/i915_utils.h
··· 40 40 struct drm_i915_private; 41 41 struct timer_list; 42 42 43 - #define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs" 43 + #define FDO_BUG_URL "https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html" 44 44 45 45 #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ 46 46 __stringify(x), (long)(x))
-2
drivers/gpu/drm/i915/intel_runtime_pm.h
··· 11 11 12 12 #include "intel_wakeref.h" 13 13 14 - #include "i915_utils.h" 15 - 16 14 struct device; 17 15 struct drm_i915_private; 18 16 struct drm_printer;
+2
drivers/gpu/drm/i915/selftests/intel_uncore.c
··· 24 24 25 25 #include "../i915_selftest.h" 26 26 27 + #include "gt/intel_gt.h" 28 + 27 29 static int intel_fw_table_check(const struct intel_forcewake_range *ranges, 28 30 unsigned int num_ranges, 29 31 bool is_watertight)
+14 -13
drivers/gpu/drm/i915/soc/intel_gmch.c
··· 33 33 i915->gmch.pdev); 34 34 } 35 35 36 + static int mchbar_reg(struct drm_i915_private *i915) 37 + { 38 + return GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915; 39 + } 40 + 36 41 /* Allocate space for the MCH regs if needed, return nonzero on error */ 37 42 static int 38 43 intel_alloc_mchbar_resource(struct drm_i915_private *i915) 39 44 { 40 - int reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915; 41 45 u32 temp_lo, temp_hi = 0; 42 46 u64 mchbar_addr; 43 47 int ret; 44 48 45 49 if (GRAPHICS_VER(i915) >= 4) 46 - pci_read_config_dword(i915->gmch.pdev, reg + 4, &temp_hi); 47 - pci_read_config_dword(i915->gmch.pdev, reg, &temp_lo); 50 + pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4, &temp_hi); 51 + pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp_lo); 48 52 mchbar_addr = ((u64)temp_hi << 32) | temp_lo; 49 53 50 54 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ ··· 72 68 } 73 69 74 70 if (GRAPHICS_VER(i915) >= 4) 75 - pci_write_config_dword(i915->gmch.pdev, reg + 4, 71 + pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4, 76 72 upper_32_bits(i915->gmch.mch_res.start)); 77 73 78 - pci_write_config_dword(i915->gmch.pdev, reg, 74 + pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915), 79 75 lower_32_bits(i915->gmch.mch_res.start)); 80 76 return 0; 81 77 } ··· 83 79 /* Setup MCHBAR if possible, return true if we should disable it again */ 84 80 void intel_gmch_bar_setup(struct drm_i915_private *i915) 85 81 { 86 - int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; 87 82 u32 temp; 88 83 bool enabled; 89 84 ··· 95 92 pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp); 96 93 enabled = !!(temp & DEVEN_MCHBAR_EN); 97 94 } else { 98 - pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp); 95 + pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp); 99 96 enabled = temp & 1; 100 97 } 101 98 ··· 113 110 pci_write_config_dword(i915->gmch.pdev, DEVEN, 114 111 temp | DEVEN_MCHBAR_EN); 115 112 } else { 116 - pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp); 117 - pci_write_config_dword(i915->gmch.pdev, mchbar_reg, temp | 1); 113 + pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp); 114 + pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915), temp | 1); 118 115 } 119 116 } 120 117 121 118 void intel_gmch_bar_teardown(struct drm_i915_private *i915) 122 119 { 123 - int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915; 124 - 125 120 if (i915->gmch.mchbar_need_disable) { 126 121 if (IS_I915G(i915) || IS_I915GM(i915)) { 127 122 u32 deven_val; ··· 132 131 } else { 133 132 u32 mchbar_val; 134 133 135 - pci_read_config_dword(i915->gmch.pdev, mchbar_reg, 134 + pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), 136 135 &mchbar_val); 137 136 mchbar_val &= ~1; 138 - pci_write_config_dword(i915->gmch.pdev, mchbar_reg, 137 + pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915), 139 138 mchbar_val); 140 139 } 141 140 }
+6 -23
drivers/gpu/drm/i915/vlv_sideband.c
··· 166 166 return val; 167 167 } 168 168 169 - u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg) 170 - { 171 - u32 val = 0; 172 - 173 - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port, 174 - SB_CRRDDA_NP, reg, &val); 175 - 176 - return val; 177 - } 178 - 179 - void vlv_iosf_sb_write(struct drm_i915_private *i915, 180 - u8 port, u32 reg, u32 val) 181 - { 182 - vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port, 183 - SB_CRWRDA_NP, reg, &val); 184 - } 185 - 186 169 u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg) 187 170 { 188 171 u32 val = 0; ··· 210 227 return IOSF_PORT_DPIO; 211 228 } 212 229 213 - u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg) 230 + u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg) 214 231 { 215 - u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); 232 + u32 port = vlv_dpio_phy_iosf_port(i915, phy); 216 233 u32 val = 0; 217 234 218 235 vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val); ··· 222 239 * so ideally we should check the register offset instead... 223 240 */ 224 241 drm_WARN(&i915->drm, val == 0xffffffff, 225 - "DPIO read pipe %c reg 0x%x == 0x%x\n", 226 - pipe_name(pipe), reg, val); 242 + "DPIO PHY%d read reg 0x%x == 0x%x\n", 243 + phy, reg, val); 227 244 228 245 return val; 229 246 } 230 247 231 248 void vlv_dpio_write(struct drm_i915_private *i915, 232 - enum pipe pipe, int reg, u32 val) 249 + enum dpio_phy phy, int reg, u32 val) 233 250 { 234 - u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe)); 251 + u32 port = vlv_dpio_phy_iosf_port(i915, phy); 235 252 236 253 vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val); 237 254 }
+3 -6
drivers/gpu/drm/i915/vlv_sideband.h
··· 11 11 12 12 #include "vlv_sideband_reg.h" 13 13 14 - enum pipe; 14 + enum dpio_phy; 15 15 struct drm_i915_private; 16 16 17 17 enum { ··· 26 26 }; 27 27 28 28 void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports); 29 - u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg); 30 - void vlv_iosf_sb_write(struct drm_i915_private *i915, 31 - u8 port, u32 reg, u32 val); 32 29 void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports); 33 30 34 31 static inline void vlv_bunit_get(struct drm_i915_private *i915) ··· 72 75 vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_DPIO)); 73 76 } 74 77 75 - u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg); 78 + u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg); 76 79 void vlv_dpio_write(struct drm_i915_private *i915, 77 - enum pipe pipe, int reg, u32 val); 80 + enum dpio_phy phy, int reg, u32 val); 78 81 79 82 static inline void vlv_dpio_put(struct drm_i915_private *i915) 80 83 {
+5 -4
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 40 40 #include <drm/drm_edid.h> 41 41 #include <drm/drm_eld.h> 42 42 #include <drm/drm_fb_helper.h> 43 + #include <drm/drm_fixed.h> 43 44 #include <drm/drm_probe_helper.h> 44 45 #include <drm/drm_vblank.h> 45 46 ··· 947 946 if (ret == 0) { 948 947 nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 949 948 payload->vc_start_slot, payload->time_slots, 950 - payload->pbn, payload->time_slots * mst_state->pbn_div); 949 + payload->pbn, 950 + payload->time_slots * dfixed_trunc(mst_state->pbn_div)); 951 951 } else { 952 952 nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0); 953 953 } ··· 985 983 const int clock = crtc_state->adjusted_mode.clock; 986 984 987 985 asyh->or.bpc = connector->display_info.bpc; 988 - asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, 989 - false); 986 + asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3 << 4); 990 987 } 991 988 992 989 mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr); 993 990 if (IS_ERR(mst_state)) 994 991 return PTR_ERR(mst_state); 995 992 996 - if (!mst_state->pbn_div) { 993 + if (!mst_state->pbn_div.full) { 997 994 struct nouveau_encoder *outp = mstc->mstm->outp; 998 995 999 996 mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr,
+163 -3
drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
··· 42 42 .clock = 332880, 43 43 .bpp = 24, 44 44 .dsc = true, 45 - .expected = 50 45 + .expected = 1191 46 46 }, 47 47 { 48 48 .clock = 324540, 49 49 .bpp = 24, 50 50 .dsc = true, 51 - .expected = 49 51 + .expected = 1161 52 52 }, 53 53 }; 54 54 ··· 56 56 { 57 57 const struct drm_dp_mst_calc_pbn_mode_test *params = test->param_value; 58 58 59 - KUNIT_EXPECT_EQ(test, drm_dp_calc_pbn_mode(params->clock, params->bpp, params->dsc), 59 + KUNIT_EXPECT_EQ(test, drm_dp_calc_pbn_mode(params->clock, params->bpp << 4), 60 60 params->expected); 61 61 } 62 62 ··· 67 67 68 68 KUNIT_ARRAY_PARAM(drm_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_cases, 69 69 dp_mst_calc_pbn_mode_desc); 70 + 71 + struct drm_dp_mst_calc_pbn_div_test { 72 + int link_rate; 73 + int lane_count; 74 + fixed20_12 expected; 75 + }; 76 + 77 + #define fp_init(__int, __frac) { \ 78 + .full = (__int) * (1 << 12) + \ 79 + (__frac) * (1 << 12) / 100000 \ 80 + } 81 + 82 + static const struct drm_dp_mst_calc_pbn_div_test drm_dp_mst_calc_pbn_div_dp1_4_cases[] = { 83 + /* 84 + * UHBR rates (DP Standard v2.1 2.7.6.3, specifying the rounded to 85 + * closest value to 2 decimal places): 86 + * .expected = .link_rate * .lane_count * 0.9671 / 8 / 54 / 100 87 + * DP1.4 rates (DP Standard v2.1 2.6.4.2): 88 + * .expected = .link_rate * .lane_count * 0.8000 / 8 / 54 / 100 89 + * 90 + * truncated to 5 decimal places. 
91 + */ 92 + { 93 + .link_rate = 2000000, 94 + .lane_count = 4, 95 + .expected = fp_init(179, 9259), /* 179.09259 */ 96 + }, 97 + { 98 + .link_rate = 2000000, 99 + .lane_count = 2, 100 + .expected = fp_init(89, 54629), 101 + }, 102 + { 103 + .link_rate = 2000000, 104 + .lane_count = 1, 105 + .expected = fp_init(44, 77314), 106 + }, 107 + { 108 + .link_rate = 1350000, 109 + .lane_count = 4, 110 + .expected = fp_init(120, 88750), 111 + }, 112 + { 113 + .link_rate = 1350000, 114 + .lane_count = 2, 115 + .expected = fp_init(60, 44375), 116 + }, 117 + { 118 + .link_rate = 1350000, 119 + .lane_count = 1, 120 + .expected = fp_init(30, 22187), 121 + }, 122 + { 123 + .link_rate = 1000000, 124 + .lane_count = 4, 125 + .expected = fp_init(89, 54629), 126 + }, 127 + { 128 + .link_rate = 1000000, 129 + .lane_count = 2, 130 + .expected = fp_init(44, 77314), 131 + }, 132 + { 133 + .link_rate = 1000000, 134 + .lane_count = 1, 135 + .expected = fp_init(22, 38657), 136 + }, 137 + { 138 + .link_rate = 810000, 139 + .lane_count = 4, 140 + .expected = fp_init(60, 0), 141 + }, 142 + { 143 + .link_rate = 810000, 144 + .lane_count = 2, 145 + .expected = fp_init(30, 0), 146 + }, 147 + { 148 + .link_rate = 810000, 149 + .lane_count = 1, 150 + .expected = fp_init(15, 0), 151 + }, 152 + { 153 + .link_rate = 540000, 154 + .lane_count = 4, 155 + .expected = fp_init(40, 0), 156 + }, 157 + { 158 + .link_rate = 540000, 159 + .lane_count = 2, 160 + .expected = fp_init(20, 0), 161 + }, 162 + { 163 + .link_rate = 540000, 164 + .lane_count = 1, 165 + .expected = fp_init(10, 0), 166 + }, 167 + { 168 + .link_rate = 270000, 169 + .lane_count = 4, 170 + .expected = fp_init(20, 0), 171 + }, 172 + { 173 + .link_rate = 270000, 174 + .lane_count = 2, 175 + .expected = fp_init(10, 0), 176 + }, 177 + { 178 + .link_rate = 270000, 179 + .lane_count = 1, 180 + .expected = fp_init(5, 0), 181 + }, 182 + { 183 + .link_rate = 162000, 184 + .lane_count = 4, 185 + .expected = fp_init(12, 0), 186 + }, 187 + { 188 + 
.link_rate = 162000, 189 + .lane_count = 2, 190 + .expected = fp_init(6, 0), 191 + }, 192 + { 193 + .link_rate = 162000, 194 + .lane_count = 1, 195 + .expected = fp_init(3, 0), 196 + }, 197 + }; 198 + 199 + static void drm_test_dp_mst_calc_pbn_div(struct kunit *test) 200 + { 201 + const struct drm_dp_mst_calc_pbn_div_test *params = test->param_value; 202 + /* mgr->dev is only needed by drm_dbg_kms(), but it's not called for the test cases. */ 203 + struct drm_dp_mst_topology_mgr *mgr = test->priv; 204 + 205 + KUNIT_EXPECT_EQ(test, drm_dp_get_vc_payload_bw(mgr, params->link_rate, params->lane_count).full, 206 + params->expected.full); 207 + } 208 + 209 + static void dp_mst_calc_pbn_div_desc(const struct drm_dp_mst_calc_pbn_div_test *t, char *desc) 210 + { 211 + sprintf(desc, "Link rate %d lane count %d", t->link_rate, t->lane_count); 212 + } 213 + 214 + KUNIT_ARRAY_PARAM(drm_dp_mst_calc_pbn_div, drm_dp_mst_calc_pbn_div_dp1_4_cases, 215 + dp_mst_calc_pbn_div_desc); 70 216 71 217 static u8 data[] = { 0xff, 0x00, 0xdd }; 72 218 ··· 562 416 563 417 static struct kunit_case drm_dp_mst_helper_tests[] = { 564 418 KUNIT_CASE_PARAM(drm_test_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_gen_params), 419 + KUNIT_CASE_PARAM(drm_test_dp_mst_calc_pbn_div, drm_dp_mst_calc_pbn_div_gen_params), 565 420 KUNIT_CASE_PARAM(drm_test_dp_mst_sideband_msg_req_decode, 566 421 drm_dp_mst_sideband_msg_req_gen_params), 567 422 { } 568 423 }; 569 424 425 + static int drm_dp_mst_helper_tests_init(struct kunit *test) 426 + { 427 + struct drm_dp_mst_topology_mgr *mgr; 428 + 429 + mgr = kunit_kzalloc(test, sizeof(*mgr), GFP_KERNEL); 430 + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mgr); 431 + 432 + test->priv = mgr; 433 + 434 + return 0; 435 + } 436 + 570 437 static struct kunit_suite drm_dp_mst_helper_test_suite = { 571 438 .name = "drm_dp_mst_helper", 439 + .init = drm_dp_mst_helper_tests_init, 572 440 .test_cases = drm_dp_mst_helper_tests, 573 441 }; 574 442
+25
include/drm/display/drm_dp.h
··· 148 148 #define DP_RECEIVE_PORT_0_CAP_0 0x008 149 149 # define DP_LOCAL_EDID_PRESENT (1 << 1) 150 150 # define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2) 151 + # define DP_HBLANK_EXPANSION_CAPABLE (1 << 3) 151 152 152 153 #define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009 153 154 ··· 544 543 /* DFP Capability Extension */ 545 544 #define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */ 546 545 546 + #define DP_PANEL_REPLAY_CAP 0x0b0 /* DP 2.0 */ 547 + # define DP_PANEL_REPLAY_SUPPORT (1 << 0) 548 + # define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1) 549 + 547 550 /* Link Configuration */ 548 551 #define DP_LINK_BW_SET 0x100 549 552 # define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */ ··· 704 699 705 700 #define DP_DSC_ENABLE 0x160 /* DP 1.4 */ 706 701 # define DP_DECOMPRESSION_EN (1 << 0) 702 + # define DP_DSC_PASSTHROUGH_EN (1 << 1) 707 703 #define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */ 708 704 709 705 #define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ ··· 721 715 722 716 #define DP_BRANCH_DEVICE_CTRL 0x1a1 723 717 # define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0) 718 + 719 + #define PANEL_REPLAY_CONFIG 0x1b0 /* DP 2.0 */ 720 + # define DP_PANEL_REPLAY_ENABLE (1 << 0) 721 + # define DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN (1 << 3) 722 + # define DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN (1 << 4) 723 + # define DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN (1 << 5) 724 + # define DP_PANEL_REPLAY_SU_ENABLE (1 << 6) 724 725 725 726 #define DP_PAYLOAD_ALLOCATE_SET 0x1c0 726 727 #define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1 ··· 1117 1104 #define DP_LANE2_3_STATUS_ESI 0x200d /* status same as 0x203 */ 1118 1105 #define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */ 1119 1106 #define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */ 1107 + 1108 + #define DP_PANEL_REPLAY_ERROR_STATUS 0x2020 /* DP 2.1*/ 1109 + # define DP_PANEL_REPLAY_LINK_CRC_ERROR (1 << 0) 1110 + # define DP_PANEL_REPLAY_RFB_STORAGE_ERROR (1 << 1) 1111 + # define DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2) 
1112 + 1113 + #define DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS 0x2022 /* DP 2.1 */ 1114 + # define DP_SINK_DEVICE_PANEL_REPLAY_STATUS_MASK (7 << 0) 1115 + # define DP_SINK_FRAME_LOCKED_SHIFT 3 1116 + # define DP_SINK_FRAME_LOCKED_MASK (3 << 3) 1117 + # define DP_SINK_FRAME_LOCKED_STATUS_VALID_SHIFT 5 1118 + # define DP_SINK_FRAME_LOCKED_STATUS_VALID_MASK (1 << 5) 1120 1119 1121 1120 /* Extended Receiver Capability: See DP_DPCD_REV for definitions */ 1122 1121 #define DP_DP13_DPCD_REV 0x2200
+32
include/drm/display/drm_dp_helper.h
··· 164 164 } 165 165 166 166 /* DP/eDP DSC support */ 167 + u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); 167 168 u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], 168 169 bool is_edp); 169 170 u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]); ··· 250 249 drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]) 251 250 { 252 251 return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP); 252 + } 253 + 254 + /** 255 + * drm_dp_is_uhbr_rate - Determine if a link rate is UHBR 256 + * @link_rate: link rate in 10kbits/s units 257 + * 258 + * Determine if the provided link rate is an UHBR rate. 259 + * 260 + * Returns: %True if @link_rate is an UHBR rate. 261 + */ 262 + static inline bool drm_dp_is_uhbr_rate(int link_rate) 263 + { 264 + return link_rate >= 1000000; 253 265 } 254 266 255 267 /* ··· 646 632 * the DP_MAX_LINK_RATE register reporting a lower max multiplier. 647 633 */ 648 634 DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS, 635 + /** 636 + * @DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC: 637 + * 638 + * The device applies HBLANK expansion for some modes, but this 639 + * requires enabling DSC. 
640 + */ 641 + DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC, 649 642 }; 650 643 651 644 /** ··· 801 780 bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE], 802 781 const u8 port_cap[4], u8 color_spc); 803 782 int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc); 783 + 784 + #define DRM_DP_BW_OVERHEAD_MST BIT(0) 785 + #define DRM_DP_BW_OVERHEAD_UHBR BIT(1) 786 + #define DRM_DP_BW_OVERHEAD_SSC_REF_CLK BIT(2) 787 + #define DRM_DP_BW_OVERHEAD_FEC BIT(3) 788 + #define DRM_DP_BW_OVERHEAD_DSC BIT(4) 789 + 790 + int drm_dp_bw_overhead(int lane_count, int hactive, 791 + int dsc_slice_count, 792 + int bpp_x16, unsigned long flags); 793 + int drm_dp_bw_channel_coding_efficiency(bool is_uhbr); 804 794 805 795 #endif /* _DRM_DP_HELPER_H_ */
+12 -4
include/drm/display/drm_dp_mst_helper.h
··· 25 25 #include <linux/types.h> 26 26 #include <drm/display/drm_dp_helper.h> 27 27 #include <drm/drm_atomic.h> 28 + #include <drm/drm_fixed.h> 28 29 29 30 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) 30 31 #include <linux/stackdepot.h> ··· 618 617 * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this 619 618 * out itself. 620 619 */ 621 - int pbn_div; 620 + fixed20_12 pbn_div; 622 621 }; 623 622 624 623 #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) ··· 840 839 struct drm_dp_mst_topology_mgr *mgr, 841 840 struct drm_dp_mst_port *port); 842 841 843 - int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, 844 - int link_rate, int link_lane_count); 842 + fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, 843 + int link_rate, int link_lane_count); 845 844 846 - int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); 845 + int drm_dp_calc_pbn_mode(int clock, int bpp); 847 846 848 847 void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap); 849 848 ··· 893 892 struct drm_dp_mst_atomic_payload * 894 893 drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state, 895 894 struct drm_dp_mst_port *port); 895 + bool drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr, 896 + struct drm_dp_mst_port *port, 897 + struct drm_dp_mst_port *parent); 896 898 int __must_check 897 899 drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, 898 900 struct drm_dp_mst_topology_mgr *mgr, ··· 917 913 int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, 918 914 struct drm_dp_mst_port *port, 919 915 struct drm_dp_query_stream_enc_status_ack_reply *status); 916 + int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state, 917 + struct drm_dp_mst_topology_mgr *mgr, 918 + struct drm_dp_mst_topology_state *mst_state, 919 + struct 
drm_dp_mst_port **failing_port); 920 920 int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); 921 921 int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state, 922 922 struct drm_dp_mst_topology_mgr *mgr);
+8 -11
include/drm/drm_color_mgmt.h
··· 36 36 * 37 37 * Extract a degamma/gamma LUT value provided by user (in the form of 38 38 * &drm_color_lut entries) and round it to the precision supported by the 39 - * hardware. 39 + * hardware, following OpenGL int<->float conversion rules 40 + * (see eg. OpenGL 4.6 specification - 2.3.5 Fixed-Point Data Conversions). 40 41 */ 41 42 static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision) 42 43 { 43 - u32 val = user_input; 44 - u32 max = 0xffff >> (16 - bit_precision); 45 - 46 - /* Round only if we're not using full precision. */ 47 - if (bit_precision < 16) { 48 - val += 1UL << (16 - bit_precision - 1); 49 - val >>= 16 - bit_precision; 50 - } 51 - 52 - return clamp_val(val, 0, max); 44 + if (bit_precision > 16) 45 + return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(user_input, (1 << bit_precision) - 1), 46 + (1 << 16) - 1); 47 + else 48 + return DIV_ROUND_CLOSEST(user_input * ((1 << bit_precision) - 1), 49 + (1 << 16) - 1); 53 50 } 54 51 55 52 u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n);