Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2023-04-06' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Fix DPT+shmem combo and add i915.enable_dpt modparam (Ville)
- i915.enable_sagv module parameter (Ville)
- Correction to QGV-related register addresses (Vinod)
- IPS debugfs per-crtc and new file for false_color (Ville)
- More clean-up and reorganization of Display code (Jani)
- DP DSC related fixes and improvements (Stanislav, Ankit, Suraj, Swati)
- Make utility pin asserts more accurate (Ville)
- Meteor Lake enabling (Daniele)
- High refresh rate PSR fixes (Jouni)
- Cursor and Plane chicken register fixes (Ville)
- Align the ADL-P TypeC sequences with hardware specification (Imre)
- Documentation build fixes and improvements to catch bugs earlier (Lee, Jani)
- PL1 power limit hwmon entry changed to use 0 as disabled state (Ashutosh)
- DP aux sync fix and improvements (Ville)
- DP MST fixes and w/a (Stanislav)
- Limit PXP drm-errors or warnings on firmware API failures (Alan)

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZC7RR3Laet8ywHRo@intel.com

+3970 -2893
+3 -1
Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
··· 14 14 15 15 The power controller will throttle the operating frequency 16 16 if the power averaged over a window (typically seconds) 17 - exceeds this limit. 17 + exceeds this limit. A read value of 0 means that the PL1 18 + power limit is disabled, writing 0 disables the 19 + limit. Writing values > 0 will enable the power limit. 18 20 19 21 Only supported for particular Intel i915 graphics platforms. 20 22
-6
drivers/gpu/drm/i915/Kconfig
··· 164 164 source "drivers/gpu/drm/i915/Kconfig.profile" 165 165 endmenu 166 166 167 - menu "drm/i915 Unstable Evolution" 168 - visible if EXPERT && STAGING && BROKEN 169 - depends on DRM_I915 170 - source "drivers/gpu/drm/i915/Kconfig.unstable" 171 - endmenu 172 - 173 167 config DRM_I915_GVT 174 168 bool
-21
drivers/gpu/drm/i915/Kconfig.unstable
··· 1 - # SPDX-License-Identifier: GPL-2.0-only 2 - config DRM_I915_UNSTABLE 3 - bool "Enable unstable API for early prototype development" 4 - depends on EXPERT 5 - depends on STAGING 6 - depends on BROKEN # should never be enabled by distros! 7 - # We use the dependency on !COMPILE_TEST to not be enabled in 8 - # allmodconfig or allyesconfig configurations 9 - depends on !COMPILE_TEST 10 - default n 11 - help 12 - Enable prototype uAPI under general discussion before they are 13 - finalized. Such prototypes may be withdrawn or substantially 14 - changed before release. They are only enabled here so that a wide 15 - number of interested parties (userspace driver developers) can 16 - verify that the uAPI meet their expectations. These uAPI should 17 - never be used in production. 18 - 19 - Recommended for driver developers _only_. 20 - 21 - If in the slightest bit of doubt, say "N".
+10 -2
drivers/gpu/drm/i915/Makefile
··· 47 47 i915_switcheroo.o \ 48 48 i915_sysfs.o \ 49 49 i915_utils.o \ 50 + intel_clock_gating.o \ 50 51 intel_device_info.o \ 51 52 intel_memory_region.o \ 52 53 intel_pcode.o \ 53 - intel_pm.o \ 54 54 intel_region_ttm.o \ 55 55 intel_runtime_pm.o \ 56 56 intel_sbi.o \ ··· 369 369 obj-$(CONFIG_DRM_I915) += i915.o 370 370 obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o 371 371 372 + # kernel-doc test 373 + # 374 + # Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build 375 + ifdef CONFIG_DRM_I915_WERROR 376 + cmd_checkdoc = $(srctree)/scripts/kernel-doc -none $< 377 + endif 378 + 372 379 # header test 373 380 374 381 # exclude some broken headers from the test coverage ··· 387 380 $(shell cd $(srctree)/$(src) && find * -name '*.h'))) 388 381 389 382 quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@) 390 - cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@ 383 + cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; \ 384 + $(srctree)/scripts/kernel-doc -none $<; touch $@ 391 385 392 386 $(obj)/%.hdrtest: $(src)/%.h FORCE 393 387 $(call if_changed_dep,hdrtest)
+63 -10
drivers/gpu/drm/i915/display/hsw_ips.c
··· 14 14 { 15 15 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 16 16 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 17 + u32 val; 17 18 18 19 if (!crtc_state->ips_enabled) 19 20 return; ··· 27 26 drm_WARN_ON(&i915->drm, 28 27 !(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); 29 28 29 + val = IPS_ENABLE; 30 + 31 + if (i915->display.ips.false_color) 32 + val |= IPS_FALSE_COLOR; 33 + 30 34 if (IS_BROADWELL(i915)) { 31 35 drm_WARN_ON(&i915->drm, 32 36 snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 33 - IPS_ENABLE | IPS_PCODE_CONTROL)); 37 + val | IPS_PCODE_CONTROL)); 34 38 /* 35 39 * Quoting Art Runyan: "its not safe to expect any particular 36 40 * value in IPS_CTL bit 31 after enabling IPS through the ··· 43 37 * so we need to just enable it and continue on. 44 38 */ 45 39 } else { 46 - intel_de_write(i915, IPS_CTL, IPS_ENABLE); 40 + intel_de_write(i915, IPS_CTL, val); 47 41 /* 48 42 * The bit only becomes 1 in the next vblank, so this wait here 49 43 * is essentially intel_wait_for_vblank. 
If we don't have this ··· 274 268 } 275 269 } 276 270 271 + static int hsw_ips_debugfs_false_color_get(void *data, u64 *val) 272 + { 273 + struct intel_crtc *crtc = data; 274 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 275 + 276 + *val = i915->display.ips.false_color; 277 + 278 + return 0; 279 + } 280 + 281 + static int hsw_ips_debugfs_false_color_set(void *data, u64 val) 282 + { 283 + struct intel_crtc *crtc = data; 284 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 285 + struct intel_crtc_state *crtc_state; 286 + int ret; 287 + 288 + ret = drm_modeset_lock(&crtc->base.mutex, NULL); 289 + if (ret) 290 + return ret; 291 + 292 + i915->display.ips.false_color = val; 293 + 294 + crtc_state = to_intel_crtc_state(crtc->base.state); 295 + 296 + if (!crtc_state->hw.active) 297 + goto unlock; 298 + 299 + if (crtc_state->uapi.commit && 300 + !try_wait_for_completion(&crtc_state->uapi.commit->hw_done)) 301 + goto unlock; 302 + 303 + hsw_ips_enable(crtc_state); 304 + 305 + unlock: 306 + drm_modeset_unlock(&crtc->base.mutex); 307 + 308 + return ret; 309 + } 310 + 311 + DEFINE_DEBUGFS_ATTRIBUTE(hsw_ips_debugfs_false_color_fops, 312 + hsw_ips_debugfs_false_color_get, 313 + hsw_ips_debugfs_false_color_set, 314 + "%llu\n"); 315 + 277 316 static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused) 278 317 { 279 - struct drm_i915_private *i915 = m->private; 318 + struct intel_crtc *crtc = m->private; 319 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 280 320 intel_wakeref_t wakeref; 281 - 282 - if (!HAS_IPS(i915)) 283 - return -ENODEV; 284 321 285 322 wakeref = intel_runtime_pm_get(&i915->runtime_pm); 286 323 ··· 346 297 347 298 DEFINE_SHOW_ATTRIBUTE(hsw_ips_debugfs_status); 348 299 349 - void hsw_ips_debugfs_register(struct drm_i915_private *i915) 300 + void hsw_ips_crtc_debugfs_add(struct intel_crtc *crtc) 350 301 { 351 - struct drm_minor *minor = i915->drm.primary; 302 + if (!hsw_crtc_supports_ips(crtc)) 303 + return; 352 304 
353 - debugfs_create_file("i915_ips_status", 0444, minor->debugfs_root, 354 - i915, &hsw_ips_debugfs_status_fops); 305 + debugfs_create_file("i915_ips_false_color", 0644, crtc->base.debugfs_entry, 306 + crtc, &hsw_ips_debugfs_false_color_fops); 307 + 308 + debugfs_create_file("i915_ips_status", 0444, crtc->base.debugfs_entry, 309 + crtc, &hsw_ips_debugfs_status_fops); 355 310 }
+1 -2
drivers/gpu/drm/i915/display/hsw_ips.h
··· 8 8 9 9 #include <linux/types.h> 10 10 11 - struct drm_i915_private; 12 11 struct intel_atomic_state; 13 12 struct intel_crtc; 14 13 struct intel_crtc_state; ··· 22 23 int hsw_ips_compute_config(struct intel_atomic_state *state, 23 24 struct intel_crtc *crtc); 24 25 void hsw_ips_get_config(struct intel_crtc_state *crtc_state); 25 - void hsw_ips_debugfs_register(struct drm_i915_private *i915); 26 + void hsw_ips_crtc_debugfs_add(struct intel_crtc *crtc); 26 27 27 28 #endif /* __HSW_IPS_H__ */
-2
drivers/gpu/drm/i915/display/icl_dsi.c
··· 1552 1552 if (crtc_state->dsc.slice_count > 1) 1553 1553 crtc_state->dsc.dsc_split = true; 1554 1554 1555 - vdsc_cfg->convert_rgb = true; 1556 - 1557 1555 /* FIXME: initialize from VBT */ 1558 1556 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1559 1557
+1
drivers/gpu/drm/i915/display/intel_crt.c
··· 44 44 #include "intel_de.h" 45 45 #include "intel_display_types.h" 46 46 #include "intel_fdi.h" 47 + #include "intel_fdi_regs.h" 47 48 #include "intel_fifo_underrun.h" 48 49 #include "intel_gmbus.h" 49 50 #include "intel_hotplug.h"
+2 -2
drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
··· 123 123 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", 124 124 }; 125 125 126 - static const char *output_formats(enum intel_output_format format) 126 + const char *intel_output_format_name(enum intel_output_format format) 127 127 { 128 128 if (format >= ARRAY_SIZE(output_format_str)) 129 129 return "invalid"; ··· 181 181 "active: %s, output_types: %s (0x%x), output format: %s\n", 182 182 str_yes_no(pipe_config->hw.active), 183 183 buf, pipe_config->output_types, 184 - output_formats(pipe_config->output_format)); 184 + intel_output_format_name(pipe_config->output_format)); 185 185 186 186 drm_dbg_kms(&i915->drm, 187 187 "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+2
drivers/gpu/drm/i915/display/intel_crtc_state_dump.h
··· 8 8 9 9 struct intel_crtc_state; 10 10 struct intel_atomic_state; 11 + enum intel_output_format; 11 12 12 13 void intel_crtc_state_dump(const struct intel_crtc_state *crtc_state, 13 14 struct intel_atomic_state *state, 14 15 const char *context); 16 + const char *intel_output_format_name(enum intel_output_format format); 15 17 16 18 #endif /* __INTEL_CRTC_STATE_H__ */
+28 -43
drivers/gpu/drm/i915/display/intel_ddi.c
··· 2720 2720 const struct drm_connector_state *old_conn_state) 2721 2721 { 2722 2722 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2723 - struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2724 - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); 2725 - bool is_tc_port = intel_phy_is_tc(dev_priv, phy); 2726 2723 struct intel_crtc *slave_crtc; 2727 2724 2728 2725 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) { ··· 2769 2772 else 2770 2773 intel_ddi_post_disable_dp(state, encoder, old_crtc_state, 2771 2774 old_conn_state); 2775 + } 2776 + 2777 + static void intel_ddi_post_pll_disable(struct intel_atomic_state *state, 2778 + struct intel_encoder *encoder, 2779 + const struct intel_crtc_state *old_crtc_state, 2780 + const struct drm_connector_state *old_conn_state) 2781 + { 2782 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 2783 + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2784 + enum phy phy = intel_port_to_phy(i915, encoder->port); 2785 + bool is_tc_port = intel_phy_is_tc(i915, phy); 2772 2786 2773 2787 main_link_aux_power_domain_put(dig_port, old_crtc_state); 2774 2788 ··· 3061 3053 } 3062 3054 3063 3055 static void 3064 - intel_ddi_update_prepare(struct intel_atomic_state *state, 3065 - struct intel_encoder *encoder, 3066 - struct intel_crtc *crtc) 3067 - { 3068 - struct drm_i915_private *i915 = to_i915(state->base.dev); 3069 - struct intel_crtc_state *crtc_state = 3070 - crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL; 3071 - int required_lanes = crtc_state ? 
crtc_state->lane_count : 1; 3072 - 3073 - drm_WARN_ON(state->base.dev, crtc && crtc->active); 3074 - 3075 - intel_tc_port_get_link(enc_to_dig_port(encoder), 3076 - required_lanes); 3077 - if (crtc_state && crtc_state->hw.active) { 3078 - struct intel_crtc *slave_crtc; 3079 - 3080 - intel_update_active_dpll(state, crtc, encoder); 3081 - 3082 - for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, 3083 - intel_crtc_bigjoiner_slave_pipes(crtc_state)) 3084 - intel_update_active_dpll(state, slave_crtc, encoder); 3085 - } 3086 - } 3087 - 3088 - static void 3089 - intel_ddi_update_complete(struct intel_atomic_state *state, 3090 - struct intel_encoder *encoder, 3091 - struct intel_crtc *crtc) 3092 - { 3093 - intel_tc_port_put_link(enc_to_dig_port(encoder)); 3094 - } 3095 - 3096 - static void 3097 3056 intel_ddi_pre_pll_enable(struct intel_atomic_state *state, 3098 3057 struct intel_encoder *encoder, 3099 3058 const struct intel_crtc_state *crtc_state, ··· 3071 3096 enum phy phy = intel_port_to_phy(dev_priv, encoder->port); 3072 3097 bool is_tc_port = intel_phy_is_tc(dev_priv, phy); 3073 3098 3074 - if (is_tc_port) 3099 + if (is_tc_port) { 3100 + struct intel_crtc *master_crtc = 3101 + to_intel_crtc(crtc_state->uapi.crtc); 3102 + struct intel_crtc *slave_crtc; 3103 + 3075 3104 intel_tc_port_get_link(dig_port, crtc_state->lane_count); 3105 + 3106 + intel_update_active_dpll(state, master_crtc, encoder); 3107 + 3108 + for_each_intel_crtc_in_pipe_mask(&dev_priv->drm, slave_crtc, 3109 + intel_crtc_bigjoiner_slave_pipes(crtc_state)) 3110 + intel_update_active_dpll(state, slave_crtc, encoder); 3111 + } 3076 3112 3077 3113 main_link_aux_power_domain_get(dig_port, crtc_state); 3078 3114 ··· 3829 3843 3830 3844 intel_dp_encoder_flush_work(encoder); 3831 3845 if (intel_phy_is_tc(i915, phy)) 3832 - intel_tc_port_flush_work(dig_port); 3846 + intel_tc_port_cleanup(dig_port); 3833 3847 intel_display_power_flush_work(i915); 3834 3848 3835 3849 drm_encoder_cleanup(encoder); ··· 4270 4284 
if (!intel_phy_is_tc(i915, phy)) 4271 4285 return; 4272 4286 4273 - intel_tc_port_flush_work(dig_port); 4287 + intel_tc_port_cleanup(dig_port); 4274 4288 } 4275 4289 4276 4290 #define port_tc_name(port) ((port) - PORT_TC1 + '1') ··· 4384 4398 encoder->pre_pll_enable = intel_ddi_pre_pll_enable; 4385 4399 encoder->pre_enable = intel_ddi_pre_enable; 4386 4400 encoder->disable = intel_disable_ddi; 4401 + encoder->post_pll_disable = intel_ddi_post_pll_disable; 4387 4402 encoder->post_disable = intel_ddi_post_disable; 4388 4403 encoder->update_pipe = intel_ddi_update_pipe; 4389 4404 encoder->get_hw_state = intel_ddi_get_hw_state; ··· 4528 4541 is_legacy ? "legacy" : "non-legacy"); 4529 4542 } 4530 4543 4531 - intel_tc_port_init(dig_port, is_legacy); 4532 - 4533 - encoder->update_prepare = intel_ddi_update_prepare; 4534 - encoder->update_complete = intel_ddi_update_complete; 4544 + if (intel_tc_port_init(dig_port, is_legacy) < 0) 4545 + goto err; 4535 4546 } 4536 4547 4537 4548 drm_WARN_ON(&dev_priv->drm, port > PORT_I);
+14 -81
drivers/gpu/drm/i915/display/intel_display.c
··· 63 63 #include "intel_audio.h" 64 64 #include "intel_bw.h" 65 65 #include "intel_cdclk.h" 66 + #include "intel_clock_gating.h" 66 67 #include "intel_color.h" 67 68 #include "intel_crt.h" 68 69 #include "intel_crtc.h" ··· 106 105 #include "intel_pcode.h" 107 106 #include "intel_pipe_crc.h" 108 107 #include "intel_plane_initial.h" 109 - #include "intel_pm.h" 110 108 #include "intel_pps.h" 111 109 #include "intel_psr.h" 112 110 #include "intel_quirks.h" ··· 850 850 */ 851 851 intel_pps_unlock_regs_wa(i915); 852 852 intel_modeset_init_hw(i915); 853 - intel_init_clock_gating(i915); 853 + intel_clock_gating_init(i915); 854 854 intel_hpd_init(i915); 855 855 856 856 ret = __intel_display_resume(i915, state, ctx); ··· 1320 1320 intel_frontbuffer_flip(dev_priv, fb_bits); 1321 1321 } 1322 1322 1323 - /* 1324 - * intel_connector_primary_encoder - get the primary encoder for a connector 1325 - * @connector: connector for which to return the encoder 1326 - * 1327 - * Returns the primary encoder for a connector. There is a 1:1 mapping from 1328 - * all connectors to their encoder, except for DP-MST connectors which have 1329 - * both a virtual and a primary encoder. These DP-MST primary encoders can be 1330 - * pointed to by as many DP-MST connectors as there are pipes. 
1331 - */ 1332 - static struct intel_encoder * 1333 - intel_connector_primary_encoder(struct intel_connector *connector) 1334 - { 1335 - struct intel_encoder *encoder; 1336 - 1337 - if (connector->mst_port) 1338 - return &dp_to_dig_port(connector->mst_port)->base; 1339 - 1340 - encoder = intel_attached_encoder(connector); 1341 - drm_WARN_ON(connector->base.dev, !encoder); 1342 - 1343 - return encoder; 1344 - } 1345 - 1346 1323 static void intel_encoders_update_prepare(struct intel_atomic_state *state) 1347 1324 { 1348 1325 struct drm_i915_private *i915 = to_i915(state->base.dev); 1349 1326 struct intel_crtc_state *new_crtc_state, *old_crtc_state; 1350 1327 struct intel_crtc *crtc; 1351 - struct drm_connector_state *new_conn_state; 1352 - struct drm_connector *connector; 1353 1328 int i; 1354 1329 1355 1330 /* ··· 1339 1364 new_crtc_state->shared_dpll = old_crtc_state->shared_dpll; 1340 1365 new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state; 1341 1366 } 1342 - } 1343 - 1344 - if (!state->modeset) 1345 - return; 1346 - 1347 - for_each_new_connector_in_state(&state->base, connector, new_conn_state, 1348 - i) { 1349 - struct intel_connector *intel_connector; 1350 - struct intel_encoder *encoder; 1351 - struct intel_crtc *crtc; 1352 - 1353 - if (!intel_connector_needs_modeset(state, connector)) 1354 - continue; 1355 - 1356 - intel_connector = to_intel_connector(connector); 1357 - encoder = intel_connector_primary_encoder(intel_connector); 1358 - if (!encoder->update_prepare) 1359 - continue; 1360 - 1361 - crtc = new_conn_state->crtc ? 
1362 - to_intel_crtc(new_conn_state->crtc) : NULL; 1363 - encoder->update_prepare(state, encoder, crtc); 1364 - } 1365 - } 1366 - 1367 - static void intel_encoders_update_complete(struct intel_atomic_state *state) 1368 - { 1369 - struct drm_connector_state *new_conn_state; 1370 - struct drm_connector *connector; 1371 - int i; 1372 - 1373 - if (!state->modeset) 1374 - return; 1375 - 1376 - for_each_new_connector_in_state(&state->base, connector, new_conn_state, 1377 - i) { 1378 - struct intel_connector *intel_connector; 1379 - struct intel_encoder *encoder; 1380 - struct intel_crtc *crtc; 1381 - 1382 - if (!intel_connector_needs_modeset(state, connector)) 1383 - continue; 1384 - 1385 - intel_connector = to_intel_connector(connector); 1386 - encoder = intel_connector_primary_encoder(intel_connector); 1387 - if (!encoder->update_complete) 1388 - continue; 1389 - 1390 - crtc = new_conn_state->crtc ? 1391 - to_intel_crtc(new_conn_state->crtc) : NULL; 1392 - encoder->update_complete(state, encoder, crtc); 1393 1367 } 1394 1368 } 1395 1369 ··· 1829 1905 1830 1906 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 1831 1907 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 1908 + 1909 + intel_disable_shared_dpll(old_crtc_state); 1832 1910 } 1833 1911 1834 1912 static void hsw_crtc_disable(struct intel_atomic_state *state, ··· 1848 1922 intel_encoders_disable(state, crtc); 1849 1923 intel_encoders_post_disable(state, crtc); 1850 1924 } 1925 + 1926 + intel_disable_shared_dpll(old_crtc_state); 1927 + 1928 + intel_encoders_post_pll_disable(state, crtc); 1851 1929 1852 1930 intel_dmc_disable_pipe(i915, crtc->pipe); 1853 1931 } ··· 6902 6972 intel_atomic_get_new_crtc_state(state, crtc); 6903 6973 bool modeset = intel_crtc_needs_modeset(new_crtc_state); 6904 6974 6975 + if (old_crtc_state->inherited || 6976 + intel_crtc_needs_modeset(new_crtc_state)) { 6977 + if (HAS_DPT(i915)) 6978 + intel_dpt_configure(crtc); 6979 + } 6980 + 6905 6981 if (!modeset) { 6906 
6982 if (new_crtc_state->preload_luts && 6907 6983 intel_crtc_needs_color_update(new_crtc_state)) ··· 6971 7035 dev_priv->display.funcs.display->crtc_disable(state, crtc); 6972 7036 crtc->active = false; 6973 7037 intel_fbc_disable(crtc); 6974 - intel_disable_shared_dpll(old_crtc_state); 6975 7038 6976 7039 if (!new_crtc_state->hw.active) 6977 7040 intel_initial_watermarks(state, crtc); ··· 7368 7433 7369 7434 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 7370 7435 dev_priv->display.funcs.display->commit_modeset_enables(state); 7371 - 7372 - intel_encoders_update_complete(state); 7373 7436 7374 7437 if (state->modeset) 7375 7438 intel_set_cdclk_post_plane_update(state);
-7
drivers/gpu/drm/i915/display/intel_display.h
··· 164 164 I915_MAX_TC_PORTS 165 165 }; 166 166 167 - enum tc_port_mode { 168 - TC_PORT_DISCONNECTED, 169 - TC_PORT_TBT_ALT, 170 - TC_PORT_DP_ALT, 171 - TC_PORT_LEGACY, 172 - }; 173 - 174 167 enum aux_ch { 175 168 AUX_CH_NONE = -1, 176 169
+4
drivers/gpu/drm/i915/display/intel_display_core.h
··· 419 419 } hti; 420 420 421 421 struct { 422 + bool false_color; 423 + } ips; 424 + 425 + struct { 422 426 struct i915_power_domains domains; 423 427 424 428 /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
+81 -2
drivers/gpu/drm/i915/display/intel_display_debugfs.c
··· 13 13 #include "i915_irq.h" 14 14 #include "i915_reg.h" 15 15 #include "intel_de.h" 16 + #include "intel_crtc_state_dump.h" 16 17 #include "intel_display_debugfs.h" 17 18 #include "intel_display_power.h" 18 19 #include "intel_display_power_well.h" ··· 29 28 #include "intel_hotplug.h" 30 29 #include "intel_panel.h" 31 30 #include "intel_psr.h" 31 + #include "intel_psr_regs.h" 32 32 #include "intel_sprite.h" 33 33 #include "intel_wm.h" 34 34 ··· 1094 1092 ARRAY_SIZE(intel_display_debugfs_list), 1095 1093 minor->debugfs_root, minor); 1096 1094 1097 - hsw_ips_debugfs_register(i915); 1098 1095 intel_dmc_debugfs_register(i915); 1099 1096 intel_fbc_debugfs_register(i915); 1100 1097 intel_hpd_debugfs_register(i915); ··· 1236 1235 str_yes_no(crtc_state->dsc.compression_enable)); 1237 1236 seq_printf(m, "DSC_Sink_Support: %s\n", 1238 1237 str_yes_no(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd))); 1238 + seq_printf(m, "DSC_Output_Format_Sink_Support: RGB: %s YCBCR420: %s YCBCR444: %s\n", 1239 + str_yes_no(drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd, 1240 + DP_DSC_RGB)), 1241 + str_yes_no(drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd, 1242 + DP_DSC_YCbCr420_Native)), 1243 + str_yes_no(drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd, 1244 + DP_DSC_YCbCr444))); 1239 1245 seq_printf(m, "Force_DSC_Enable: %s\n", 1240 1246 str_yes_no(intel_dp->force_dsc_en)); 1241 1247 if (!intel_dp_is_edp(intel_dp)) ··· 1368 1360 .write = i915_dsc_bpc_write 1369 1361 }; 1370 1362 1363 + static int i915_dsc_output_format_show(struct seq_file *m, void *data) 1364 + { 1365 + struct drm_connector *connector = m->private; 1366 + struct drm_device *dev = connector->dev; 1367 + struct drm_crtc *crtc; 1368 + struct intel_crtc_state *crtc_state; 1369 + struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); 1370 + int ret; 1371 + 1372 + if (!encoder) 1373 + return -ENODEV; 1374 + 1375 + ret = 
drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex); 1376 + if (ret) 1377 + return ret; 1378 + 1379 + crtc = connector->state->crtc; 1380 + if (connector->status != connector_status_connected || !crtc) { 1381 + ret = -ENODEV; 1382 + goto out; 1383 + } 1384 + 1385 + crtc_state = to_intel_crtc_state(crtc->state); 1386 + seq_printf(m, "DSC_Output_Format: %s\n", 1387 + intel_output_format_name(crtc_state->output_format)); 1388 + 1389 + out: drm_modeset_unlock(&dev->mode_config.connection_mutex); 1390 + 1391 + return ret; 1392 + } 1393 + 1394 + static ssize_t i915_dsc_output_format_write(struct file *file, 1395 + const char __user *ubuf, 1396 + size_t len, loff_t *offp) 1397 + { 1398 + struct drm_connector *connector = 1399 + ((struct seq_file *)file->private_data)->private; 1400 + struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector)); 1401 + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1402 + int dsc_output_format = 0; 1403 + int ret; 1404 + 1405 + ret = kstrtoint_from_user(ubuf, len, 0, &dsc_output_format); 1406 + if (ret < 0) 1407 + return ret; 1408 + 1409 + intel_dp->force_dsc_output_format = dsc_output_format; 1410 + *offp += len; 1411 + 1412 + return len; 1413 + } 1414 + 1415 + static int i915_dsc_output_format_open(struct inode *inode, 1416 + struct file *file) 1417 + { 1418 + return single_open(file, i915_dsc_output_format_show, inode->i_private); 1419 + } 1420 + 1421 + static const struct file_operations i915_dsc_output_format_fops = { 1422 + .owner = THIS_MODULE, 1423 + .open = i915_dsc_output_format_open, 1424 + .read = seq_read, 1425 + .llseek = seq_lseek, 1426 + .release = single_release, 1427 + .write = i915_dsc_output_format_write 1428 + }; 1429 + 1371 1430 /* 1372 1431 * Returns the Current CRTC's bpc. 
1373 1432 * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc ··· 1471 1396 1472 1397 /** 1473 1398 * intel_connector_debugfs_add - add i915 specific connector debugfs files 1474 - * @connector: pointer to a registered drm_connector 1399 + * @intel_connector: pointer to a registered drm_connector 1475 1400 * 1476 1401 * Cleanup will be done by drm_connector_unregister() through a call to 1477 1402 * drm_debugfs_connector_remove(). ··· 1509 1434 1510 1435 debugfs_create_file("i915_dsc_bpc", 0644, root, 1511 1436 connector, &i915_dsc_bpc_fops); 1437 + 1438 + debugfs_create_file("i915_dsc_output_format", 0644, root, 1439 + connector, &i915_dsc_output_format_fops); 1512 1440 } 1513 1441 1514 1442 if (connector->connector_type == DRM_MODE_CONNECTOR_DSI || ··· 1539 1461 crtc_updates_add(crtc); 1540 1462 intel_drrs_crtc_debugfs_add(crtc); 1541 1463 intel_fbc_crtc_debugfs_add(crtc); 1464 + hsw_ips_crtc_debugfs_add(crtc); 1542 1465 1543 1466 debugfs_create_file("i915_current_bpc", 0444, root, crtc, 1544 1467 &i915_current_bpc_fops);
+7 -3
drivers/gpu/drm/i915/display/intel_display_power.c
··· 19 19 #include "intel_mchbar_regs.h" 20 20 #include "intel_pch_refclk.h" 21 21 #include "intel_pcode.h" 22 + #include "intel_pps_regs.h" 22 23 #include "intel_snps_phy.h" 23 24 #include "skl_watermark.h" 25 + #include "skl_watermark_regs.h" 24 26 #include "vlv_sideband.h" 25 27 26 28 #define for_each_power_domain_well(__dev_priv, __power_well, __domain) \ ··· 699 697 } 700 698 701 699 /** 702 - * intel_display_power_put_async - release a power domain reference asynchronously 700 + * __intel_display_power_put_async - release a power domain reference asynchronously 703 701 * @i915: i915 device instance 704 702 * @domain: power domain to reference 705 703 * @wakeref: wakeref acquired for the reference that is being released ··· 1184 1182 "CPU PWM2 enabled\n"); 1185 1183 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 1186 1184 "PCH PWM1 enabled\n"); 1187 - I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 1188 - "Utility pin enabled\n"); 1185 + I915_STATE_WARN((intel_de_read(dev_priv, UTIL_PIN_CTL) & 1186 + (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == 1187 + (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), 1188 + "Utility pin enabled in PWM mode\n"); 1189 1189 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, 1190 1190 "PCH GTC enabled\n"); 1191 1191
+5 -2
drivers/gpu/drm/i915/display/intel_display_power_well.c
··· 15 15 #include "intel_dkl_phy.h" 16 16 #include "intel_dkl_phy_regs.h" 17 17 #include "intel_dmc.h" 18 + #include "intel_dp_aux_regs.h" 18 19 #include "intel_dpio_phy.h" 19 20 #include "intel_dpll.h" 20 21 #include "intel_hotplug.h" ··· 819 818 static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) 820 819 { 821 820 drm_WARN_ONCE(&dev_priv->drm, 822 - intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 823 - "Backlight is not disabled.\n"); 821 + (intel_de_read(dev_priv, UTIL_PIN_CTL) & 822 + (UTIL_PIN_ENABLE | UTIL_PIN_MODE_MASK)) == 823 + (UTIL_PIN_ENABLE | UTIL_PIN_MODE_PWM), 824 + "Utility pin enabled in PWM mode\n"); 824 825 drm_WARN_ONCE(&dev_priv->drm, 825 826 (intel_de_read(dev_priv, DC_STATE_EN) & 826 827 DC_STATE_EN_UPTO_DC6),
+4 -17
drivers/gpu/drm/i915/display/intel_display_types.h
··· 60 60 struct intel_ddi_buf_trans; 61 61 struct intel_fbc; 62 62 struct intel_connector; 63 + struct intel_tc_port; 63 64 64 65 /* 65 66 * Display related stuff ··· 170 169 int (*compute_config_late)(struct intel_encoder *, 171 170 struct intel_crtc_state *, 172 171 struct drm_connector_state *); 173 - void (*update_prepare)(struct intel_atomic_state *, 174 - struct intel_encoder *, 175 - struct intel_crtc *); 176 172 void (*pre_pll_enable)(struct intel_atomic_state *, 177 173 struct intel_encoder *, 178 174 const struct intel_crtc_state *, ··· 182 184 struct intel_encoder *, 183 185 const struct intel_crtc_state *, 184 186 const struct drm_connector_state *); 185 - void (*update_complete)(struct intel_atomic_state *, 186 - struct intel_encoder *, 187 - struct intel_crtc *); 188 187 void (*disable)(struct intel_atomic_state *, 189 188 struct intel_encoder *, 190 189 const struct intel_crtc_state *, ··· 1152 1157 bool has_psr2; 1153 1158 bool enable_psr2_sel_fetch; 1154 1159 bool req_psr2_sdp_prior_scanline; 1160 + bool wm_level_disabled; 1155 1161 u32 dc3co_exitline; 1156 1162 u16 su_y_granularity; 1157 1163 struct drm_dp_vsc_sdp psr_vsc; ··· 1736 1740 1737 1741 /* Display stream compression testing */ 1738 1742 bool force_dsc_en; 1743 + int force_dsc_output_format; 1739 1744 int force_dsc_bpc; 1740 1745 1741 1746 bool hobl_failed; ··· 1777 1780 intel_wakeref_t ddi_io_wakeref; 1778 1781 intel_wakeref_t aux_wakeref; 1779 1782 1780 - struct mutex tc_lock; /* protects the TypeC port mode */ 1781 - intel_wakeref_t tc_lock_wakeref; 1782 - enum intel_display_power_domain tc_lock_power_domain; 1783 - struct delayed_work tc_disconnect_phy_work; 1784 - int tc_link_refcount; 1785 - bool tc_legacy_port:1; 1786 - char tc_port_name[8]; 1787 - enum tc_port_mode tc_mode; 1788 - enum tc_port_mode tc_init_mode; 1789 - enum phy_fia tc_phy_fia; 1790 - u8 tc_phy_fia_idx; 1783 + struct intel_tc_port *tc; 1791 1784 1792 1785 /* protects num_hdcp_streams reference count, 
hdcp_port_data and hdcp_auth_status */ 1793 1786 struct mutex hdcp_mutex;
+45 -3
drivers/gpu/drm/i915/display/intel_dp.c
··· 76 76 #include "intel_tc.h" 77 77 #include "intel_vdsc.h" 78 78 #include "intel_vrr.h" 79 + #include "intel_crtc_state_dump.h" 79 80 80 81 /* DP DSC throughput values used for slice count calculations KPixels/s */ 81 82 #define DP_DSC_PEAK_PIXEL_RATE 2720000 ··· 834 833 { 835 834 struct intel_dp *intel_dp = intel_attached_dp(connector); 836 835 836 + if (intel_dp->force_dsc_output_format) 837 + return intel_dp->force_dsc_output_format; 838 + 837 839 if (!connector->base.ycbcr_420_allowed || !ycbcr_420_output) 838 840 return INTEL_OUTPUT_FORMAT_RGB; 839 841 ··· 1494 1490 vdsc_cfg->dsc_version_minor = 1495 1491 min(intel_dp_source_dsc_version_minor(intel_dp), 1496 1492 intel_dp_sink_dsc_version_minor(intel_dp)); 1497 - 1498 - vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 1499 - DP_DSC_RGB; 1493 + if (vdsc_cfg->convert_rgb) 1494 + vdsc_cfg->convert_rgb = 1495 + intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 1496 + DP_DSC_RGB; 1500 1497 1501 1498 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 1502 1499 if (!line_buf_depth) { ··· 1520 1515 return drm_dsc_compute_rc_parameters(vdsc_cfg); 1521 1516 } 1522 1517 1518 + static bool intel_dp_dsc_supports_format(struct intel_dp *intel_dp, 1519 + enum intel_output_format output_format) 1520 + { 1521 + u8 sink_dsc_format; 1522 + 1523 + switch (output_format) { 1524 + case INTEL_OUTPUT_FORMAT_RGB: 1525 + sink_dsc_format = DP_DSC_RGB; 1526 + break; 1527 + case INTEL_OUTPUT_FORMAT_YCBCR444: 1528 + sink_dsc_format = DP_DSC_YCbCr444; 1529 + break; 1530 + case INTEL_OUTPUT_FORMAT_YCBCR420: 1531 + if (min(intel_dp_source_dsc_version_minor(intel_dp), 1532 + intel_dp_sink_dsc_version_minor(intel_dp)) < 2) 1533 + return false; 1534 + sink_dsc_format = DP_DSC_YCbCr420_Native; 1535 + break; 1536 + default: 1537 + return false; 1538 + } 1539 + 1540 + return drm_dp_dsc_sink_supports_format(intel_dp->dsc_dpcd, sink_dsc_format); 1541 + } 1542 + 1523 1543 
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 1524 1544 struct intel_crtc_state *pipe_config, 1525 1545 struct drm_connector_state *conn_state, ··· 1563 1533 intel_dp_supports_fec(intel_dp, pipe_config); 1564 1534 1565 1535 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 1536 + return -EINVAL; 1537 + 1538 + if (!intel_dp_dsc_supports_format(intel_dp, pipe_config->output_format)) 1566 1539 return -EINVAL; 1567 1540 1568 1541 if (compute_pipe_bpp) ··· 1615 1582 pipe_config->bigjoiner_pipes, 1616 1583 pipe_bpp, 1617 1584 timeslots); 1585 + /* 1586 + * According to DSC 1.2a Section 4.1.1 Table 4.1 the maximum 1587 + * supported PPS value can be 63.9375 and with the further 1588 + * mention that bpp should be programmed double the target bpp 1589 + * restricting our target bpp to be 31.9375 at max 1590 + */ 1591 + if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 1592 + dsc_max_output_bpp = min_t(u16, dsc_max_output_bpp, 31 << 4); 1593 + 1618 1594 if (!dsc_max_output_bpp) { 1619 1595 drm_dbg_kms(&dev_priv->drm, 1620 1596 "Compressed BPP not supported\n");
+30 -3
drivers/gpu/drm/i915/display/intel_dp_aux.c
··· 10 10 #include "intel_de.h" 11 11 #include "intel_display_types.h" 12 12 #include "intel_dp_aux.h" 13 + #include "intel_dp_aux_regs.h" 13 14 #include "intel_pps.h" 14 15 #include "intel_tc.h" 15 16 ··· 119 118 return index ? 0 : 1; 120 119 } 121 120 121 + static int intel_dp_aux_sync_len(void) 122 + { 123 + int precharge = 16; /* 10-16 */ 124 + int preamble = 16; 125 + 126 + return precharge + preamble; 127 + } 128 + 129 + static int intel_dp_aux_fw_sync_len(void) 130 + { 131 + int precharge = 16; /* 10-16 */ 132 + int preamble = 8; 133 + 134 + return precharge + preamble; 135 + } 136 + 137 + static int g4x_dp_aux_precharge_len(void) 138 + { 139 + int precharge_min = 10; 140 + int preamble = 16; 141 + 142 + /* HW wants the length of the extra precharge in 2us units */ 143 + return (intel_dp_aux_sync_len() - 144 + precharge_min - preamble) / 2; 145 + } 146 + 122 147 static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, 123 148 int send_bytes, 124 149 u32 aux_clock_divider) ··· 167 140 timeout | 168 141 DP_AUX_CH_CTL_RECEIVE_ERROR | 169 142 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 170 - (3 << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 143 + (g4x_dp_aux_precharge_len() << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 171 144 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); 172 145 } 173 146 ··· 191 164 DP_AUX_CH_CTL_TIME_OUT_MAX | 192 165 DP_AUX_CH_CTL_RECEIVE_ERROR | 193 166 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 194 - DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | 195 - DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); 167 + DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) | 168 + DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len()); 196 169 197 170 if (intel_tc_port_in_tbt_alt_mode(dig_port)) 198 171 ret |= DP_AUX_CH_CTL_TBT_IO;
+84
drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_DP_AUX_REGS_H__ 7 + #define __INTEL_DP_AUX_REGS_H__ 8 + 9 + #include "intel_display_reg_defs.h" 10 + 11 + /* 12 + * The aux channel provides a way to talk to the signal sink for DDC etc. Max 13 + * packet size supported is 20 bytes in each direction, hence the 5 fixed data 14 + * registers 15 + */ 16 + #define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010) 17 + #define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014) 18 + 19 + #define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110) 20 + #define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114) 21 + 22 + #define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL) 23 + #define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ 24 + 25 + #define _XELPDP_USBC1_AUX_CH_CTL 0x16F210 26 + #define _XELPDP_USBC2_AUX_CH_CTL 0x16F410 27 + #define _XELPDP_USBC3_AUX_CH_CTL 0x16F610 28 + #define _XELPDP_USBC4_AUX_CH_CTL 0x16F810 29 + 30 + #define XELPDP_DP_AUX_CH_CTL(aux_ch) _MMIO(_PICK(aux_ch, \ 31 + _DPA_AUX_CH_CTL, \ 32 + _DPB_AUX_CH_CTL, \ 33 + 0, /* port/aux_ch C is non-existent */ \ 34 + _XELPDP_USBC1_AUX_CH_CTL, \ 35 + _XELPDP_USBC2_AUX_CH_CTL, \ 36 + _XELPDP_USBC3_AUX_CH_CTL, \ 37 + _XELPDP_USBC4_AUX_CH_CTL)) 38 + 39 + #define _XELPDP_USBC1_AUX_CH_DATA1 0x16F214 40 + #define _XELPDP_USBC2_AUX_CH_DATA1 0x16F414 41 + #define _XELPDP_USBC3_AUX_CH_DATA1 0x16F614 42 + #define _XELPDP_USBC4_AUX_CH_DATA1 0x16F814 43 + 44 + #define XELPDP_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PICK(aux_ch, \ 45 + _DPA_AUX_CH_DATA1, \ 46 + _DPB_AUX_CH_DATA1, \ 47 + 0, /* port/aux_ch C is non-existent */ \ 48 + _XELPDP_USBC1_AUX_CH_DATA1, \ 49 + _XELPDP_USBC2_AUX_CH_DATA1, \ 50 + _XELPDP_USBC3_AUX_CH_DATA1, \ 51 + _XELPDP_USBC4_AUX_CH_DATA1) + (i) * 4) 52 + 53 + #define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) 54 + #define 
DP_AUX_CH_CTL_DONE (1 << 30) 55 + #define DP_AUX_CH_CTL_INTERRUPT (1 << 29) 56 + #define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28) 57 + #define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) 58 + #define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) 59 + #define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) 60 + #define DP_AUX_CH_CTL_TIME_OUT_MAX (3 << 26) /* Varies per platform */ 61 + #define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) 62 + #define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) 63 + #define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) 64 + #define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 65 + #define XELPDP_DP_AUX_CH_CTL_POWER_REQUEST REG_BIT(19) 66 + #define XELPDP_DP_AUX_CH_CTL_POWER_STATUS REG_BIT(18) 67 + #define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) 68 + #define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 69 + #define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) 70 + #define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14) 71 + #define DP_AUX_CH_CTL_SYNC_TEST (1 << 13) 72 + #define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12) 73 + #define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) 74 + #define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) 75 + #define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 76 + #define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14) 77 + #define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13) 78 + #define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12) 79 + #define DP_AUX_CH_CTL_TBT_IO (1 << 11) 80 + #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5) 81 + #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5) 82 + #define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1) 83 + 84 + #endif /* __INTEL_DP_AUX_REGS_H__ */
+45 -5
drivers/gpu/drm/i915/display/intel_dp_mst.c
··· 45 45 #include "intel_hotplug.h" 46 46 #include "skl_scaler.h" 47 47 48 + static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp, 49 + const struct drm_display_mode *adjusted_mode, 50 + struct intel_crtc_state *crtc_state, 51 + bool dsc) 52 + { 53 + if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) <= 13 && dsc) { 54 + int output_bpp = bpp; 55 + /* DisplayPort 2 128b/132b, bits per lane is always 32 */ 56 + int symbol_clock = crtc_state->port_clock / 32; 57 + 58 + if (output_bpp * adjusted_mode->crtc_clock >= 59 + symbol_clock * 72) { 60 + drm_dbg_kms(&i915->drm, "UHBR check failed(required bw %d available %d)\n", 61 + output_bpp * adjusted_mode->crtc_clock, symbol_clock * 72); 62 + return -EINVAL; 63 + } 64 + } 65 + 66 + return 0; 67 + } 68 + 48 69 static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder, 49 70 struct intel_crtc_state *crtc_state, 50 71 int max_bpp, ··· 102 81 } 103 82 104 83 for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) { 84 + drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp); 85 + 86 + ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc); 87 + if (ret) 88 + continue; 89 + 105 90 crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, 106 91 dsc ? 
bpp << 4 : bpp, 107 92 dsc); 108 - 109 - drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp); 110 93 111 94 slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr, 112 95 connector->port, ··· 129 104 } 130 105 } 131 106 132 - /* Despite slots are non-zero, we still failed the atomic check */ 133 - if (ret && slots >= 0) 107 + /* We failed to find a proper bpp/timeslots, return error */ 108 + if (ret) 134 109 slots = ret; 135 110 136 111 if (slots < 0) { ··· 257 232 return slots; 258 233 } 259 234 260 - intel_link_compute_m_n(crtc_state->pipe_bpp, 235 + intel_link_compute_m_n(crtc_state->dsc.compressed_bpp, 261 236 crtc_state->lane_count, 262 237 adjusted_mode->crtc_clock, 263 238 crtc_state->port_clock, ··· 646 621 647 622 drm_dbg_kms(&dev_priv->drm, "active links %d\n", 648 623 intel_dp->active_mst_links); 624 + } 625 + 626 + static void intel_mst_post_pll_disable_dp(struct intel_atomic_state *state, 627 + struct intel_encoder *encoder, 628 + const struct intel_crtc_state *old_crtc_state, 629 + const struct drm_connector_state *old_conn_state) 630 + { 631 + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); 632 + struct intel_digital_port *dig_port = intel_mst->primary; 633 + struct intel_dp *intel_dp = &dig_port->dp; 634 + 635 + if (intel_dp->active_mst_links == 0 && 636 + dig_port->base.post_pll_disable) 637 + dig_port->base.post_pll_disable(state, encoder, old_crtc_state, old_conn_state); 649 638 } 650 639 651 640 static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state, ··· 1185 1146 intel_encoder->compute_config_late = intel_dp_mst_compute_config_late; 1186 1147 intel_encoder->disable = intel_mst_disable_dp; 1187 1148 intel_encoder->post_disable = intel_mst_post_disable_dp; 1149 + intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp; 1188 1150 intel_encoder->update_pipe = intel_ddi_update_pipe; 1189 1151 intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; 1190 1152 intel_encoder->pre_enable = 
intel_mst_pre_enable_dp;
+27
drivers/gpu/drm/i915/display/intel_dpt.c
··· 9 9 #include "gt/gen8_ppgtt.h" 10 10 11 11 #include "i915_drv.h" 12 + #include "i915_reg.h" 13 + #include "intel_de.h" 12 14 #include "intel_display_types.h" 13 15 #include "intel_dpt.h" 14 16 #include "intel_fb.h" ··· 303 301 vm->pte_encode = gen8_ggtt_pte_encode; 304 302 305 303 dpt->obj = dpt_obj; 304 + dpt->obj->is_dpt = true; 306 305 307 306 return &dpt->vm; 308 307 } ··· 312 309 { 313 310 struct i915_dpt *dpt = i915_vm_to_dpt(vm); 314 311 312 + dpt->obj->is_dpt = false; 315 313 i915_vm_put(&dpt->vm); 314 + } 315 + 316 + void intel_dpt_configure(struct intel_crtc *crtc) 317 + { 318 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 319 + 320 + if (DISPLAY_VER(i915) == 14) { 321 + enum pipe pipe = crtc->pipe; 322 + enum plane_id plane_id; 323 + 324 + for_each_plane_id_on_crtc(crtc, plane_id) { 325 + if (plane_id == PLANE_CURSOR) 326 + continue; 327 + 328 + intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id), 329 + PLANE_CHICKEN_DISABLE_DPT, 330 + i915->params.enable_dpt ? 0 : PLANE_CHICKEN_DISABLE_DPT); 331 + } 332 + } else if (DISPLAY_VER(i915) == 13) { 333 + intel_de_rmw(i915, CHICKEN_MISC_2, 334 + CHICKEN_MISC_DISABLE_DPT, 335 + i915->params.enable_dpt ? 0 : CHICKEN_MISC_DISABLE_DPT); 336 + } 316 337 }
+2
drivers/gpu/drm/i915/display/intel_dpt.h
··· 10 10 11 11 struct i915_address_space; 12 12 struct i915_vma; 13 + struct intel_crtc; 13 14 struct intel_framebuffer; 14 15 15 16 void intel_dpt_destroy(struct i915_address_space *vm); ··· 20 19 void intel_dpt_resume(struct drm_i915_private *i915); 21 20 struct i915_address_space * 22 21 intel_dpt_create(struct intel_framebuffer *fb); 22 + void intel_dpt_configure(struct intel_crtc *crtc); 23 23 24 24 #endif /* __INTEL_DPT_H__ */
+1
drivers/gpu/drm/i915/display/intel_dsb.c
··· 11 11 #include "intel_de.h" 12 12 #include "intel_display_types.h" 13 13 #include "intel_dsb.h" 14 + #include "intel_dsb_regs.h" 14 15 15 16 struct i915_vma; 16 17
+67
drivers/gpu/drm/i915/display/intel_dsb_regs.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_DSB_REGS_H__ 7 + #define __INTEL_DSB_REGS_H__ 8 + 9 + #include "intel_display_reg_defs.h" 10 + 11 + /* This register controls the Display State Buffer (DSB) engines. */ 12 + #define _DSBSL_INSTANCE_BASE 0x70B00 13 + #define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \ 14 + (pipe) * 0x1000 + (id) * 0x100) 15 + #define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0) 16 + #define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4) 17 + #define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8) 18 + #define DSB_ENABLE REG_BIT(31) 19 + #define DSB_BUF_REITERATE REG_BIT(29) 20 + #define DSB_WAIT_FOR_VBLANK REG_BIT(28) 21 + #define DSB_WAIT_FOR_LINE_IN REG_BIT(27) 22 + #define DSB_HALT REG_BIT(16) 23 + #define DSB_NON_POSTED REG_BIT(8) 24 + #define DSB_STATUS_BUSY REG_BIT(0) 25 + #define DSB_MMIOCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xc) 26 + #define DSB_MMIO_DEAD_CLOCKS_ENABLE REG_BIT(31) 27 + #define DSB_MMIO_DEAD_CLOCKS_COUNT_MASK REG_GENMASK(15, 8) 28 + #define DSB_MMIO_DEAD_CLOCKS_COUNT(x) REG_FIELD_PREP(DSB_MMIO_DEAD_CLOCK_COUNT_MASK, (x)) 29 + #define DSB_MMIO_CYCLES_MASK REG_GENMASK(7, 0) 30 + #define DSB_MMIO_CYCLES(x) REG_FIELD_PREP(DSB_MMIO_CYCLES_MASK, (x)) 31 + #define DSB_POLLFUNC(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x10) 32 + #define DSB_POLL_ENABLE REG_BIT(31) 33 + #define DSB_POLL_WAIT_MASK REG_GENMASK(30, 23) 34 + #define DSB_POLL_WAIT(x) REG_FIELD_PREP(DSB_POLL_WAIT_MASK, (x)) /* usec */ 35 + #define DSB_POLL_COUNT_MASK REG_GENMASK(22, 15) 36 + #define DSB_POLL_COUNT(x) REG_FIELD_PREP(DSB_POLL_COUNT_MASK, (x)) 37 + #define DSB_DEBUG(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x14) 38 + #define DSB_POLLMASK(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x1c) 39 + #define DSB_STATUS(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x24) 40 + #define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, 
id) + 0x28) 41 + #define DSB_ATS_FAULT_INT_EN REG_BIT(20) 42 + #define DSB_GTT_FAULT_INT_EN REG_BIT(19) 43 + #define DSB_RSPTIMEOUT_INT_EN REG_BIT(18) 44 + #define DSB_POLL_ERR_INT_EN REG_BIT(17) 45 + #define DSB_PROG_INT_EN REG_BIT(16) 46 + #define DSB_ATS_FAULT_INT_STATUS REG_BIT(4) 47 + #define DSB_GTT_FAULT_INT_STATUS REG_BIT(3) 48 + #define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2) 49 + #define DSB_POLL_ERR_INT_STATUS REG_BIT(1) 50 + #define DSB_PROG_INT_STATUS REG_BIT(0) 51 + #define DSB_CURRENT_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x2c) 52 + #define DSB_RM_TIMEOUT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x30) 53 + #define DSB_RM_CLAIM_TIMEOUT REG_BIT(31) 54 + #define DSB_RM_READY_TIMEOUT REG_BIT(30) 55 + #define DSB_RM_CLAIM_TIMEOUT_COUNT_MASK REG_GENMASK(23, 16) 56 + #define DSB_RM_CLAIM_TIMEOUT_COUNT(x) REG_FIELD_PREP(DSB_RM_CLAIM_TIMEOUT_COUNT_MASK, (x)) /* clocks */ 57 + #define DSB_RM_READY_TIMEOUT_VALUE_MASK REG_GENMASK(15, 0) 58 + #define DSB_RM_READY_TIMEOUT_VALUE(x) REG_FIELD_PREP(DSB_RM_READY_TIMEOUT_VALUE, (x)) /* usec */ 59 + #define DSB_RMTIMEOUTREG_CAPTURE(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x34) 60 + #define DSB_PMCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x38) 61 + #define DSB_PMCTRL_2(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x3c) 62 + #define DSB_PF_LN_LOWER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x40) 63 + #define DSB_PF_LN_UPPER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x44) 64 + #define DSB_BUFRPT_CNT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x48) 65 + #define DSB_CHICKEN(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xf0) 66 + 67 + #endif /* __INTEL_DSB_REGS_H__ */
+1
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
··· 46 46 #include "intel_dsi.h" 47 47 #include "intel_dsi_vbt.h" 48 48 #include "intel_gmbus_regs.h" 49 + #include "intel_pps_regs.h" 49 50 #include "vlv_dsi.h" 50 51 #include "vlv_dsi_regs.h" 51 52 #include "vlv_sideband.h"
+6 -5
drivers/gpu/drm/i915/display/intel_fb.c
··· 716 716 } 717 717 } 718 718 719 - static bool intel_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier) 719 + bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier) 720 720 { 721 - return DISPLAY_VER(i915) >= 13 && modifier != DRM_FORMAT_MOD_LINEAR; 721 + return HAS_DPT(i915) && modifier != DRM_FORMAT_MOD_LINEAR; 722 722 } 723 723 724 724 bool intel_fb_uses_dpt(const struct drm_framebuffer *fb) 725 725 { 726 - return fb && intel_modifier_uses_dpt(to_i915(fb->dev), fb->modifier); 726 + return fb && to_i915(fb->dev)->params.enable_dpt && 727 + intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier); 727 728 } 728 729 729 730 unsigned int intel_cursor_alignment(const struct drm_i915_private *i915) ··· 1190 1189 { 1191 1190 struct drm_i915_private *i915 = to_i915(fb->base.dev); 1192 1191 1193 - return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR; 1192 + return IS_ALDERLAKE_P(i915) && intel_fb_uses_dpt(&fb->base); 1194 1193 } 1195 1194 1196 1195 static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation) ··· 1706 1705 * The new CCS hash mode makes remapping impossible 1707 1706 */ 1708 1707 if (DISPLAY_VER(dev_priv) < 4 || intel_fb_is_ccs_modifier(modifier) || 1709 - intel_modifier_uses_dpt(dev_priv, modifier)) 1708 + intel_fb_modifier_uses_dpt(dev_priv, modifier)) 1710 1709 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); 1711 1710 else if (DISPLAY_VER(dev_priv) >= 7) 1712 1711 return 256 * 1024;
+1
drivers/gpu/drm/i915/display/intel_fb.h
··· 92 92 struct drm_file *filp, 93 93 const struct drm_mode_fb_cmd2 *user_mode_cmd); 94 94 95 + bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier); 95 96 bool intel_fb_uses_dpt(const struct drm_framebuffer *fb); 96 97 97 98 #endif /* __INTEL_FB_H__ */
+1
drivers/gpu/drm/i915/display/intel_fdi.c
··· 12 12 #include "intel_de.h" 13 13 #include "intel_display_types.h" 14 14 #include "intel_fdi.h" 15 + #include "intel_fdi_regs.h" 15 16 16 17 struct intel_fdi_funcs { 17 18 void (*fdi_link_train)(struct intel_crtc *crtc,
+151
drivers/gpu/drm/i915/display/intel_fdi_regs.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_FDI_REGS_H__ 7 + #define __INTEL_FDI_REGS_H__ 8 + 9 + #include "intel_display_reg_defs.h" 10 + 11 + #define FDI_PLL_BIOS_0 _MMIO(0x46000) 12 + #define FDI_PLL_FB_CLOCK_MASK 0xff 13 + #define FDI_PLL_BIOS_1 _MMIO(0x46004) 14 + #define FDI_PLL_BIOS_2 _MMIO(0x46008) 15 + #define DISPLAY_PORT_PLL_BIOS_0 _MMIO(0x4600c) 16 + #define DISPLAY_PORT_PLL_BIOS_1 _MMIO(0x46010) 17 + #define DISPLAY_PORT_PLL_BIOS_2 _MMIO(0x46014) 18 + 19 + #define FDI_PLL_FREQ_CTL _MMIO(0x46030) 20 + #define FDI_PLL_FREQ_CHANGE_REQUEST (1 << 24) 21 + #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 22 + #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff 23 + 24 + #define _FDI_RXA_CHICKEN 0xc200c 25 + #define _FDI_RXB_CHICKEN 0xc2010 26 + #define FDI_RX_PHASE_SYNC_POINTER_OVR (1 << 1) 27 + #define FDI_RX_PHASE_SYNC_POINTER_EN (1 << 0) 28 + #define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) 29 + 30 + /* CPU: FDI_TX */ 31 + #define _FDI_TXA_CTL 0x60100 32 + #define _FDI_TXB_CTL 0x61100 33 + #define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL) 34 + #define FDI_TX_DISABLE (0 << 31) 35 + #define FDI_TX_ENABLE (1 << 31) 36 + #define FDI_LINK_TRAIN_PATTERN_1 (0 << 28) 37 + #define FDI_LINK_TRAIN_PATTERN_2 (1 << 28) 38 + #define FDI_LINK_TRAIN_PATTERN_IDLE (2 << 28) 39 + #define FDI_LINK_TRAIN_NONE (3 << 28) 40 + #define FDI_LINK_TRAIN_VOLTAGE_0_4V (0 << 25) 41 + #define FDI_LINK_TRAIN_VOLTAGE_0_6V (1 << 25) 42 + #define FDI_LINK_TRAIN_VOLTAGE_0_8V (2 << 25) 43 + #define FDI_LINK_TRAIN_VOLTAGE_1_2V (3 << 25) 44 + #define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0 << 22) 45 + #define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1 << 22) 46 + #define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2 << 22) 47 + #define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3 << 22) 48 + /* ILK always use 400mV 0dB for voltage swing and pre-emphasis level. 49 + SNB has different settings. 
*/ 50 + /* SNB A-stepping */ 51 + #define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22) 52 + #define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22) 53 + #define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22) 54 + #define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22) 55 + /* SNB B-stepping */ 56 + #define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0 << 22) 57 + #define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a << 22) 58 + #define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39 << 22) 59 + #define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38 << 22) 60 + #define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f << 22) 61 + #define FDI_DP_PORT_WIDTH_SHIFT 19 62 + #define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT) 63 + #define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT) 64 + #define FDI_TX_ENHANCE_FRAME_ENABLE (1 << 18) 65 + /* Ironlake: hardwired to 1 */ 66 + #define FDI_TX_PLL_ENABLE (1 << 14) 67 + 68 + /* Ivybridge has different bits for lolz */ 69 + #define FDI_LINK_TRAIN_PATTERN_1_IVB (0 << 8) 70 + #define FDI_LINK_TRAIN_PATTERN_2_IVB (1 << 8) 71 + #define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2 << 8) 72 + #define FDI_LINK_TRAIN_NONE_IVB (3 << 8) 73 + 74 + /* both Tx and Rx */ 75 + #define FDI_COMPOSITE_SYNC (1 << 11) 76 + #define FDI_LINK_TRAIN_AUTO (1 << 10) 77 + #define FDI_SCRAMBLING_ENABLE (0 << 7) 78 + #define FDI_SCRAMBLING_DISABLE (1 << 7) 79 + 80 + /* FDI_RX, FDI_X is hard-wired to Transcoder_X */ 81 + #define _FDI_RXA_CTL 0xf000c 82 + #define _FDI_RXB_CTL 0xf100c 83 + #define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) 84 + #define FDI_RX_ENABLE (1 << 31) 85 + /* train, dp width same as FDI_TX */ 86 + #define FDI_FS_ERRC_ENABLE (1 << 27) 87 + #define FDI_FE_ERRC_ENABLE (1 << 26) 88 + #define FDI_RX_POLARITY_REVERSED_LPT (1 << 16) 89 + #define FDI_8BPC (0 << 16) 90 + #define FDI_10BPC (1 << 16) 91 + #define FDI_6BPC (2 << 16) 92 + #define FDI_12BPC (3 << 16) 93 + #define FDI_RX_LINK_REVERSAL_OVERRIDE (1 << 15) 94 + #define FDI_DMI_LINK_REVERSE_MASK (1 << 14) 95 + 
#define FDI_RX_PLL_ENABLE (1 << 13) 96 + #define FDI_FS_ERR_CORRECT_ENABLE (1 << 11) 97 + #define FDI_FE_ERR_CORRECT_ENABLE (1 << 10) 98 + #define FDI_FS_ERR_REPORT_ENABLE (1 << 9) 99 + #define FDI_FE_ERR_REPORT_ENABLE (1 << 8) 100 + #define FDI_RX_ENHANCE_FRAME_ENABLE (1 << 6) 101 + #define FDI_PCDCLK (1 << 4) 102 + /* CPT */ 103 + #define FDI_AUTO_TRAINING (1 << 10) 104 + #define FDI_LINK_TRAIN_PATTERN_1_CPT (0 << 8) 105 + #define FDI_LINK_TRAIN_PATTERN_2_CPT (1 << 8) 106 + #define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2 << 8) 107 + #define FDI_LINK_TRAIN_NORMAL_CPT (3 << 8) 108 + #define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3 << 8) 109 + 110 + #define _FDI_RXA_MISC 0xf0010 111 + #define _FDI_RXB_MISC 0xf1010 112 + #define FDI_RX_PWRDN_LANE1_MASK (3 << 26) 113 + #define FDI_RX_PWRDN_LANE1_VAL(x) ((x) << 26) 114 + #define FDI_RX_PWRDN_LANE0_MASK (3 << 24) 115 + #define FDI_RX_PWRDN_LANE0_VAL(x) ((x) << 24) 116 + #define FDI_RX_TP1_TO_TP2_48 (2 << 20) 117 + #define FDI_RX_TP1_TO_TP2_64 (3 << 20) 118 + #define FDI_RX_FDI_DELAY_90 (0x90 << 0) 119 + #define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) 120 + 121 + #define _FDI_RXA_TUSIZE1 0xf0030 122 + #define _FDI_RXA_TUSIZE2 0xf0038 123 + #define _FDI_RXB_TUSIZE1 0xf1030 124 + #define _FDI_RXB_TUSIZE2 0xf1038 125 + #define FDI_RX_TUSIZE1(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) 126 + #define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) 127 + 128 + /* FDI_RX interrupt register format */ 129 + #define FDI_RX_INTER_LANE_ALIGN (1 << 10) 130 + #define FDI_RX_SYMBOL_LOCK (1 << 9) /* train 2 */ 131 + #define FDI_RX_BIT_LOCK (1 << 8) /* train 1 */ 132 + #define FDI_RX_TRAIN_PATTERN_2_FAIL (1 << 7) 133 + #define FDI_RX_FS_CODE_ERR (1 << 6) 134 + #define FDI_RX_FE_CODE_ERR (1 << 5) 135 + #define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1 << 4) 136 + #define FDI_RX_HDCP_LINK_FAIL (1 << 3) 137 + #define FDI_RX_PIXEL_FIFO_OVERFLOW (1 << 2) 138 + #define FDI_RX_CROSS_CLOCK_OVERFLOW (1 
<< 1) 139 + #define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1 << 0) 140 + 141 + #define _FDI_RXA_IIR 0xf0014 142 + #define _FDI_RXA_IMR 0xf0018 143 + #define _FDI_RXB_IIR 0xf1014 144 + #define _FDI_RXB_IMR 0xf1018 145 + #define FDI_RX_IIR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR) 146 + #define FDI_RX_IMR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR) 147 + 148 + #define FDI_PLL_CTL_1 _MMIO(0xfe000) 149 + #define FDI_PLL_CTL_2 _MMIO(0xfe004) 150 + 151 + #endif /* __INTEL_FDI_REGS_H__ */
+1
drivers/gpu/drm/i915/display/intel_lvds.c
··· 51 51 #include "intel_lvds.h" 52 52 #include "intel_lvds_regs.h" 53 53 #include "intel_panel.h" 54 + #include "intel_pps_regs.h" 54 55 55 56 /* Private structure for the integrated LVDS support */ 56 57 struct intel_lvds_pps {
-1
drivers/gpu/drm/i915/display/intel_modeset_setup.c
··· 100 100 101 101 intel_fbc_disable(crtc); 102 102 intel_update_watermarks(i915); 103 - intel_disable_shared_dpll(crtc_state); 104 103 105 104 intel_display_power_put_all_in_set(i915, &crtc->enabled_power_domains); 106 105
+1
drivers/gpu/drm/i915/display/intel_pch_display.c
··· 9 9 #include "intel_de.h" 10 10 #include "intel_display_types.h" 11 11 #include "intel_fdi.h" 12 + #include "intel_fdi_regs.h" 12 13 #include "intel_lvds.h" 13 14 #include "intel_lvds_regs.h" 14 15 #include "intel_pch_display.h"
+1
drivers/gpu/drm/i915/display/intel_pps.c
··· 15 15 #include "intel_lvds.h" 16 16 #include "intel_lvds_regs.h" 17 17 #include "intel_pps.h" 18 + #include "intel_pps_regs.h" 18 19 #include "intel_quirks.h" 19 20 20 21 static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+78
drivers/gpu/drm/i915/display/intel_pps_regs.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_PPS_REGS_H__ 7 + #define __INTEL_PPS_REGS_H__ 8 + 9 + #include "intel_display_reg_defs.h" 10 + 11 + /* Panel power sequencing */ 12 + #define PPS_BASE 0x61200 13 + #define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) 14 + #define PCH_PPS_BASE 0xC7200 15 + 16 + #define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->display.pps.mmio_base - \ 17 + PPS_BASE + (reg) + \ 18 + (pps_idx) * 0x100) 19 + 20 + #define _PP_STATUS 0x61200 21 + #define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS) 22 + #define PP_ON REG_BIT(31) 23 + /* 24 + * Indicates that all dependencies of the panel are on: 25 + * 26 + * - PLL enabled 27 + * - pipe enabled 28 + * - LVDS/DVOB/DVOC on 29 + */ 30 + #define PP_READY REG_BIT(30) 31 + #define PP_SEQUENCE_MASK REG_GENMASK(29, 28) 32 + #define PP_SEQUENCE_NONE REG_FIELD_PREP(PP_SEQUENCE_MASK, 0) 33 + #define PP_SEQUENCE_POWER_UP REG_FIELD_PREP(PP_SEQUENCE_MASK, 1) 34 + #define PP_SEQUENCE_POWER_DOWN REG_FIELD_PREP(PP_SEQUENCE_MASK, 2) 35 + #define PP_CYCLE_DELAY_ACTIVE REG_BIT(27) 36 + #define PP_SEQUENCE_STATE_MASK REG_GENMASK(3, 0) 37 + #define PP_SEQUENCE_STATE_OFF_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x0) 38 + #define PP_SEQUENCE_STATE_OFF_S0_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x1) 39 + #define PP_SEQUENCE_STATE_OFF_S0_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x2) 40 + #define PP_SEQUENCE_STATE_OFF_S0_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x3) 41 + #define PP_SEQUENCE_STATE_ON_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x8) 42 + #define PP_SEQUENCE_STATE_ON_S1_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x9) 43 + #define PP_SEQUENCE_STATE_ON_S1_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xa) 44 + #define PP_SEQUENCE_STATE_ON_S1_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xb) 45 + #define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf) 46 + 47 + #define _PP_CONTROL 0x61204 48 + #define 
PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL) 49 + #define PANEL_UNLOCK_MASK REG_GENMASK(31, 16) 50 + #define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd) 51 + #define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4) 52 + #define EDP_FORCE_VDD REG_BIT(3) 53 + #define EDP_BLC_ENABLE REG_BIT(2) 54 + #define PANEL_POWER_RESET REG_BIT(1) 55 + #define PANEL_POWER_ON REG_BIT(0) 56 + 57 + #define _PP_ON_DELAYS 0x61208 58 + #define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS) 59 + #define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30) 60 + #define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0) 61 + #define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1) 62 + #define PANEL_PORT_SELECT_DPC REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 2) 63 + #define PANEL_PORT_SELECT_DPD REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 3) 64 + #define PANEL_PORT_SELECT_VLV(port) REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, port) 65 + #define PANEL_POWER_UP_DELAY_MASK REG_GENMASK(28, 16) 66 + #define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0) 67 + 68 + #define _PP_OFF_DELAYS 0x6120C 69 + #define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS) 70 + #define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16) 71 + #define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0) 72 + 73 + #define _PP_DIVISOR 0x61210 74 + #define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR) 75 + #define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8) 76 + #define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0) 77 + 78 + #endif /* __INTEL_PPS_REGS_H__ */
+72 -19
drivers/gpu/drm/i915/display/intel_psr.c
··· 34 34 #include "intel_dp_aux.h" 35 35 #include "intel_hdmi.h" 36 36 #include "intel_psr.h" 37 + #include "intel_psr_regs.h" 37 38 #include "intel_snps_phy.h" 38 39 #include "skl_universal_plane.h" 39 40 ··· 520 519 return val; 521 520 } 522 521 522 + static int psr2_block_count_lines(struct intel_dp *intel_dp) 523 + { 524 + return intel_dp->psr.io_wake_lines < 9 && 525 + intel_dp->psr.fast_wake_lines < 9 ? 8 : 12; 526 + } 527 + 528 + static int psr2_block_count(struct intel_dp *intel_dp) 529 + { 530 + return psr2_block_count_lines(intel_dp) / 4; 531 + } 532 + 523 533 static void hsw_activate_psr2(struct intel_dp *intel_dp) 524 534 { 525 535 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); ··· 548 536 val |= intel_psr2_get_tp_time(intel_dp); 549 537 550 538 if (DISPLAY_VER(dev_priv) >= 12) { 551 - if (intel_dp->psr.io_wake_lines < 9 && 552 - intel_dp->psr.fast_wake_lines < 9) 553 - val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; 554 - else 539 + if (psr2_block_count(intel_dp) > 2) 555 540 val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3; 541 + else 542 + val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; 556 543 } 557 544 558 545 /* Wa_22012278275:adl-p */ ··· 969 958 return false; 970 959 } 971 960 961 + /* Vblank >= PSR2_CTL Block Count Number maximum line count */ 962 + if (crtc_state->hw.adjusted_mode.crtc_vblank_end - 963 + crtc_state->hw.adjusted_mode.crtc_vblank_start < 964 + psr2_block_count_lines(intel_dp)) { 965 + drm_dbg_kms(&dev_priv->drm, 966 + "PSR2 not enabled, too short vblank time\n"); 967 + return false; 968 + } 969 + 972 970 if (HAS_PSR2_SEL_FETCH(dev_priv)) { 973 971 if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && 974 972 !HAS_PSR_HW_TRACKING(dev_priv)) { ··· 1154 1134 } 1155 1135 } 1156 1136 1137 + /* 1138 + * Wa_16013835468 1139 + * Wa_14015648006 1140 + */ 1141 + static void wm_optimization_wa(struct intel_dp *intel_dp, 1142 + const struct intel_crtc_state *crtc_state) 1143 + { 1144 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1145 
+ bool set_wa_bit = false; 1146 + 1147 + /* Wa_14015648006 */ 1148 + if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || 1149 + IS_DISPLAY_VER(dev_priv, 11, 13)) 1150 + set_wa_bit |= crtc_state->wm_level_disabled; 1151 + 1152 + /* Wa_16013835468 */ 1153 + if (DISPLAY_VER(dev_priv) == 12) 1154 + set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start != 1155 + crtc_state->hw.adjusted_mode.crtc_vdisplay; 1156 + 1157 + if (set_wa_bit) 1158 + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 1159 + 0, wa_16013835468_bit_get(intel_dp)); 1160 + else 1161 + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 1162 + wa_16013835468_bit_get(intel_dp), 0); 1163 + } 1164 + 1157 1165 static void intel_psr_enable_source(struct intel_dp *intel_dp, 1158 1166 const struct intel_crtc_state *crtc_state) 1159 1167 { ··· 1225 1177 * Wa_16013835468 1226 1178 * Wa_14015648006 1227 1179 */ 1228 - if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || 1229 - IS_DISPLAY_VER(dev_priv, 12, 13)) { 1230 - if (crtc_state->hw.adjusted_mode.crtc_vblank_start != 1231 - crtc_state->hw.adjusted_mode.crtc_vdisplay) 1232 - intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, 1233 - wa_16013835468_bit_get(intel_dp)); 1234 - } 1180 + wm_optimization_wa(intel_dp, crtc_state); 1235 1181 1236 1182 if (intel_dp->psr.psr2_enabled) { 1237 1183 if (DISPLAY_VER(dev_priv) == 9) ··· 1403 1361 * Wa_16013835468 1404 1362 * Wa_14015648006 1405 1363 */ 1406 - if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || 1407 - IS_DISPLAY_VER(dev_priv, 12, 13)) 1364 + if (DISPLAY_VER(dev_priv) >= 11) 1408 1365 intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 1409 1366 wa_16013835468_bit_get(intel_dp), 0); 1410 1367 ··· 1969 1928 * - PSR disabled in new state 1970 1929 * - All planes will go inactive 1971 1930 * - Changing between PSR versions 1931 + * - Display WA #1136: skl, bxt 1972 1932 */ 1973 1933 needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state); 1974 1934 needs_to_disable |= !new_crtc_state->has_psr; 1975 1935 
needs_to_disable |= !new_crtc_state->active_planes; 1976 1936 needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled; 1937 + needs_to_disable |= DISPLAY_VER(i915) < 11 && 1938 + new_crtc_state->wm_level_disabled; 1977 1939 1978 1940 if (psr->enabled && needs_to_disable) 1979 1941 intel_psr_disable_locked(intel_dp); 1942 + else if (psr->enabled && new_crtc_state->wm_level_disabled) 1943 + /* Wa_14015648006 */ 1944 + wm_optimization_wa(intel_dp, new_crtc_state); 1980 1945 1981 1946 mutex_unlock(&psr->lock); 1982 1947 } ··· 2001 1954 crtc_state->uapi.encoder_mask) { 2002 1955 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2003 1956 struct intel_psr *psr = &intel_dp->psr; 1957 + bool keep_disabled = false; 2004 1958 2005 1959 mutex_lock(&psr->lock); 2006 1960 2007 - if (psr->sink_not_reliable) 2008 - goto exit; 2009 - 2010 1961 drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes); 2011 1962 2012 - /* Only enable if there is active planes */ 2013 - if (!psr->enabled && crtc_state->active_planes) 1963 + keep_disabled |= psr->sink_not_reliable; 1964 + keep_disabled |= !crtc_state->active_planes; 1965 + 1966 + /* Display WA #1136: skl, bxt */ 1967 + keep_disabled |= DISPLAY_VER(dev_priv) < 11 && 1968 + crtc_state->wm_level_disabled; 1969 + 1970 + if (!psr->enabled && !keep_disabled) 2014 1971 intel_psr_enable_locked(intel_dp, crtc_state); 1972 + else if (psr->enabled && !crtc_state->wm_level_disabled) 1973 + /* Wa_14015648006 */ 1974 + wm_optimization_wa(intel_dp, crtc_state); 2015 1975 2016 1976 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */ 2017 1977 if (crtc_state->crc_enabled && psr->enabled) 2018 1978 psr_force_hw_tracking_exit(intel_dp); 2019 1979 2020 - exit: 2021 1980 mutex_unlock(&psr->lock); 2022 1981 } 2023 1982 }
+260
drivers/gpu/drm/i915/display/intel_psr_regs.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_PSR_REGS_H__ 7 + #define __INTEL_PSR_REGS_H__ 8 + 9 + #include "intel_display_reg_defs.h" 10 + 11 + #define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A) 12 + #define EXITLINE_ENABLE REG_BIT(31) 13 + #define EXITLINE_MASK REG_GENMASK(12, 0) 14 + #define EXITLINE_SHIFT 0 15 + 16 + /* 17 + * HSW+ eDP PSR registers 18 + * 19 + * HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one 20 + * instance of it 21 + */ 22 + #define _SRD_CTL_A 0x60800 23 + #define _SRD_CTL_EDP 0x6f800 24 + #define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A) 25 + #define EDP_PSR_ENABLE (1 << 31) 26 + #define BDW_PSR_SINGLE_FRAME (1 << 30) 27 + #define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */ 28 + #define EDP_PSR_LINK_STANDBY (1 << 27) 29 + #define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3 << 25) 30 + #define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0 << 25) 31 + #define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1 << 25) 32 + #define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2 << 25) 33 + #define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3 << 25) 34 + #define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20 35 + #define EDP_PSR_SKIP_AUX_EXIT (1 << 12) 36 + #define EDP_PSR_TP1_TP2_SEL (0 << 11) 37 + #define EDP_PSR_TP1_TP3_SEL (1 << 11) 38 + #define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */ 39 + #define EDP_PSR_TP2_TP3_TIME_500us (0 << 8) 40 + #define EDP_PSR_TP2_TP3_TIME_100us (1 << 8) 41 + #define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8) 42 + #define EDP_PSR_TP2_TP3_TIME_0us (3 << 8) 43 + #define EDP_PSR_TP4_TIME_0US (3 << 6) /* ICL+ */ 44 + #define EDP_PSR_TP1_TIME_500us (0 << 4) 45 + #define EDP_PSR_TP1_TIME_100us (1 << 4) 46 + #define EDP_PSR_TP1_TIME_2500us (2 << 4) 47 + #define EDP_PSR_TP1_TIME_0us (3 << 4) 48 + #define EDP_PSR_IDLE_FRAME_SHIFT 0 49 + 50 + /* 51 + * Until TGL, IMR/IIR are fixed at 0x648xx. 
On TGL+ those registers are relative 52 + * to transcoder and bits defined for each one as if using no shift (i.e. as if 53 + * it was for TRANSCODER_EDP) 54 + */ 55 + #define EDP_PSR_IMR _MMIO(0x64834) 56 + #define EDP_PSR_IIR _MMIO(0x64838) 57 + #define _PSR_IMR_A 0x60814 58 + #define _PSR_IIR_A 0x60818 59 + #define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A) 60 + #define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A) 61 + #define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \ 62 + 0 : ((trans) - TRANSCODER_A + 1) * 8) 63 + #define TGL_PSR_MASK REG_GENMASK(2, 0) 64 + #define TGL_PSR_ERROR REG_BIT(2) 65 + #define TGL_PSR_POST_EXIT REG_BIT(1) 66 + #define TGL_PSR_PRE_ENTRY REG_BIT(0) 67 + #define EDP_PSR_MASK(trans) (TGL_PSR_MASK << \ 68 + _EDP_PSR_TRANS_SHIFT(trans)) 69 + #define EDP_PSR_ERROR(trans) (TGL_PSR_ERROR << \ 70 + _EDP_PSR_TRANS_SHIFT(trans)) 71 + #define EDP_PSR_POST_EXIT(trans) (TGL_PSR_POST_EXIT << \ 72 + _EDP_PSR_TRANS_SHIFT(trans)) 73 + #define EDP_PSR_PRE_ENTRY(trans) (TGL_PSR_PRE_ENTRY << \ 74 + _EDP_PSR_TRANS_SHIFT(trans)) 75 + 76 + #define _SRD_AUX_DATA_A 0x60814 77 + #define _SRD_AUX_DATA_EDP 0x6f814 78 + #define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) + 4) /* 5 registers */ 79 + 80 + #define _SRD_STATUS_A 0x60840 81 + #define _SRD_STATUS_EDP 0x6f840 82 + #define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A) 83 + #define EDP_PSR_STATUS_STATE_MASK (7 << 29) 84 + #define EDP_PSR_STATUS_STATE_SHIFT 29 85 + #define EDP_PSR_STATUS_STATE_IDLE (0 << 29) 86 + #define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29) 87 + #define EDP_PSR_STATUS_STATE_SRDENT (2 << 29) 88 + #define EDP_PSR_STATUS_STATE_BUFOFF (3 << 29) 89 + #define EDP_PSR_STATUS_STATE_BUFON (4 << 29) 90 + #define EDP_PSR_STATUS_STATE_AUXACK (5 << 29) 91 + #define EDP_PSR_STATUS_STATE_SRDOFFACK (6 << 29) 92 + #define EDP_PSR_STATUS_LINK_MASK (3 << 26) 93 + #define EDP_PSR_STATUS_LINK_FULL_OFF (0 << 26) 94 + #define 
EDP_PSR_STATUS_LINK_FULL_ON (1 << 26) 95 + #define EDP_PSR_STATUS_LINK_STANDBY (2 << 26) 96 + #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20 97 + #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f 98 + #define EDP_PSR_STATUS_COUNT_SHIFT 16 99 + #define EDP_PSR_STATUS_COUNT_MASK 0xf 100 + #define EDP_PSR_STATUS_AUX_ERROR (1 << 15) 101 + #define EDP_PSR_STATUS_AUX_SENDING (1 << 12) 102 + #define EDP_PSR_STATUS_SENDING_IDLE (1 << 9) 103 + #define EDP_PSR_STATUS_SENDING_TP2_TP3 (1 << 8) 104 + #define EDP_PSR_STATUS_SENDING_TP1 (1 << 4) 105 + #define EDP_PSR_STATUS_IDLE_MASK 0xf 106 + 107 + #define _SRD_PERF_CNT_A 0x60844 108 + #define _SRD_PERF_CNT_EDP 0x6f844 109 + #define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A) 110 + #define EDP_PSR_PERF_CNT_MASK 0xffffff 111 + 112 + /* PSR_MASK on SKL+ */ 113 + #define _SRD_DEBUG_A 0x60860 114 + #define _SRD_DEBUG_EDP 0x6f860 115 + #define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A) 116 + #define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28) 117 + #define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) 118 + #define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) 119 + #define EDP_PSR_DEBUG_MASK_HPD (1 << 25) 120 + #define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */ 121 + #define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */ 122 + 123 + #define _PSR2_CTL_A 0x60900 124 + #define _PSR2_CTL_EDP 0x6f900 125 + #define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A) 126 + #define EDP_PSR2_ENABLE (1 << 31) 127 + #define EDP_SU_TRACK_ENABLE (1 << 30) /* up to adl-p */ 128 + #define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 (0 << 28) 129 + #define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 (1 << 28) 130 + #define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */ 131 + #define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */ 132 + #define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20) 133 + #define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20) 134 + #define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES 8 135 + #define 
EDP_PSR2_IO_BUFFER_WAKE(lines) ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13) 136 + #define EDP_PSR2_IO_BUFFER_WAKE_MASK (3 << 13) 137 + #define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES 5 138 + #define TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT 13 139 + #define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT) 140 + #define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK (7 << 13) 141 + #define EDP_PSR2_FAST_WAKE_MAX_LINES 8 142 + #define EDP_PSR2_FAST_WAKE(lines) ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11) 143 + #define EDP_PSR2_FAST_WAKE_MASK (3 << 11) 144 + #define TGL_EDP_PSR2_FAST_WAKE_MIN_LINES 5 145 + #define TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT 10 146 + #define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT) 147 + #define TGL_EDP_PSR2_FAST_WAKE_MASK (7 << 10) 148 + #define EDP_PSR2_TP2_TIME_500us (0 << 8) 149 + #define EDP_PSR2_TP2_TIME_100us (1 << 8) 150 + #define EDP_PSR2_TP2_TIME_2500us (2 << 8) 151 + #define EDP_PSR2_TP2_TIME_50us (3 << 8) 152 + #define EDP_PSR2_TP2_TIME_MASK (3 << 8) 153 + #define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4 154 + #define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4) 155 + #define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4) 156 + #define EDP_PSR2_IDLE_FRAME_MASK 0xf 157 + #define EDP_PSR2_IDLE_FRAME_SHIFT 0 158 + 159 + #define _PSR_EVENT_TRANS_A 0x60848 160 + #define _PSR_EVENT_TRANS_B 0x61848 161 + #define _PSR_EVENT_TRANS_C 0x62848 162 + #define _PSR_EVENT_TRANS_D 0x63848 163 + #define _PSR_EVENT_TRANS_EDP 0x6f848 164 + #define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A) 165 + #define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17) 166 + #define PSR_EVENT_PSR2_DISABLED (1 << 16) 167 + #define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15) 168 + #define PSR_EVENT_SU_CRC_FIFO_UNDERRUN (1 << 14) 169 + #define PSR_EVENT_GRAPHICS_RESET (1 << 12) 170 + #define PSR_EVENT_PCH_INTERRUPT (1 << 11) 171 + #define PSR_EVENT_MEMORY_UP 
(1 << 10) 172 + #define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9) 173 + #define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8) 174 + #define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6) 175 + #define PSR_EVENT_REGISTER_UPDATE (1 << 5) /* Reserved in ICL+ */ 176 + #define PSR_EVENT_HDCP_ENABLE (1 << 4) 177 + #define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3) 178 + #define PSR_EVENT_VBI_ENABLE (1 << 2) 179 + #define PSR_EVENT_LPSP_MODE_EXIT (1 << 1) 180 + #define PSR_EVENT_PSR_DISABLE (1 << 0) 181 + 182 + #define _PSR2_STATUS_A 0x60940 183 + #define _PSR2_STATUS_EDP 0x6f940 184 + #define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) 185 + #define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28) 186 + #define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8) 187 + 188 + #define _PSR2_SU_STATUS_A 0x60914 189 + #define _PSR2_SU_STATUS_EDP 0x6f914 190 + #define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(tran, _PSR2_SU_STATUS_A + (index) * 4) 191 + #define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3)) 192 + #define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10) 193 + #define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame)) 194 + #define PSR2_SU_STATUS_FRAMES 8 195 + 196 + #define _PSR2_MAN_TRK_CTL_A 0x60910 197 + #define _PSR2_MAN_TRK_CTL_EDP 0x6f910 198 + #define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A) 199 + #define PSR2_MAN_TRK_CTL_ENABLE REG_BIT(31) 200 + #define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK REG_GENMASK(30, 21) 201 + #define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val) 202 + #define PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK REG_GENMASK(20, 11) 203 + #define PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val) 204 + #define PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(3) 205 + #define PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(2) 206 + #define 
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(1) 207 + #define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK REG_GENMASK(28, 16) 208 + #define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val) 209 + #define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK REG_GENMASK(12, 0) 210 + #define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val) 211 + #define ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(31) 212 + #define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14) 213 + #define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13) 214 + 215 + #define _SEL_FETCH_PLANE_BASE_1_A 0x70890 216 + #define _SEL_FETCH_PLANE_BASE_2_A 0x708B0 217 + #define _SEL_FETCH_PLANE_BASE_3_A 0x708D0 218 + #define _SEL_FETCH_PLANE_BASE_4_A 0x708F0 219 + #define _SEL_FETCH_PLANE_BASE_5_A 0x70920 220 + #define _SEL_FETCH_PLANE_BASE_6_A 0x70940 221 + #define _SEL_FETCH_PLANE_BASE_7_A 0x70960 222 + #define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880 223 + #define _SEL_FETCH_PLANE_BASE_1_B 0x71890 224 + 225 + #define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \ 226 + _SEL_FETCH_PLANE_BASE_1_A, \ 227 + _SEL_FETCH_PLANE_BASE_2_A, \ 228 + _SEL_FETCH_PLANE_BASE_3_A, \ 229 + _SEL_FETCH_PLANE_BASE_4_A, \ 230 + _SEL_FETCH_PLANE_BASE_5_A, \ 231 + _SEL_FETCH_PLANE_BASE_6_A, \ 232 + _SEL_FETCH_PLANE_BASE_7_A, \ 233 + _SEL_FETCH_PLANE_BASE_CUR_A) 234 + #define _SEL_FETCH_PLANE_BASE_1(pipe) _PIPE(pipe, _SEL_FETCH_PLANE_BASE_1_A, _SEL_FETCH_PLANE_BASE_1_B) 235 + #define _SEL_FETCH_PLANE_BASE(pipe, plane) (_SEL_FETCH_PLANE_BASE_1(pipe) - \ 236 + _SEL_FETCH_PLANE_BASE_1_A + \ 237 + _SEL_FETCH_PLANE_BASE_A(plane)) 238 + 239 + #define _SEL_FETCH_PLANE_CTL_1_A 0x70890 240 + #define PLANE_SEL_FETCH_CTL(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \ 241 + _SEL_FETCH_PLANE_CTL_1_A - \ 242 + _SEL_FETCH_PLANE_BASE_1_A) 243 + #define PLANE_SEL_FETCH_CTL_ENABLE 
REG_BIT(31) 244 + 245 + #define _SEL_FETCH_PLANE_POS_1_A 0x70894 246 + #define PLANE_SEL_FETCH_POS(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \ 247 + _SEL_FETCH_PLANE_POS_1_A - \ 248 + _SEL_FETCH_PLANE_BASE_1_A) 249 + 250 + #define _SEL_FETCH_PLANE_SIZE_1_A 0x70898 251 + #define PLANE_SEL_FETCH_SIZE(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \ 252 + _SEL_FETCH_PLANE_SIZE_1_A - \ 253 + _SEL_FETCH_PLANE_BASE_1_A) 254 + 255 + #define _SEL_FETCH_PLANE_OFFSET_1_A 0x7089C 256 + #define PLANE_SEL_FETCH_OFFSET(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \ 257 + _SEL_FETCH_PLANE_OFFSET_1_A - \ 258 + _SEL_FETCH_PLANE_BASE_1_A) 259 + 260 + #endif /* __INTEL_PSR_REGS_H__ */
+176 -11
drivers/gpu/drm/i915/display/intel_qp_tables.c
··· 17 17 /* from BPP 6 to 36 in steps of 0.5 */ 18 18 #define RC_RANGE_QP444_12BPC_MAX_NUM_BPP 61 19 19 20 + /* from BPP 6 to 24 in steps of 0.5 */ 21 + #define RC_RANGE_QP420_8BPC_MAX_NUM_BPP 17 22 + 23 + /* from BPP 6 to 30 in steps of 0.5 */ 24 + #define RC_RANGE_QP420_10BPC_MAX_NUM_BPP 23 25 + 26 + /* from BPP 6 to 36 in steps of 0.5 */ 27 + #define RC_RANGE_QP420_12BPC_MAX_NUM_BPP 29 28 + 20 29 /* 21 30 * These qp tables are as per the C model 22 31 * and it has the rows pointing to bpps which increment ··· 292 283 11, 11, 10, 10, 10, 10, 10, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 6, 6, 5, 5, 4 } 293 284 }; 294 285 295 - #define PARAM_TABLE(_minmax, _bpc, _row, _col) do { \ 296 - if (bpc == (_bpc)) \ 297 - return rc_range_##_minmax##qp444_##_bpc##bpc[_row][_col]; \ 286 + static const u8 rc_range_minqp420_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_8BPC_MAX_NUM_BPP] = { 287 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 288 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 289 + { 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 290 + { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 291 + { 3, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 }, 292 + { 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0 }, 293 + { 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0 }, 294 + { 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 0 }, 295 + { 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0 }, 296 + { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1 }, 297 + { 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, 1, 1 }, 298 + { 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 2, 2, 1 }, 299 + { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 3, 3, 2, 1 }, 300 + { 9, 8, 8, 7, 7, 7, 7, 7, 7, 6, 5, 5, 4, 3, 3, 3, 2 }, 301 + { 13, 12, 12, 11, 10, 10, 9, 8, 8, 7, 6, 6, 5, 5, 4, 4, 3 } 302 + }; 303 + 304 + static const u8 rc_range_maxqp420_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_8BPC_MAX_NUM_BPP] = { 305 + { 4, 4, 3, 3, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 306 + { 4, 4, 4, 4, 4, 
3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0 }, 307 + { 5, 5, 5, 5, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0 }, 308 + { 6, 6, 6, 6, 6, 5, 4, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0 }, 309 + { 7, 7, 7, 7, 7, 5, 4, 3, 2, 2, 2, 2, 2, 1, 1, 1, 0 }, 310 + { 7, 7, 7, 7, 7, 6, 5, 4, 3, 3, 3, 2, 2, 2, 1, 1, 0 }, 311 + { 7, 7, 7, 7, 7, 6, 5, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1 }, 312 + { 8, 8, 8, 8, 8, 7, 6, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1 }, 313 + { 9, 9, 9, 8, 8, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1 }, 314 + { 10, 10, 9, 9, 9, 8, 7, 6, 5, 5, 5, 4, 4, 3, 3, 2, 2 }, 315 + { 10, 10, 10, 9, 9, 8, 8, 7, 6, 6, 5, 5, 4, 4, 3, 2, 2 }, 316 + { 11, 11, 10, 10, 9, 9, 8, 7, 7, 6, 6, 5, 5, 4, 3, 3, 2 }, 317 + { 11, 11, 11, 10, 9, 9, 9, 8, 7, 7, 6, 5, 5, 4, 4, 3, 2 }, 318 + { 13, 12, 12, 11, 10, 10, 9, 8, 8, 7, 6, 6, 5, 4, 4, 4, 3 }, 319 + { 14, 13, 13, 12, 11, 11, 10, 9, 9, 8, 7, 7, 6, 6, 5, 5, 4 } 320 + }; 321 + 322 + static const u8 rc_range_minqp420_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_10BPC_MAX_NUM_BPP] = { 323 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 324 + { 4, 4, 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 325 + { 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, 326 + { 5, 5, 5, 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0 }, 327 + { 7, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0 }, 328 + { 7, 7, 7, 7, 7, 6, 5, 5, 5, 5, 5, 4, 3, 3, 2, 2, 1, 1, 1, 1, 1, 0, 0 }, 329 + { 7, 7, 7, 7, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 3, 2, 2, 2, 2, 1, 1, 1, 0 }, 330 + { 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 5, 4, 4, 4, 3, 2, 2, 2, 1, 1, 1, 0 }, 331 + { 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 2, 1, 1 }, 332 + { 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1 }, 333 + { 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1 }, 334 + { 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 7, 6, 6, 5, 4, 4, 3, 3, 2, 1 }, 335 + { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 3, 2, 1 
}, 336 + { 13, 12, 12, 11, 11, 11, 11, 11, 11, 10, 9, 9, 8, 7, 7, 6, 5, 5, 4, 3, 3, 337 + 2, 2 }, 338 + { 17, 16, 16, 15, 14, 14, 13, 12, 12, 11, 10, 10, 10, 9, 8, 8, 7, 6, 6, 5, 339 + 5, 4, 4 } 340 + }; 341 + 342 + static const u8 rc_range_maxqp420_10bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_10BPC_MAX_NUM_BPP] = { 343 + { 8, 8, 7, 6, 4, 4, 3, 3, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, 344 + { 8, 8, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 }, 345 + { 9, 9, 9, 8, 8, 7, 6, 5, 4, 3, 3, 3, 3, 3, 2, 1, 1, 1, 0, 0, 0, 0, 0 }, 346 + { 10, 10, 10, 9, 9, 8, 7, 6, 5, 4, 4, 3, 3, 3, 3, 2, 1, 1, 1, 1, 1, 0, 347 + 0 }, 348 + { 11, 11, 11, 10, 10, 8, 7, 6, 5, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 349 + 0 }, 350 + { 11, 11, 11, 10, 10, 9, 8, 7, 6, 6, 6, 5, 4, 4, 3, 3, 2, 2, 2, 2, 2, 1, 351 + 1 }, 352 + { 11, 11, 11, 11, 11, 10, 9, 8, 7, 7, 7, 6, 5, 5, 4, 3, 3, 3, 3, 2, 2, 2, 353 + 1 }, 354 + { 12, 12, 12, 12, 12, 11, 10, 9, 8, 8, 8, 7, 6, 5, 5, 4, 3, 3, 3, 2, 2, 355 + 2, 1 }, 356 + { 13, 13, 13, 12, 12, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 3, 3, 357 + 2, 2 }, 358 + { 14, 14, 13, 13, 13, 12, 11, 10, 9, 9, 9, 8, 8, 7, 7, 6, 5, 4, 4, 3, 3, 359 + 2, 2 }, 360 + { 14, 14, 14, 13, 13, 12, 12, 11, 10, 10, 9, 9, 8, 8, 7, 6, 5, 5, 4, 4, 361 + 3, 3, 2 }, 362 + { 15, 15, 14, 14, 13, 13, 12, 11, 11, 10, 10, 9, 9, 8, 7, 7, 6, 5, 5, 4, 363 + 4, 3, 2 }, 364 + { 15, 15, 15, 14, 13, 13, 13, 12, 11, 11, 10, 9, 9, 8, 8, 7, 6, 5, 5, 4, 365 + 4, 3, 2 }, 366 + { 17, 16, 16, 15, 14, 14, 13, 12, 12, 11, 10, 10, 9, 8, 8, 7, 6, 6, 5, 4, 367 + 4, 3, 3 }, 368 + { 18, 17, 17, 16, 15, 15, 14, 13, 13, 12, 11, 11, 11, 10, 9, 9, 8, 7, 7, 369 + 6, 6, 5, 5 } 370 + }; 371 + 372 + static const u8 rc_range_minqp420_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_12BPC_MAX_NUM_BPP] = { 373 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 374 + 0, 0, 0, 0, 0 }, 375 + { 4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 376 + 0, 0, 0, 0, 0 }, 377 
+ { 9, 8, 8, 7, 7, 6, 5, 5, 4, 4, 4, 4, 3, 3, 3, 2, 2, 1, 0, 0, 0, 0, 0, 0, 378 + 0, 0, 0, 0, 0 }, 379 + { 10, 9, 9, 8, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 3, 2, 2, 1, 1, 1, 0, 0, 0, 380 + 0, 0, 0, 0, 0 }, 381 + { 11, 10, 10, 10, 10, 9, 9, 8, 7, 6, 6, 6, 6, 5, 5, 4, 3, 3, 3, 2, 2, 1, 382 + 0, 0, 0, 0, 0, 0, 0 }, 383 + { 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 9, 8, 7, 6, 5, 5, 4, 4, 3, 3, 3, 2, 384 + 1, 1, 0, 0, 0, 0, 0 }, 385 + { 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 9, 8, 8, 7, 6, 5, 5, 5, 5, 4, 3, 3, 386 + 2, 1, 1, 1, 1, 1, 0 }, 387 + { 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 9, 8, 8, 8, 7, 6, 6, 5, 4, 4, 388 + 3, 2, 2, 1, 1, 1, 1, 1 }, 389 + { 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6, 5, 390 + 5, 4, 4, 2, 2, 1, 1, 1, 1 }, 391 + { 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 6, 392 + 5, 4, 4, 3, 2, 2, 1, 1, 1 }, 393 + { 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, 394 + 6, 5, 4, 3, 3, 2, 2, 1, 1 }, 395 + { 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 11, 10, 10, 9, 8, 8, 396 + 7, 7, 6, 5, 4, 3, 3, 2, 2, 1 }, 397 + { 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 11, 11, 10, 9, 8, 8, 398 + 7, 7, 6, 5, 4, 4, 3, 2, 2, 1 }, 399 + { 15, 15, 15, 15, 15, 15, 15, 15, 15, 14, 13, 13, 12, 11, 11, 10, 9, 9, 8, 400 + 8, 7, 6, 6, 5, 4, 4, 3, 3, 2 }, 401 + { 21, 20, 20, 19, 18, 18, 17, 16, 16, 15, 14, 14, 14, 13, 12, 12, 11, 10, 402 + 10, 10, 9, 8, 8, 7, 6, 6, 5, 5, 4 } 403 + }; 404 + 405 + static const u8 rc_range_maxqp420_12bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP420_12BPC_MAX_NUM_BPP] = { 406 + { 11, 10, 9, 8, 6, 6, 5, 5, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 407 + 0, 0, 0, 0, 0, 0 }, 408 + { 12, 11, 11, 10, 9, 8, 7, 7, 6, 6, 5, 5, 4, 3, 3, 2, 1, 1, 1, 1, 1, 1, 409 + 1, 0, 0, 0, 0, 0, 0 }, 410 + { 13, 12, 12, 11, 11, 10, 9, 8, 7, 6, 6, 6, 5, 5, 4, 3, 3, 2, 1, 1, 1, 1, 411 + 1, 0, 0, 0, 0, 0, 0 }, 412 + { 14, 13, 13, 12, 12, 11, 10, 9, 8, 7, 7, 6, 6, 5, 5, 4, 3, 3, 2, 2, 2, 1, 413 + 
1, 1, 0, 0, 0, 0, 0 }, 414 + { 15, 14, 14, 13, 13, 11, 10, 9, 8, 7, 7, 7, 7, 6, 6, 5, 4, 4, 4, 3, 3, 2, 415 + 1, 1, 1, 0, 0, 0, 0 }, 416 + { 15, 15, 15, 14, 14, 13, 12, 11, 10, 10, 10, 9, 8, 7, 6, 6, 5, 5, 4, 4, 417 + 4, 3, 2, 2, 1, 1, 0, 0, 0 }, 418 + { 15, 15, 15, 15, 15, 14, 13, 12, 11, 11, 11, 10, 9, 8, 7, 6, 6, 6, 6, 5, 419 + 4, 4, 3, 2, 2, 2, 1, 1, 0 }, 420 + { 16, 16, 16, 16, 16, 15, 14, 13, 12, 12, 12, 11, 10, 9, 9, 8, 7, 7, 6, 5, 421 + 5, 4, 3, 3, 2, 2, 2, 1, 1 }, 422 + { 17, 17, 17, 16, 16, 15, 14, 14, 13, 13, 12, 12, 11, 11, 10, 9, 8, 8, 7, 423 + 6, 6, 5, 5, 3, 3, 2, 2, 1, 1 }, 424 + { 18, 18, 17, 17, 17, 16, 15, 14, 13, 13, 13, 12, 12, 11, 11, 10, 9, 8, 8, 425 + 7, 6, 5, 5, 4, 3, 3, 2, 2, 1 }, 426 + { 18, 18, 18, 17, 17, 16, 16, 15, 14, 14, 13, 13, 12, 12, 11, 10, 9, 9, 8, 427 + 8, 7, 6, 5, 4, 4, 3, 3, 2, 2 }, 428 + { 19, 19, 18, 18, 17, 17, 16, 15, 15, 14, 14, 13, 13, 12, 11, 11, 10, 9, 429 + 9, 8, 8, 7, 6, 5, 4, 4, 3, 3, 2 }, 430 + { 19, 19, 19, 18, 17, 17, 17, 16, 15, 15, 14, 13, 13, 12, 12, 11, 10, 9, 431 + 9, 8, 8, 7, 6, 5, 5, 4, 3, 3, 2 }, 432 + { 21, 20, 20, 19, 18, 18, 17, 16, 16, 15, 14, 14, 13, 12, 12, 11, 10, 10, 433 + 9, 9, 8, 7, 7, 6, 5, 5, 4, 4, 3 }, 434 + { 22, 21, 21, 20, 19, 19, 18, 17, 17, 16, 15, 15, 15, 14, 13, 13, 12, 11, 435 + 11, 11, 10, 9, 9, 8, 7, 7, 6, 6, 5 } 436 + }; 437 + 438 + #define PARAM_TABLE(_minmax, _bpc, _row, _col, _is_420) do { \ 439 + if (bpc == (_bpc)) { \ 440 + if (_is_420) \ 441 + return rc_range_##_minmax##qp420_##_bpc##bpc[_row][_col]; \ 442 + else \ 443 + return rc_range_##_minmax##qp444_##_bpc##bpc[_row][_col]; \ 444 + } \ 298 445 } while (0) 299 446 300 - u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i) 447 + u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i, bool is_420) 301 448 { 302 - PARAM_TABLE(min, 8, buf_i, bpp_i); 303 - PARAM_TABLE(min, 10, buf_i, bpp_i); 304 - PARAM_TABLE(min, 12, buf_i, bpp_i); 449 + PARAM_TABLE(min, 8, buf_i, bpp_i, is_420); 450 + PARAM_TABLE(min, 10, buf_i, 
bpp_i, is_420); 451 + PARAM_TABLE(min, 12, buf_i, bpp_i, is_420); 305 452 306 453 MISSING_CASE(bpc); 307 454 return 0; 308 455 } 309 456 310 - u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i) 457 + u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i, bool is_420) 311 458 { 312 - PARAM_TABLE(max, 8, buf_i, bpp_i); 313 - PARAM_TABLE(max, 10, buf_i, bpp_i); 314 - PARAM_TABLE(max, 12, buf_i, bpp_i); 459 + PARAM_TABLE(max, 8, buf_i, bpp_i, is_420); 460 + PARAM_TABLE(max, 10, buf_i, bpp_i, is_420); 461 + PARAM_TABLE(max, 12, buf_i, bpp_i, is_420); 315 462 316 463 MISSING_CASE(bpc); 317 464 return 0;
+2 -2
drivers/gpu/drm/i915/display/intel_qp_tables.h
··· 8 8 9 9 #include <linux/types.h> 10 10 11 - u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i); 12 - u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i); 11 + u8 intel_lookup_range_min_qp(int bpc, int buf_i, int bpp_i, bool is_420); 12 + u8 intel_lookup_range_max_qp(int bpc, int buf_i, int bpp_i, bool is_420); 13 13 14 14 #endif
+803 -460
drivers/gpu/drm/i915/display/intel_tc.c
··· 15 15 #include "intel_mg_phy_regs.h" 16 16 #include "intel_tc.h" 17 17 18 + enum tc_port_mode { 19 + TC_PORT_DISCONNECTED, 20 + TC_PORT_TBT_ALT, 21 + TC_PORT_DP_ALT, 22 + TC_PORT_LEGACY, 23 + }; 24 + 25 + struct intel_tc_port; 26 + 27 + struct intel_tc_phy_ops { 28 + enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc); 29 + u32 (*hpd_live_status)(struct intel_tc_port *tc); 30 + bool (*is_ready)(struct intel_tc_port *tc); 31 + bool (*is_owned)(struct intel_tc_port *tc); 32 + void (*get_hw_state)(struct intel_tc_port *tc); 33 + bool (*connect)(struct intel_tc_port *tc, int required_lanes); 34 + void (*disconnect)(struct intel_tc_port *tc); 35 + void (*init)(struct intel_tc_port *tc); 36 + }; 37 + 38 + struct intel_tc_port { 39 + struct intel_digital_port *dig_port; 40 + 41 + const struct intel_tc_phy_ops *phy_ops; 42 + 43 + struct mutex lock; /* protects the TypeC port mode */ 44 + intel_wakeref_t lock_wakeref; 45 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 46 + enum intel_display_power_domain lock_power_domain; 47 + #endif 48 + struct delayed_work disconnect_phy_work; 49 + int link_refcount; 50 + bool legacy_port:1; 51 + char port_name[8]; 52 + enum tc_port_mode mode; 53 + enum tc_port_mode init_mode; 54 + enum phy_fia phy_fia; 55 + u8 phy_fia_idx; 56 + }; 57 + 58 + static enum intel_display_power_domain 59 + tc_phy_cold_off_domain(struct intel_tc_port *); 60 + static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc); 61 + static bool tc_phy_is_ready(struct intel_tc_port *tc); 62 + static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc); 63 + 18 64 static const char *tc_port_mode_name(enum tc_port_mode mode) 19 65 { 20 66 static const char * const names[] = { ··· 76 30 return names[mode]; 77 31 } 78 32 33 + static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port) 34 + { 35 + return dig_port->tc; 36 + } 37 + 38 + static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc) 39 + { 40 
+ return to_i915(tc->dig_port->base.base.dev); 41 + } 42 + 79 43 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port, 80 44 enum tc_port_mode mode) 81 45 { 82 46 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 83 47 enum phy phy = intel_port_to_phy(i915, dig_port->base.port); 48 + struct intel_tc_port *tc = to_tc_port(dig_port); 84 49 85 - return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode; 50 + return intel_phy_is_tc(i915, phy) && tc->mode == mode; 86 51 } 87 52 88 53 bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port) ··· 111 54 return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY); 112 55 } 113 56 57 + /* 58 + * The display power domains used for TC ports depending on the 59 + * platform and TC mode (legacy, DP-alt, TBT): 60 + * 61 + * POWER_DOMAIN_DISPLAY_CORE: 62 + * -------------------------- 63 + * ADLP/all modes: 64 + * - TCSS/IOM access for PHY ready state. 65 + * ADLP+/all modes: 66 + * - DE/north-,south-HPD ISR access for HPD live state. 67 + * 68 + * POWER_DOMAIN_PORT_DDI_LANES_<port>: 69 + * ----------------------------------- 70 + * ICL+/all modes: 71 + * - DE/DDI_BUF access for port enabled state. 72 + * ADLP/all modes: 73 + * - DE/DDI_BUF access for PHY owned state. 74 + * 75 + * POWER_DOMAIN_AUX_USBC<TC port index>: 76 + * ------------------------------------- 77 + * ICL/legacy mode: 78 + * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state 79 + * - TCSS/PHY: block TC-cold power state for using the PHY AUX and 80 + * main lanes. 81 + * ADLP/legacy, DP-alt modes: 82 + * - TCSS/PHY: block TC-cold power state for using the PHY AUX and 83 + * main lanes. 84 + * 85 + * POWER_DOMAIN_TC_COLD_OFF: 86 + * ------------------------- 87 + * TGL/legacy, DP-alt modes: 88 + * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state 89 + * - TCSS/PHY: block TC-cold power state for using the PHY AUX and 90 + * main lanes. 
91 + * 92 + * ICL, TGL, ADLP/TBT mode: 93 + * - TCSS/IOM,FIA access for HPD live state 94 + * - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN) 95 + * AUX and main lanes. 96 + */ 114 97 bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port) 115 98 { 116 99 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 100 + struct intel_tc_port *tc = to_tc_port(dig_port); 117 101 118 - return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) || 119 - IS_ALDERLAKE_P(i915); 120 - } 121 - 122 - static enum intel_display_power_domain 123 - tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode) 124 - { 125 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 126 - 127 - if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port)) 128 - return POWER_DOMAIN_TC_COLD_OFF; 129 - 130 - return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); 102 + return tc_phy_cold_off_domain(tc) == 103 + intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); 131 104 } 132 105 133 106 static intel_wakeref_t 134 - tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode, 135 - enum intel_display_power_domain *domain) 107 + __tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain) 136 108 { 137 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 109 + struct drm_i915_private *i915 = tc_to_i915(tc); 138 110 139 - *domain = tc_cold_get_power_domain(dig_port, mode); 111 + *domain = tc_phy_cold_off_domain(tc); 140 112 141 113 return intel_display_power_get(i915, *domain); 142 114 } 143 115 144 116 static intel_wakeref_t 145 - tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain) 117 + tc_cold_block(struct intel_tc_port *tc) 146 118 { 147 - return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain); 119 + enum intel_display_power_domain domain; 120 + intel_wakeref_t wakeref; 
121 + 122 + wakeref = __tc_cold_block(tc, &domain); 123 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 124 + tc->lock_power_domain = domain; 125 + #endif 126 + return wakeref; 148 127 } 149 128 150 129 static void 151 - tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain, 152 - intel_wakeref_t wakeref) 130 + __tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain, 131 + intel_wakeref_t wakeref) 153 132 { 154 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 155 - 156 - /* 157 - * wakeref == -1, means some error happened saving save_depot_stack but 158 - * power should still be put down and 0 is a invalid save_depot_stack 159 - * id so can be used to skip it for non TC legacy ports. 160 - */ 161 - if (wakeref == 0) 162 - return; 133 + struct drm_i915_private *i915 = tc_to_i915(tc); 163 134 164 135 intel_display_power_put(i915, domain, wakeref); 165 136 } 166 137 167 138 static void 168 - assert_tc_cold_blocked(struct intel_digital_port *dig_port) 139 + tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref) 169 140 { 170 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 141 + enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc); 142 + 143 + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 144 + drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain); 145 + #endif 146 + __tc_cold_unblock(tc, domain, wakeref); 147 + } 148 + 149 + static void 150 + assert_display_core_power_enabled(struct intel_tc_port *tc) 151 + { 152 + struct drm_i915_private *i915 = tc_to_i915(tc); 153 + 154 + drm_WARN_ON(&i915->drm, 155 + !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE)); 156 + } 157 + 158 + static void 159 + assert_tc_cold_blocked(struct intel_tc_port *tc) 160 + { 161 + struct drm_i915_private *i915 = tc_to_i915(tc); 171 162 bool enabled; 172 163 173 164 enabled = intel_display_power_is_enabled(i915, 174 - 
tc_cold_get_power_domain(dig_port, 175 - dig_port->tc_mode)); 165 + tc_phy_cold_off_domain(tc)); 176 166 drm_WARN_ON(&i915->drm, !enabled); 177 167 } 178 168 179 169 static enum intel_display_power_domain 180 - tc_port_power_domain(struct intel_digital_port *dig_port) 170 + tc_port_power_domain(struct intel_tc_port *tc) 181 171 { 182 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 183 - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); 172 + struct drm_i915_private *i915 = tc_to_i915(tc); 173 + enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port); 184 174 185 175 return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1; 186 176 } 187 177 188 178 static void 189 - assert_tc_port_power_enabled(struct intel_digital_port *dig_port) 179 + assert_tc_port_power_enabled(struct intel_tc_port *tc) 190 180 { 191 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 181 + struct drm_i915_private *i915 = tc_to_i915(tc); 192 182 193 183 drm_WARN_ON(&i915->drm, 194 - !intel_display_power_is_enabled(i915, tc_port_power_domain(dig_port))); 184 + !intel_display_power_is_enabled(i915, tc_port_power_domain(tc))); 195 185 } 196 186 197 187 u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) 198 188 { 199 189 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 190 + struct intel_tc_port *tc = to_tc_port(dig_port); 200 191 u32 lane_mask; 201 192 202 - lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia)); 193 + lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); 203 194 204 195 drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff); 205 - assert_tc_cold_blocked(dig_port); 196 + assert_tc_cold_blocked(tc); 206 197 207 - lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx); 208 - return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); 198 + lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx); 199 + return lane_mask >> 
DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx); 209 200 } 210 201 211 202 u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) 212 203 { 213 204 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 205 + struct intel_tc_port *tc = to_tc_port(dig_port); 214 206 u32 pin_mask; 215 207 216 - pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(dig_port->tc_phy_fia)); 208 + pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia)); 217 209 218 210 drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff); 219 - assert_tc_cold_blocked(dig_port); 211 + assert_tc_cold_blocked(tc); 220 212 221 - return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >> 222 - DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); 213 + return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >> 214 + DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx); 223 215 } 224 216 225 217 int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) 226 218 { 227 219 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 220 + struct intel_tc_port *tc = to_tc_port(dig_port); 221 + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); 228 222 intel_wakeref_t wakeref; 229 223 u32 lane_mask; 230 224 231 - if (dig_port->tc_mode != TC_PORT_DP_ALT) 225 + if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT) 232 226 return 4; 233 227 234 - assert_tc_cold_blocked(dig_port); 228 + assert_tc_cold_blocked(tc); 235 229 236 230 lane_mask = 0; 237 231 with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) ··· 309 201 int required_lanes) 310 202 { 311 203 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 204 + struct intel_tc_port *tc = to_tc_port(dig_port); 312 205 bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; 313 206 u32 val; 314 207 315 208 drm_WARN_ON(&i915->drm, 316 - lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY); 209 + lane_reversal && tc->mode != TC_PORT_LEGACY); 317 210 318 - 
assert_tc_cold_blocked(dig_port); 211 + assert_tc_cold_blocked(tc); 319 212 320 - val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia)); 321 - val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx); 213 + val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia)); 214 + val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx); 322 215 323 216 switch (required_lanes) { 324 217 case 1: 325 218 val |= lane_reversal ? 326 - DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) : 327 - DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx); 219 + DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) : 220 + DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx); 328 221 break; 329 222 case 2: 330 223 val |= lane_reversal ? 331 - DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) : 332 - DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx); 224 + DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) : 225 + DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx); 333 226 break; 334 227 case 4: 335 - val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx); 228 + val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx); 336 229 break; 337 230 default: 338 231 MISSING_CASE(required_lanes); 339 232 } 340 233 341 - intel_de_write(i915, PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val); 234 + intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val); 342 235 } 343 236 344 - static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port, 237 + static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc, 345 238 u32 live_status_mask) 346 239 { 347 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 240 + struct drm_i915_private *i915 = tc_to_i915(tc); 348 241 u32 valid_hpd_mask; 349 242 350 - if (dig_port->tc_legacy_port) 243 + drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); 244 + 245 + if (hweight32(live_status_mask) != 1) 246 + return; 247 + 248 + if (tc->legacy_port) 351 249 valid_hpd_mask = BIT(TC_PORT_LEGACY); 352 250 else 353 251 valid_hpd_mask = BIT(TC_PORT_DP_ALT) | ··· 365 251 /* 
If live status mismatches the VBT flag, trust the live status. */ 366 252 drm_dbg_kms(&i915->drm, 367 253 "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n", 368 - dig_port->tc_port_name, live_status_mask, valid_hpd_mask); 254 + tc->port_name, live_status_mask, valid_hpd_mask); 369 255 370 - dig_port->tc_legacy_port = !dig_port->tc_legacy_port; 256 + tc->legacy_port = !tc->legacy_port; 371 257 } 372 258 373 - static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port) 259 + static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia) 374 260 { 375 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 261 + struct drm_i915_private *i915 = tc_to_i915(tc); 262 + enum port port = tc->dig_port->base.port; 263 + enum tc_port tc_port = intel_port_to_tc(i915, port); 264 + 265 + /* 266 + * Each Modular FIA instance houses 2 TC ports. In SOC that has more 267 + * than two TC ports, there are multiple instances of Modular FIA. 
268 + */ 269 + if (modular_fia) { 270 + tc->phy_fia = tc_port / 2; 271 + tc->phy_fia_idx = tc_port % 2; 272 + } else { 273 + tc->phy_fia = FIA1; 274 + tc->phy_fia_idx = tc_port; 275 + } 276 + } 277 + 278 + /* 279 + * ICL TC PHY handlers 280 + * ------------------- 281 + */ 282 + static enum intel_display_power_domain 283 + icl_tc_phy_cold_off_domain(struct intel_tc_port *tc) 284 + { 285 + struct drm_i915_private *i915 = tc_to_i915(tc); 286 + struct intel_digital_port *dig_port = tc->dig_port; 287 + 288 + if (tc->legacy_port) 289 + return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); 290 + 291 + return POWER_DOMAIN_TC_COLD_OFF; 292 + } 293 + 294 + static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc) 295 + { 296 + struct drm_i915_private *i915 = tc_to_i915(tc); 297 + struct intel_digital_port *dig_port = tc->dig_port; 376 298 u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin]; 299 + intel_wakeref_t wakeref; 300 + u32 fia_isr; 301 + u32 pch_isr; 377 302 u32 mask = 0; 378 - u32 val; 379 303 380 - val = intel_de_read(i915, PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia)); 304 + with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) { 305 + fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); 306 + pch_isr = intel_de_read(i915, SDEISR); 307 + } 381 308 382 - if (val == 0xffffffff) { 309 + if (fia_isr == 0xffffffff) { 383 310 drm_dbg_kms(&i915->drm, 384 311 "Port %s: PHY in TCCOLD, nothing connected\n", 385 - dig_port->tc_port_name); 312 + tc->port_name); 386 313 return mask; 387 314 } 388 315 389 - if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx)) 316 + if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx)) 390 317 mask |= BIT(TC_PORT_TBT_ALT); 391 - if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx)) 318 + if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx)) 392 319 mask |= BIT(TC_PORT_DP_ALT); 393 320 394 - if (intel_de_read(i915, SDEISR) & isr_bit) 321 + if (pch_isr & isr_bit) 395 322 mask |= 
BIT(TC_PORT_LEGACY); 396 323 397 - /* The sink can be connected only in a single mode. */ 398 - if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1)) 399 - tc_port_fixup_legacy_flag(dig_port, mask); 400 - 401 324 return mask; 402 - } 403 - 404 - static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port) 405 - { 406 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 407 - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); 408 - u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin]; 409 - u32 val, mask = 0; 410 - 411 - /* 412 - * On ADL-P HW/FW will wake from TCCOLD to complete the read access of 413 - * registers in IOM. Note that this doesn't apply to PHY and FIA 414 - * registers. 415 - */ 416 - val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port)); 417 - if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT) 418 - mask |= BIT(TC_PORT_DP_ALT); 419 - if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT) 420 - mask |= BIT(TC_PORT_TBT_ALT); 421 - 422 - if (intel_de_read(i915, SDEISR) & isr_bit) 423 - mask |= BIT(TC_PORT_LEGACY); 424 - 425 - /* The sink can be connected only in a single mode. */ 426 - if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1)) 427 - tc_port_fixup_legacy_flag(dig_port, mask); 428 - 429 - return mask; 430 - } 431 - 432 - static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) 433 - { 434 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 435 - 436 - if (IS_ALDERLAKE_P(i915)) 437 - return adl_tc_port_live_status_mask(dig_port); 438 - 439 - return icl_tc_port_live_status_mask(dig_port); 440 325 } 441 326 442 327 /* ··· 446 333 * owned by the TBT subsystem and so switching the ownership to display is not 447 334 * required. 
448 335 */ 449 - static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) 336 + static bool icl_tc_phy_is_ready(struct intel_tc_port *tc) 450 337 { 451 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 338 + struct drm_i915_private *i915 = tc_to_i915(tc); 452 339 u32 val; 453 340 454 - val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia)); 341 + assert_tc_cold_blocked(tc); 342 + 343 + val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia)); 455 344 if (val == 0xffffffff) { 456 345 drm_dbg_kms(&i915->drm, 457 - "Port %s: PHY in TCCOLD, assuming not complete\n", 458 - dig_port->tc_port_name); 346 + "Port %s: PHY in TCCOLD, assuming not ready\n", 347 + tc->port_name); 459 348 return false; 460 349 } 461 350 462 - return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx); 351 + return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx); 463 352 } 464 353 465 - /* 466 - * Return the PHY status complete flag indicating that display can acquire the 467 - * PHY ownership. The IOM firmware sets this flag when it's ready to switch 468 - * the ownership to display, regardless of what sink is connected (TBT-alt, 469 - * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT 470 - * subsystem and so switching the ownership to display is not required. 
471 - */ 472 - static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port) 473 - { 474 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 475 - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); 476 - u32 val; 477 - 478 - val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port)); 479 - if (val == 0xffffffff) { 480 - drm_dbg_kms(&i915->drm, 481 - "Port %s: PHY in TCCOLD, assuming not complete\n", 482 - dig_port->tc_port_name); 483 - return false; 484 - } 485 - 486 - return val & TCSS_DDI_STATUS_READY; 487 - } 488 - 489 - static bool tc_phy_status_complete(struct intel_digital_port *dig_port) 490 - { 491 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 492 - 493 - if (IS_ALDERLAKE_P(i915)) 494 - return adl_tc_phy_status_complete(dig_port); 495 - 496 - return icl_tc_phy_status_complete(dig_port); 497 - } 498 - 499 - static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port, 354 + static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc, 500 355 bool take) 501 356 { 502 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 357 + struct drm_i915_private *i915 = tc_to_i915(tc); 503 358 u32 val; 504 359 505 - val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); 360 + assert_tc_cold_blocked(tc); 361 + 362 + val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); 506 363 if (val == 0xffffffff) { 507 364 drm_dbg_kms(&i915->drm, 508 365 "Port %s: PHY in TCCOLD, can't %s ownership\n", 509 - dig_port->tc_port_name, take ? "take" : "release"); 366 + tc->port_name, take ? 
"take" : "release"); 510 367 511 368 return false; 512 369 } 513 370 514 - val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); 371 + val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); 515 372 if (take) 516 - val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); 373 + val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); 517 374 518 - intel_de_write(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val); 519 - 520 - return true; 521 - } 522 - 523 - static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port, 524 - bool take) 525 - { 526 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 527 - enum port port = dig_port->base.port; 528 - 529 - intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP, 530 - take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0); 375 + intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val); 531 376 532 377 return true; 533 378 } 534 379 535 - static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take) 380 + static bool icl_tc_phy_is_owned(struct intel_tc_port *tc) 536 381 { 537 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 538 - 539 - if (IS_ALDERLAKE_P(i915)) 540 - return adl_tc_phy_take_ownership(dig_port, take); 541 - 542 - return icl_tc_phy_take_ownership(dig_port, take); 543 - } 544 - 545 - static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port) 546 - { 547 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 382 + struct drm_i915_private *i915 = tc_to_i915(tc); 548 383 u32 val; 549 384 550 - val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); 385 + assert_tc_cold_blocked(tc); 386 + 387 + val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); 551 388 if (val == 0xffffffff) { 552 389 drm_dbg_kms(&i915->drm, 553 390 "Port %s: PHY in TCCOLD, assume not owned\n", 554 - dig_port->tc_port_name); 391 + tc->port_name); 555 392 return false; 556 393 } 557 394 558 - return val & 
DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); 395 + return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); 559 396 } 560 397 561 - static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port) 398 + static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc) 562 399 { 563 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 564 - enum port port = dig_port->base.port; 565 - u32 val; 400 + enum intel_display_power_domain domain; 401 + intel_wakeref_t tc_cold_wref; 566 402 567 - val = intel_de_read(i915, DDI_BUF_CTL(port)); 568 - return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP; 569 - } 403 + tc_cold_wref = __tc_cold_block(tc, &domain); 570 404 571 - static bool tc_phy_is_owned(struct intel_digital_port *dig_port) 572 - { 573 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 405 + tc->mode = tc_phy_get_current_mode(tc); 406 + if (tc->mode != TC_PORT_DISCONNECTED) 407 + tc->lock_wakeref = tc_cold_block(tc); 574 408 575 - if (IS_ALDERLAKE_P(i915)) 576 - return adl_tc_phy_is_owned(dig_port); 577 - 578 - return icl_tc_phy_is_owned(dig_port); 409 + __tc_cold_unblock(tc, domain, tc_cold_wref); 579 410 } 580 411 581 412 /* ··· 533 476 * connect and disconnect to cleanly transfer ownership with the controller and 534 477 * set the type-C power state. 
535 478 */ 536 - static void icl_tc_phy_connect(struct intel_digital_port *dig_port, 537 - int required_lanes) 479 + static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc, 480 + int required_lanes) 538 481 { 539 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 540 - u32 live_status_mask; 482 + struct drm_i915_private *i915 = tc_to_i915(tc); 483 + struct intel_digital_port *dig_port = tc->dig_port; 541 484 int max_lanes; 542 485 543 - if (!tc_phy_status_complete(dig_port) && 544 - !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port)) { 545 - drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n", 546 - dig_port->tc_port_name); 547 - goto out_set_tbt_alt_mode; 548 - } 549 - 550 - live_status_mask = tc_port_live_status_mask(dig_port); 551 - if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY))) && 552 - !dig_port->tc_legacy_port) { 553 - drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n", 554 - dig_port->tc_port_name, live_status_mask); 555 - goto out_set_tbt_alt_mode; 556 - } 557 - 558 - if (!tc_phy_take_ownership(dig_port, true) && 559 - !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port)) 560 - goto out_set_tbt_alt_mode; 561 - 562 486 max_lanes = intel_tc_port_fia_max_lane_count(dig_port); 563 - if (dig_port->tc_legacy_port) { 487 + if (tc->mode == TC_PORT_LEGACY) { 564 488 drm_WARN_ON(&i915->drm, max_lanes != 4); 565 - dig_port->tc_mode = TC_PORT_LEGACY; 566 - 567 - return; 489 + return true; 568 490 } 491 + 492 + drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT); 569 493 570 494 /* 571 495 * Now we have to re-check the live state, in case the port recently 572 496 * became disconnected. Not necessary for legacy mode. 
573 497 */ 574 - if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) { 498 + if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) { 575 499 drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n", 576 - dig_port->tc_port_name); 577 - goto out_release_phy; 500 + tc->port_name); 501 + return false; 578 502 } 579 503 580 504 if (max_lanes < required_lanes) { 581 505 drm_dbg_kms(&i915->drm, 582 506 "Port %s: PHY max lanes %d < required lanes %d\n", 583 - dig_port->tc_port_name, 507 + tc->port_name, 584 508 max_lanes, required_lanes); 585 - goto out_release_phy; 509 + return false; 586 510 } 587 511 588 - dig_port->tc_mode = TC_PORT_DP_ALT; 512 + return true; 513 + } 589 514 590 - return; 515 + static bool icl_tc_phy_connect(struct intel_tc_port *tc, 516 + int required_lanes) 517 + { 518 + struct drm_i915_private *i915 = tc_to_i915(tc); 519 + 520 + tc->lock_wakeref = tc_cold_block(tc); 521 + 522 + if (tc->mode == TC_PORT_TBT_ALT) 523 + return true; 524 + 525 + if ((!tc_phy_is_ready(tc) || 526 + !icl_tc_phy_take_ownership(tc, true)) && 527 + !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { 528 + drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n", 529 + tc->port_name, 530 + str_yes_no(tc_phy_is_ready(tc))); 531 + goto out_unblock_tc_cold; 532 + } 533 + 534 + 535 + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) 536 + goto out_release_phy; 537 + 538 + return true; 591 539 592 540 out_release_phy: 593 - tc_phy_take_ownership(dig_port, false); 594 - out_set_tbt_alt_mode: 595 - dig_port->tc_mode = TC_PORT_TBT_ALT; 541 + icl_tc_phy_take_ownership(tc, false); 542 + out_unblock_tc_cold: 543 + tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); 544 + 545 + return false; 596 546 } 597 547 598 548 /* 599 549 * See the comment at the connect function. This implements the Disconnect 600 550 * Flow. 
601 551 */ 602 - static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) 552 + static void icl_tc_phy_disconnect(struct intel_tc_port *tc) 603 553 { 604 - switch (dig_port->tc_mode) { 554 + switch (tc->mode) { 605 555 case TC_PORT_LEGACY: 606 556 case TC_PORT_DP_ALT: 607 - tc_phy_take_ownership(dig_port, false); 557 + icl_tc_phy_take_ownership(tc, false); 608 558 fallthrough; 609 559 case TC_PORT_TBT_ALT: 610 - dig_port->tc_mode = TC_PORT_DISCONNECTED; 611 - fallthrough; 612 - case TC_PORT_DISCONNECTED: 560 + tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); 613 561 break; 614 562 default: 615 - MISSING_CASE(dig_port->tc_mode); 563 + MISSING_CASE(tc->mode); 616 564 } 617 565 } 618 566 619 - static bool tc_phy_is_ready_and_owned(struct intel_digital_port *dig_port, 567 + static void icl_tc_phy_init(struct intel_tc_port *tc) 568 + { 569 + tc_phy_load_fia_params(tc, false); 570 + } 571 + 572 + static const struct intel_tc_phy_ops icl_tc_phy_ops = { 573 + .cold_off_domain = icl_tc_phy_cold_off_domain, 574 + .hpd_live_status = icl_tc_phy_hpd_live_status, 575 + .is_ready = icl_tc_phy_is_ready, 576 + .is_owned = icl_tc_phy_is_owned, 577 + .get_hw_state = icl_tc_phy_get_hw_state, 578 + .connect = icl_tc_phy_connect, 579 + .disconnect = icl_tc_phy_disconnect, 580 + .init = icl_tc_phy_init, 581 + }; 582 + 583 + /* 584 + * TGL TC PHY handlers 585 + * ------------------- 586 + */ 587 + static enum intel_display_power_domain 588 + tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc) 589 + { 590 + return POWER_DOMAIN_TC_COLD_OFF; 591 + } 592 + 593 + static void tgl_tc_phy_init(struct intel_tc_port *tc) 594 + { 595 + struct drm_i915_private *i915 = tc_to_i915(tc); 596 + intel_wakeref_t wakeref; 597 + u32 val; 598 + 599 + with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) 600 + val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1)); 601 + 602 + drm_WARN_ON(&i915->drm, val == 0xffffffff); 603 + 604 + tc_phy_load_fia_params(tc, val & 
MODULAR_FIA_MASK); 605 + } 606 + 607 + static const struct intel_tc_phy_ops tgl_tc_phy_ops = { 608 + .cold_off_domain = tgl_tc_phy_cold_off_domain, 609 + .hpd_live_status = icl_tc_phy_hpd_live_status, 610 + .is_ready = icl_tc_phy_is_ready, 611 + .is_owned = icl_tc_phy_is_owned, 612 + .get_hw_state = icl_tc_phy_get_hw_state, 613 + .connect = icl_tc_phy_connect, 614 + .disconnect = icl_tc_phy_disconnect, 615 + .init = tgl_tc_phy_init, 616 + }; 617 + 618 + /* 619 + * ADLP TC PHY handlers 620 + * -------------------- 621 + */ 622 + static enum intel_display_power_domain 623 + adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc) 624 + { 625 + struct drm_i915_private *i915 = tc_to_i915(tc); 626 + struct intel_digital_port *dig_port = tc->dig_port; 627 + 628 + if (tc->mode != TC_PORT_TBT_ALT) 629 + return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); 630 + 631 + return POWER_DOMAIN_TC_COLD_OFF; 632 + } 633 + 634 + static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc) 635 + { 636 + struct drm_i915_private *i915 = tc_to_i915(tc); 637 + struct intel_digital_port *dig_port = tc->dig_port; 638 + enum hpd_pin hpd_pin = dig_port->base.hpd_pin; 639 + u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin]; 640 + u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin]; 641 + intel_wakeref_t wakeref; 642 + u32 cpu_isr; 643 + u32 pch_isr; 644 + u32 mask = 0; 645 + 646 + with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) { 647 + cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR); 648 + pch_isr = intel_de_read(i915, SDEISR); 649 + } 650 + 651 + if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK)) 652 + mask |= BIT(TC_PORT_DP_ALT); 653 + if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK)) 654 + mask |= BIT(TC_PORT_TBT_ALT); 655 + 656 + if (pch_isr & pch_isr_bit) 657 + mask |= BIT(TC_PORT_LEGACY); 658 + 659 + return mask; 660 + } 661 + 662 + /* 663 + * Return the PHY status complete flag indicating that display can acquire the 664 + * 
PHY ownership. The IOM firmware sets this flag when it's ready to switch 665 + * the ownership to display, regardless of what sink is connected (TBT-alt, 666 + * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT 667 + * subsystem and so switching the ownership to display is not required. 668 + */ 669 + static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc) 670 + { 671 + struct drm_i915_private *i915 = tc_to_i915(tc); 672 + enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port); 673 + u32 val; 674 + 675 + assert_display_core_power_enabled(tc); 676 + 677 + val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port)); 678 + if (val == 0xffffffff) { 679 + drm_dbg_kms(&i915->drm, 680 + "Port %s: PHY in TCCOLD, assuming not ready\n", 681 + tc->port_name); 682 + return false; 683 + } 684 + 685 + return val & TCSS_DDI_STATUS_READY; 686 + } 687 + 688 + static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc, 689 + bool take) 690 + { 691 + struct drm_i915_private *i915 = tc_to_i915(tc); 692 + enum port port = tc->dig_port->base.port; 693 + 694 + assert_tc_port_power_enabled(tc); 695 + 696 + intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP, 697 + take ? 
DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0); 698 + 699 + return true; 700 + } 701 + 702 + static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc) 703 + { 704 + struct drm_i915_private *i915 = tc_to_i915(tc); 705 + enum port port = tc->dig_port->base.port; 706 + u32 val; 707 + 708 + assert_tc_port_power_enabled(tc); 709 + 710 + val = intel_de_read(i915, DDI_BUF_CTL(port)); 711 + return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP; 712 + } 713 + 714 + static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc) 715 + { 716 + struct drm_i915_private *i915 = tc_to_i915(tc); 717 + enum intel_display_power_domain port_power_domain = 718 + tc_port_power_domain(tc); 719 + intel_wakeref_t port_wakeref; 720 + 721 + port_wakeref = intel_display_power_get(i915, port_power_domain); 722 + 723 + tc->mode = tc_phy_get_current_mode(tc); 724 + if (tc->mode != TC_PORT_DISCONNECTED) 725 + tc->lock_wakeref = tc_cold_block(tc); 726 + 727 + intel_display_power_put(i915, port_power_domain, port_wakeref); 728 + } 729 + 730 + static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) 731 + { 732 + struct drm_i915_private *i915 = tc_to_i915(tc); 733 + enum intel_display_power_domain port_power_domain = 734 + tc_port_power_domain(tc); 735 + intel_wakeref_t port_wakeref; 736 + 737 + if (tc->mode == TC_PORT_TBT_ALT) { 738 + tc->lock_wakeref = tc_cold_block(tc); 739 + return true; 740 + } 741 + 742 + port_wakeref = intel_display_power_get(i915, port_power_domain); 743 + 744 + if (!adlp_tc_phy_take_ownership(tc, true) && 745 + !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { 746 + drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n", 747 + tc->port_name); 748 + goto out_put_port_power; 749 + } 750 + 751 + if (!tc_phy_is_ready(tc) && 752 + !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { 753 + drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n", 754 + tc->port_name); 755 + goto out_release_phy; 756 + } 757 + 758 + tc->lock_wakeref = tc_cold_block(tc); 759 + 760 + if 
(!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) 761 + goto out_unblock_tc_cold; 762 + 763 + intel_display_power_put(i915, port_power_domain, port_wakeref); 764 + 765 + return true; 766 + 767 + out_unblock_tc_cold: 768 + tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); 769 + out_release_phy: 770 + adlp_tc_phy_take_ownership(tc, false); 771 + out_put_port_power: 772 + intel_display_power_put(i915, port_power_domain, port_wakeref); 773 + 774 + return false; 775 + } 776 + 777 + static void adlp_tc_phy_disconnect(struct intel_tc_port *tc) 778 + { 779 + struct drm_i915_private *i915 = tc_to_i915(tc); 780 + enum intel_display_power_domain port_power_domain = 781 + tc_port_power_domain(tc); 782 + intel_wakeref_t port_wakeref; 783 + 784 + port_wakeref = intel_display_power_get(i915, port_power_domain); 785 + 786 + tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); 787 + 788 + switch (tc->mode) { 789 + case TC_PORT_LEGACY: 790 + case TC_PORT_DP_ALT: 791 + adlp_tc_phy_take_ownership(tc, false); 792 + fallthrough; 793 + case TC_PORT_TBT_ALT: 794 + break; 795 + default: 796 + MISSING_CASE(tc->mode); 797 + } 798 + 799 + intel_display_power_put(i915, port_power_domain, port_wakeref); 800 + } 801 + 802 + static void adlp_tc_phy_init(struct intel_tc_port *tc) 803 + { 804 + tc_phy_load_fia_params(tc, true); 805 + } 806 + 807 + static const struct intel_tc_phy_ops adlp_tc_phy_ops = { 808 + .cold_off_domain = adlp_tc_phy_cold_off_domain, 809 + .hpd_live_status = adlp_tc_phy_hpd_live_status, 810 + .is_ready = adlp_tc_phy_is_ready, 811 + .is_owned = adlp_tc_phy_is_owned, 812 + .get_hw_state = adlp_tc_phy_get_hw_state, 813 + .connect = adlp_tc_phy_connect, 814 + .disconnect = adlp_tc_phy_disconnect, 815 + .init = adlp_tc_phy_init, 816 + }; 817 + 818 + /* 819 + * Generic TC PHY handlers 820 + * ----------------------- 821 + */ 822 + static enum intel_display_power_domain 823 + tc_phy_cold_off_domain(struct intel_tc_port *tc) 824 + { 825 + return 
tc->phy_ops->cold_off_domain(tc); 826 + } 827 + 828 + static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc) 829 + { 830 + struct drm_i915_private *i915 = tc_to_i915(tc); 831 + u32 mask; 832 + 833 + mask = tc->phy_ops->hpd_live_status(tc); 834 + 835 + /* The sink can be connected only in a single mode. */ 836 + drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1); 837 + 838 + return mask; 839 + } 840 + 841 + static bool tc_phy_is_ready(struct intel_tc_port *tc) 842 + { 843 + return tc->phy_ops->is_ready(tc); 844 + } 845 + 846 + static bool tc_phy_is_owned(struct intel_tc_port *tc) 847 + { 848 + return tc->phy_ops->is_owned(tc); 849 + } 850 + 851 + static void tc_phy_get_hw_state(struct intel_tc_port *tc) 852 + { 853 + tc->phy_ops->get_hw_state(tc); 854 + } 855 + 856 + static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc, 620 857 bool phy_is_ready, bool phy_is_owned) 621 858 { 622 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 859 + struct drm_i915_private *i915 = tc_to_i915(tc); 623 860 624 861 drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready); 625 862 626 863 return phy_is_ready && phy_is_owned; 627 864 } 628 865 629 - static bool tc_phy_is_connected(struct intel_digital_port *dig_port, 866 + static bool tc_phy_is_connected(struct intel_tc_port *tc, 630 867 enum icl_port_dpll_id port_pll_type) 631 868 { 632 - struct intel_encoder *encoder = &dig_port->base; 869 + struct intel_encoder *encoder = &tc->dig_port->base; 633 870 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 634 - bool phy_is_ready = tc_phy_status_complete(dig_port); 635 - bool phy_is_owned = tc_phy_is_owned(dig_port); 871 + bool phy_is_ready = tc_phy_is_ready(tc); 872 + bool phy_is_owned = tc_phy_is_owned(tc); 636 873 bool is_connected; 637 874 638 - if (tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned)) 875 + if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) 639 876 is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY; 640 
877 else 641 878 is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT; 642 879 643 880 drm_dbg_kms(&i915->drm, 644 881 "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n", 645 - dig_port->tc_port_name, 882 + tc->port_name, 646 883 str_yes_no(is_connected), 647 884 str_yes_no(phy_is_ready), 648 885 str_yes_no(phy_is_owned), ··· 945 594 return is_connected; 946 595 } 947 596 948 - static void tc_phy_wait_for_ready(struct intel_digital_port *dig_port) 597 + static void tc_phy_wait_for_ready(struct intel_tc_port *tc) 949 598 { 950 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 599 + struct drm_i915_private *i915 = tc_to_i915(tc); 951 600 952 - if (wait_for(tc_phy_status_complete(dig_port), 100)) 601 + if (wait_for(tc_phy_is_ready(tc), 100)) 953 602 drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n", 954 - dig_port->tc_port_name); 603 + tc->port_name); 955 604 } 956 605 957 606 static enum tc_port_mode ··· 964 613 } 965 614 966 615 static enum tc_port_mode 967 - tc_phy_hpd_live_mode(struct intel_digital_port *dig_port) 616 + tc_phy_hpd_live_mode(struct intel_tc_port *tc) 968 617 { 969 - u32 live_status_mask = tc_port_live_status_mask(dig_port); 618 + u32 live_status_mask = tc_phy_hpd_live_status(tc); 970 619 971 620 return hpd_mask_to_tc_mode(live_status_mask); 972 621 } 973 622 974 623 static enum tc_port_mode 975 - get_tc_mode_in_phy_owned_state(struct intel_digital_port *dig_port, 624 + get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc, 976 625 enum tc_port_mode live_mode) 977 626 { 978 627 switch (live_mode) { ··· 984 633 fallthrough; 985 634 case TC_PORT_TBT_ALT: 986 635 case TC_PORT_DISCONNECTED: 987 - if (dig_port->tc_legacy_port) 636 + if (tc->legacy_port) 988 637 return TC_PORT_LEGACY; 989 638 else 990 639 return TC_PORT_DP_ALT; ··· 992 641 } 993 642 994 643 static enum tc_port_mode 995 - get_tc_mode_in_phy_not_owned_state(struct intel_digital_port *dig_port, 644 + get_tc_mode_in_phy_not_owned_state(struct 
intel_tc_port *tc, 996 645 enum tc_port_mode live_mode) 997 646 { 998 647 switch (live_mode) { ··· 1005 654 MISSING_CASE(live_mode); 1006 655 fallthrough; 1007 656 case TC_PORT_DISCONNECTED: 1008 - if (dig_port->tc_legacy_port) 657 + if (tc->legacy_port) 1009 658 return TC_PORT_DISCONNECTED; 1010 659 else 1011 660 return TC_PORT_TBT_ALT; ··· 1013 662 } 1014 663 1015 664 static enum tc_port_mode 1016 - intel_tc_port_get_current_mode(struct intel_digital_port *dig_port) 665 + tc_phy_get_current_mode(struct intel_tc_port *tc) 1017 666 { 1018 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 1019 - enum tc_port_mode live_mode = tc_phy_hpd_live_mode(dig_port); 667 + struct drm_i915_private *i915 = tc_to_i915(tc); 668 + enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc); 1020 669 bool phy_is_ready; 1021 670 bool phy_is_owned; 1022 671 enum tc_port_mode mode; ··· 1026 675 * and system resume whether or not a sink is connected. Wait here for 1027 676 * the initialization to get ready. 
1028 677 */ 1029 - if (dig_port->tc_legacy_port) 1030 - tc_phy_wait_for_ready(dig_port); 678 + if (tc->legacy_port) 679 + tc_phy_wait_for_ready(tc); 1031 680 1032 - phy_is_ready = tc_phy_status_complete(dig_port); 1033 - phy_is_owned = tc_phy_is_owned(dig_port); 681 + phy_is_ready = tc_phy_is_ready(tc); 682 + phy_is_owned = tc_phy_is_owned(tc); 1034 683 1035 - if (!tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned)) { 1036 - mode = get_tc_mode_in_phy_not_owned_state(dig_port, live_mode); 684 + if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) { 685 + mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode); 1037 686 } else { 1038 687 drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT); 1039 - mode = get_tc_mode_in_phy_owned_state(dig_port, live_mode); 688 + mode = get_tc_mode_in_phy_owned_state(tc, live_mode); 1040 689 } 1041 690 1042 691 drm_dbg_kms(&i915->drm, 1043 692 "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n", 1044 - dig_port->tc_port_name, 693 + tc->port_name, 1045 694 tc_port_mode_name(mode), 1046 695 str_yes_no(phy_is_ready), 1047 696 str_yes_no(phy_is_owned), ··· 1050 699 return mode; 1051 700 } 1052 701 1053 - static enum tc_port_mode default_tc_mode(struct intel_digital_port *dig_port) 702 + static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc) 1054 703 { 1055 - if (dig_port->tc_legacy_port) 704 + if (tc->legacy_port) 1056 705 return TC_PORT_LEGACY; 1057 706 1058 707 return TC_PORT_TBT_ALT; 1059 708 } 1060 709 1061 710 static enum tc_port_mode 1062 - hpd_mask_to_target_mode(struct intel_digital_port *dig_port, u32 live_status_mask) 711 + hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask) 1063 712 { 1064 713 enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask); 1065 714 1066 715 if (mode != TC_PORT_DISCONNECTED) 1067 716 return mode; 1068 717 1069 - return default_tc_mode(dig_port); 718 + return default_tc_mode(tc); 1070 719 } 1071 720 1072 721 static enum tc_port_mode 
1073 - intel_tc_port_get_target_mode(struct intel_digital_port *dig_port) 722 + tc_phy_get_target_mode(struct intel_tc_port *tc) 1074 723 { 1075 - u32 live_status_mask = tc_port_live_status_mask(dig_port); 724 + u32 live_status_mask = tc_phy_hpd_live_status(tc); 1076 725 1077 - return hpd_mask_to_target_mode(dig_port, live_status_mask); 726 + return hpd_mask_to_target_mode(tc, live_status_mask); 1078 727 } 1079 728 1080 - static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, 729 + static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes) 730 + { 731 + struct drm_i915_private *i915 = tc_to_i915(tc); 732 + u32 live_status_mask = tc_phy_hpd_live_status(tc); 733 + bool connected; 734 + 735 + tc_port_fixup_legacy_flag(tc, live_status_mask); 736 + 737 + tc->mode = hpd_mask_to_target_mode(tc, live_status_mask); 738 + 739 + connected = tc->phy_ops->connect(tc, required_lanes); 740 + if (!connected && tc->mode != default_tc_mode(tc)) { 741 + tc->mode = default_tc_mode(tc); 742 + connected = tc->phy_ops->connect(tc, required_lanes); 743 + } 744 + 745 + drm_WARN_ON(&i915->drm, !connected); 746 + } 747 + 748 + static void tc_phy_disconnect(struct intel_tc_port *tc) 749 + { 750 + if (tc->mode != TC_PORT_DISCONNECTED) { 751 + tc->phy_ops->disconnect(tc); 752 + tc->mode = TC_PORT_DISCONNECTED; 753 + } 754 + } 755 + 756 + static void tc_phy_init(struct intel_tc_port *tc) 757 + { 758 + mutex_lock(&tc->lock); 759 + tc->phy_ops->init(tc); 760 + mutex_unlock(&tc->lock); 761 + } 762 + 763 + static void intel_tc_port_reset_mode(struct intel_tc_port *tc, 1081 764 int required_lanes, bool force_disconnect) 1082 765 { 1083 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 1084 - enum tc_port_mode old_tc_mode = dig_port->tc_mode; 766 + struct drm_i915_private *i915 = tc_to_i915(tc); 767 + struct intel_digital_port *dig_port = tc->dig_port; 768 + enum tc_port_mode old_tc_mode = tc->mode; 1085 769 1086 770 
intel_display_power_flush_work(i915); 1087 771 if (!intel_tc_cold_requires_aux_pw(dig_port)) { ··· 1128 742 drm_WARN_ON(&i915->drm, aux_powered); 1129 743 } 1130 744 1131 - icl_tc_phy_disconnect(dig_port); 745 + tc_phy_disconnect(tc); 1132 746 if (!force_disconnect) 1133 - icl_tc_phy_connect(dig_port, required_lanes); 747 + tc_phy_connect(tc, required_lanes); 1134 748 1135 749 drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n", 1136 - dig_port->tc_port_name, 750 + tc->port_name, 1137 751 tc_port_mode_name(old_tc_mode), 1138 - tc_port_mode_name(dig_port->tc_mode)); 752 + tc_port_mode_name(tc->mode)); 1139 753 } 1140 754 1141 - static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port) 755 + static bool intel_tc_port_needs_reset(struct intel_tc_port *tc) 1142 756 { 1143 - return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode; 757 + return tc_phy_get_target_mode(tc) != tc->mode; 1144 758 } 1145 759 1146 - static void intel_tc_port_update_mode(struct intel_digital_port *dig_port, 760 + static void intel_tc_port_update_mode(struct intel_tc_port *tc, 1147 761 int required_lanes, bool force_disconnect) 1148 762 { 1149 - enum intel_display_power_domain domain; 1150 - intel_wakeref_t wref; 1151 - bool needs_reset = force_disconnect; 1152 - 1153 - if (!needs_reset) { 1154 - /* Get power domain required to check the hotplug live status. */ 1155 - wref = tc_cold_block(dig_port, &domain); 1156 - needs_reset = intel_tc_port_needs_reset(dig_port); 1157 - tc_cold_unblock(dig_port, domain, wref); 1158 - } 1159 - 1160 - if (!needs_reset) 1161 - return; 1162 - 1163 - /* Get power domain required for resetting the mode. */ 1164 - wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain); 1165 - 1166 - intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect); 1167 - 1168 - /* Get power domain matching the new mode after reset. 
*/ 1169 - tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain, 1170 - fetch_and_zero(&dig_port->tc_lock_wakeref)); 1171 - if (dig_port->tc_mode != TC_PORT_DISCONNECTED) 1172 - dig_port->tc_lock_wakeref = tc_cold_block(dig_port, 1173 - &dig_port->tc_lock_power_domain); 1174 - 1175 - tc_cold_unblock(dig_port, domain, wref); 763 + if (force_disconnect || 764 + intel_tc_port_needs_reset(tc)) 765 + intel_tc_port_reset_mode(tc, required_lanes, force_disconnect); 1176 766 } 1177 767 1178 - static void __intel_tc_port_get_link(struct intel_digital_port *dig_port) 768 + static void __intel_tc_port_get_link(struct intel_tc_port *tc) 1179 769 { 1180 - dig_port->tc_link_refcount++; 770 + tc->link_refcount++; 1181 771 } 1182 772 1183 - static void __intel_tc_port_put_link(struct intel_digital_port *dig_port) 773 + static void __intel_tc_port_put_link(struct intel_tc_port *tc) 1184 774 { 1185 - dig_port->tc_link_refcount--; 775 + tc->link_refcount--; 1186 776 } 1187 777 1188 - static bool tc_port_is_enabled(struct intel_digital_port *dig_port) 778 + static bool tc_port_is_enabled(struct intel_tc_port *tc) 1189 779 { 1190 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 780 + struct drm_i915_private *i915 = tc_to_i915(tc); 781 + struct intel_digital_port *dig_port = tc->dig_port; 1191 782 1192 - assert_tc_port_power_enabled(dig_port); 783 + assert_tc_port_power_enabled(tc); 1193 784 1194 785 return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) & 1195 786 DDI_BUF_CTL_ENABLE; ··· 1182 819 void intel_tc_port_init_mode(struct intel_digital_port *dig_port) 1183 820 { 1184 821 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 1185 - intel_wakeref_t tc_cold_wref; 1186 - enum intel_display_power_domain domain; 822 + struct intel_tc_port *tc = to_tc_port(dig_port); 1187 823 bool update_mode = false; 1188 824 1189 - mutex_lock(&dig_port->tc_lock); 825 + mutex_lock(&tc->lock); 1190 826 1191 - drm_WARN_ON(&i915->drm, dig_port->tc_mode != 
TC_PORT_DISCONNECTED); 1192 - drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref); 1193 - drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount); 827 + drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); 828 + drm_WARN_ON(&i915->drm, tc->lock_wakeref); 829 + drm_WARN_ON(&i915->drm, tc->link_refcount); 1194 830 1195 - tc_cold_wref = tc_cold_block(dig_port, &domain); 1196 - 1197 - dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); 831 + tc_phy_get_hw_state(tc); 1198 832 /* 1199 833 * Save the initial mode for the state check in 1200 834 * intel_tc_port_sanitize_mode(). 1201 835 */ 1202 - dig_port->tc_init_mode = dig_port->tc_mode; 1203 - if (dig_port->tc_mode != TC_PORT_DISCONNECTED) 1204 - dig_port->tc_lock_wakeref = 1205 - tc_cold_block(dig_port, &dig_port->tc_lock_power_domain); 836 + tc->init_mode = tc->mode; 1206 837 1207 838 /* 1208 839 * The PHY needs to be connected for AUX to work during HW readout and ··· 1209 852 * cause a problem as the PHY ownership state is ignored by the 1210 853 * IOM/TCSS firmware (only display can own the PHY in that case). 1211 854 */ 1212 - if (!tc_port_is_enabled(dig_port)) { 855 + if (!tc_port_is_enabled(tc)) { 1213 856 update_mode = true; 1214 - } else if (dig_port->tc_mode == TC_PORT_DISCONNECTED) { 1215 - drm_WARN_ON(&i915->drm, !dig_port->tc_legacy_port); 857 + } else if (tc->mode == TC_PORT_DISCONNECTED) { 858 + drm_WARN_ON(&i915->drm, !tc->legacy_port); 1216 859 drm_err(&i915->drm, 1217 860 "Port %s: PHY disconnected on enabled port, connecting it\n", 1218 - dig_port->tc_port_name); 861 + tc->port_name); 1219 862 update_mode = true; 1220 863 } 1221 864 1222 865 if (update_mode) 1223 - intel_tc_port_update_mode(dig_port, 1, false); 866 + intel_tc_port_update_mode(tc, 1, false); 1224 867 1225 - /* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */ 1226 - __intel_tc_port_get_link(dig_port); 868 + /* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. 
*/ 869 + __intel_tc_port_get_link(tc); 1227 870 1228 - tc_cold_unblock(dig_port, domain, tc_cold_wref); 1229 - 1230 - mutex_unlock(&dig_port->tc_lock); 871 + mutex_unlock(&tc->lock); 1231 872 } 1232 873 1233 - static bool tc_port_has_active_links(struct intel_digital_port *dig_port, 874 + static bool tc_port_has_active_links(struct intel_tc_port *tc, 1234 875 const struct intel_crtc_state *crtc_state) 1235 876 { 1236 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 877 + struct drm_i915_private *i915 = tc_to_i915(tc); 878 + struct intel_digital_port *dig_port = tc->dig_port; 1237 879 enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT; 1238 880 int active_links = 0; 1239 881 ··· 1244 888 active_links = 1; 1245 889 } 1246 890 1247 - if (active_links && !tc_phy_is_connected(dig_port, pll_type)) 891 + if (active_links && !tc_phy_is_connected(tc, pll_type)) 1248 892 drm_err(&i915->drm, 1249 893 "Port %s: PHY disconnected with %d active link(s)\n", 1250 - dig_port->tc_port_name, active_links); 894 + tc->port_name, active_links); 1251 895 1252 896 return active_links; 1253 897 } ··· 1268 912 const struct intel_crtc_state *crtc_state) 1269 913 { 1270 914 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 915 + struct intel_tc_port *tc = to_tc_port(dig_port); 1271 916 1272 - mutex_lock(&dig_port->tc_lock); 917 + mutex_lock(&tc->lock); 1273 918 1274 - drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1); 1275 - if (!tc_port_has_active_links(dig_port, crtc_state)) { 919 + drm_WARN_ON(&i915->drm, tc->link_refcount != 1); 920 + if (!tc_port_has_active_links(tc, crtc_state)) { 1276 921 /* 1277 922 * TBT-alt is the default mode in any case the PHY ownership is not 1278 923 * held (regardless of the sink's connected live state), so 1279 924 * we'll just switch to disconnected mode from it here without 1280 925 * a note. 
1281 926 */ 1282 - if (dig_port->tc_init_mode != TC_PORT_TBT_ALT && 1283 - dig_port->tc_init_mode != TC_PORT_DISCONNECTED) 927 + if (tc->init_mode != TC_PORT_TBT_ALT && 928 + tc->init_mode != TC_PORT_DISCONNECTED) 1284 929 drm_dbg_kms(&i915->drm, 1285 930 "Port %s: PHY left in %s mode on disabled port, disconnecting it\n", 1286 - dig_port->tc_port_name, 1287 - tc_port_mode_name(dig_port->tc_init_mode)); 1288 - icl_tc_phy_disconnect(dig_port); 1289 - __intel_tc_port_put_link(dig_port); 1290 - 1291 - tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain, 1292 - fetch_and_zero(&dig_port->tc_lock_wakeref)); 931 + tc->port_name, 932 + tc_port_mode_name(tc->init_mode)); 933 + tc_phy_disconnect(tc); 934 + __intel_tc_port_put_link(tc); 1293 935 } 1294 936 1295 937 drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n", 1296 - dig_port->tc_port_name, 1297 - tc_port_mode_name(dig_port->tc_mode)); 938 + tc->port_name, 939 + tc_port_mode_name(tc->mode)); 1298 940 1299 - mutex_unlock(&dig_port->tc_lock); 941 + mutex_unlock(&tc->lock); 1300 942 } 1301 943 1302 944 /* ··· 1311 957 { 1312 958 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 1313 959 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 960 + struct intel_tc_port *tc = to_tc_port(dig_port); 961 + u32 mask = ~0; 1314 962 1315 963 drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port)); 1316 964 1317 - return tc_port_live_status_mask(dig_port) & BIT(dig_port->tc_mode); 965 + if (tc->mode != TC_PORT_DISCONNECTED) 966 + mask = BIT(tc->mode); 967 + 968 + return tc_phy_hpd_live_status(tc) & mask; 1318 969 } 1319 970 1320 971 bool intel_tc_port_connected(struct intel_encoder *encoder) 1321 972 { 1322 973 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 974 + struct intel_tc_port *tc = to_tc_port(dig_port); 1323 975 bool is_connected; 1324 976 1325 - intel_tc_port_lock(dig_port); 977 + mutex_lock(&tc->lock); 1326 978 is_connected = intel_tc_port_connected_locked(encoder); 
1327 - intel_tc_port_unlock(dig_port); 979 + mutex_unlock(&tc->lock); 1328 980 1329 981 return is_connected; 1330 982 } 1331 983 1332 - static void __intel_tc_port_lock(struct intel_digital_port *dig_port, 984 + static void __intel_tc_port_lock(struct intel_tc_port *tc, 1333 985 int required_lanes) 1334 986 { 1335 - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 987 + struct drm_i915_private *i915 = tc_to_i915(tc); 1336 988 1337 - mutex_lock(&dig_port->tc_lock); 989 + mutex_lock(&tc->lock); 1338 990 1339 - cancel_delayed_work(&dig_port->tc_disconnect_phy_work); 991 + cancel_delayed_work(&tc->disconnect_phy_work); 1340 992 1341 - if (!dig_port->tc_link_refcount) 1342 - intel_tc_port_update_mode(dig_port, required_lanes, 993 + if (!tc->link_refcount) 994 + intel_tc_port_update_mode(tc, required_lanes, 1343 995 false); 1344 996 1345 - drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED); 1346 - drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT && 1347 - !tc_phy_is_owned(dig_port)); 997 + drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED); 998 + drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT && 999 + !tc_phy_is_owned(tc)); 1348 1000 } 1349 1001 1350 1002 void intel_tc_port_lock(struct intel_digital_port *dig_port) 1351 1003 { 1352 - __intel_tc_port_lock(dig_port, 1); 1004 + __intel_tc_port_lock(to_tc_port(dig_port), 1); 1353 1005 } 1354 1006 1355 - /** 1356 - * intel_tc_port_disconnect_phy_work: disconnect TypeC PHY from display port 1357 - * @dig_port: digital port 1358 - * 1007 + /* 1359 1008 * Disconnect the given digital port from its TypeC PHY (handing back the 1360 1009 * control of the PHY to the TypeC subsystem). This will happen in a delayed 1361 1010 * manner after each aux transactions and modeset disables. 
1362 1011 */ 1363 1012 static void intel_tc_port_disconnect_phy_work(struct work_struct *work) 1364 1013 { 1365 - struct intel_digital_port *dig_port = 1366 - container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work); 1014 + struct intel_tc_port *tc = 1015 + container_of(work, struct intel_tc_port, disconnect_phy_work.work); 1367 1016 1368 - mutex_lock(&dig_port->tc_lock); 1017 + mutex_lock(&tc->lock); 1369 1018 1370 - if (!dig_port->tc_link_refcount) 1371 - intel_tc_port_update_mode(dig_port, 1, true); 1019 + if (!tc->link_refcount) 1020 + intel_tc_port_update_mode(tc, 1, true); 1372 1021 1373 - mutex_unlock(&dig_port->tc_lock); 1022 + mutex_unlock(&tc->lock); 1374 1023 } 1375 1024 1376 1025 /** ··· 1384 1027 */ 1385 1028 void intel_tc_port_flush_work(struct intel_digital_port *dig_port) 1386 1029 { 1387 - flush_delayed_work(&dig_port->tc_disconnect_phy_work); 1030 + flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work); 1388 1031 } 1389 1032 1390 1033 void intel_tc_port_unlock(struct intel_digital_port *dig_port) 1391 1034 { 1392 - if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED) 1393 - queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work, 1035 + struct intel_tc_port *tc = to_tc_port(dig_port); 1036 + 1037 + if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED) 1038 + queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work, 1394 1039 msecs_to_jiffies(1000)); 1395 1040 1396 - mutex_unlock(&dig_port->tc_lock); 1041 + mutex_unlock(&tc->lock); 1397 1042 } 1398 1043 1399 1044 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port) 1400 1045 { 1401 - return mutex_is_locked(&dig_port->tc_lock) || 1402 - dig_port->tc_link_refcount; 1046 + struct intel_tc_port *tc = to_tc_port(dig_port); 1047 + 1048 + return mutex_is_locked(&tc->lock) || 1049 + tc->link_refcount; 1403 1050 } 1404 1051 1405 1052 void intel_tc_port_get_link(struct intel_digital_port *dig_port, 1406 1053 int 
required_lanes) 1407 1054 { 1408 - __intel_tc_port_lock(dig_port, required_lanes); 1409 - __intel_tc_port_get_link(dig_port); 1055 + struct intel_tc_port *tc = to_tc_port(dig_port); 1056 + 1057 + __intel_tc_port_lock(tc, required_lanes); 1058 + __intel_tc_port_get_link(tc); 1410 1059 intel_tc_port_unlock(dig_port); 1411 1060 } 1412 1061 1413 1062 void intel_tc_port_put_link(struct intel_digital_port *dig_port) 1414 1063 { 1064 + struct intel_tc_port *tc = to_tc_port(dig_port); 1065 + 1415 1066 intel_tc_port_lock(dig_port); 1416 - __intel_tc_port_put_link(dig_port); 1067 + __intel_tc_port_put_link(tc); 1417 1068 intel_tc_port_unlock(dig_port); 1418 - 1419 - /* 1420 - * Disconnecting the PHY after the PHY's PLL gets disabled may 1421 - * hang the system on ADL-P, so disconnect the PHY here synchronously. 1422 - * TODO: remove this once the root cause of the ordering requirement 1423 - * is found/fixed. 1424 - */ 1425 - intel_tc_port_flush_work(dig_port); 1426 1069 } 1427 1070 1428 - static bool 1429 - tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port) 1430 - { 1431 - enum intel_display_power_domain domain; 1432 - intel_wakeref_t wakeref; 1433 - u32 val; 1434 - 1435 - if (!INTEL_INFO(i915)->display.has_modular_fia) 1436 - return false; 1437 - 1438 - mutex_lock(&dig_port->tc_lock); 1439 - wakeref = tc_cold_block(dig_port, &domain); 1440 - val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1)); 1441 - tc_cold_unblock(dig_port, domain, wakeref); 1442 - mutex_unlock(&dig_port->tc_lock); 1443 - 1444 - drm_WARN_ON(&i915->drm, val == 0xffffffff); 1445 - 1446 - return val & MODULAR_FIA_MASK; 1447 - } 1448 - 1449 - static void 1450 - tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port) 1451 - { 1452 - enum port port = dig_port->base.port; 1453 - enum tc_port tc_port = intel_port_to_tc(i915, port); 1454 - 1455 - /* 1456 - * Each Modular FIA instance houses 2 TC ports. 
In SOC that has more 1457 - * than two TC ports, there are multiple instances of Modular FIA. 1458 - */ 1459 - if (tc_has_modular_fia(i915, dig_port)) { 1460 - dig_port->tc_phy_fia = tc_port / 2; 1461 - dig_port->tc_phy_fia_idx = tc_port % 2; 1462 - } else { 1463 - dig_port->tc_phy_fia = FIA1; 1464 - dig_port->tc_phy_fia_idx = tc_port; 1465 - } 1466 - } 1467 - 1468 - void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) 1071 + int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) 1469 1072 { 1470 1073 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 1074 + struct intel_tc_port *tc; 1471 1075 enum port port = dig_port->base.port; 1472 1076 enum tc_port tc_port = intel_port_to_tc(i915, port); 1473 1077 1474 1078 if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE)) 1475 - return; 1079 + return -EINVAL; 1476 1080 1477 - snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name), 1081 + tc = kzalloc(sizeof(*tc), GFP_KERNEL); 1082 + if (!tc) 1083 + return -ENOMEM; 1084 + 1085 + dig_port->tc = tc; 1086 + tc->dig_port = dig_port; 1087 + 1088 + if (DISPLAY_VER(i915) >= 13) 1089 + tc->phy_ops = &adlp_tc_phy_ops; 1090 + else if (DISPLAY_VER(i915) >= 12) 1091 + tc->phy_ops = &tgl_tc_phy_ops; 1092 + else 1093 + tc->phy_ops = &icl_tc_phy_ops; 1094 + 1095 + snprintf(tc->port_name, sizeof(tc->port_name), 1478 1096 "%c/TC#%d", port_name(port), tc_port + 1); 1479 1097 1480 - mutex_init(&dig_port->tc_lock); 1481 - INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work); 1482 - dig_port->tc_legacy_port = is_legacy; 1483 - dig_port->tc_mode = TC_PORT_DISCONNECTED; 1484 - dig_port->tc_link_refcount = 0; 1485 - tc_port_load_fia_params(i915, dig_port); 1098 + mutex_init(&tc->lock); 1099 + INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work); 1100 + tc->legacy_port = is_legacy; 1101 + tc->mode = TC_PORT_DISCONNECTED; 1102 + tc->link_refcount = 0; 1103 + 1104 + 
tc_phy_init(tc); 1486 1105 1487 1106 intel_tc_port_init_mode(dig_port); 1107 + 1108 + return 0; 1109 + } 1110 + 1111 + void intel_tc_port_cleanup(struct intel_digital_port *dig_port) 1112 + { 1113 + intel_tc_port_flush_work(dig_port); 1114 + 1115 + kfree(dig_port->tc); 1116 + dig_port->tc = NULL; 1488 1117 }
+2 -2
drivers/gpu/drm/i915/display/intel_tc.h
··· 6 6 #ifndef __INTEL_TC_H__ 7 7 #define __INTEL_TC_H__ 8 8 9 - #include <linux/mutex.h> 10 9 #include <linux/types.h> 11 10 12 11 struct intel_crtc_state; ··· 36 37 void intel_tc_port_put_link(struct intel_digital_port *dig_port); 37 38 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port); 38 39 39 - void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); 40 + int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); 41 + void intel_tc_port_cleanup(struct intel_digital_port *dig_port); 40 42 41 43 bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port); 42 44
+2 -1
drivers/gpu/drm/i915/display/intel_tv.c
··· 35 35 #include <drm/drm_edid.h> 36 36 37 37 #include "i915_drv.h" 38 - #include "i915_reg.h" 39 38 #include "i915_irq.h" 39 + #include "i915_reg.h" 40 40 #include "intel_connector.h" 41 41 #include "intel_crtc.h" 42 42 #include "intel_de.h" ··· 44 44 #include "intel_dpll.h" 45 45 #include "intel_hotplug.h" 46 46 #include "intel_tv.h" 47 + #include "intel_tv_regs.h" 47 48 48 49 enum tv_margin { 49 50 TV_MARGIN_LEFT, TV_MARGIN_TOP,
+490
drivers/gpu/drm/i915/display/intel_tv_regs.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_TV_REGS_H__ 7 + #define __INTEL_TV_REGS_H__ 8 + 9 + #include "intel_display_reg_defs.h" 10 + 11 + /* TV port control */ 12 + #define TV_CTL _MMIO(0x68000) 13 + /* Enables the TV encoder */ 14 + # define TV_ENC_ENABLE (1 << 31) 15 + /* Sources the TV encoder input from pipe B instead of A. */ 16 + # define TV_ENC_PIPE_SEL_SHIFT 30 17 + # define TV_ENC_PIPE_SEL_MASK (1 << 30) 18 + # define TV_ENC_PIPE_SEL(pipe) ((pipe) << 30) 19 + /* Outputs composite video (DAC A only) */ 20 + # define TV_ENC_OUTPUT_COMPOSITE (0 << 28) 21 + /* Outputs SVideo video (DAC B/C) */ 22 + # define TV_ENC_OUTPUT_SVIDEO (1 << 28) 23 + /* Outputs Component video (DAC A/B/C) */ 24 + # define TV_ENC_OUTPUT_COMPONENT (2 << 28) 25 + /* Outputs Composite and SVideo (DAC A/B/C) */ 26 + # define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) 27 + # define TV_TRILEVEL_SYNC (1 << 21) 28 + /* Enables slow sync generation (945GM only) */ 29 + # define TV_SLOW_SYNC (1 << 20) 30 + /* Selects 4x oversampling for 480i and 576p */ 31 + # define TV_OVERSAMPLE_4X (0 << 18) 32 + /* Selects 2x oversampling for 720p and 1080i */ 33 + # define TV_OVERSAMPLE_2X (1 << 18) 34 + /* Selects no oversampling for 1080p */ 35 + # define TV_OVERSAMPLE_NONE (2 << 18) 36 + /* Selects 8x oversampling */ 37 + # define TV_OVERSAMPLE_8X (3 << 18) 38 + # define TV_OVERSAMPLE_MASK (3 << 18) 39 + /* Selects progressive mode rather than interlaced */ 40 + # define TV_PROGRESSIVE (1 << 17) 41 + /* Sets the colorburst to PAL mode. Required for non-M PAL modes. */ 42 + # define TV_PAL_BURST (1 << 16) 43 + /* Field for setting delay of Y compared to C */ 44 + # define TV_YC_SKEW_MASK (7 << 12) 45 + /* Enables a fix for 480p/576p standard definition modes on the 915GM only */ 46 + # define TV_ENC_SDP_FIX (1 << 11) 47 + /* 48 + * Enables a fix for the 915GM only. 49 + * 50 + * Not sure what it does. 
51 + */ 52 + # define TV_ENC_C0_FIX (1 << 10) 53 + /* Bits that must be preserved by software */ 54 + # define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf) 55 + # define TV_FUSE_STATE_MASK (3 << 4) 56 + /* Read-only state that reports all features enabled */ 57 + # define TV_FUSE_STATE_ENABLED (0 << 4) 58 + /* Read-only state that reports that Macrovision is disabled in hardware*/ 59 + # define TV_FUSE_STATE_NO_MACROVISION (1 << 4) 60 + /* Read-only state that reports that TV-out is disabled in hardware. */ 61 + # define TV_FUSE_STATE_DISABLED (2 << 4) 62 + /* Normal operation */ 63 + # define TV_TEST_MODE_NORMAL (0 << 0) 64 + /* Encoder test pattern 1 - combo pattern */ 65 + # define TV_TEST_MODE_PATTERN_1 (1 << 0) 66 + /* Encoder test pattern 2 - full screen vertical 75% color bars */ 67 + # define TV_TEST_MODE_PATTERN_2 (2 << 0) 68 + /* Encoder test pattern 3 - full screen horizontal 75% color bars */ 69 + # define TV_TEST_MODE_PATTERN_3 (3 << 0) 70 + /* Encoder test pattern 4 - random noise */ 71 + # define TV_TEST_MODE_PATTERN_4 (4 << 0) 72 + /* Encoder test pattern 5 - linear color ramps */ 73 + # define TV_TEST_MODE_PATTERN_5 (5 << 0) 74 + /* 75 + * This test mode forces the DACs to 50% of full output. 76 + * 77 + * This is used for load detection in combination with TVDAC_SENSE_MASK 78 + */ 79 + # define TV_TEST_MODE_MONITOR_DETECT (7 << 0) 80 + # define TV_TEST_MODE_MASK (7 << 0) 81 + 82 + #define TV_DAC _MMIO(0x68004) 83 + # define TV_DAC_SAVE 0x00ffff00 84 + /* 85 + * Reports that DAC state change logic has reported change (RO). 
86 + * 87 + * This gets cleared when TV_DAC_STATE_EN is cleared 88 + */ 89 + # define TVDAC_STATE_CHG (1 << 31) 90 + # define TVDAC_SENSE_MASK (7 << 28) 91 + /* Reports that DAC A voltage is above the detect threshold */ 92 + # define TVDAC_A_SENSE (1 << 30) 93 + /* Reports that DAC B voltage is above the detect threshold */ 94 + # define TVDAC_B_SENSE (1 << 29) 95 + /* Reports that DAC C voltage is above the detect threshold */ 96 + # define TVDAC_C_SENSE (1 << 28) 97 + /* 98 + * Enables DAC state detection logic, for load-based TV detection. 99 + * 100 + * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set 101 + * to off, for load detection to work. 102 + */ 103 + # define TVDAC_STATE_CHG_EN (1 << 27) 104 + /* Sets the DAC A sense value to high */ 105 + # define TVDAC_A_SENSE_CTL (1 << 26) 106 + /* Sets the DAC B sense value to high */ 107 + # define TVDAC_B_SENSE_CTL (1 << 25) 108 + /* Sets the DAC C sense value to high */ 109 + # define TVDAC_C_SENSE_CTL (1 << 24) 110 + /* Overrides the ENC_ENABLE and DAC voltage levels */ 111 + # define DAC_CTL_OVERRIDE (1 << 7) 112 + /* Sets the slew rate. Must be preserved in software */ 113 + # define ENC_TVDAC_SLEW_FAST (1 << 6) 114 + # define DAC_A_1_3_V (0 << 4) 115 + # define DAC_A_1_1_V (1 << 4) 116 + # define DAC_A_0_7_V (2 << 4) 117 + # define DAC_A_MASK (3 << 4) 118 + # define DAC_B_1_3_V (0 << 2) 119 + # define DAC_B_1_1_V (1 << 2) 120 + # define DAC_B_0_7_V (2 << 2) 121 + # define DAC_B_MASK (3 << 2) 122 + # define DAC_C_1_3_V (0 << 0) 123 + # define DAC_C_1_1_V (1 << 0) 124 + # define DAC_C_0_7_V (2 << 0) 125 + # define DAC_C_MASK (3 << 0) 126 + 127 + /* 128 + * CSC coefficients are stored in a floating point format with 9 bits of 129 + * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, 130 + * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with 131 + * -1 (0x3) being the only legal negative value. 
132 + */ 133 + #define TV_CSC_Y _MMIO(0x68010) 134 + # define TV_RY_MASK 0x07ff0000 135 + # define TV_RY_SHIFT 16 136 + # define TV_GY_MASK 0x00000fff 137 + # define TV_GY_SHIFT 0 138 + 139 + #define TV_CSC_Y2 _MMIO(0x68014) 140 + # define TV_BY_MASK 0x07ff0000 141 + # define TV_BY_SHIFT 16 142 + /* 143 + * Y attenuation for component video. 144 + * 145 + * Stored in 1.9 fixed point. 146 + */ 147 + # define TV_AY_MASK 0x000003ff 148 + # define TV_AY_SHIFT 0 149 + 150 + #define TV_CSC_U _MMIO(0x68018) 151 + # define TV_RU_MASK 0x07ff0000 152 + # define TV_RU_SHIFT 16 153 + # define TV_GU_MASK 0x000007ff 154 + # define TV_GU_SHIFT 0 155 + 156 + #define TV_CSC_U2 _MMIO(0x6801c) 157 + # define TV_BU_MASK 0x07ff0000 158 + # define TV_BU_SHIFT 16 159 + /* 160 + * U attenuation for component video. 161 + * 162 + * Stored in 1.9 fixed point. 163 + */ 164 + # define TV_AU_MASK 0x000003ff 165 + # define TV_AU_SHIFT 0 166 + 167 + #define TV_CSC_V _MMIO(0x68020) 168 + # define TV_RV_MASK 0x0fff0000 169 + # define TV_RV_SHIFT 16 170 + # define TV_GV_MASK 0x000007ff 171 + # define TV_GV_SHIFT 0 172 + 173 + #define TV_CSC_V2 _MMIO(0x68024) 174 + # define TV_BV_MASK 0x07ff0000 175 + # define TV_BV_SHIFT 16 176 + /* 177 + * V attenuation for component video. 178 + * 179 + * Stored in 1.9 fixed point. 
180 + */ 181 + # define TV_AV_MASK 0x000007ff 182 + # define TV_AV_SHIFT 0 183 + 184 + #define TV_CLR_KNOBS _MMIO(0x68028) 185 + /* 2s-complement brightness adjustment */ 186 + # define TV_BRIGHTNESS_MASK 0xff000000 187 + # define TV_BRIGHTNESS_SHIFT 24 188 + /* Contrast adjustment, as a 2.6 unsigned floating point number */ 189 + # define TV_CONTRAST_MASK 0x00ff0000 190 + # define TV_CONTRAST_SHIFT 16 191 + /* Saturation adjustment, as a 2.6 unsigned floating point number */ 192 + # define TV_SATURATION_MASK 0x0000ff00 193 + # define TV_SATURATION_SHIFT 8 194 + /* Hue adjustment, as an integer phase angle in degrees */ 195 + # define TV_HUE_MASK 0x000000ff 196 + # define TV_HUE_SHIFT 0 197 + 198 + #define TV_CLR_LEVEL _MMIO(0x6802c) 199 + /* Controls the DAC level for black */ 200 + # define TV_BLACK_LEVEL_MASK 0x01ff0000 201 + # define TV_BLACK_LEVEL_SHIFT 16 202 + /* Controls the DAC level for blanking */ 203 + # define TV_BLANK_LEVEL_MASK 0x000001ff 204 + # define TV_BLANK_LEVEL_SHIFT 0 205 + 206 + #define TV_H_CTL_1 _MMIO(0x68030) 207 + /* Number of pixels in the hsync. */ 208 + # define TV_HSYNC_END_MASK 0x1fff0000 209 + # define TV_HSYNC_END_SHIFT 16 210 + /* Total number of pixels minus one in the line (display and blanking). */ 211 + # define TV_HTOTAL_MASK 0x00001fff 212 + # define TV_HTOTAL_SHIFT 0 213 + 214 + #define TV_H_CTL_2 _MMIO(0x68034) 215 + /* Enables the colorburst (needed for non-component color) */ 216 + # define TV_BURST_ENA (1 << 31) 217 + /* Offset of the colorburst from the start of hsync, in pixels minus one. 
*/ 218 + # define TV_HBURST_START_SHIFT 16 219 + # define TV_HBURST_START_MASK 0x1fff0000 220 + /* Length of the colorburst */ 221 + # define TV_HBURST_LEN_SHIFT 0 222 + # define TV_HBURST_LEN_MASK 0x0001fff 223 + 224 + #define TV_H_CTL_3 _MMIO(0x68038) 225 + /* End of hblank, measured in pixels minus one from start of hsync */ 226 + # define TV_HBLANK_END_SHIFT 16 227 + # define TV_HBLANK_END_MASK 0x1fff0000 228 + /* Start of hblank, measured in pixels minus one from start of hsync */ 229 + # define TV_HBLANK_START_SHIFT 0 230 + # define TV_HBLANK_START_MASK 0x0001fff 231 + 232 + #define TV_V_CTL_1 _MMIO(0x6803c) 233 + /* XXX */ 234 + # define TV_NBR_END_SHIFT 16 235 + # define TV_NBR_END_MASK 0x07ff0000 236 + /* XXX */ 237 + # define TV_VI_END_F1_SHIFT 8 238 + # define TV_VI_END_F1_MASK 0x00003f00 239 + /* XXX */ 240 + # define TV_VI_END_F2_SHIFT 0 241 + # define TV_VI_END_F2_MASK 0x0000003f 242 + 243 + #define TV_V_CTL_2 _MMIO(0x68040) 244 + /* Length of vsync, in half lines */ 245 + # define TV_VSYNC_LEN_MASK 0x07ff0000 246 + # define TV_VSYNC_LEN_SHIFT 16 247 + /* Offset of the start of vsync in field 1, measured in one less than the 248 + * number of half lines. 249 + */ 250 + # define TV_VSYNC_START_F1_MASK 0x00007f00 251 + # define TV_VSYNC_START_F1_SHIFT 8 252 + /* 253 + * Offset of the start of vsync in field 2, measured in one less than the 254 + * number of half lines. 255 + */ 256 + # define TV_VSYNC_START_F2_MASK 0x0000007f 257 + # define TV_VSYNC_START_F2_SHIFT 0 258 + 259 + #define TV_V_CTL_3 _MMIO(0x68044) 260 + /* Enables generation of the equalization signal */ 261 + # define TV_EQUAL_ENA (1 << 31) 262 + /* Length of vsync, in half lines */ 263 + # define TV_VEQ_LEN_MASK 0x007f0000 264 + # define TV_VEQ_LEN_SHIFT 16 265 + /* Offset of the start of equalization in field 1, measured in one less than 266 + * the number of half lines. 
267 + */ 268 + # define TV_VEQ_START_F1_MASK 0x0007f00 269 + # define TV_VEQ_START_F1_SHIFT 8 270 + /* 271 + * Offset of the start of equalization in field 2, measured in one less than 272 + * the number of half lines. 273 + */ 274 + # define TV_VEQ_START_F2_MASK 0x000007f 275 + # define TV_VEQ_START_F2_SHIFT 0 276 + 277 + #define TV_V_CTL_4 _MMIO(0x68048) 278 + /* 279 + * Offset to start of vertical colorburst, measured in one less than the 280 + * number of lines from vertical start. 281 + */ 282 + # define TV_VBURST_START_F1_MASK 0x003f0000 283 + # define TV_VBURST_START_F1_SHIFT 16 284 + /* 285 + * Offset to the end of vertical colorburst, measured in one less than the 286 + * number of lines from the start of NBR. 287 + */ 288 + # define TV_VBURST_END_F1_MASK 0x000000ff 289 + # define TV_VBURST_END_F1_SHIFT 0 290 + 291 + #define TV_V_CTL_5 _MMIO(0x6804c) 292 + /* 293 + * Offset to start of vertical colorburst, measured in one less than the 294 + * number of lines from vertical start. 295 + */ 296 + # define TV_VBURST_START_F2_MASK 0x003f0000 297 + # define TV_VBURST_START_F2_SHIFT 16 298 + /* 299 + * Offset to the end of vertical colorburst, measured in one less than the 300 + * number of lines from the start of NBR. 301 + */ 302 + # define TV_VBURST_END_F2_MASK 0x000000ff 303 + # define TV_VBURST_END_F2_SHIFT 0 304 + 305 + #define TV_V_CTL_6 _MMIO(0x68050) 306 + /* 307 + * Offset to start of vertical colorburst, measured in one less than the 308 + * number of lines from vertical start. 309 + */ 310 + # define TV_VBURST_START_F3_MASK 0x003f0000 311 + # define TV_VBURST_START_F3_SHIFT 16 312 + /* 313 + * Offset to the end of vertical colorburst, measured in one less than the 314 + * number of lines from the start of NBR. 
315 + */ 316 + # define TV_VBURST_END_F3_MASK 0x000000ff 317 + # define TV_VBURST_END_F3_SHIFT 0 318 + 319 + #define TV_V_CTL_7 _MMIO(0x68054) 320 + /* 321 + * Offset to start of vertical colorburst, measured in one less than the 322 + * number of lines from vertical start. 323 + */ 324 + # define TV_VBURST_START_F4_MASK 0x003f0000 325 + # define TV_VBURST_START_F4_SHIFT 16 326 + /* 327 + * Offset to the end of vertical colorburst, measured in one less than the 328 + * number of lines from the start of NBR. 329 + */ 330 + # define TV_VBURST_END_F4_MASK 0x000000ff 331 + # define TV_VBURST_END_F4_SHIFT 0 332 + 333 + #define TV_SC_CTL_1 _MMIO(0x68060) 334 + /* Turns on the first subcarrier phase generation DDA */ 335 + # define TV_SC_DDA1_EN (1 << 31) 336 + /* Turns on the first subcarrier phase generation DDA */ 337 + # define TV_SC_DDA2_EN (1 << 30) 338 + /* Turns on the first subcarrier phase generation DDA */ 339 + # define TV_SC_DDA3_EN (1 << 29) 340 + /* Sets the subcarrier DDA to reset frequency every other field */ 341 + # define TV_SC_RESET_EVERY_2 (0 << 24) 342 + /* Sets the subcarrier DDA to reset frequency every fourth field */ 343 + # define TV_SC_RESET_EVERY_4 (1 << 24) 344 + /* Sets the subcarrier DDA to reset frequency every eighth field */ 345 + # define TV_SC_RESET_EVERY_8 (2 << 24) 346 + /* Sets the subcarrier DDA to never reset the frequency */ 347 + # define TV_SC_RESET_NEVER (3 << 24) 348 + /* Sets the peak amplitude of the colorburst.*/ 349 + # define TV_BURST_LEVEL_MASK 0x00ff0000 350 + # define TV_BURST_LEVEL_SHIFT 16 351 + /* Sets the increment of the first subcarrier phase generation DDA */ 352 + # define TV_SCDDA1_INC_MASK 0x00000fff 353 + # define TV_SCDDA1_INC_SHIFT 0 354 + 355 + #define TV_SC_CTL_2 _MMIO(0x68064) 356 + /* Sets the rollover for the second subcarrier phase generation DDA */ 357 + # define TV_SCDDA2_SIZE_MASK 0x7fff0000 358 + # define TV_SCDDA2_SIZE_SHIFT 16 359 + /* Sets the increent of the second subcarrier phase 
generation DDA */ 360 + # define TV_SCDDA2_INC_MASK 0x00007fff 361 + # define TV_SCDDA2_INC_SHIFT 0 362 + 363 + #define TV_SC_CTL_3 _MMIO(0x68068) 364 + /* Sets the rollover for the third subcarrier phase generation DDA */ 365 + # define TV_SCDDA3_SIZE_MASK 0x7fff0000 366 + # define TV_SCDDA3_SIZE_SHIFT 16 367 + /* Sets the increent of the third subcarrier phase generation DDA */ 368 + # define TV_SCDDA3_INC_MASK 0x00007fff 369 + # define TV_SCDDA3_INC_SHIFT 0 370 + 371 + #define TV_WIN_POS _MMIO(0x68070) 372 + /* X coordinate of the display from the start of horizontal active */ 373 + # define TV_XPOS_MASK 0x1fff0000 374 + # define TV_XPOS_SHIFT 16 375 + /* Y coordinate of the display from the start of vertical active (NBR) */ 376 + # define TV_YPOS_MASK 0x00000fff 377 + # define TV_YPOS_SHIFT 0 378 + 379 + #define TV_WIN_SIZE _MMIO(0x68074) 380 + /* Horizontal size of the display window, measured in pixels*/ 381 + # define TV_XSIZE_MASK 0x1fff0000 382 + # define TV_XSIZE_SHIFT 16 383 + /* 384 + * Vertical size of the display window, measured in pixels. 385 + * 386 + * Must be even for interlaced modes. 387 + */ 388 + # define TV_YSIZE_MASK 0x00000fff 389 + # define TV_YSIZE_SHIFT 0 390 + 391 + #define TV_FILTER_CTL_1 _MMIO(0x68080) 392 + /* 393 + * Enables automatic scaling calculation. 394 + * 395 + * If set, the rest of the registers are ignored, and the calculated values can 396 + * be read back from the register. 397 + */ 398 + # define TV_AUTO_SCALE (1 << 31) 399 + /* 400 + * Disables the vertical filter. 
401 + * 402 + * This is required on modes more than 1024 pixels wide */ 403 + # define TV_V_FILTER_BYPASS (1 << 29) 404 + /* Enables adaptive vertical filtering */ 405 + # define TV_VADAPT (1 << 28) 406 + # define TV_VADAPT_MODE_MASK (3 << 26) 407 + /* Selects the least adaptive vertical filtering mode */ 408 + # define TV_VADAPT_MODE_LEAST (0 << 26) 409 + /* Selects the moderately adaptive vertical filtering mode */ 410 + # define TV_VADAPT_MODE_MODERATE (1 << 26) 411 + /* Selects the most adaptive vertical filtering mode */ 412 + # define TV_VADAPT_MODE_MOST (3 << 26) 413 + /* 414 + * Sets the horizontal scaling factor. 415 + * 416 + * This should be the fractional part of the horizontal scaling factor divided 417 + * by the oversampling rate. TV_HSCALE should be less than 1, and set to: 418 + * 419 + * (src width - 1) / ((oversample * dest width) - 1) 420 + */ 421 + # define TV_HSCALE_FRAC_MASK 0x00003fff 422 + # define TV_HSCALE_FRAC_SHIFT 0 423 + 424 + #define TV_FILTER_CTL_2 _MMIO(0x68084) 425 + /* 426 + * Sets the integer part of the 3.15 fixed-point vertical scaling factor. 427 + * 428 + * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) 429 + */ 430 + # define TV_VSCALE_INT_MASK 0x00038000 431 + # define TV_VSCALE_INT_SHIFT 15 432 + /* 433 + * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. 434 + * 435 + * \sa TV_VSCALE_INT_MASK 436 + */ 437 + # define TV_VSCALE_FRAC_MASK 0x00007fff 438 + # define TV_VSCALE_FRAC_SHIFT 0 439 + 440 + #define TV_FILTER_CTL_3 _MMIO(0x68088) 441 + /* 442 + * Sets the integer part of the 3.15 fixed-point vertical scaling factor. 443 + * 444 + * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) 445 + * 446 + * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. 447 + */ 448 + # define TV_VSCALE_IP_INT_MASK 0x00038000 449 + # define TV_VSCALE_IP_INT_SHIFT 15 450 + /* 451 + * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. 
452 + * 453 + * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. 454 + * 455 + * \sa TV_VSCALE_IP_INT_MASK 456 + */ 457 + # define TV_VSCALE_IP_FRAC_MASK 0x00007fff 458 + # define TV_VSCALE_IP_FRAC_SHIFT 0 459 + 460 + #define TV_CC_CONTROL _MMIO(0x68090) 461 + # define TV_CC_ENABLE (1 << 31) 462 + /* 463 + * Specifies which field to send the CC data in. 464 + * 465 + * CC data is usually sent in field 0. 466 + */ 467 + # define TV_CC_FID_MASK (1 << 27) 468 + # define TV_CC_FID_SHIFT 27 469 + /* Sets the horizontal position of the CC data. Usually 135. */ 470 + # define TV_CC_HOFF_MASK 0x03ff0000 471 + # define TV_CC_HOFF_SHIFT 16 472 + /* Sets the vertical position of the CC data. Usually 21 */ 473 + # define TV_CC_LINE_MASK 0x0000003f 474 + # define TV_CC_LINE_SHIFT 0 475 + 476 + #define TV_CC_DATA _MMIO(0x68094) 477 + # define TV_CC_RDY (1 << 31) 478 + /* Second word of CC data to be transmitted. */ 479 + # define TV_CC_DATA_2_MASK 0x007f0000 480 + # define TV_CC_DATA_2_SHIFT 16 481 + /* First word of CC data to be transmitted. */ 482 + # define TV_CC_DATA_1_MASK 0x0000007f 483 + # define TV_CC_DATA_1_SHIFT 0 484 + 485 + #define TV_H_LUMA(i) _MMIO(0x68100 + (i) * 4) /* 60 registers */ 486 + #define TV_H_CHROMA(i) _MMIO(0x68200 + (i) * 4) /* 60 registers */ 487 + #define TV_V_LUMA(i) _MMIO(0x68300 + (i) * 4) /* 43 registers */ 488 + #define TV_V_CHROMA(i) _MMIO(0x68400 + (i) * 4) /* 43 registers */ 489 + 490 + #endif /* __INTEL_TV_REGS_H__ */
+120 -12
drivers/gpu/drm/i915/display/intel_vdsc.c
··· 423 423 for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) { 424 424 /* Read range_minqp and range_max_qp from qp tables */ 425 425 rc->rc_range_params[buf_i].range_min_qp = 426 - intel_lookup_range_min_qp(bpc, buf_i, bpp_i); 426 + intel_lookup_range_min_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420); 427 427 rc->rc_range_params[buf_i].range_max_qp = 428 - intel_lookup_range_max_qp(bpc, buf_i, bpp_i); 428 + intel_lookup_range_max_qp(bpc, buf_i, bpp_i, vdsc_cfg->native_420); 429 429 430 430 /* Calculate range_bgp_offset */ 431 431 if (bpp <= 6) { ··· 448 448 } 449 449 } 450 450 451 + static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config, 452 + struct drm_dsc_config *vdsc_cfg) 453 + { 454 + if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_RGB || 455 + pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) { 456 + if (vdsc_cfg->slice_height > 4095) 457 + return -EINVAL; 458 + if (vdsc_cfg->slice_height * vdsc_cfg->slice_width < 15000) 459 + return -EINVAL; 460 + } else if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { 461 + if (vdsc_cfg->slice_width % 2) 462 + return -EINVAL; 463 + if (vdsc_cfg->slice_height % 2) 464 + return -EINVAL; 465 + if (vdsc_cfg->slice_height > 4094) 466 + return -EINVAL; 467 + if (vdsc_cfg->slice_height * vdsc_cfg->slice_width < 30000) 468 + return -EINVAL; 469 + } 470 + 471 + return 0; 472 + } 473 + 451 474 int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) 452 475 { 453 476 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); ··· 479 456 u16 compressed_bpp = pipe_config->dsc.compressed_bpp; 480 457 const struct rc_parameters *rc_params; 481 458 struct rc_parameters *rc = NULL; 459 + int err; 482 460 u8 i = 0; 483 461 484 462 vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay; 485 463 vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width, 486 464 pipe_config->dsc.slice_count); 487 465 488 - /* Gen 11 does not support YCbCr */ 466 + err = 
intel_dsc_slice_dimensions_valid(pipe_config, vdsc_cfg); 467 + 468 + if (err) { 469 + drm_dbg_kms(&dev_priv->drm, "Slice dimension requirements not met\n"); 470 + return err; 471 + } 472 + 473 + /* 474 + * According to DSC 1.2 specs if colorspace is YCbCr then convert_rgb is 0 475 + * else 1 476 + */ 477 + vdsc_cfg->convert_rgb = pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 && 478 + pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR444; 479 + 480 + if (DISPLAY_VER(dev_priv) >= 14 && 481 + pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 482 + vdsc_cfg->native_420 = true; 483 + /* We do not support YcBCr422 as of now */ 484 + vdsc_cfg->native_422 = false; 489 485 vdsc_cfg->simple_422 = false; 490 486 /* Gen 11 does not support VBR */ 491 487 vdsc_cfg->vbr_enable = false; 492 488 493 489 /* Gen 11 only supports integral values of bpp */ 494 490 vdsc_cfg->bits_per_pixel = compressed_bpp << 4; 491 + 492 + /* 493 + * According to DSC 1.2 specs in Section 4.1 if native_420 is set: 494 + * -We need to double the current bpp. 495 + * -second_line_bpg_offset is 12 in general and equal to 2*(slice_height-1) if slice 496 + * height < 8. 497 + * -second_line_offset_adj is 512 as shown by emperical values to yeild best chroma 498 + * preservation in second line. 499 + * -nsl_bpg_offset is calculated as second_line_offset/slice_height -1 then rounded 500 + * up to 16 fractional bits, we left shift second line offset by 11 to preserve 11 501 + * fractional bits. 
502 + */ 503 + if (vdsc_cfg->native_420) { 504 + vdsc_cfg->bits_per_pixel <<= 1; 505 + 506 + if (vdsc_cfg->slice_height >= 8) 507 + vdsc_cfg->second_line_bpg_offset = 12; 508 + else 509 + vdsc_cfg->second_line_bpg_offset = 510 + 2 * (vdsc_cfg->slice_height - 1); 511 + 512 + vdsc_cfg->second_line_offset_adj = 512; 513 + vdsc_cfg->nsl_bpg_offset = DIV_ROUND_UP(vdsc_cfg->second_line_bpg_offset << 11, 514 + vdsc_cfg->slice_height - 1); 515 + } 516 + 495 517 vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3; 496 518 497 519 for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) { ··· 663 595 DSC_VER_MIN_SHIFT | 664 596 vdsc_cfg->bits_per_component << DSC_BPC_SHIFT | 665 597 vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT; 666 - if (vdsc_cfg->dsc_version_minor == 2) 598 + if (vdsc_cfg->dsc_version_minor == 2) { 667 599 pps_val |= DSC_ALT_ICH_SEL; 600 + if (vdsc_cfg->native_420) 601 + pps_val |= DSC_NATIVE_420_ENABLE; 602 + if (vdsc_cfg->native_422) 603 + pps_val |= DSC_NATIVE_422_ENABLE; 604 + } 668 605 if (vdsc_cfg->block_pred_enable) 669 606 pps_val |= DSC_BLOCK_PREDICTION; 670 607 if (vdsc_cfg->convert_rgb) ··· 980 907 pps_val); 981 908 } 982 909 910 + if (DISPLAY_VER(dev_priv) >= 14) { 911 + /* Populate PICTURE_PARAMETER_SET_17 registers */ 912 + pps_val = 0; 913 + pps_val |= DSC_SL_BPG_OFFSET(vdsc_cfg->second_line_bpg_offset); 914 + drm_dbg_kms(&dev_priv->drm, "PPS17 = 0x%08x\n", pps_val); 915 + intel_de_write(dev_priv, 916 + MTL_DSC0_PICTURE_PARAMETER_SET_17(pipe), 917 + pps_val); 918 + if (crtc_state->dsc.dsc_split) 919 + intel_de_write(dev_priv, 920 + MTL_DSC1_PICTURE_PARAMETER_SET_17(pipe), 921 + pps_val); 922 + 923 + /* Populate PICTURE_PARAMETER_SET_18 registers */ 924 + pps_val = 0; 925 + pps_val |= DSC_NSL_BPG_OFFSET(vdsc_cfg->nsl_bpg_offset) | 926 + DSC_SL_OFFSET_ADJ(vdsc_cfg->second_line_offset_adj); 927 + drm_dbg_kms(&dev_priv->drm, "PPS18 = 0x%08x\n", pps_val); 928 + intel_de_write(dev_priv, 929 + MTL_DSC0_PICTURE_PARAMETER_SET_18(pipe), 930 + 
pps_val); 931 + if (crtc_state->dsc.dsc_split) 932 + intel_de_write(dev_priv, 933 + MTL_DSC1_PICTURE_PARAMETER_SET_18(pipe), 934 + pps_val); 935 + } 936 + 983 937 /* Populate the RC_BUF_THRESH registers */ 984 938 memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword)); 985 939 for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) { ··· 1281 1181 enum pipe pipe = crtc->pipe; 1282 1182 enum intel_display_power_domain power_domain; 1283 1183 intel_wakeref_t wakeref; 1284 - u32 dss_ctl1, dss_ctl2, val; 1184 + u32 dss_ctl1, dss_ctl2, pps0 = 0, pps1 = 0; 1285 1185 1286 1186 if (!intel_dsc_source_support(crtc_state)) 1287 1187 return; ··· 1304 1204 1305 1205 /* FIXME: add more state readout as needed */ 1306 1206 1307 - /* PPS1 */ 1308 - if (!is_pipe_dsc(crtc, cpu_transcoder)) 1309 - val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1); 1310 - else 1311 - val = intel_de_read(dev_priv, 1312 - ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe)); 1313 - vdsc_cfg->bits_per_pixel = val; 1207 + /* PPS0 & PPS1 */ 1208 + if (!is_pipe_dsc(crtc, cpu_transcoder)) { 1209 + pps1 = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1); 1210 + } else { 1211 + pps0 = intel_de_read(dev_priv, 1212 + ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe)); 1213 + pps1 = intel_de_read(dev_priv, 1214 + ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe)); 1215 + } 1216 + 1217 + vdsc_cfg->bits_per_pixel = pps1; 1218 + 1219 + if (pps0 & DSC_NATIVE_420_ENABLE) 1220 + vdsc_cfg->bits_per_pixel >>= 1; 1221 + 1314 1222 crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4; 1315 1223 out: 1316 1224 intel_display_power_put(dev_priv, power_domain, wakeref);
+28
drivers/gpu/drm/i915/display/intel_vdsc_regs.h
··· 46 46 _ICL_PIPE_DSS_CTL2_PB, \ 47 47 _ICL_PIPE_DSS_CTL2_PC) 48 48 49 + /* MTL Display Stream Compression registers */ 50 + #define _MTL_DSC0_PICTURE_PARAMETER_SET_17_PB 0x782B4 51 + #define _MTL_DSC1_PICTURE_PARAMETER_SET_17_PB 0x783B4 52 + #define _MTL_DSC0_PICTURE_PARAMETER_SET_17_PC 0x784B4 53 + #define _MTL_DSC1_PICTURE_PARAMETER_SET_17_PC 0x785B4 54 + #define MTL_DSC0_PICTURE_PARAMETER_SET_17(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 55 + _MTL_DSC0_PICTURE_PARAMETER_SET_17_PB, \ 56 + _MTL_DSC0_PICTURE_PARAMETER_SET_17_PC) 57 + #define MTL_DSC1_PICTURE_PARAMETER_SET_17(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 58 + _MTL_DSC1_PICTURE_PARAMETER_SET_17_PB, \ 59 + _MTL_DSC1_PICTURE_PARAMETER_SET_17_PC) 60 + #define DSC_SL_BPG_OFFSET(offset) ((offset) << 27) 61 + 62 + #define _MTL_DSC0_PICTURE_PARAMETER_SET_18_PB 0x782B8 63 + #define _MTL_DSC1_PICTURE_PARAMETER_SET_18_PB 0x783B8 64 + #define _MTL_DSC0_PICTURE_PARAMETER_SET_18_PC 0x784B8 65 + #define _MTL_DSC1_PICTURE_PARAMETER_SET_18_PC 0x785B8 66 + #define MTL_DSC0_PICTURE_PARAMETER_SET_18(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 67 + _MTL_DSC0_PICTURE_PARAMETER_SET_18_PB, \ 68 + _MTL_DSC0_PICTURE_PARAMETER_SET_18_PC) 69 + #define MTL_DSC1_PICTURE_PARAMETER_SET_18(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 70 + _MTL_DSC1_PICTURE_PARAMETER_SET_18_PB, \ 71 + _MTL_DSC1_PICTURE_PARAMETER_SET_18_PC) 72 + #define DSC_NSL_BPG_OFFSET(offset) ((offset) << 16) 73 + #define DSC_SL_OFFSET_ADJ(offset) ((offset) << 0) 74 + 49 75 /* Icelake Display Stream Compression Registers */ 50 76 #define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200) 51 77 #define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) ··· 85 59 #define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ 86 60 _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \ 87 61 _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC) 62 + #define DSC_NATIVE_422_ENABLE BIT(23) 63 + #define DSC_NATIVE_420_ENABLE BIT(22) 88 64 #define DSC_ALT_ICH_SEL (1 << 20) 89 65 #define DSC_VBR_ENABLE (1 << 19) 90 66 #define 
DSC_422_ENABLE (1 << 18)
+1 -1
drivers/gpu/drm/i915/display/intel_wm.c
··· 11 11 12 12 /** 13 13 * intel_update_watermarks - update FIFO watermark values based on current modes 14 - * @dev_priv: i915 device 14 + * @i915: i915 device 15 15 * 16 16 * Calculate watermark values for the various WM regs based on current mode 17 17 * and plane configuration.
+6
drivers/gpu/drm/i915/display/skl_universal_plane.c
··· 2475 2475 goto error; 2476 2476 } 2477 2477 2478 + if (!dev_priv->params.enable_dpt && 2479 + intel_fb_modifier_uses_dpt(dev_priv, fb->modifier)) { 2480 + drm_dbg_kms(&dev_priv->drm, "DPT disabled, skipping initial FB\n"); 2481 + goto error; 2482 + } 2483 + 2478 2484 /* 2479 2485 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr 2480 2486 * while i915 HW rotation is clockwise, thats why this swapping.
+7 -4
drivers/gpu/drm/i915/display/skl_watermark.c
··· 21 21 #include "intel_pcode.h" 22 22 #include "intel_wm.h" 23 23 #include "skl_watermark.h" 24 + #include "skl_watermark_regs.h" 24 25 25 26 static void skl_sagv_disable(struct drm_i915_private *i915); 26 27 ··· 411 410 { 412 411 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 413 412 struct drm_i915_private *i915 = to_i915(crtc->base.dev); 413 + 414 + if (!i915->params.enable_sagv) 415 + return false; 414 416 415 417 if (DISPLAY_VER(i915) >= 12) 416 418 return tgl_crtc_can_enable_sagv(crtc_state); ··· 2277 2273 return level; 2278 2274 2279 2275 /* 2280 - * FIXME PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_* 2276 + * PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_* 2281 2277 * based on whether we're limited by the vblank duration. 2282 - * 2283 - * FIXME also related to skl+ w/a 1136 (also unimplemented as of 2284 - * now) perhaps? 2285 2278 */ 2279 + crtc_state->wm_level_disabled = level < i915->display.wm.num_levels - 1; 2286 2280 2287 2281 for (level++; level < i915->display.wm.num_levels; level++) { 2288 2282 enum plane_id plane_id; ··· 3698 3696 }; 3699 3697 3700 3698 seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915))); 3699 + seq_printf(m, "SAGV modparam: %s\n", str_enabled_disabled(i915->params.enable_sagv)); 3701 3700 seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]); 3702 3701 seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us); 3703 3702
+160
drivers/gpu/drm/i915/display/skl_watermark_regs.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __SKL_WATERMARK_REGS_H__ 7 + #define __SKL_WATERMARK_REGS_H__ 8 + 9 + #include "intel_display_reg_defs.h" 10 + 11 + #define _PIPEA_MBUS_DBOX_CTL 0x7003C 12 + #define _PIPEB_MBUS_DBOX_CTL 0x7103C 13 + #define PIPE_MBUS_DBOX_CTL(pipe) _MMIO_PIPE(pipe, _PIPEA_MBUS_DBOX_CTL, \ 14 + _PIPEB_MBUS_DBOX_CTL) 15 + #define MBUS_DBOX_B2B_TRANSACTIONS_MAX_MASK REG_GENMASK(24, 20) /* tgl+ */ 16 + #define MBUS_DBOX_B2B_TRANSACTIONS_MAX(x) REG_FIELD_PREP(MBUS_DBOX_B2B_TRANSACTIONS_MAX_MASK, x) 17 + #define MBUS_DBOX_B2B_TRANSACTIONS_DELAY_MASK REG_GENMASK(19, 17) /* tgl+ */ 18 + #define MBUS_DBOX_B2B_TRANSACTIONS_DELAY(x) REG_FIELD_PREP(MBUS_DBOX_B2B_TRANSACTIONS_DELAY_MASK, x) 19 + #define MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN REG_BIT(16) /* tgl+ */ 20 + #define MBUS_DBOX_BW_CREDIT_MASK REG_GENMASK(15, 14) 21 + #define MBUS_DBOX_BW_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, x) 22 + #define MBUS_DBOX_BW_4CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x2) 23 + #define MBUS_DBOX_BW_8CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x3) 24 + #define MBUS_DBOX_B_CREDIT_MASK REG_GENMASK(12, 8) 25 + #define MBUS_DBOX_B_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_B_CREDIT_MASK, x) 26 + #define MBUS_DBOX_I_CREDIT_MASK REG_GENMASK(7, 5) 27 + #define MBUS_DBOX_I_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_I_CREDIT_MASK, x) 28 + #define MBUS_DBOX_A_CREDIT_MASK REG_GENMASK(3, 0) 29 + #define MBUS_DBOX_A_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_A_CREDIT_MASK, x) 30 + 31 + #define MBUS_UBOX_CTL _MMIO(0x4503C) 32 + #define MBUS_BBOX_CTL_S1 _MMIO(0x45040) 33 + #define MBUS_BBOX_CTL_S2 _MMIO(0x45044) 34 + 35 + #define MBUS_CTL _MMIO(0x4438C) 36 + #define MBUS_JOIN REG_BIT(31) 37 + #define MBUS_HASHING_MODE_MASK REG_BIT(30) 38 + #define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0) 39 + #define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1) 40 + #define 
MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26) 41 + #define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe) 42 + #define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7) 43 + 44 + /* Watermark register definitions for SKL */ 45 + #define _CUR_WM_A_0 0x70140 46 + #define _CUR_WM_B_0 0x71140 47 + #define _CUR_WM_SAGV_A 0x70158 48 + #define _CUR_WM_SAGV_B 0x71158 49 + #define _CUR_WM_SAGV_TRANS_A 0x7015C 50 + #define _CUR_WM_SAGV_TRANS_B 0x7115C 51 + #define _CUR_WM_TRANS_A 0x70168 52 + #define _CUR_WM_TRANS_B 0x71168 53 + #define _PLANE_WM_1_A_0 0x70240 54 + #define _PLANE_WM_1_B_0 0x71240 55 + #define _PLANE_WM_2_A_0 0x70340 56 + #define _PLANE_WM_2_B_0 0x71340 57 + #define _PLANE_WM_SAGV_1_A 0x70258 58 + #define _PLANE_WM_SAGV_1_B 0x71258 59 + #define _PLANE_WM_SAGV_2_A 0x70358 60 + #define _PLANE_WM_SAGV_2_B 0x71358 61 + #define _PLANE_WM_SAGV_TRANS_1_A 0x7025C 62 + #define _PLANE_WM_SAGV_TRANS_1_B 0x7125C 63 + #define _PLANE_WM_SAGV_TRANS_2_A 0x7035C 64 + #define _PLANE_WM_SAGV_TRANS_2_B 0x7135C 65 + #define _PLANE_WM_TRANS_1_A 0x70268 66 + #define _PLANE_WM_TRANS_1_B 0x71268 67 + #define _PLANE_WM_TRANS_2_A 0x70368 68 + #define _PLANE_WM_TRANS_2_B 0x71368 69 + #define PLANE_WM_EN (1 << 31) 70 + #define PLANE_WM_IGNORE_LINES (1 << 30) 71 + #define PLANE_WM_LINES_MASK REG_GENMASK(26, 14) 72 + #define PLANE_WM_BLOCKS_MASK REG_GENMASK(11, 0) 73 + 74 + #define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0) 75 + #define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level))) 76 + #define CUR_WM_SAGV(pipe) _MMIO_PIPE(pipe, _CUR_WM_SAGV_A, _CUR_WM_SAGV_B) 77 + #define CUR_WM_SAGV_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_SAGV_TRANS_A, _CUR_WM_SAGV_TRANS_B) 78 + #define CUR_WM_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_TRANS_A, _CUR_WM_TRANS_B) 79 + #define _PLANE_WM_1(pipe) _PIPE(pipe, _PLANE_WM_1_A_0, _PLANE_WM_1_B_0) 80 + #define _PLANE_WM_2(pipe) _PIPE(pipe, _PLANE_WM_2_A_0, _PLANE_WM_2_B_0) 81 + #define _PLANE_WM_BASE(pipe, plane) \ 
82 + _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe)) 83 + #define PLANE_WM(pipe, plane, level) \ 84 + _MMIO(_PLANE_WM_BASE(pipe, plane) + ((4) * (level))) 85 + #define _PLANE_WM_SAGV_1(pipe) \ 86 + _PIPE(pipe, _PLANE_WM_SAGV_1_A, _PLANE_WM_SAGV_1_B) 87 + #define _PLANE_WM_SAGV_2(pipe) \ 88 + _PIPE(pipe, _PLANE_WM_SAGV_2_A, _PLANE_WM_SAGV_2_B) 89 + #define PLANE_WM_SAGV(pipe, plane) \ 90 + _MMIO(_PLANE(plane, _PLANE_WM_SAGV_1(pipe), _PLANE_WM_SAGV_2(pipe))) 91 + #define _PLANE_WM_SAGV_TRANS_1(pipe) \ 92 + _PIPE(pipe, _PLANE_WM_SAGV_TRANS_1_A, _PLANE_WM_SAGV_TRANS_1_B) 93 + #define _PLANE_WM_SAGV_TRANS_2(pipe) \ 94 + _PIPE(pipe, _PLANE_WM_SAGV_TRANS_2_A, _PLANE_WM_SAGV_TRANS_2_B) 95 + #define PLANE_WM_SAGV_TRANS(pipe, plane) \ 96 + _MMIO(_PLANE(plane, _PLANE_WM_SAGV_TRANS_1(pipe), _PLANE_WM_SAGV_TRANS_2(pipe))) 97 + #define _PLANE_WM_TRANS_1(pipe) \ 98 + _PIPE(pipe, _PLANE_WM_TRANS_1_A, _PLANE_WM_TRANS_1_B) 99 + #define _PLANE_WM_TRANS_2(pipe) \ 100 + _PIPE(pipe, _PLANE_WM_TRANS_2_A, _PLANE_WM_TRANS_2_B) 101 + #define PLANE_WM_TRANS(pipe, plane) \ 102 + _MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))) 103 + 104 + #define _PLANE_BUF_CFG_1_B 0x7127c 105 + #define _PLANE_BUF_CFG_2_B 0x7137c 106 + #define _PLANE_BUF_CFG_1(pipe) \ 107 + _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B) 108 + #define _PLANE_BUF_CFG_2(pipe) \ 109 + _PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B) 110 + #define PLANE_BUF_CFG(pipe, plane) \ 111 + _MMIO_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe)) 112 + 113 + #define _PLANE_NV12_BUF_CFG_1_B 0x71278 114 + #define _PLANE_NV12_BUF_CFG_2_B 0x71378 115 + #define _PLANE_NV12_BUF_CFG_1(pipe) \ 116 + _PIPE(pipe, _PLANE_NV12_BUF_CFG_1_A, _PLANE_NV12_BUF_CFG_1_B) 117 + #define _PLANE_NV12_BUF_CFG_2(pipe) \ 118 + _PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B) 119 + #define PLANE_NV12_BUF_CFG(pipe, plane) \ 120 + _MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe)) 121 + 
122 + /* SKL new cursor registers */ 123 + #define _CUR_BUF_CFG_A 0x7017c 124 + #define _CUR_BUF_CFG_B 0x7117c 125 + #define CUR_BUF_CFG(pipe) _MMIO_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B) 126 + 127 + /* 128 + * The below are numbered starting from "S1" on gen11/gen12, but starting 129 + * with display 13, the bspec switches to a 0-based numbering scheme 130 + * (although the addresses stay the same so new S0 = old S1, new S1 = old S2). 131 + * We'll just use the 0-based numbering here for all platforms since it's the 132 + * way things will be named by the hardware team going forward, plus it's more 133 + * consistent with how most of the rest of our registers are named. 134 + */ 135 + #define _DBUF_CTL_S0 0x45008 136 + #define _DBUF_CTL_S1 0x44FE8 137 + #define _DBUF_CTL_S2 0x44300 138 + #define _DBUF_CTL_S3 0x44304 139 + #define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \ 140 + _DBUF_CTL_S0, \ 141 + _DBUF_CTL_S1, \ 142 + _DBUF_CTL_S2, \ 143 + _DBUF_CTL_S3)) 144 + #define DBUF_POWER_REQUEST REG_BIT(31) 145 + #define DBUF_POWER_STATE REG_BIT(30) 146 + #define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19) 147 + #define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x) 148 + #define DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(18, 16) /* ADL-P+ */ 149 + #define DBUF_MIN_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_MIN_TRACKER_STATE_SERVICE_MASK, x) /* ADL-P+ */ 150 + 151 + #define MTL_LATENCY_LP0_LP1 _MMIO(0x45780) 152 + #define MTL_LATENCY_LP2_LP3 _MMIO(0x45784) 153 + #define MTL_LATENCY_LP4_LP5 _MMIO(0x45788) 154 + #define MTL_LATENCY_LEVEL_EVEN_MASK REG_GENMASK(12, 0) 155 + #define MTL_LATENCY_LEVEL_ODD_MASK REG_GENMASK(28, 16) 156 + 157 + #define MTL_LATENCY_SAGV _MMIO(0x4578c) 158 + #define MTL_LATENCY_QCLK_SAGV REG_GENMASK(12, 0) 159 + 160 + #endif /* __SKL_WATERMARK_REGS_H__ */
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_object.h
··· 303 303 static inline bool 304 304 i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj) 305 305 { 306 - return READ_ONCE(obj->frontbuffer); 306 + return READ_ONCE(obj->frontbuffer) || obj->is_dpt; 307 307 } 308 308 309 309 static inline unsigned int
+3
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
··· 491 491 */ 492 492 unsigned int cache_dirty:1; 493 493 494 + /* @is_dpt: Object houses a display page table (DPT) */ 495 + unsigned int is_dpt:1; 496 + 494 497 /** 495 498 * @read_domains: Read memory domains. 496 499 *
+7 -8
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
··· 890 890 if (HAS_LMEMBAR_SMEM_STOLEN(i915)) { 891 891 /* 892 892 * MTL dsm size is in GGC register. 893 - * Also MTL uses offset to DSMBASE in ptes, so i915 894 - * uses dsm_base = 0 to setup stolen region. 893 + * Also MTL uses offset to GSMBASE in ptes, so i915 894 + * uses dsm_base = 8MBs to setup stolen region, since 895 + * DSMBASE = GSMBASE + 8MB. 895 896 */ 896 897 ret = mtl_get_gms_size(uncore); 897 898 if (ret < 0) { ··· 900 899 return ERR_PTR(ret); 901 900 } 902 901 903 - dsm_base = 0; 902 + dsm_base = SZ_8M; 904 903 dsm_size = (resource_size_t)(ret * SZ_1M); 905 904 906 905 GEM_BUG_ON(pci_resource_len(pdev, GEN12_LMEM_BAR) != SZ_256M); 907 - GEM_BUG_ON((dsm_size + SZ_8M) > lmem_size); 906 + GEM_BUG_ON((dsm_base + dsm_size) > lmem_size); 908 907 } else { 909 908 /* Use DSM base address instead for stolen memory */ 910 909 dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE) & GEN12_BDSM_MASK; ··· 913 912 dsm_size = ALIGN_DOWN(lmem_size - dsm_base, SZ_1M); 914 913 } 915 914 916 - io_size = dsm_size; 917 - if (HAS_LMEMBAR_SMEM_STOLEN(i915)) { 918 - io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + SZ_8M; 919 - } else if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) { 915 + if (pci_resource_len(pdev, GEN12_LMEM_BAR) < lmem_size) { 920 916 io_start = 0; 921 917 io_size = 0; 922 918 } else { 923 919 io_start = pci_resource_start(pdev, GEN12_LMEM_BAR) + dsm_base; 920 + io_size = dsm_size; 924 921 } 925 922 926 923 min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
+1
drivers/gpu/drm/i915/gvt/edid.c
··· 32 32 * 33 33 */ 34 34 35 + #include "display/intel_dp_aux_regs.h" 35 36 #include "display/intel_gmbus_regs.h" 36 37 #include "gvt.h" 37 38 #include "i915_drv.h"
+5
drivers/gpu/drm/i915/gvt/handlers.c
··· 43 43 #include "intel_mchbar_regs.h" 44 44 #include "display/intel_display_types.h" 45 45 #include "display/intel_dmc_regs.h" 46 + #include "display/intel_dp_aux_regs.h" 46 47 #include "display/intel_dpio_phy.h" 47 48 #include "display/intel_fbc.h" 49 + #include "display/intel_fdi_regs.h" 50 + #include "display/intel_pps_regs.h" 51 + #include "display/intel_psr_regs.h" 52 + #include "display/skl_watermark_regs.h" 48 53 #include "display/vlv_dsi_pll_regs.h" 49 54 #include "gt/intel_gt_regs.h" 50 55
+4 -4
drivers/gpu/drm/i915/i915_driver.c
··· 79 79 #include "soc/intel_dram.h" 80 80 #include "soc/intel_gmch.h" 81 81 82 - #include "i915_file_private.h" 83 82 #include "i915_debugfs.h" 84 83 #include "i915_driver.h" 85 84 #include "i915_drm_client.h" 86 85 #include "i915_drv.h" 86 + #include "i915_file_private.h" 87 87 #include "i915_getparam.h" 88 88 #include "i915_hwmon.h" 89 89 #include "i915_ioc32.h" ··· 97 97 #include "i915_sysfs.h" 98 98 #include "i915_utils.h" 99 99 #include "i915_vgpu.h" 100 + #include "intel_clock_gating.h" 100 101 #include "intel_gvt.h" 101 102 #include "intel_memory_region.h" 102 103 #include "intel_pci_config.h" 103 104 #include "intel_pcode.h" 104 - #include "intel_pm.h" 105 105 #include "intel_region_ttm.h" 106 106 #include "vlv_suspend.h" 107 107 ··· 252 252 253 253 intel_irq_init(dev_priv); 254 254 intel_init_display_hooks(dev_priv); 255 - intel_init_clock_gating_hooks(dev_priv); 255 + intel_clock_gating_hooks_init(dev_priv); 256 256 257 257 intel_detect_preproduction_hw(dev_priv); 258 258 ··· 1244 1244 i915_gem_resume(dev_priv); 1245 1245 1246 1246 intel_modeset_init_hw(dev_priv); 1247 - intel_init_clock_gating(dev_priv); 1247 + intel_clock_gating_init(dev_priv); 1248 1248 intel_hpd_init(dev_priv); 1249 1249 1250 1250 /* MST sideband requires HPD interrupts enabled */
+2
drivers/gpu/drm/i915/i915_drv.h
··· 813 813 #define HAS_FBC(dev_priv) (RUNTIME_INFO(dev_priv)->fbc_mask != 0) 814 814 #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7) 815 815 816 + #define HAS_DPT(dev_priv) (DISPLAY_VER(dev_priv) >= 13) 817 + 816 818 #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 817 819 818 820 #define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
+4 -4
drivers/gpu/drm/i915/i915_gem.c
··· 58 58 #include "i915_file_private.h" 59 59 #include "i915_trace.h" 60 60 #include "i915_vgpu.h" 61 - #include "intel_pm.h" 61 + #include "intel_clock_gating.h" 62 62 63 63 static int 64 64 insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size) ··· 1164 1164 } 1165 1165 1166 1166 /* 1167 - * Despite its name intel_init_clock_gating applies both display 1167 + * Despite its name intel_clock_gating_init applies both display 1168 1168 * clock gating workarounds; GT mmio workarounds and the occasional 1169 1169 * GT power context workaround. Worse, sometimes it includes a context 1170 1170 * register workaround which we need to apply before we record the ··· 1172 1172 * 1173 1173 * FIXME: break up the workarounds and apply them at the right time! 1174 1174 */ 1175 - intel_init_clock_gating(dev_priv); 1175 + intel_clock_gating_init(dev_priv); 1176 1176 1177 1177 for_each_gt(gt, dev_priv, i) { 1178 1178 ret = intel_gt_init(gt); ··· 1216 1216 /* Minimal basic recovery for KMS */ 1217 1217 ret = i915_ggtt_enable_hw(dev_priv); 1218 1218 i915_ggtt_resume(to_gt(dev_priv)->ggtt); 1219 - intel_init_clock_gating(dev_priv); 1219 + intel_clock_gating_init(dev_priv); 1220 1220 } 1221 1221 1222 1222 i915_gem_drain_freed_objects(dev_priv);
+26
drivers/gpu/drm/i915/i915_hwmon.c
··· 349 349 } 350 350 } 351 351 352 + #define PL1_DISABLE 0 353 + 352 354 /* 353 355 * HW allows arbitrary PL1 limits to be set but silently clamps these values to 354 356 * "typical but not guaranteed" min/max values in rg.pkg_power_sku. Follow the ··· 363 361 struct i915_hwmon *hwmon = ddat->hwmon; 364 362 intel_wakeref_t wakeref; 365 363 u64 r, min, max; 364 + 365 + /* Check if PL1 limit is disabled */ 366 + with_intel_runtime_pm(ddat->uncore->rpm, wakeref) 367 + r = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit); 368 + if (!(r & PKG_PWR_LIM_1_EN)) { 369 + *val = PL1_DISABLE; 370 + return 0; 371 + } 366 372 367 373 *val = hwm_field_read_and_scale(ddat, 368 374 hwmon->rg.pkg_rapl_limit, ··· 395 385 hwm_power_max_write(struct hwm_drvdata *ddat, long val) 396 386 { 397 387 struct i915_hwmon *hwmon = ddat->hwmon; 388 + intel_wakeref_t wakeref; 398 389 u32 nval; 390 + 391 + /* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */ 392 + if (val == PL1_DISABLE) { 393 + mutex_lock(&hwmon->hwmon_lock); 394 + with_intel_runtime_pm(ddat->uncore->rpm, wakeref) { 395 + intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit, 396 + PKG_PWR_LIM_1_EN, 0); 397 + nval = intel_uncore_read(ddat->uncore, hwmon->rg.pkg_rapl_limit); 398 + } 399 + mutex_unlock(&hwmon->hwmon_lock); 400 + 401 + if (nval & PKG_PWR_LIM_1_EN) 402 + return -ENODEV; 403 + return 0; 404 + } 399 405 400 406 /* Computation in 64-bits to avoid overflow. Round to nearest. */ 401 407 nval = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_power, SF_POWER);
+2
drivers/gpu/drm/i915/i915_irq.c
··· 37 37 #include "display/intel_de.h" 38 38 #include "display/intel_display_trace.h" 39 39 #include "display/intel_display_types.h" 40 + #include "display/intel_fdi_regs.h" 40 41 #include "display/intel_fifo_underrun.h" 41 42 #include "display/intel_hotplug.h" 42 43 #include "display/intel_lpe_audio.h" 43 44 #include "display/intel_psr.h" 45 + #include "display/intel_psr_regs.h" 44 46 45 47 #include "gt/intel_breadcrumbs.h" 46 48 #include "gt/intel_gt.h"
+6
drivers/gpu/drm/i915/i915_params.c
··· 121 121 "(0=disabled, 1=enabled) " 122 122 "Default: 0"); 123 123 124 + i915_param_named_unsafe(enable_sagv, bool, 0600, 125 + "Enable system agent voltage/frequency scaling (SAGV) (default: true)"); 126 + 124 127 i915_param_named_unsafe(force_probe, charp, 0400, 125 128 "Force probe options for specified supported devices. " 126 129 "See CONFIG_DRM_I915_FORCE_PROBE for details."); ··· 133 130 "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); 134 131 135 132 i915_param_named_unsafe(enable_ips, int, 0400, "Enable IPS (default: true)"); 133 + 134 + i915_param_named_unsafe(enable_dpt, bool, 0400, 135 + "Enable display page table (DPT) (default: true)"); 136 136 137 137 i915_param_named(fastboot, int, 0400, 138 138 "Try to skip unnecessary mode sets at boot time "
+2
drivers/gpu/drm/i915/i915_params.h
··· 54 54 param(int, enable_dc, -1, 0400) \ 55 55 param(int, enable_fbc, -1, 0600) \ 56 56 param(int, enable_psr, -1, 0600) \ 57 + param(bool, enable_dpt, true, 0400) \ 57 58 param(bool, psr_safest_params, false, 0400) \ 58 59 param(bool, enable_psr2_sel_fetch, true, 0400) \ 60 + param(bool, enable_sagv, true, 0600) \ 59 61 param(int, disable_power_well, -1, 0400) \ 60 62 param(int, enable_ips, 1, 0600) \ 61 63 param(int, invert_brightness, 0, 0600) \
-3
drivers/gpu/drm/i915/i915_pci.c
··· 896 896 static const struct intel_device_info tgl_info = { 897 897 GEN12_FEATURES, 898 898 PLATFORM(INTEL_TIGERLAKE), 899 - .display.has_modular_fia = 1, 900 899 .__runtime.platform_engine_mask = 901 900 BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), 902 901 }; ··· 995 996 BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | 996 997 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), 997 998 .display.has_cdclk_crawl = 1, 998 - .display.has_modular_fia = 1, 999 999 .display.has_psr_hw_tracking = 0, 1000 1000 .__runtime.platform_engine_mask = 1001 1001 BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), ··· 1142 1144 .__runtime.graphics.ip.rel = 70, 1143 1145 .__runtime.media.ip.ver = 13, 1144 1146 PLATFORM(INTEL_METEORLAKE), 1145 - .display.has_modular_fia = 1, 1146 1147 .extra_gt_list = xelpmp_extra_gt, 1147 1148 .has_flat_ccs = 0, 1148 1149 .has_gmd_id = 1,
+22 -1221
drivers/gpu/drm/i915/i915_reg.h
··· 1067 1067 #define MBUS_ABOX_BT_CREDIT_POOL1_MASK (0x1F << 0) 1068 1068 #define MBUS_ABOX_BT_CREDIT_POOL1(x) ((x) << 0) 1069 1069 1070 - #define _PIPEA_MBUS_DBOX_CTL 0x7003C 1071 - #define _PIPEB_MBUS_DBOX_CTL 0x7103C 1072 - #define PIPE_MBUS_DBOX_CTL(pipe) _MMIO_PIPE(pipe, _PIPEA_MBUS_DBOX_CTL, \ 1073 - _PIPEB_MBUS_DBOX_CTL) 1074 - #define MBUS_DBOX_B2B_TRANSACTIONS_MAX_MASK REG_GENMASK(24, 20) /* tgl+ */ 1075 - #define MBUS_DBOX_B2B_TRANSACTIONS_MAX(x) REG_FIELD_PREP(MBUS_DBOX_B2B_TRANSACTIONS_MAX_MASK, x) 1076 - #define MBUS_DBOX_B2B_TRANSACTIONS_DELAY_MASK REG_GENMASK(19, 17) /* tgl+ */ 1077 - #define MBUS_DBOX_B2B_TRANSACTIONS_DELAY(x) REG_FIELD_PREP(MBUS_DBOX_B2B_TRANSACTIONS_DELAY_MASK, x) 1078 - #define MBUS_DBOX_REGULATE_B2B_TRANSACTIONS_EN REG_BIT(16) /* tgl+ */ 1079 - #define MBUS_DBOX_BW_CREDIT_MASK REG_GENMASK(15, 14) 1080 - #define MBUS_DBOX_BW_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, x) 1081 - #define MBUS_DBOX_BW_4CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x2) 1082 - #define MBUS_DBOX_BW_8CREDITS_MTL REG_FIELD_PREP(MBUS_DBOX_BW_CREDIT_MASK, 0x3) 1083 - #define MBUS_DBOX_B_CREDIT_MASK REG_GENMASK(12, 8) 1084 - #define MBUS_DBOX_B_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_B_CREDIT_MASK, x) 1085 - #define MBUS_DBOX_I_CREDIT_MASK REG_GENMASK(7, 5) 1086 - #define MBUS_DBOX_I_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_I_CREDIT_MASK, x) 1087 - #define MBUS_DBOX_A_CREDIT_MASK REG_GENMASK(3, 0) 1088 - #define MBUS_DBOX_A_CREDIT(x) REG_FIELD_PREP(MBUS_DBOX_A_CREDIT_MASK, x) 1089 - 1090 - #define MBUS_UBOX_CTL _MMIO(0x4503C) 1091 - #define MBUS_BBOX_CTL_S1 _MMIO(0x45040) 1092 - #define MBUS_BBOX_CTL_S2 _MMIO(0x45044) 1093 - 1094 - #define MBUS_CTL _MMIO(0x4438C) 1095 - #define MBUS_JOIN REG_BIT(31) 1096 - #define MBUS_HASHING_MODE_MASK REG_BIT(30) 1097 - #define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0) 1098 - #define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1) 1099 - #define MBUS_JOIN_PIPE_SELECT_MASK 
REG_GENMASK(28, 26) 1100 - #define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe) 1101 - #define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7) 1102 - 1103 1070 /* Make render/texture TLB fetches lower priorty than associated data 1104 1071 * fetches. This is not turned on by default 1105 1072 */ ··· 1374 1407 #define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024) 1375 1408 1376 1409 #define IPS_CTL _MMIO(0x43408) 1377 - #define IPS_ENABLE (1 << 31) 1410 + #define IPS_ENABLE REG_BIT(31) 1411 + #define IPS_FALSE_COLOR REG_BIT(4) 1378 1412 1379 1413 #define MSG_FBC_REND_STATE(fbc_id) _MMIO_PIPE((fbc_id), 0x50380, 0x50384) 1380 1414 #define FBC_REND_NUKE REG_BIT(2) ··· 1981 2013 #define PIPESRC(pipe) _MMIO_TRANS2((pipe), _PIPEASRC) 1982 2014 #define TRANS_MULT(trans) _MMIO_TRANS2((trans), _TRANS_MULT_A) 1983 2015 1984 - #define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A) 1985 - #define EXITLINE_ENABLE REG_BIT(31) 1986 - #define EXITLINE_MASK REG_GENMASK(12, 0) 1987 - #define EXITLINE_SHIFT 0 1988 - 1989 2016 /* VRR registers */ 1990 2017 #define _TRANS_VRR_CTL_A 0x60420 1991 2018 #define _TRANS_VRR_CTL_B 0x61420 ··· 2073 2110 #define TRANS_PUSH(trans) _MMIO_TRANS2(trans, _TRANS_PUSH_A) 2074 2111 #define TRANS_PUSH_EN REG_BIT(31) 2075 2112 #define TRANS_PUSH_SEND REG_BIT(30) 2076 - 2077 - /* 2078 - * HSW+ eDP PSR registers 2079 - * 2080 - * HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one 2081 - * instance of it 2082 - */ 2083 - #define _SRD_CTL_A 0x60800 2084 - #define _SRD_CTL_EDP 0x6f800 2085 - #define EDP_PSR_CTL(tran) _MMIO_TRANS2(tran, _SRD_CTL_A) 2086 - #define EDP_PSR_ENABLE (1 << 31) 2087 - #define BDW_PSR_SINGLE_FRAME (1 << 30) 2088 - #define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */ 2089 - #define EDP_PSR_LINK_STANDBY (1 << 27) 2090 - #define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3 << 25) 2091 - #define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0 << 25) 2092 - #define 
EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1 << 25) 2093 - #define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2 << 25) 2094 - #define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3 << 25) 2095 - #define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20 2096 - #define EDP_PSR_SKIP_AUX_EXIT (1 << 12) 2097 - #define EDP_PSR_TP1_TP2_SEL (0 << 11) 2098 - #define EDP_PSR_TP1_TP3_SEL (1 << 11) 2099 - #define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */ 2100 - #define EDP_PSR_TP2_TP3_TIME_500us (0 << 8) 2101 - #define EDP_PSR_TP2_TP3_TIME_100us (1 << 8) 2102 - #define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8) 2103 - #define EDP_PSR_TP2_TP3_TIME_0us (3 << 8) 2104 - #define EDP_PSR_TP4_TIME_0US (3 << 6) /* ICL+ */ 2105 - #define EDP_PSR_TP1_TIME_500us (0 << 4) 2106 - #define EDP_PSR_TP1_TIME_100us (1 << 4) 2107 - #define EDP_PSR_TP1_TIME_2500us (2 << 4) 2108 - #define EDP_PSR_TP1_TIME_0us (3 << 4) 2109 - #define EDP_PSR_IDLE_FRAME_SHIFT 0 2110 - 2111 - /* 2112 - * Until TGL, IMR/IIR are fixed at 0x648xx. On TGL+ those registers are relative 2113 - * to transcoder and bits defined for each one as if using no shift (i.e. as if 2114 - * it was for TRANSCODER_EDP) 2115 - */ 2116 - #define EDP_PSR_IMR _MMIO(0x64834) 2117 - #define EDP_PSR_IIR _MMIO(0x64838) 2118 - #define _PSR_IMR_A 0x60814 2119 - #define _PSR_IIR_A 0x60818 2120 - #define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A) 2121 - #define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A) 2122 - #define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? 
\ 2123 - 0 : ((trans) - TRANSCODER_A + 1) * 8) 2124 - #define TGL_PSR_MASK REG_GENMASK(2, 0) 2125 - #define TGL_PSR_ERROR REG_BIT(2) 2126 - #define TGL_PSR_POST_EXIT REG_BIT(1) 2127 - #define TGL_PSR_PRE_ENTRY REG_BIT(0) 2128 - #define EDP_PSR_MASK(trans) (TGL_PSR_MASK << \ 2129 - _EDP_PSR_TRANS_SHIFT(trans)) 2130 - #define EDP_PSR_ERROR(trans) (TGL_PSR_ERROR << \ 2131 - _EDP_PSR_TRANS_SHIFT(trans)) 2132 - #define EDP_PSR_POST_EXIT(trans) (TGL_PSR_POST_EXIT << \ 2133 - _EDP_PSR_TRANS_SHIFT(trans)) 2134 - #define EDP_PSR_PRE_ENTRY(trans) (TGL_PSR_PRE_ENTRY << \ 2135 - _EDP_PSR_TRANS_SHIFT(trans)) 2136 - 2137 - #define _SRD_AUX_DATA_A 0x60814 2138 - #define _SRD_AUX_DATA_EDP 0x6f814 2139 - #define EDP_PSR_AUX_DATA(tran, i) _MMIO_TRANS2(tran, _SRD_AUX_DATA_A + (i) + 4) /* 5 registers */ 2140 - 2141 - #define _SRD_STATUS_A 0x60840 2142 - #define _SRD_STATUS_EDP 0x6f840 2143 - #define EDP_PSR_STATUS(tran) _MMIO_TRANS2(tran, _SRD_STATUS_A) 2144 - #define EDP_PSR_STATUS_STATE_MASK (7 << 29) 2145 - #define EDP_PSR_STATUS_STATE_SHIFT 29 2146 - #define EDP_PSR_STATUS_STATE_IDLE (0 << 29) 2147 - #define EDP_PSR_STATUS_STATE_SRDONACK (1 << 29) 2148 - #define EDP_PSR_STATUS_STATE_SRDENT (2 << 29) 2149 - #define EDP_PSR_STATUS_STATE_BUFOFF (3 << 29) 2150 - #define EDP_PSR_STATUS_STATE_BUFON (4 << 29) 2151 - #define EDP_PSR_STATUS_STATE_AUXACK (5 << 29) 2152 - #define EDP_PSR_STATUS_STATE_SRDOFFACK (6 << 29) 2153 - #define EDP_PSR_STATUS_LINK_MASK (3 << 26) 2154 - #define EDP_PSR_STATUS_LINK_FULL_OFF (0 << 26) 2155 - #define EDP_PSR_STATUS_LINK_FULL_ON (1 << 26) 2156 - #define EDP_PSR_STATUS_LINK_STANDBY (2 << 26) 2157 - #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20 2158 - #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f 2159 - #define EDP_PSR_STATUS_COUNT_SHIFT 16 2160 - #define EDP_PSR_STATUS_COUNT_MASK 0xf 2161 - #define EDP_PSR_STATUS_AUX_ERROR (1 << 15) 2162 - #define EDP_PSR_STATUS_AUX_SENDING (1 << 12) 2163 - #define EDP_PSR_STATUS_SENDING_IDLE (1 << 9) 2164 - #define 
EDP_PSR_STATUS_SENDING_TP2_TP3 (1 << 8) 2165 - #define EDP_PSR_STATUS_SENDING_TP1 (1 << 4) 2166 - #define EDP_PSR_STATUS_IDLE_MASK 0xf 2167 - 2168 - #define _SRD_PERF_CNT_A 0x60844 2169 - #define _SRD_PERF_CNT_EDP 0x6f844 2170 - #define EDP_PSR_PERF_CNT(tran) _MMIO_TRANS2(tran, _SRD_PERF_CNT_A) 2171 - #define EDP_PSR_PERF_CNT_MASK 0xffffff 2172 - 2173 - /* PSR_MASK on SKL+ */ 2174 - #define _SRD_DEBUG_A 0x60860 2175 - #define _SRD_DEBUG_EDP 0x6f860 2176 - #define EDP_PSR_DEBUG(tran) _MMIO_TRANS2(tran, _SRD_DEBUG_A) 2177 - #define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28) 2178 - #define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) 2179 - #define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) 2180 - #define EDP_PSR_DEBUG_MASK_HPD (1 << 25) 2181 - #define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */ 2182 - #define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */ 2183 - 2184 - #define _PSR2_CTL_A 0x60900 2185 - #define _PSR2_CTL_EDP 0x6f900 2186 - #define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A) 2187 - #define EDP_PSR2_ENABLE (1 << 31) 2188 - #define EDP_SU_TRACK_ENABLE (1 << 30) /* up to adl-p */ 2189 - #define TGL_EDP_PSR2_BLOCK_COUNT_NUM_2 (0 << 28) 2190 - #define TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 (1 << 28) 2191 - #define EDP_Y_COORDINATE_ENABLE REG_BIT(25) /* display 10, 11 and 12 */ 2192 - #define EDP_PSR2_SU_SDP_SCANLINE REG_BIT(25) /* display 13+ */ 2193 - #define EDP_MAX_SU_DISABLE_TIME(t) ((t) << 20) 2194 - #define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f << 20) 2195 - #define EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES 8 2196 - #define EDP_PSR2_IO_BUFFER_WAKE(lines) ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13) 2197 - #define EDP_PSR2_IO_BUFFER_WAKE_MASK (3 << 13) 2198 - #define TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES 5 2199 - #define TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT 13 2200 - #define TGL_EDP_PSR2_IO_BUFFER_WAKE(lines) (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT) 2201 - #define TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK (7 
<< 13) 2202 - #define EDP_PSR2_FAST_WAKE_MAX_LINES 8 2203 - #define EDP_PSR2_FAST_WAKE(lines) ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11) 2204 - #define EDP_PSR2_FAST_WAKE_MASK (3 << 11) 2205 - #define TGL_EDP_PSR2_FAST_WAKE_MIN_LINES 5 2206 - #define TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT 10 2207 - #define TGL_EDP_PSR2_FAST_WAKE(lines) (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT) 2208 - #define TGL_EDP_PSR2_FAST_WAKE_MASK (7 << 10) 2209 - #define EDP_PSR2_TP2_TIME_500us (0 << 8) 2210 - #define EDP_PSR2_TP2_TIME_100us (1 << 8) 2211 - #define EDP_PSR2_TP2_TIME_2500us (2 << 8) 2212 - #define EDP_PSR2_TP2_TIME_50us (3 << 8) 2213 - #define EDP_PSR2_TP2_TIME_MASK (3 << 8) 2214 - #define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4 2215 - #define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf << 4) 2216 - #define EDP_PSR2_FRAME_BEFORE_SU(a) ((a) << 4) 2217 - #define EDP_PSR2_IDLE_FRAME_MASK 0xf 2218 - #define EDP_PSR2_IDLE_FRAME_SHIFT 0 2219 - 2220 - #define _PSR_EVENT_TRANS_A 0x60848 2221 - #define _PSR_EVENT_TRANS_B 0x61848 2222 - #define _PSR_EVENT_TRANS_C 0x62848 2223 - #define _PSR_EVENT_TRANS_D 0x63848 2224 - #define _PSR_EVENT_TRANS_EDP 0x6f848 2225 - #define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A) 2226 - #define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17) 2227 - #define PSR_EVENT_PSR2_DISABLED (1 << 16) 2228 - #define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15) 2229 - #define PSR_EVENT_SU_CRC_FIFO_UNDERRUN (1 << 14) 2230 - #define PSR_EVENT_GRAPHICS_RESET (1 << 12) 2231 - #define PSR_EVENT_PCH_INTERRUPT (1 << 11) 2232 - #define PSR_EVENT_MEMORY_UP (1 << 10) 2233 - #define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9) 2234 - #define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8) 2235 - #define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6) 2236 - #define PSR_EVENT_REGISTER_UPDATE (1 << 5) /* Reserved in ICL+ */ 2237 - #define PSR_EVENT_HDCP_ENABLE (1 << 4) 2238 - #define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3) 2239 - #define PSR_EVENT_VBI_ENABLE (1 << 2) 2240 - 
#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1) 2241 - #define PSR_EVENT_PSR_DISABLE (1 << 0) 2242 - 2243 - #define _PSR2_STATUS_A 0x60940 2244 - #define _PSR2_STATUS_EDP 0x6f940 2245 - #define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) 2246 - #define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28) 2247 - #define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8) 2248 - 2249 - #define _PSR2_SU_STATUS_A 0x60914 2250 - #define _PSR2_SU_STATUS_EDP 0x6f914 2251 - #define _PSR2_SU_STATUS(tran, index) _MMIO_TRANS2(tran, _PSR2_SU_STATUS_A + (index) * 4) 2252 - #define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3)) 2253 - #define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10) 2254 - #define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame)) 2255 - #define PSR2_SU_STATUS_FRAMES 8 2256 - 2257 - #define _PSR2_MAN_TRK_CTL_A 0x60910 2258 - #define _PSR2_MAN_TRK_CTL_EDP 0x6f910 2259 - #define PSR2_MAN_TRK_CTL(tran) _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A) 2260 - #define PSR2_MAN_TRK_CTL_ENABLE REG_BIT(31) 2261 - #define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK REG_GENMASK(30, 21) 2262 - #define PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val) 2263 - #define PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK REG_GENMASK(20, 11) 2264 - #define PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val) REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val) 2265 - #define PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(3) 2266 - #define PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(2) 2267 - #define PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(1) 2268 - #define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK REG_GENMASK(28, 16) 2269 - #define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val) 2270 - #define ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK REG_GENMASK(12, 0) 2271 - #define 
ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val) REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val) 2272 - #define ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE REG_BIT(31) 2273 - #define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14) 2274 - #define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13) 2275 2113 2276 2114 /* VGA port control */ 2277 2115 #define ADPA _MMIO(0x61100) ··· 2330 2566 #define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) 2331 2567 #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) 2332 2568 2333 - /* Panel power sequencing */ 2334 - #define PPS_BASE 0x61200 2335 - #define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) 2336 - #define PCH_PPS_BASE 0xC7200 2337 - 2338 - #define _MMIO_PPS(pps_idx, reg) _MMIO(dev_priv->display.pps.mmio_base - \ 2339 - PPS_BASE + (reg) + \ 2340 - (pps_idx) * 0x100) 2341 - 2342 - #define _PP_STATUS 0x61200 2343 - #define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS) 2344 - #define PP_ON REG_BIT(31) 2345 - /* 2346 - * Indicates that all dependencies of the panel are on: 2347 - * 2348 - * - PLL enabled 2349 - * - pipe enabled 2350 - * - LVDS/DVOB/DVOC on 2351 - */ 2352 - #define PP_READY REG_BIT(30) 2353 - #define PP_SEQUENCE_MASK REG_GENMASK(29, 28) 2354 - #define PP_SEQUENCE_NONE REG_FIELD_PREP(PP_SEQUENCE_MASK, 0) 2355 - #define PP_SEQUENCE_POWER_UP REG_FIELD_PREP(PP_SEQUENCE_MASK, 1) 2356 - #define PP_SEQUENCE_POWER_DOWN REG_FIELD_PREP(PP_SEQUENCE_MASK, 2) 2357 - #define PP_CYCLE_DELAY_ACTIVE REG_BIT(27) 2358 - #define PP_SEQUENCE_STATE_MASK REG_GENMASK(3, 0) 2359 - #define PP_SEQUENCE_STATE_OFF_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x0) 2360 - #define PP_SEQUENCE_STATE_OFF_S0_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x1) 2361 - #define PP_SEQUENCE_STATE_OFF_S0_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x2) 2362 - #define PP_SEQUENCE_STATE_OFF_S0_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x3) 2363 - #define PP_SEQUENCE_STATE_ON_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x8) 2364 - #define 
PP_SEQUENCE_STATE_ON_S1_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x9) 2365 - #define PP_SEQUENCE_STATE_ON_S1_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xa) 2366 - #define PP_SEQUENCE_STATE_ON_S1_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xb) 2367 - #define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf) 2368 - 2369 - #define _PP_CONTROL 0x61204 2370 - #define PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL) 2371 - #define PANEL_UNLOCK_MASK REG_GENMASK(31, 16) 2372 - #define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd) 2373 - #define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4) 2374 - #define EDP_FORCE_VDD REG_BIT(3) 2375 - #define EDP_BLC_ENABLE REG_BIT(2) 2376 - #define PANEL_POWER_RESET REG_BIT(1) 2377 - #define PANEL_POWER_ON REG_BIT(0) 2378 - 2379 - #define _PP_ON_DELAYS 0x61208 2380 - #define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS) 2381 - #define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30) 2382 - #define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0) 2383 - #define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1) 2384 - #define PANEL_PORT_SELECT_DPC REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 2) 2385 - #define PANEL_PORT_SELECT_DPD REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 3) 2386 - #define PANEL_PORT_SELECT_VLV(port) REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, port) 2387 - #define PANEL_POWER_UP_DELAY_MASK REG_GENMASK(28, 16) 2388 - #define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0) 2389 - 2390 - #define _PP_OFF_DELAYS 0x6120C 2391 - #define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS) 2392 - #define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16) 2393 - #define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0) 2394 - 2395 - #define _PP_DIVISOR 0x61210 2396 - #define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR) 2397 - #define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8) 2398 - #define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0) 2399 - 2400 2569 /* Panel fitting */ 2401 
2570 #define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230) 2402 2571 #define PFIT_ENABLE (1 << 31) ··· 2366 2669 2367 2670 #define PCH_GTC_CTL _MMIO(0xe7000) 2368 2671 #define PCH_GTC_ENABLE (1 << 31) 2369 - 2370 - /* TV port control */ 2371 - #define TV_CTL _MMIO(0x68000) 2372 - /* Enables the TV encoder */ 2373 - # define TV_ENC_ENABLE (1 << 31) 2374 - /* Sources the TV encoder input from pipe B instead of A. */ 2375 - # define TV_ENC_PIPE_SEL_SHIFT 30 2376 - # define TV_ENC_PIPE_SEL_MASK (1 << 30) 2377 - # define TV_ENC_PIPE_SEL(pipe) ((pipe) << 30) 2378 - /* Outputs composite video (DAC A only) */ 2379 - # define TV_ENC_OUTPUT_COMPOSITE (0 << 28) 2380 - /* Outputs SVideo video (DAC B/C) */ 2381 - # define TV_ENC_OUTPUT_SVIDEO (1 << 28) 2382 - /* Outputs Component video (DAC A/B/C) */ 2383 - # define TV_ENC_OUTPUT_COMPONENT (2 << 28) 2384 - /* Outputs Composite and SVideo (DAC A/B/C) */ 2385 - # define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) 2386 - # define TV_TRILEVEL_SYNC (1 << 21) 2387 - /* Enables slow sync generation (945GM only) */ 2388 - # define TV_SLOW_SYNC (1 << 20) 2389 - /* Selects 4x oversampling for 480i and 576p */ 2390 - # define TV_OVERSAMPLE_4X (0 << 18) 2391 - /* Selects 2x oversampling for 720p and 1080i */ 2392 - # define TV_OVERSAMPLE_2X (1 << 18) 2393 - /* Selects no oversampling for 1080p */ 2394 - # define TV_OVERSAMPLE_NONE (2 << 18) 2395 - /* Selects 8x oversampling */ 2396 - # define TV_OVERSAMPLE_8X (3 << 18) 2397 - # define TV_OVERSAMPLE_MASK (3 << 18) 2398 - /* Selects progressive mode rather than interlaced */ 2399 - # define TV_PROGRESSIVE (1 << 17) 2400 - /* Sets the colorburst to PAL mode. Required for non-M PAL modes. 
*/ 2401 - # define TV_PAL_BURST (1 << 16) 2402 - /* Field for setting delay of Y compared to C */ 2403 - # define TV_YC_SKEW_MASK (7 << 12) 2404 - /* Enables a fix for 480p/576p standard definition modes on the 915GM only */ 2405 - # define TV_ENC_SDP_FIX (1 << 11) 2406 - /* 2407 - * Enables a fix for the 915GM only. 2408 - * 2409 - * Not sure what it does. 2410 - */ 2411 - # define TV_ENC_C0_FIX (1 << 10) 2412 - /* Bits that must be preserved by software */ 2413 - # define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf) 2414 - # define TV_FUSE_STATE_MASK (3 << 4) 2415 - /* Read-only state that reports all features enabled */ 2416 - # define TV_FUSE_STATE_ENABLED (0 << 4) 2417 - /* Read-only state that reports that Macrovision is disabled in hardware*/ 2418 - # define TV_FUSE_STATE_NO_MACROVISION (1 << 4) 2419 - /* Read-only state that reports that TV-out is disabled in hardware. */ 2420 - # define TV_FUSE_STATE_DISABLED (2 << 4) 2421 - /* Normal operation */ 2422 - # define TV_TEST_MODE_NORMAL (0 << 0) 2423 - /* Encoder test pattern 1 - combo pattern */ 2424 - # define TV_TEST_MODE_PATTERN_1 (1 << 0) 2425 - /* Encoder test pattern 2 - full screen vertical 75% color bars */ 2426 - # define TV_TEST_MODE_PATTERN_2 (2 << 0) 2427 - /* Encoder test pattern 3 - full screen horizontal 75% color bars */ 2428 - # define TV_TEST_MODE_PATTERN_3 (3 << 0) 2429 - /* Encoder test pattern 4 - random noise */ 2430 - # define TV_TEST_MODE_PATTERN_4 (4 << 0) 2431 - /* Encoder test pattern 5 - linear color ramps */ 2432 - # define TV_TEST_MODE_PATTERN_5 (5 << 0) 2433 - /* 2434 - * This test mode forces the DACs to 50% of full output. 
2435 - * 2436 - * This is used for load detection in combination with TVDAC_SENSE_MASK 2437 - */ 2438 - # define TV_TEST_MODE_MONITOR_DETECT (7 << 0) 2439 - # define TV_TEST_MODE_MASK (7 << 0) 2440 - 2441 - #define TV_DAC _MMIO(0x68004) 2442 - # define TV_DAC_SAVE 0x00ffff00 2443 - /* 2444 - * Reports that DAC state change logic has reported change (RO). 2445 - * 2446 - * This gets cleared when TV_DAC_STATE_EN is cleared 2447 - */ 2448 - # define TVDAC_STATE_CHG (1 << 31) 2449 - # define TVDAC_SENSE_MASK (7 << 28) 2450 - /* Reports that DAC A voltage is above the detect threshold */ 2451 - # define TVDAC_A_SENSE (1 << 30) 2452 - /* Reports that DAC B voltage is above the detect threshold */ 2453 - # define TVDAC_B_SENSE (1 << 29) 2454 - /* Reports that DAC C voltage is above the detect threshold */ 2455 - # define TVDAC_C_SENSE (1 << 28) 2456 - /* 2457 - * Enables DAC state detection logic, for load-based TV detection. 2458 - * 2459 - * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set 2460 - * to off, for load detection to work. 2461 - */ 2462 - # define TVDAC_STATE_CHG_EN (1 << 27) 2463 - /* Sets the DAC A sense value to high */ 2464 - # define TVDAC_A_SENSE_CTL (1 << 26) 2465 - /* Sets the DAC B sense value to high */ 2466 - # define TVDAC_B_SENSE_CTL (1 << 25) 2467 - /* Sets the DAC C sense value to high */ 2468 - # define TVDAC_C_SENSE_CTL (1 << 24) 2469 - /* Overrides the ENC_ENABLE and DAC voltage levels */ 2470 - # define DAC_CTL_OVERRIDE (1 << 7) 2471 - /* Sets the slew rate. 
Must be preserved in software */ 2472 - # define ENC_TVDAC_SLEW_FAST (1 << 6) 2473 - # define DAC_A_1_3_V (0 << 4) 2474 - # define DAC_A_1_1_V (1 << 4) 2475 - # define DAC_A_0_7_V (2 << 4) 2476 - # define DAC_A_MASK (3 << 4) 2477 - # define DAC_B_1_3_V (0 << 2) 2478 - # define DAC_B_1_1_V (1 << 2) 2479 - # define DAC_B_0_7_V (2 << 2) 2480 - # define DAC_B_MASK (3 << 2) 2481 - # define DAC_C_1_3_V (0 << 0) 2482 - # define DAC_C_1_1_V (1 << 0) 2483 - # define DAC_C_0_7_V (2 << 0) 2484 - # define DAC_C_MASK (3 << 0) 2485 - 2486 - /* 2487 - * CSC coefficients are stored in a floating point format with 9 bits of 2488 - * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, 2489 - * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with 2490 - * -1 (0x3) being the only legal negative value. 2491 - */ 2492 - #define TV_CSC_Y _MMIO(0x68010) 2493 - # define TV_RY_MASK 0x07ff0000 2494 - # define TV_RY_SHIFT 16 2495 - # define TV_GY_MASK 0x00000fff 2496 - # define TV_GY_SHIFT 0 2497 - 2498 - #define TV_CSC_Y2 _MMIO(0x68014) 2499 - # define TV_BY_MASK 0x07ff0000 2500 - # define TV_BY_SHIFT 16 2501 - /* 2502 - * Y attenuation for component video. 2503 - * 2504 - * Stored in 1.9 fixed point. 2505 - */ 2506 - # define TV_AY_MASK 0x000003ff 2507 - # define TV_AY_SHIFT 0 2508 - 2509 - #define TV_CSC_U _MMIO(0x68018) 2510 - # define TV_RU_MASK 0x07ff0000 2511 - # define TV_RU_SHIFT 16 2512 - # define TV_GU_MASK 0x000007ff 2513 - # define TV_GU_SHIFT 0 2514 - 2515 - #define TV_CSC_U2 _MMIO(0x6801c) 2516 - # define TV_BU_MASK 0x07ff0000 2517 - # define TV_BU_SHIFT 16 2518 - /* 2519 - * U attenuation for component video. 2520 - * 2521 - * Stored in 1.9 fixed point. 
2522 - */ 2523 - # define TV_AU_MASK 0x000003ff 2524 - # define TV_AU_SHIFT 0 2525 - 2526 - #define TV_CSC_V _MMIO(0x68020) 2527 - # define TV_RV_MASK 0x0fff0000 2528 - # define TV_RV_SHIFT 16 2529 - # define TV_GV_MASK 0x000007ff 2530 - # define TV_GV_SHIFT 0 2531 - 2532 - #define TV_CSC_V2 _MMIO(0x68024) 2533 - # define TV_BV_MASK 0x07ff0000 2534 - # define TV_BV_SHIFT 16 2535 - /* 2536 - * V attenuation for component video. 2537 - * 2538 - * Stored in 1.9 fixed point. 2539 - */ 2540 - # define TV_AV_MASK 0x000007ff 2541 - # define TV_AV_SHIFT 0 2542 - 2543 - #define TV_CLR_KNOBS _MMIO(0x68028) 2544 - /* 2s-complement brightness adjustment */ 2545 - # define TV_BRIGHTNESS_MASK 0xff000000 2546 - # define TV_BRIGHTNESS_SHIFT 24 2547 - /* Contrast adjustment, as a 2.6 unsigned floating point number */ 2548 - # define TV_CONTRAST_MASK 0x00ff0000 2549 - # define TV_CONTRAST_SHIFT 16 2550 - /* Saturation adjustment, as a 2.6 unsigned floating point number */ 2551 - # define TV_SATURATION_MASK 0x0000ff00 2552 - # define TV_SATURATION_SHIFT 8 2553 - /* Hue adjustment, as an integer phase angle in degrees */ 2554 - # define TV_HUE_MASK 0x000000ff 2555 - # define TV_HUE_SHIFT 0 2556 - 2557 - #define TV_CLR_LEVEL _MMIO(0x6802c) 2558 - /* Controls the DAC level for black */ 2559 - # define TV_BLACK_LEVEL_MASK 0x01ff0000 2560 - # define TV_BLACK_LEVEL_SHIFT 16 2561 - /* Controls the DAC level for blanking */ 2562 - # define TV_BLANK_LEVEL_MASK 0x000001ff 2563 - # define TV_BLANK_LEVEL_SHIFT 0 2564 - 2565 - #define TV_H_CTL_1 _MMIO(0x68030) 2566 - /* Number of pixels in the hsync. */ 2567 - # define TV_HSYNC_END_MASK 0x1fff0000 2568 - # define TV_HSYNC_END_SHIFT 16 2569 - /* Total number of pixels minus one in the line (display and blanking). 
*/ 2570 - # define TV_HTOTAL_MASK 0x00001fff 2571 - # define TV_HTOTAL_SHIFT 0 2572 - 2573 - #define TV_H_CTL_2 _MMIO(0x68034) 2574 - /* Enables the colorburst (needed for non-component color) */ 2575 - # define TV_BURST_ENA (1 << 31) 2576 - /* Offset of the colorburst from the start of hsync, in pixels minus one. */ 2577 - # define TV_HBURST_START_SHIFT 16 2578 - # define TV_HBURST_START_MASK 0x1fff0000 2579 - /* Length of the colorburst */ 2580 - # define TV_HBURST_LEN_SHIFT 0 2581 - # define TV_HBURST_LEN_MASK 0x0001fff 2582 - 2583 - #define TV_H_CTL_3 _MMIO(0x68038) 2584 - /* End of hblank, measured in pixels minus one from start of hsync */ 2585 - # define TV_HBLANK_END_SHIFT 16 2586 - # define TV_HBLANK_END_MASK 0x1fff0000 2587 - /* Start of hblank, measured in pixels minus one from start of hsync */ 2588 - # define TV_HBLANK_START_SHIFT 0 2589 - # define TV_HBLANK_START_MASK 0x0001fff 2590 - 2591 - #define TV_V_CTL_1 _MMIO(0x6803c) 2592 - /* XXX */ 2593 - # define TV_NBR_END_SHIFT 16 2594 - # define TV_NBR_END_MASK 0x07ff0000 2595 - /* XXX */ 2596 - # define TV_VI_END_F1_SHIFT 8 2597 - # define TV_VI_END_F1_MASK 0x00003f00 2598 - /* XXX */ 2599 - # define TV_VI_END_F2_SHIFT 0 2600 - # define TV_VI_END_F2_MASK 0x0000003f 2601 - 2602 - #define TV_V_CTL_2 _MMIO(0x68040) 2603 - /* Length of vsync, in half lines */ 2604 - # define TV_VSYNC_LEN_MASK 0x07ff0000 2605 - # define TV_VSYNC_LEN_SHIFT 16 2606 - /* Offset of the start of vsync in field 1, measured in one less than the 2607 - * number of half lines. 2608 - */ 2609 - # define TV_VSYNC_START_F1_MASK 0x00007f00 2610 - # define TV_VSYNC_START_F1_SHIFT 8 2611 - /* 2612 - * Offset of the start of vsync in field 2, measured in one less than the 2613 - * number of half lines. 
2614 - */ 2615 - # define TV_VSYNC_START_F2_MASK 0x0000007f 2616 - # define TV_VSYNC_START_F2_SHIFT 0 2617 - 2618 - #define TV_V_CTL_3 _MMIO(0x68044) 2619 - /* Enables generation of the equalization signal */ 2620 - # define TV_EQUAL_ENA (1 << 31) 2621 - /* Length of vsync, in half lines */ 2622 - # define TV_VEQ_LEN_MASK 0x007f0000 2623 - # define TV_VEQ_LEN_SHIFT 16 2624 - /* Offset of the start of equalization in field 1, measured in one less than 2625 - * the number of half lines. 2626 - */ 2627 - # define TV_VEQ_START_F1_MASK 0x0007f00 2628 - # define TV_VEQ_START_F1_SHIFT 8 2629 - /* 2630 - * Offset of the start of equalization in field 2, measured in one less than 2631 - * the number of half lines. 2632 - */ 2633 - # define TV_VEQ_START_F2_MASK 0x000007f 2634 - # define TV_VEQ_START_F2_SHIFT 0 2635 - 2636 - #define TV_V_CTL_4 _MMIO(0x68048) 2637 - /* 2638 - * Offset to start of vertical colorburst, measured in one less than the 2639 - * number of lines from vertical start. 2640 - */ 2641 - # define TV_VBURST_START_F1_MASK 0x003f0000 2642 - # define TV_VBURST_START_F1_SHIFT 16 2643 - /* 2644 - * Offset to the end of vertical colorburst, measured in one less than the 2645 - * number of lines from the start of NBR. 2646 - */ 2647 - # define TV_VBURST_END_F1_MASK 0x000000ff 2648 - # define TV_VBURST_END_F1_SHIFT 0 2649 - 2650 - #define TV_V_CTL_5 _MMIO(0x6804c) 2651 - /* 2652 - * Offset to start of vertical colorburst, measured in one less than the 2653 - * number of lines from vertical start. 2654 - */ 2655 - # define TV_VBURST_START_F2_MASK 0x003f0000 2656 - # define TV_VBURST_START_F2_SHIFT 16 2657 - /* 2658 - * Offset to the end of vertical colorburst, measured in one less than the 2659 - * number of lines from the start of NBR. 
2660 - */ 2661 - # define TV_VBURST_END_F2_MASK 0x000000ff 2662 - # define TV_VBURST_END_F2_SHIFT 0 2663 - 2664 - #define TV_V_CTL_6 _MMIO(0x68050) 2665 - /* 2666 - * Offset to start of vertical colorburst, measured in one less than the 2667 - * number of lines from vertical start. 2668 - */ 2669 - # define TV_VBURST_START_F3_MASK 0x003f0000 2670 - # define TV_VBURST_START_F3_SHIFT 16 2671 - /* 2672 - * Offset to the end of vertical colorburst, measured in one less than the 2673 - * number of lines from the start of NBR. 2674 - */ 2675 - # define TV_VBURST_END_F3_MASK 0x000000ff 2676 - # define TV_VBURST_END_F3_SHIFT 0 2677 - 2678 - #define TV_V_CTL_7 _MMIO(0x68054) 2679 - /* 2680 - * Offset to start of vertical colorburst, measured in one less than the 2681 - * number of lines from vertical start. 2682 - */ 2683 - # define TV_VBURST_START_F4_MASK 0x003f0000 2684 - # define TV_VBURST_START_F4_SHIFT 16 2685 - /* 2686 - * Offset to the end of vertical colorburst, measured in one less than the 2687 - * number of lines from the start of NBR. 
2688 - */ 2689 - # define TV_VBURST_END_F4_MASK 0x000000ff 2690 - # define TV_VBURST_END_F4_SHIFT 0 2691 - 2692 - #define TV_SC_CTL_1 _MMIO(0x68060) 2693 - /* Turns on the first subcarrier phase generation DDA */ 2694 - # define TV_SC_DDA1_EN (1 << 31) 2695 - /* Turns on the first subcarrier phase generation DDA */ 2696 - # define TV_SC_DDA2_EN (1 << 30) 2697 - /* Turns on the first subcarrier phase generation DDA */ 2698 - # define TV_SC_DDA3_EN (1 << 29) 2699 - /* Sets the subcarrier DDA to reset frequency every other field */ 2700 - # define TV_SC_RESET_EVERY_2 (0 << 24) 2701 - /* Sets the subcarrier DDA to reset frequency every fourth field */ 2702 - # define TV_SC_RESET_EVERY_4 (1 << 24) 2703 - /* Sets the subcarrier DDA to reset frequency every eighth field */ 2704 - # define TV_SC_RESET_EVERY_8 (2 << 24) 2705 - /* Sets the subcarrier DDA to never reset the frequency */ 2706 - # define TV_SC_RESET_NEVER (3 << 24) 2707 - /* Sets the peak amplitude of the colorburst.*/ 2708 - # define TV_BURST_LEVEL_MASK 0x00ff0000 2709 - # define TV_BURST_LEVEL_SHIFT 16 2710 - /* Sets the increment of the first subcarrier phase generation DDA */ 2711 - # define TV_SCDDA1_INC_MASK 0x00000fff 2712 - # define TV_SCDDA1_INC_SHIFT 0 2713 - 2714 - #define TV_SC_CTL_2 _MMIO(0x68064) 2715 - /* Sets the rollover for the second subcarrier phase generation DDA */ 2716 - # define TV_SCDDA2_SIZE_MASK 0x7fff0000 2717 - # define TV_SCDDA2_SIZE_SHIFT 16 2718 - /* Sets the increent of the second subcarrier phase generation DDA */ 2719 - # define TV_SCDDA2_INC_MASK 0x00007fff 2720 - # define TV_SCDDA2_INC_SHIFT 0 2721 - 2722 - #define TV_SC_CTL_3 _MMIO(0x68068) 2723 - /* Sets the rollover for the third subcarrier phase generation DDA */ 2724 - # define TV_SCDDA3_SIZE_MASK 0x7fff0000 2725 - # define TV_SCDDA3_SIZE_SHIFT 16 2726 - /* Sets the increent of the third subcarrier phase generation DDA */ 2727 - # define TV_SCDDA3_INC_MASK 0x00007fff 2728 - # define TV_SCDDA3_INC_SHIFT 0 2729 - 2730 - 
#define TV_WIN_POS _MMIO(0x68070) 2731 - /* X coordinate of the display from the start of horizontal active */ 2732 - # define TV_XPOS_MASK 0x1fff0000 2733 - # define TV_XPOS_SHIFT 16 2734 - /* Y coordinate of the display from the start of vertical active (NBR) */ 2735 - # define TV_YPOS_MASK 0x00000fff 2736 - # define TV_YPOS_SHIFT 0 2737 - 2738 - #define TV_WIN_SIZE _MMIO(0x68074) 2739 - /* Horizontal size of the display window, measured in pixels*/ 2740 - # define TV_XSIZE_MASK 0x1fff0000 2741 - # define TV_XSIZE_SHIFT 16 2742 - /* 2743 - * Vertical size of the display window, measured in pixels. 2744 - * 2745 - * Must be even for interlaced modes. 2746 - */ 2747 - # define TV_YSIZE_MASK 0x00000fff 2748 - # define TV_YSIZE_SHIFT 0 2749 - 2750 - #define TV_FILTER_CTL_1 _MMIO(0x68080) 2751 - /* 2752 - * Enables automatic scaling calculation. 2753 - * 2754 - * If set, the rest of the registers are ignored, and the calculated values can 2755 - * be read back from the register. 2756 - */ 2757 - # define TV_AUTO_SCALE (1 << 31) 2758 - /* 2759 - * Disables the vertical filter. 2760 - * 2761 - * This is required on modes more than 1024 pixels wide */ 2762 - # define TV_V_FILTER_BYPASS (1 << 29) 2763 - /* Enables adaptive vertical filtering */ 2764 - # define TV_VADAPT (1 << 28) 2765 - # define TV_VADAPT_MODE_MASK (3 << 26) 2766 - /* Selects the least adaptive vertical filtering mode */ 2767 - # define TV_VADAPT_MODE_LEAST (0 << 26) 2768 - /* Selects the moderately adaptive vertical filtering mode */ 2769 - # define TV_VADAPT_MODE_MODERATE (1 << 26) 2770 - /* Selects the most adaptive vertical filtering mode */ 2771 - # define TV_VADAPT_MODE_MOST (3 << 26) 2772 - /* 2773 - * Sets the horizontal scaling factor. 2774 - * 2775 - * This should be the fractional part of the horizontal scaling factor divided 2776 - * by the oversampling rate. 
TV_HSCALE should be less than 1, and set to: 2777 - * 2778 - * (src width - 1) / ((oversample * dest width) - 1) 2779 - */ 2780 - # define TV_HSCALE_FRAC_MASK 0x00003fff 2781 - # define TV_HSCALE_FRAC_SHIFT 0 2782 - 2783 - #define TV_FILTER_CTL_2 _MMIO(0x68084) 2784 - /* 2785 - * Sets the integer part of the 3.15 fixed-point vertical scaling factor. 2786 - * 2787 - * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) 2788 - */ 2789 - # define TV_VSCALE_INT_MASK 0x00038000 2790 - # define TV_VSCALE_INT_SHIFT 15 2791 - /* 2792 - * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. 2793 - * 2794 - * \sa TV_VSCALE_INT_MASK 2795 - */ 2796 - # define TV_VSCALE_FRAC_MASK 0x00007fff 2797 - # define TV_VSCALE_FRAC_SHIFT 0 2798 - 2799 - #define TV_FILTER_CTL_3 _MMIO(0x68088) 2800 - /* 2801 - * Sets the integer part of the 3.15 fixed-point vertical scaling factor. 2802 - * 2803 - * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) 2804 - * 2805 - * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. 2806 - */ 2807 - # define TV_VSCALE_IP_INT_MASK 0x00038000 2808 - # define TV_VSCALE_IP_INT_SHIFT 15 2809 - /* 2810 - * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. 2811 - * 2812 - * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. 2813 - * 2814 - * \sa TV_VSCALE_IP_INT_MASK 2815 - */ 2816 - # define TV_VSCALE_IP_FRAC_MASK 0x00007fff 2817 - # define TV_VSCALE_IP_FRAC_SHIFT 0 2818 - 2819 - #define TV_CC_CONTROL _MMIO(0x68090) 2820 - # define TV_CC_ENABLE (1 << 31) 2821 - /* 2822 - * Specifies which field to send the CC data in. 2823 - * 2824 - * CC data is usually sent in field 0. 2825 - */ 2826 - # define TV_CC_FID_MASK (1 << 27) 2827 - # define TV_CC_FID_SHIFT 27 2828 - /* Sets the horizontal position of the CC data. Usually 135. */ 2829 - # define TV_CC_HOFF_MASK 0x03ff0000 2830 - # define TV_CC_HOFF_SHIFT 16 2831 - /* Sets the vertical position of the CC data. 
Usually 21 */ 2832 - # define TV_CC_LINE_MASK 0x0000003f 2833 - # define TV_CC_LINE_SHIFT 0 2834 - 2835 - #define TV_CC_DATA _MMIO(0x68094) 2836 - # define TV_CC_RDY (1 << 31) 2837 - /* Second word of CC data to be transmitted. */ 2838 - # define TV_CC_DATA_2_MASK 0x007f0000 2839 - # define TV_CC_DATA_2_SHIFT 16 2840 - /* First word of CC data to be transmitted. */ 2841 - # define TV_CC_DATA_1_MASK 0x0000007f 2842 - # define TV_CC_DATA_1_SHIFT 0 2843 - 2844 - #define TV_H_LUMA(i) _MMIO(0x68100 + (i) * 4) /* 60 registers */ 2845 - #define TV_H_CHROMA(i) _MMIO(0x68200 + (i) * 4) /* 60 registers */ 2846 - #define TV_V_LUMA(i) _MMIO(0x68300 + (i) * 4) /* 43 registers */ 2847 - #define TV_V_CHROMA(i) _MMIO(0x68400 + (i) * 4) /* 43 registers */ 2848 2672 2849 2673 /* Display Port */ 2850 2674 #define DP_A _MMIO(0x64000) /* eDP */ ··· 2459 3241 2460 3242 /* A fantasy */ 2461 3243 #define DP_DETECTED (1 << 2) 2462 - 2463 - /* The aux channel provides a way to talk to the 2464 - * signal sink for DDC etc. 
Max packet size supported 2465 - * is 20 bytes in each direction, hence the 5 fixed 2466 - * data registers 2467 - */ 2468 - #define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010) 2469 - #define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014) 2470 - 2471 - #define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110) 2472 - #define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114) 2473 - 2474 - #define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL) 2475 - #define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ 2476 - 2477 - #define _XELPDP_USBC1_AUX_CH_CTL 0x16F210 2478 - #define _XELPDP_USBC2_AUX_CH_CTL 0x16F410 2479 - #define _XELPDP_USBC3_AUX_CH_CTL 0x16F610 2480 - #define _XELPDP_USBC4_AUX_CH_CTL 0x16F810 2481 - 2482 - #define XELPDP_DP_AUX_CH_CTL(aux_ch) _MMIO(_PICK(aux_ch, \ 2483 - _DPA_AUX_CH_CTL, \ 2484 - _DPB_AUX_CH_CTL, \ 2485 - 0, /* port/aux_ch C is non-existent */ \ 2486 - _XELPDP_USBC1_AUX_CH_CTL, \ 2487 - _XELPDP_USBC2_AUX_CH_CTL, \ 2488 - _XELPDP_USBC3_AUX_CH_CTL, \ 2489 - _XELPDP_USBC4_AUX_CH_CTL)) 2490 - 2491 - #define _XELPDP_USBC1_AUX_CH_DATA1 0x16F214 2492 - #define _XELPDP_USBC2_AUX_CH_DATA1 0x16F414 2493 - #define _XELPDP_USBC3_AUX_CH_DATA1 0x16F614 2494 - #define _XELPDP_USBC4_AUX_CH_DATA1 0x16F814 2495 - 2496 - #define XELPDP_DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PICK(aux_ch, \ 2497 - _DPA_AUX_CH_DATA1, \ 2498 - _DPB_AUX_CH_DATA1, \ 2499 - 0, /* port/aux_ch C is non-existent */ \ 2500 - _XELPDP_USBC1_AUX_CH_DATA1, \ 2501 - _XELPDP_USBC2_AUX_CH_DATA1, \ 2502 - _XELPDP_USBC3_AUX_CH_DATA1, \ 2503 - _XELPDP_USBC4_AUX_CH_DATA1) + (i) * 4) 2504 - 2505 - #define DP_AUX_CH_CTL_SEND_BUSY (1 << 31) 2506 - #define DP_AUX_CH_CTL_DONE (1 << 30) 2507 - #define DP_AUX_CH_CTL_INTERRUPT (1 << 29) 2508 - #define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28) 2509 - #define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26) 2510 - #define 
DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26) 2511 - #define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26) 2512 - #define DP_AUX_CH_CTL_TIME_OUT_MAX (3 << 26) /* Varies per platform */ 2513 - #define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26) 2514 - #define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25) 2515 - #define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20) 2516 - #define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20 2517 - #define XELPDP_DP_AUX_CH_CTL_POWER_REQUEST REG_BIT(19) 2518 - #define XELPDP_DP_AUX_CH_CTL_POWER_STATUS REG_BIT(18) 2519 - #define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16) 2520 - #define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16 2521 - #define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15) 2522 - #define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14) 2523 - #define DP_AUX_CH_CTL_SYNC_TEST (1 << 13) 2524 - #define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12) 2525 - #define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) 2526 - #define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) 2527 - #define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 2528 - #define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14) 2529 - #define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13) 2530 - #define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12) 2531 - #define DP_AUX_CH_CTL_TBT_IO (1 << 11) 2532 - #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5) 2533 - #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5) 2534 - #define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1) 2535 3244 2536 3245 /* 2537 3246 * Computing GMCH M and N values for the Display Port link ··· 2964 3819 #define I965_CURSOR_MAX_WM 32 2965 3820 #define I965_CURSOR_DFT_WM 8 2966 3821 2967 - /* Watermark register definitions for SKL */ 2968 - #define _CUR_WM_A_0 0x70140 2969 - #define _CUR_WM_B_0 0x71140 2970 - #define _CUR_WM_SAGV_A 0x70158 2971 - #define _CUR_WM_SAGV_B 0x71158 2972 - #define _CUR_WM_SAGV_TRANS_A 0x7015C 2973 - #define _CUR_WM_SAGV_TRANS_B 0x7115C 2974 - #define _CUR_WM_TRANS_A 0x70168 2975 - #define _CUR_WM_TRANS_B 0x71168 2976 - #define _PLANE_WM_1_A_0 0x70240 2977 
- #define _PLANE_WM_1_B_0 0x71240 2978 - #define _PLANE_WM_2_A_0 0x70340 2979 - #define _PLANE_WM_2_B_0 0x71340 2980 - #define _PLANE_WM_SAGV_1_A 0x70258 2981 - #define _PLANE_WM_SAGV_1_B 0x71258 2982 - #define _PLANE_WM_SAGV_2_A 0x70358 2983 - #define _PLANE_WM_SAGV_2_B 0x71358 2984 - #define _PLANE_WM_SAGV_TRANS_1_A 0x7025C 2985 - #define _PLANE_WM_SAGV_TRANS_1_B 0x7125C 2986 - #define _PLANE_WM_SAGV_TRANS_2_A 0x7035C 2987 - #define _PLANE_WM_SAGV_TRANS_2_B 0x7135C 2988 - #define _PLANE_WM_TRANS_1_A 0x70268 2989 - #define _PLANE_WM_TRANS_1_B 0x71268 2990 - #define _PLANE_WM_TRANS_2_A 0x70368 2991 - #define _PLANE_WM_TRANS_2_B 0x71368 2992 - #define PLANE_WM_EN (1 << 31) 2993 - #define PLANE_WM_IGNORE_LINES (1 << 30) 2994 - #define PLANE_WM_LINES_MASK REG_GENMASK(26, 14) 2995 - #define PLANE_WM_BLOCKS_MASK REG_GENMASK(11, 0) 2996 - 2997 - #define _CUR_WM_0(pipe) _PIPE(pipe, _CUR_WM_A_0, _CUR_WM_B_0) 2998 - #define CUR_WM(pipe, level) _MMIO(_CUR_WM_0(pipe) + ((4) * (level))) 2999 - #define CUR_WM_SAGV(pipe) _MMIO_PIPE(pipe, _CUR_WM_SAGV_A, _CUR_WM_SAGV_B) 3000 - #define CUR_WM_SAGV_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_SAGV_TRANS_A, _CUR_WM_SAGV_TRANS_B) 3001 - #define CUR_WM_TRANS(pipe) _MMIO_PIPE(pipe, _CUR_WM_TRANS_A, _CUR_WM_TRANS_B) 3002 - #define _PLANE_WM_1(pipe) _PIPE(pipe, _PLANE_WM_1_A_0, _PLANE_WM_1_B_0) 3003 - #define _PLANE_WM_2(pipe) _PIPE(pipe, _PLANE_WM_2_A_0, _PLANE_WM_2_B_0) 3004 - #define _PLANE_WM_BASE(pipe, plane) \ 3005 - _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe)) 3006 - #define PLANE_WM(pipe, plane, level) \ 3007 - _MMIO(_PLANE_WM_BASE(pipe, plane) + ((4) * (level))) 3008 - #define _PLANE_WM_SAGV_1(pipe) \ 3009 - _PIPE(pipe, _PLANE_WM_SAGV_1_A, _PLANE_WM_SAGV_1_B) 3010 - #define _PLANE_WM_SAGV_2(pipe) \ 3011 - _PIPE(pipe, _PLANE_WM_SAGV_2_A, _PLANE_WM_SAGV_2_B) 3012 - #define PLANE_WM_SAGV(pipe, plane) \ 3013 - _MMIO(_PLANE(plane, _PLANE_WM_SAGV_1(pipe), _PLANE_WM_SAGV_2(pipe))) 3014 - #define _PLANE_WM_SAGV_TRANS_1(pipe) \ 3015 - 
_PIPE(pipe, _PLANE_WM_SAGV_TRANS_1_A, _PLANE_WM_SAGV_TRANS_1_B) 3016 - #define _PLANE_WM_SAGV_TRANS_2(pipe) \ 3017 - _PIPE(pipe, _PLANE_WM_SAGV_TRANS_2_A, _PLANE_WM_SAGV_TRANS_2_B) 3018 - #define PLANE_WM_SAGV_TRANS(pipe, plane) \ 3019 - _MMIO(_PLANE(plane, _PLANE_WM_SAGV_TRANS_1(pipe), _PLANE_WM_SAGV_TRANS_2(pipe))) 3020 - #define _PLANE_WM_TRANS_1(pipe) \ 3021 - _PIPE(pipe, _PLANE_WM_TRANS_1_A, _PLANE_WM_TRANS_1_B) 3022 - #define _PLANE_WM_TRANS_2(pipe) \ 3023 - _PIPE(pipe, _PLANE_WM_TRANS_2_A, _PLANE_WM_TRANS_2_B) 3024 - #define PLANE_WM_TRANS(pipe, plane) \ 3025 - _MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))) 3026 - 3027 3822 /* define the Watermark register on Ironlake */ 3028 3823 #define _WM0_PIPEA_ILK 0x45100 3029 3824 #define _WM0_PIPEB_ILK 0x45104 ··· 3073 3988 #define CUR_FBC_EN REG_BIT(31) 3074 3989 #define CUR_FBC_HEIGHT_MASK REG_GENMASK(7, 0) 3075 3990 #define CUR_FBC_HEIGHT(h) REG_FIELD_PREP(CUR_FBC_HEIGHT_MASK, (h)) 3991 + #define _CUR_CHICKEN_A 0x700a4 /* mtl+ */ 3076 3992 #define _CURASURFLIVE 0x700ac /* g4x+ */ 3077 3993 #define _CURBCNTR 0x700c0 3078 3994 #define _CURBBASE 0x700c4 ··· 3088 4002 #define CURPOS(pipe) _MMIO_CURSOR2(pipe, _CURAPOS) 3089 4003 #define CURSIZE(pipe) _MMIO_CURSOR2(pipe, _CURASIZE) 3090 4004 #define CUR_FBC_CTL(pipe) _MMIO_CURSOR2(pipe, _CUR_FBC_CTL_A) 4005 + #define CUR_CHICKEN(pipe) _MMIO_CURSOR2(pipe, _CUR_CHICKEN_A) 3091 4006 #define CURSURFLIVE(pipe) _MMIO_CURSOR2(pipe, _CURASURFLIVE) 3092 4007 3093 4008 #define CURSOR_A_OFFSET 0x70080 ··· 3741 4654 #define PLANE_COLOR_ALPHA_DISABLE REG_FIELD_PREP(PLANE_COLOR_ALPHA_MASK, 0) 3742 4655 #define PLANE_COLOR_ALPHA_SW_PREMULTIPLY REG_FIELD_PREP(PLANE_COLOR_ALPHA_MASK, 2) 3743 4656 #define PLANE_COLOR_ALPHA_HW_PREMULTIPLY REG_FIELD_PREP(PLANE_COLOR_ALPHA_MASK, 3) 4657 + #define _PLANE_CHICKEN_1_A 0x7026C /* tgl+ */ 4658 + #define _PLANE_CHICKEN_2_A 0x7036C /* tgl+ */ 4659 + #define PLANE_CHICKEN_DISABLE_DPT REG_BIT(19) /* mtl+ */ 3744 4660 #define 
_PLANE_BUF_CFG_1_A 0x7027c 3745 4661 #define _PLANE_BUF_CFG_2_A 0x7037c 4662 + /* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */ 4663 + #define PLANE_BUF_END_MASK REG_GENMASK(27, 16) 4664 + #define PLANE_BUF_END(end) REG_FIELD_PREP(PLANE_BUF_END_MASK, (end)) 4665 + #define PLANE_BUF_START_MASK REG_GENMASK(11, 0) 4666 + #define PLANE_BUF_START(start) REG_FIELD_PREP(PLANE_BUF_START_MASK, (start)) 3746 4667 #define _PLANE_NV12_BUF_CFG_1_A 0x70278 3747 4668 #define _PLANE_NV12_BUF_CFG_2_A 0x70378 3748 4669 ··· 3894 4799 #define PLANE_SURFLIVE(pipe, plane) \ 3895 4800 _MMIO_PLANE(plane, _PLANE_SURFLIVE_1(pipe), _PLANE_SURFLIVE_2(pipe)) 3896 4801 3897 - #define _PLANE_BUF_CFG_1_B 0x7127c 3898 - #define _PLANE_BUF_CFG_2_B 0x7137c 3899 - /* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */ 3900 - #define PLANE_BUF_END_MASK REG_GENMASK(27, 16) 3901 - #define PLANE_BUF_END(end) REG_FIELD_PREP(PLANE_BUF_END_MASK, (end)) 3902 - #define PLANE_BUF_START_MASK REG_GENMASK(11, 0) 3903 - #define PLANE_BUF_START(start) REG_FIELD_PREP(PLANE_BUF_START_MASK, (start)) 3904 - #define _PLANE_BUF_CFG_1(pipe) \ 3905 - _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B) 3906 - #define _PLANE_BUF_CFG_2(pipe) \ 3907 - _PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B) 3908 - #define PLANE_BUF_CFG(pipe, plane) \ 3909 - _MMIO_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe)) 3910 - 3911 - #define _PLANE_NV12_BUF_CFG_1_B 0x71278 3912 - #define _PLANE_NV12_BUF_CFG_2_B 0x71378 3913 - #define _PLANE_NV12_BUF_CFG_1(pipe) \ 3914 - _PIPE(pipe, _PLANE_NV12_BUF_CFG_1_A, _PLANE_NV12_BUF_CFG_1_B) 3915 - #define _PLANE_NV12_BUF_CFG_2(pipe) \ 3916 - _PIPE(pipe, _PLANE_NV12_BUF_CFG_2_A, _PLANE_NV12_BUF_CFG_2_B) 3917 - #define PLANE_NV12_BUF_CFG(pipe, plane) \ 3918 - _MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe)) 4802 + #define _PLANE_CHICKEN_1_B 0x7126c 4803 + #define _PLANE_CHICKEN_2_B 0x7136c 4804 + #define _PLANE_CHICKEN_1(pipe) _PIPE(pipe, _PLANE_CHICKEN_1_A, 
_PLANE_CHICKEN_1_B) 4805 + #define _PLANE_CHICKEN_2(pipe) _PIPE(pipe, _PLANE_CHICKEN_2_A, _PLANE_CHICKEN_2_B) 4806 + #define PLANE_CHICKEN(pipe, plane) \ 4807 + _MMIO_PLANE(plane, _PLANE_CHICKEN_1(pipe), _PLANE_CHICKEN_2(pipe)) 3919 4808 3920 4809 #define _PLANE_AUX_DIST_1_B 0x711c0 3921 4810 #define _PLANE_AUX_DIST_2_B 0x712c0 ··· 3938 4859 #define PLANE_COLOR_CTL(pipe, plane) \ 3939 4860 _MMIO_PLANE(plane, _PLANE_COLOR_CTL_1(pipe), _PLANE_COLOR_CTL_2(pipe)) 3940 4861 3941 - #define _SEL_FETCH_PLANE_BASE_1_A 0x70890 3942 - #define _SEL_FETCH_PLANE_BASE_2_A 0x708B0 3943 - #define _SEL_FETCH_PLANE_BASE_3_A 0x708D0 3944 - #define _SEL_FETCH_PLANE_BASE_4_A 0x708F0 3945 - #define _SEL_FETCH_PLANE_BASE_5_A 0x70920 3946 - #define _SEL_FETCH_PLANE_BASE_6_A 0x70940 3947 - #define _SEL_FETCH_PLANE_BASE_7_A 0x70960 3948 - #define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880 3949 - #define _SEL_FETCH_PLANE_BASE_1_B 0x71890 3950 - 3951 - #define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \ 3952 - _SEL_FETCH_PLANE_BASE_1_A, \ 3953 - _SEL_FETCH_PLANE_BASE_2_A, \ 3954 - _SEL_FETCH_PLANE_BASE_3_A, \ 3955 - _SEL_FETCH_PLANE_BASE_4_A, \ 3956 - _SEL_FETCH_PLANE_BASE_5_A, \ 3957 - _SEL_FETCH_PLANE_BASE_6_A, \ 3958 - _SEL_FETCH_PLANE_BASE_7_A, \ 3959 - _SEL_FETCH_PLANE_BASE_CUR_A) 3960 - #define _SEL_FETCH_PLANE_BASE_1(pipe) _PIPE(pipe, _SEL_FETCH_PLANE_BASE_1_A, _SEL_FETCH_PLANE_BASE_1_B) 3961 - #define _SEL_FETCH_PLANE_BASE(pipe, plane) (_SEL_FETCH_PLANE_BASE_1(pipe) - \ 3962 - _SEL_FETCH_PLANE_BASE_1_A + \ 3963 - _SEL_FETCH_PLANE_BASE_A(plane)) 3964 - 3965 - #define _SEL_FETCH_PLANE_CTL_1_A 0x70890 3966 - #define PLANE_SEL_FETCH_CTL(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \ 3967 - _SEL_FETCH_PLANE_CTL_1_A - \ 3968 - _SEL_FETCH_PLANE_BASE_1_A) 3969 - #define PLANE_SEL_FETCH_CTL_ENABLE REG_BIT(31) 3970 - 3971 - #define _SEL_FETCH_PLANE_POS_1_A 0x70894 3972 - #define PLANE_SEL_FETCH_POS(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \ 3973 - _SEL_FETCH_PLANE_POS_1_A - 
\ 3974 - _SEL_FETCH_PLANE_BASE_1_A) 3975 - 3976 - #define _SEL_FETCH_PLANE_SIZE_1_A 0x70898 3977 - #define PLANE_SEL_FETCH_SIZE(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \ 3978 - _SEL_FETCH_PLANE_SIZE_1_A - \ 3979 - _SEL_FETCH_PLANE_BASE_1_A) 3980 - 3981 - #define _SEL_FETCH_PLANE_OFFSET_1_A 0x7089C 3982 - #define PLANE_SEL_FETCH_OFFSET(pipe, plane) _MMIO(_SEL_FETCH_PLANE_BASE(pipe, plane) + \ 3983 - _SEL_FETCH_PLANE_OFFSET_1_A - \ 3984 - _SEL_FETCH_PLANE_BASE_1_A) 3985 - 3986 - /* SKL new cursor registers */ 3987 - #define _CUR_BUF_CFG_A 0x7017c 3988 - #define _CUR_BUF_CFG_B 0x7117c 3989 - #define CUR_BUF_CFG(pipe) _MMIO_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B) 3990 - 3991 4862 /* VBIOS regs */ 3992 4863 #define VGACNTRL _MMIO(0x71400) 3993 4864 # define VGA_DISP_DISABLE (1 << 31) ··· 3967 4938 #define RR_HW_LOW_POWER_FRAMES_MASK 0xff 3968 4939 #define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00 3969 4940 3970 - #define FDI_PLL_BIOS_0 _MMIO(0x46000) 3971 - #define FDI_PLL_FB_CLOCK_MASK 0xff 3972 - #define FDI_PLL_BIOS_1 _MMIO(0x46004) 3973 - #define FDI_PLL_BIOS_2 _MMIO(0x46008) 3974 - #define DISPLAY_PORT_PLL_BIOS_0 _MMIO(0x4600c) 3975 - #define DISPLAY_PORT_PLL_BIOS_1 _MMIO(0x46010) 3976 - #define DISPLAY_PORT_PLL_BIOS_2 _MMIO(0x46014) 3977 - 3978 4941 #define PCH_3DCGDIS0 _MMIO(0x46020) 3979 4942 # define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) 3980 4943 # define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) 3981 4944 3982 4945 #define PCH_3DCGDIS1 _MMIO(0x46024) 3983 4946 # define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11) 3984 - 3985 - #define FDI_PLL_FREQ_CTL _MMIO(0x46030) 3986 - #define FDI_PLL_FREQ_CHANGE_REQUEST (1 << 24) 3987 - #define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00 3988 - #define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff 3989 - 3990 4947 3991 4948 #define _PIPEA_DATA_M1 0x60030 3992 4949 #define _PIPEA_DATA_N1 0x60034 ··· 4526 5511 #define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14) 4527 5512 4528 5513 #define CHICKEN_MISC_2 _MMIO(0x42084) 5514 + 
#define CHICKEN_MISC_DISABLE_DPT REG_BIT(30) /* adl,dg2 */ 4529 5515 #define KBL_ARB_FILL_SPARE_14 REG_BIT(14) 4530 5516 #define KBL_ARB_FILL_SPARE_13 REG_BIT(13) 4531 5517 #define GLK_CL2_PWR_DOWN (1 << 12) ··· 4596 5580 #define DISP_ARB_CTL2 _MMIO(0x45004) 4597 5581 #define DISP_DATA_PARTITION_5_6 (1 << 6) 4598 5582 #define DISP_IPC_ENABLE (1 << 3) 4599 - 4600 - /* 4601 - * The below are numbered starting from "S1" on gen11/gen12, but starting 4602 - * with display 13, the bspec switches to a 0-based numbering scheme 4603 - * (although the addresses stay the same so new S0 = old S1, new S1 = old S2). 4604 - * We'll just use the 0-based numbering here for all platforms since it's the 4605 - * way things will be named by the hardware team going forward, plus it's more 4606 - * consistent with how most of the rest of our registers are named. 4607 - */ 4608 - #define _DBUF_CTL_S0 0x45008 4609 - #define _DBUF_CTL_S1 0x44FE8 4610 - #define _DBUF_CTL_S2 0x44300 4611 - #define _DBUF_CTL_S3 0x44304 4612 - #define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \ 4613 - _DBUF_CTL_S0, \ 4614 - _DBUF_CTL_S1, \ 4615 - _DBUF_CTL_S2, \ 4616 - _DBUF_CTL_S3)) 4617 - #define DBUF_POWER_REQUEST REG_BIT(31) 4618 - #define DBUF_POWER_STATE REG_BIT(30) 4619 - #define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19) 4620 - #define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x) 4621 - #define DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(18, 16) /* ADL-P+ */ 4622 - #define DBUF_MIN_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_MIN_TRACKER_STATE_SERVICE_MASK, x) /* ADL-P+ */ 4623 5583 4624 5584 #define GEN7_MSG_CTL _MMIO(0x45010) 4625 5585 #define WAIT_FOR_PCH_RESET_ACK (1 << 1) ··· 5140 6148 #define LPT_PWM_GRANULARITY (1 << 5) 5141 6149 #define DPLS_EDP_PPS_FIX_DIS (1 << 0) 5142 6150 5143 - #define _FDI_RXA_CHICKEN 0xc200c 5144 - #define _FDI_RXB_CHICKEN 0xc2010 5145 - #define FDI_RX_PHASE_SYNC_POINTER_OVR (1 << 1) 5146 - #define FDI_RX_PHASE_SYNC_POINTER_EN 
(1 << 0) 5147 - #define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN) 5148 - 5149 6151 #define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020) 5150 6152 #define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31) 5151 6153 #define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30) ··· 5148 6162 #define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14) 5149 6163 #define CNP_PWM_CGE_GATING_DISABLE (1 << 13) 5150 6164 #define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12) 5151 - 5152 - /* CPU: FDI_TX */ 5153 - #define _FDI_TXA_CTL 0x60100 5154 - #define _FDI_TXB_CTL 0x61100 5155 - #define FDI_TX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL) 5156 - #define FDI_TX_DISABLE (0 << 31) 5157 - #define FDI_TX_ENABLE (1 << 31) 5158 - #define FDI_LINK_TRAIN_PATTERN_1 (0 << 28) 5159 - #define FDI_LINK_TRAIN_PATTERN_2 (1 << 28) 5160 - #define FDI_LINK_TRAIN_PATTERN_IDLE (2 << 28) 5161 - #define FDI_LINK_TRAIN_NONE (3 << 28) 5162 - #define FDI_LINK_TRAIN_VOLTAGE_0_4V (0 << 25) 5163 - #define FDI_LINK_TRAIN_VOLTAGE_0_6V (1 << 25) 5164 - #define FDI_LINK_TRAIN_VOLTAGE_0_8V (2 << 25) 5165 - #define FDI_LINK_TRAIN_VOLTAGE_1_2V (3 << 25) 5166 - #define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0 << 22) 5167 - #define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1 << 22) 5168 - #define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2 << 22) 5169 - #define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3 << 22) 5170 - /* ILK always use 400mV 0dB for voltage swing and pre-emphasis level. 5171 - SNB has different settings. 
*/ 5172 - /* SNB A-stepping */ 5173 - #define FDI_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22) 5174 - #define FDI_LINK_TRAIN_400MV_6DB_SNB_A (0x02 << 22) 5175 - #define FDI_LINK_TRAIN_600MV_3_5DB_SNB_A (0x01 << 22) 5176 - #define FDI_LINK_TRAIN_800MV_0DB_SNB_A (0x0 << 22) 5177 - /* SNB B-stepping */ 5178 - #define FDI_LINK_TRAIN_400MV_0DB_SNB_B (0x0 << 22) 5179 - #define FDI_LINK_TRAIN_400MV_6DB_SNB_B (0x3a << 22) 5180 - #define FDI_LINK_TRAIN_600MV_3_5DB_SNB_B (0x39 << 22) 5181 - #define FDI_LINK_TRAIN_800MV_0DB_SNB_B (0x38 << 22) 5182 - #define FDI_LINK_TRAIN_VOL_EMP_MASK (0x3f << 22) 5183 - #define FDI_DP_PORT_WIDTH_SHIFT 19 5184 - #define FDI_DP_PORT_WIDTH_MASK (7 << FDI_DP_PORT_WIDTH_SHIFT) 5185 - #define FDI_DP_PORT_WIDTH(width) (((width) - 1) << FDI_DP_PORT_WIDTH_SHIFT) 5186 - #define FDI_TX_ENHANCE_FRAME_ENABLE (1 << 18) 5187 - /* Ironlake: hardwired to 1 */ 5188 - #define FDI_TX_PLL_ENABLE (1 << 14) 5189 - 5190 - /* Ivybridge has different bits for lolz */ 5191 - #define FDI_LINK_TRAIN_PATTERN_1_IVB (0 << 8) 5192 - #define FDI_LINK_TRAIN_PATTERN_2_IVB (1 << 8) 5193 - #define FDI_LINK_TRAIN_PATTERN_IDLE_IVB (2 << 8) 5194 - #define FDI_LINK_TRAIN_NONE_IVB (3 << 8) 5195 - 5196 - /* both Tx and Rx */ 5197 - #define FDI_COMPOSITE_SYNC (1 << 11) 5198 - #define FDI_LINK_TRAIN_AUTO (1 << 10) 5199 - #define FDI_SCRAMBLING_ENABLE (0 << 7) 5200 - #define FDI_SCRAMBLING_DISABLE (1 << 7) 5201 - 5202 - /* FDI_RX, FDI_X is hard-wired to Transcoder_X */ 5203 - #define _FDI_RXA_CTL 0xf000c 5204 - #define _FDI_RXB_CTL 0xf100c 5205 - #define FDI_RX_CTL(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL) 5206 - #define FDI_RX_ENABLE (1 << 31) 5207 - /* train, dp width same as FDI_TX */ 5208 - #define FDI_FS_ERRC_ENABLE (1 << 27) 5209 - #define FDI_FE_ERRC_ENABLE (1 << 26) 5210 - #define FDI_RX_POLARITY_REVERSED_LPT (1 << 16) 5211 - #define FDI_8BPC (0 << 16) 5212 - #define FDI_10BPC (1 << 16) 5213 - #define FDI_6BPC (2 << 16) 5214 - #define FDI_12BPC (3 << 16) 5215 - #define 
FDI_RX_LINK_REVERSAL_OVERRIDE (1 << 15) 5216 - #define FDI_DMI_LINK_REVERSE_MASK (1 << 14) 5217 - #define FDI_RX_PLL_ENABLE (1 << 13) 5218 - #define FDI_FS_ERR_CORRECT_ENABLE (1 << 11) 5219 - #define FDI_FE_ERR_CORRECT_ENABLE (1 << 10) 5220 - #define FDI_FS_ERR_REPORT_ENABLE (1 << 9) 5221 - #define FDI_FE_ERR_REPORT_ENABLE (1 << 8) 5222 - #define FDI_RX_ENHANCE_FRAME_ENABLE (1 << 6) 5223 - #define FDI_PCDCLK (1 << 4) 5224 - /* CPT */ 5225 - #define FDI_AUTO_TRAINING (1 << 10) 5226 - #define FDI_LINK_TRAIN_PATTERN_1_CPT (0 << 8) 5227 - #define FDI_LINK_TRAIN_PATTERN_2_CPT (1 << 8) 5228 - #define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2 << 8) 5229 - #define FDI_LINK_TRAIN_NORMAL_CPT (3 << 8) 5230 - #define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3 << 8) 5231 - 5232 - #define _FDI_RXA_MISC 0xf0010 5233 - #define _FDI_RXB_MISC 0xf1010 5234 - #define FDI_RX_PWRDN_LANE1_MASK (3 << 26) 5235 - #define FDI_RX_PWRDN_LANE1_VAL(x) ((x) << 26) 5236 - #define FDI_RX_PWRDN_LANE0_MASK (3 << 24) 5237 - #define FDI_RX_PWRDN_LANE0_VAL(x) ((x) << 24) 5238 - #define FDI_RX_TP1_TO_TP2_48 (2 << 20) 5239 - #define FDI_RX_TP1_TO_TP2_64 (3 << 20) 5240 - #define FDI_RX_FDI_DELAY_90 (0x90 << 0) 5241 - #define FDI_RX_MISC(pipe) _MMIO_PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC) 5242 - 5243 - #define _FDI_RXA_TUSIZE1 0xf0030 5244 - #define _FDI_RXA_TUSIZE2 0xf0038 5245 - #define _FDI_RXB_TUSIZE1 0xf1030 5246 - #define _FDI_RXB_TUSIZE2 0xf1038 5247 - #define FDI_RX_TUSIZE1(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) 5248 - #define FDI_RX_TUSIZE2(pipe) _MMIO_PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) 5249 - 5250 - /* FDI_RX interrupt register format */ 5251 - #define FDI_RX_INTER_LANE_ALIGN (1 << 10) 5252 - #define FDI_RX_SYMBOL_LOCK (1 << 9) /* train 2 */ 5253 - #define FDI_RX_BIT_LOCK (1 << 8) /* train 1 */ 5254 - #define FDI_RX_TRAIN_PATTERN_2_FAIL (1 << 7) 5255 - #define FDI_RX_FS_CODE_ERR (1 << 6) 5256 - #define FDI_RX_FE_CODE_ERR (1 << 5) 5257 - #define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1 << 
4) 5258 - #define FDI_RX_HDCP_LINK_FAIL (1 << 3) 5259 - #define FDI_RX_PIXEL_FIFO_OVERFLOW (1 << 2) 5260 - #define FDI_RX_CROSS_CLOCK_OVERFLOW (1 << 1) 5261 - #define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1 << 0) 5262 - 5263 - #define _FDI_RXA_IIR 0xf0014 5264 - #define _FDI_RXA_IMR 0xf0018 5265 - #define _FDI_RXB_IIR 0xf1014 5266 - #define _FDI_RXB_IMR 0xf1018 5267 - #define FDI_RX_IIR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR) 5268 - #define FDI_RX_IMR(pipe) _MMIO_PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR) 5269 - 5270 - #define FDI_PLL_CTL_1 _MMIO(0xfe000) 5271 - #define FDI_PLL_CTL_2 _MMIO(0xfe004) 5272 6165 5273 6166 #define _PCH_DP_B 0xe4100 5274 6167 #define PCH_DP_B _MMIO(_PCH_DP_B) ··· 6518 7653 #define OROM_OFFSET _MMIO(0x1020c0) 6519 7654 #define OROM_OFFSET_MASK REG_GENMASK(20, 16) 6520 7655 6521 - /* This register controls the Display State Buffer (DSB) engines. */ 6522 - #define _DSBSL_INSTANCE_BASE 0x70B00 6523 - #define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \ 6524 - (pipe) * 0x1000 + (id) * 0x100) 6525 - #define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0) 6526 - #define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4) 6527 - #define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8) 6528 - #define DSB_ENABLE REG_BIT(31) 6529 - #define DSB_BUF_REITERATE REG_BIT(29) 6530 - #define DSB_WAIT_FOR_VBLANK REG_BIT(28) 6531 - #define DSB_WAIT_FOR_LINE_IN REG_BIT(27) 6532 - #define DSB_HALT REG_BIT(16) 6533 - #define DSB_NON_POSTED REG_BIT(8) 6534 - #define DSB_STATUS_BUSY REG_BIT(0) 6535 - #define DSB_MMIOCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xc) 6536 - #define DSB_MMIO_DEAD_CLOCKS_ENABLE REG_BIT(31) 6537 - #define DSB_MMIO_DEAD_CLOCKS_COUNT_MASK REG_GENMASK(15, 8) 6538 - #define DSB_MMIO_DEAD_CLOCKS_COUNT(x) REG_FIELD_PREP(DSB_MMIO_DEAD_CLOCK_COUNT_MASK, (x)) 6539 - #define DSB_MMIO_CYCLES_MASK REG_GENMASK(7, 0) 6540 - #define DSB_MMIO_CYCLES(x) REG_FIELD_PREP(DSB_MMIO_CYCLES_MASK, (x)) 6541 - #define 
DSB_POLLFUNC(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x10) 6542 - #define DSB_POLL_ENABLE REG_BIT(31) 6543 - #define DSB_POLL_WAIT_MASK REG_GENMASK(30, 23) 6544 - #define DSB_POLL_WAIT(x) REG_FIELD_PREP(DSB_POLL_WAIT_MASK, (x)) /* usec */ 6545 - #define DSB_POLL_COUNT_MASK REG_GENMASK(22, 15) 6546 - #define DSB_POLL_COUNT(x) REG_FIELD_PREP(DSB_POLL_COUNT_MASK, (x)) 6547 - #define DSB_DEBUG(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x14) 6548 - #define DSB_POLLMASK(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x1c) 6549 - #define DSB_STATUS(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x24) 6550 - #define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28) 6551 - #define DSB_ATS_FAULT_INT_EN REG_BIT(20) 6552 - #define DSB_GTT_FAULT_INT_EN REG_BIT(19) 6553 - #define DSB_RSPTIMEOUT_INT_EN REG_BIT(18) 6554 - #define DSB_POLL_ERR_INT_EN REG_BIT(17) 6555 - #define DSB_PROG_INT_EN REG_BIT(16) 6556 - #define DSB_ATS_FAULT_INT_STATUS REG_BIT(4) 6557 - #define DSB_GTT_FAULT_INT_STATUS REG_BIT(3) 6558 - #define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2) 6559 - #define DSB_POLL_ERR_INT_STATUS REG_BIT(1) 6560 - #define DSB_PROG_INT_STATUS REG_BIT(0) 6561 - #define DSB_CURRENT_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x2c) 6562 - #define DSB_RM_TIMEOUT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x30) 6563 - #define DSB_RM_CLAIM_TIMEOUT REG_BIT(31) 6564 - #define DSB_RM_READY_TIMEOUT REG_BIT(30) 6565 - #define DSB_RM_CLAIM_TIMEOUT_COUNT_MASK REG_GENMASK(23, 16) 6566 - #define DSB_RM_CLAIM_TIMEOUT_COUNT(x) REG_FIELD_PREP(DSB_RM_CLAIM_TIMEOUT_COUNT_MASK, (x)) /* clocks */ 6567 - #define DSB_RM_READY_TIMEOUT_VALUE_MASK REG_GENMASK(15, 0) 6568 - #define DSB_RM_READY_TIMEOUT_VALUE(x) REG_FIELD_PREP(DSB_RM_READY_TIMEOUT_VALUE, (x)) /* usec */ 6569 - #define DSB_RMTIMEOUTREG_CAPTURE(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x34) 6570 - #define DSB_PMCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x38) 6571 - #define DSB_PMCTRL_2(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 
0x3c) 6572 - #define DSB_PF_LN_LOWER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x40) 6573 - #define DSB_PF_LN_UPPER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x44) 6574 - #define DSB_BUFRPT_CNT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x48) 6575 - #define DSB_CHICKEN(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xf0) 6576 - 6577 7656 #define CLKREQ_POLICY _MMIO(0x101038) 6578 7657 #define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1) 6579 7658 ··· 6529 7720 #define MTL_CLKGATE_DIS_TRANS(trans) _MMIO_TRANS2(trans, _MTL_CLKGATE_DIS_TRANS_A) 6530 7721 #define MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS REG_BIT(7) 6531 7722 6532 - #define MTL_LATENCY_LP0_LP1 _MMIO(0x45780) 6533 - #define MTL_LATENCY_LP2_LP3 _MMIO(0x45784) 6534 - #define MTL_LATENCY_LP4_LP5 _MMIO(0x45788) 6535 - #define MTL_LATENCY_LEVEL_EVEN_MASK REG_GENMASK(12, 0) 6536 - #define MTL_LATENCY_LEVEL_ODD_MASK REG_GENMASK(28, 16) 6537 - 6538 - #define MTL_LATENCY_SAGV _MMIO(0x4578b) 6539 - #define MTL_LATENCY_QCLK_SAGV REG_GENMASK(12, 0) 6540 - 6541 7723 #define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700) 6542 7724 #define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8) 6543 7725 #define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4) 6544 7726 #define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0) 6545 7727 6546 - #define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(0x45710 + (point) * 2) 7728 + #define MTL_MEM_SS_INFO_QGV_POINT_OFFSET 0x45710 7729 + #define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8) 6547 7730 #define MTL_TRCD_MASK REG_GENMASK(31, 24) 6548 7731 #define MTL_TRP_MASK REG_GENMASK(23, 16) 6549 7732 #define MTL_DCLK_MASK REG_GENMASK(15, 0) 6550 7733 6551 - #define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(0x45714 + (point) * 2) 7734 + #define MTL_MEM_SS_INFO_QGV_POINT_HIGH(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8 + 4) 6552 7735 #define MTL_TRAS_MASK REG_GENMASK(16, 8) 6553 7736 #define MTL_TRDPRE_MASK REG_GENMASK(7, 0) 6554 7737
+888
drivers/gpu/drm/i915/intel_clock_gating.c
··· 1 + /* 2 + * Copyright © 2012 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 
22 + * 23 + * Authors: 24 + * Eugeni Dodonov <eugeni.dodonov@intel.com> 25 + * 26 + */ 27 + 28 + #include "display/intel_de.h" 29 + #include "display/intel_display.h" 30 + #include "display/intel_display_trace.h" 31 + #include "display/skl_watermark.h" 32 + 33 + #include "gt/intel_engine_regs.h" 34 + #include "gt/intel_gt.h" 35 + #include "gt/intel_gt_mcr.h" 36 + #include "gt/intel_gt_regs.h" 37 + 38 + #include "i915_drv.h" 39 + #include "intel_clock_gating.h" 40 + #include "intel_mchbar_regs.h" 41 + #include "vlv_sideband.h" 42 + 43 + struct drm_i915_clock_gating_funcs { 44 + void (*init_clock_gating)(struct drm_i915_private *i915); 45 + }; 46 + 47 + static void gen9_init_clock_gating(struct drm_i915_private *i915) 48 + { 49 + if (HAS_LLC(i915)) { 50 + /* 51 + * WaCompressedResourceDisplayNewHashMode:skl,kbl 52 + * Display WA #0390: skl,kbl 53 + * 54 + * Must match Sampler, Pixel Back End, and Media. See 55 + * WaCompressedResourceSamplerPbeMediaNewHashMode. 56 + */ 57 + intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, SKL_DE_COMPRESSED_HASH_MODE); 58 + } 59 + 60 + /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ 61 + intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, SKL_EDP_PSR_FIX_RDWRAP); 62 + 63 + /* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */ 64 + intel_uncore_rmw(&i915->uncore, GEN8_CHICKEN_DCPR_1, 0, MASK_WAKEMEM); 65 + 66 + /* 67 + * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl 68 + * Display WA #0859: skl,bxt,kbl,glk,cfl 69 + */ 70 + intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_MEMORY_WAKE); 71 + } 72 + 73 + static void bxt_init_clock_gating(struct drm_i915_private *i915) 74 + { 75 + gen9_init_clock_gating(i915); 76 + 77 + /* WaDisableSDEUnitClockGating:bxt */ 78 + intel_uncore_rmw(&i915->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 79 + 80 + /* 81 + * FIXME: 82 + * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only. 
83 + */ 84 + intel_uncore_rmw(&i915->uncore, GEN8_UCGCTL6, 0, GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); 85 + 86 + /* 87 + * Wa: Backlight PWM may stop in the asserted state, causing backlight 88 + * to stay fully on. 89 + */ 90 + intel_uncore_write(&i915->uncore, GEN9_CLKGATE_DIS_0, 91 + intel_uncore_read(&i915->uncore, GEN9_CLKGATE_DIS_0) | 92 + PWM1_GATING_DIS | PWM2_GATING_DIS); 93 + 94 + /* 95 + * Lower the display internal timeout. 96 + * This is needed to avoid any hard hangs when DSI port PLL 97 + * is off and a MMIO access is attempted by any privilege 98 + * application, using batch buffers or any other means. 99 + */ 100 + intel_uncore_write(&i915->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950)); 101 + 102 + /* 103 + * WaFbcTurnOffFbcWatermark:bxt 104 + * Display WA #0562: bxt 105 + */ 106 + intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); 107 + 108 + /* 109 + * WaFbcHighMemBwCorruptionAvoidance:bxt 110 + * Display WA #0883: bxt 111 + */ 112 + intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0); 113 + } 114 + 115 + static void glk_init_clock_gating(struct drm_i915_private *i915) 116 + { 117 + gen9_init_clock_gating(i915); 118 + 119 + /* 120 + * WaDisablePWMClockGating:glk 121 + * Backlight PWM may stop in the asserted state, causing backlight 122 + * to stay fully on. 123 + */ 124 + intel_uncore_write(&i915->uncore, GEN9_CLKGATE_DIS_0, 125 + intel_uncore_read(&i915->uncore, GEN9_CLKGATE_DIS_0) | 126 + PWM1_GATING_DIS | PWM2_GATING_DIS); 127 + } 128 + 129 + static void ibx_init_clock_gating(struct drm_i915_private *i915) 130 + { 131 + /* 132 + * On Ibex Peak and Cougar Point, we need to disable clock 133 + * gating for the panel power sequencer or it will fail to 134 + * start up when no ports are active. 
135 + */ 136 + intel_uncore_write(&i915->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 137 + } 138 + 139 + static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) 140 + { 141 + enum pipe pipe; 142 + 143 + for_each_pipe(dev_priv, pipe) { 144 + intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(pipe), 0, DISP_TRICKLE_FEED_DISABLE); 145 + 146 + intel_uncore_rmw(&dev_priv->uncore, DSPSURF(pipe), 0, 0); 147 + intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe)); 148 + } 149 + } 150 + 151 + static void ilk_init_clock_gating(struct drm_i915_private *i915) 152 + { 153 + u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 154 + 155 + /* 156 + * Required for FBC 157 + * WaFbcDisableDpfcClockGating:ilk 158 + */ 159 + dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | 160 + ILK_DPFCUNIT_CLOCK_GATE_DISABLE | 161 + ILK_DPFDUNIT_CLOCK_GATE_ENABLE; 162 + 163 + intel_uncore_write(&i915->uncore, PCH_3DCGDIS0, 164 + MARIUNIT_CLOCK_GATE_DISABLE | 165 + SVSMUNIT_CLOCK_GATE_DISABLE); 166 + intel_uncore_write(&i915->uncore, PCH_3DCGDIS1, 167 + VFMUNIT_CLOCK_GATE_DISABLE); 168 + 169 + /* 170 + * According to the spec the following bits should be set in 171 + * order to enable memory self-refresh 172 + * The bit 22/21 of 0x42004 173 + * The bit 5 of 0x42020 174 + * The bit 15 of 0x45000 175 + */ 176 + intel_uncore_write(&i915->uncore, ILK_DISPLAY_CHICKEN2, 177 + (intel_uncore_read(&i915->uncore, ILK_DISPLAY_CHICKEN2) | 178 + ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 179 + dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; 180 + intel_uncore_write(&i915->uncore, DISP_ARB_CTL, 181 + (intel_uncore_read(&i915->uncore, DISP_ARB_CTL) | 182 + DISP_FBC_WM_DIS)); 183 + 184 + /* 185 + * Based on the document from hardware guys the following bits 186 + * should be set unconditionally in order to enable FBC. 187 + * The bit 22 of 0x42000 188 + * The bit 22 of 0x42004 189 + * The bit 7,8,9 of 0x42020. 
190 + */ 191 + if (IS_IRONLAKE_M(i915)) { 192 + /* WaFbcAsynchFlipDisableFbcQueue:ilk */ 193 + intel_uncore_rmw(&i915->uncore, ILK_DISPLAY_CHICKEN1, 0, ILK_FBCQ_DIS); 194 + intel_uncore_rmw(&i915->uncore, ILK_DISPLAY_CHICKEN2, 0, ILK_DPARB_GATE); 195 + } 196 + 197 + intel_uncore_write(&i915->uncore, ILK_DSPCLK_GATE_D, dspclk_gate); 198 + 199 + intel_uncore_rmw(&i915->uncore, ILK_DISPLAY_CHICKEN2, 0, ILK_ELPIN_409_SELECT); 200 + 201 + g4x_disable_trickle_feed(i915); 202 + 203 + ibx_init_clock_gating(i915); 204 + } 205 + 206 + static void cpt_init_clock_gating(struct drm_i915_private *i915) 207 + { 208 + enum pipe pipe; 209 + u32 val; 210 + 211 + /* 212 + * On Ibex Peak and Cougar Point, we need to disable clock 213 + * gating for the panel power sequencer or it will fail to 214 + * start up when no ports are active. 215 + */ 216 + intel_uncore_write(&i915->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | 217 + PCH_DPLUNIT_CLOCK_GATE_DISABLE | 218 + PCH_CPUNIT_CLOCK_GATE_DISABLE); 219 + intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN2, 0, DPLS_EDP_PPS_FIX_DIS); 220 + /* The below fixes the weird display corruption, a few pixels shifted 221 + * downward, on (only) LVDS of some HP laptops with IVY. 
222 + */ 223 + for_each_pipe(i915, pipe) { 224 + val = intel_uncore_read(&i915->uncore, TRANS_CHICKEN2(pipe)); 225 + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 226 + val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 227 + if (i915->display.vbt.fdi_rx_polarity_inverted) 228 + val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 229 + val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; 230 + val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; 231 + intel_uncore_write(&i915->uncore, TRANS_CHICKEN2(pipe), val); 232 + } 233 + /* WADP0ClockGatingDisable */ 234 + for_each_pipe(i915, pipe) { 235 + intel_uncore_write(&i915->uncore, TRANS_CHICKEN1(pipe), 236 + TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 237 + } 238 + } 239 + 240 + static void gen6_check_mch_setup(struct drm_i915_private *i915) 241 + { 242 + u32 tmp; 243 + 244 + tmp = intel_uncore_read(&i915->uncore, MCH_SSKPD); 245 + if (REG_FIELD_GET(SSKPD_WM0_MASK_SNB, tmp) != 12) 246 + drm_dbg_kms(&i915->drm, 247 + "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", 248 + tmp); 249 + } 250 + 251 + static void gen6_init_clock_gating(struct drm_i915_private *i915) 252 + { 253 + u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 254 + 255 + intel_uncore_write(&i915->uncore, ILK_DSPCLK_GATE_D, dspclk_gate); 256 + 257 + intel_uncore_rmw(&i915->uncore, ILK_DISPLAY_CHICKEN2, 0, ILK_ELPIN_409_SELECT); 258 + 259 + intel_uncore_write(&i915->uncore, GEN6_UCGCTL1, 260 + intel_uncore_read(&i915->uncore, GEN6_UCGCTL1) | 261 + GEN6_BLBUNIT_CLOCK_GATE_DISABLE | 262 + GEN6_CSUNIT_CLOCK_GATE_DISABLE); 263 + 264 + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 265 + * gating disable must be set. Failure to set it results in 266 + * flickering pixels due to Z write ordering failures after 267 + * some amount of runtime in the Mesa "fire" demo, and Unigine 268 + * Sanctuary and Tropics, and apparently anything else with 269 + * alpha test or pixel discard. 
270 + * 271 + * According to the spec, bit 11 (RCCUNIT) must also be set, 272 + * but we didn't debug actual testcases to find it out. 273 + * 274 + * WaDisableRCCUnitClockGating:snb 275 + * WaDisableRCPBUnitClockGating:snb 276 + */ 277 + intel_uncore_write(&i915->uncore, GEN6_UCGCTL2, 278 + GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 279 + GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 280 + 281 + /* 282 + * According to the spec the following bits should be 283 + * set in order to enable memory self-refresh and fbc: 284 + * The bit21 and bit22 of 0x42000 285 + * The bit21 and bit22 of 0x42004 286 + * The bit5 and bit7 of 0x42020 287 + * The bit14 of 0x70180 288 + * The bit14 of 0x71180 289 + * 290 + * WaFbcAsynchFlipDisableFbcQueue:snb 291 + */ 292 + intel_uncore_write(&i915->uncore, ILK_DISPLAY_CHICKEN1, 293 + intel_uncore_read(&i915->uncore, ILK_DISPLAY_CHICKEN1) | 294 + ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 295 + intel_uncore_write(&i915->uncore, ILK_DISPLAY_CHICKEN2, 296 + intel_uncore_read(&i915->uncore, ILK_DISPLAY_CHICKEN2) | 297 + ILK_DPARB_GATE | ILK_VSDPFD_FULL); 298 + intel_uncore_write(&i915->uncore, ILK_DSPCLK_GATE_D, 299 + intel_uncore_read(&i915->uncore, ILK_DSPCLK_GATE_D) | 300 + ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 301 + ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 302 + 303 + g4x_disable_trickle_feed(i915); 304 + 305 + cpt_init_clock_gating(i915); 306 + 307 + gen6_check_mch_setup(i915); 308 + } 309 + 310 + static void lpt_init_clock_gating(struct drm_i915_private *i915) 311 + { 312 + /* 313 + * TODO: this bit should only be enabled when really needed, then 314 + * disabled when not needed anymore in order to save power. 
315 + */ 316 + if (HAS_PCH_LPT_LP(i915)) 317 + intel_uncore_rmw(&i915->uncore, SOUTH_DSPCLK_GATE_D, 318 + 0, PCH_LP_PARTITION_LEVEL_DISABLE); 319 + 320 + /* WADPOClockGatingDisable:hsw */ 321 + intel_uncore_rmw(&i915->uncore, TRANS_CHICKEN1(PIPE_A), 322 + 0, TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 323 + } 324 + 325 + static void gen8_set_l3sqc_credits(struct drm_i915_private *i915, 326 + int general_prio_credits, 327 + int high_prio_credits) 328 + { 329 + u32 misccpctl; 330 + u32 val; 331 + 332 + /* WaTempDisableDOPClkGating:bdw */ 333 + misccpctl = intel_uncore_rmw(&i915->uncore, GEN7_MISCCPCTL, 334 + GEN7_DOP_CLOCK_GATE_ENABLE, 0); 335 + 336 + val = intel_gt_mcr_read_any(to_gt(i915), GEN8_L3SQCREG1); 337 + val &= ~L3_PRIO_CREDITS_MASK; 338 + val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits); 339 + val |= L3_HIGH_PRIO_CREDITS(high_prio_credits); 340 + intel_gt_mcr_multicast_write(to_gt(i915), GEN8_L3SQCREG1, val); 341 + 342 + /* 343 + * Wait at least 100 clocks before re-enabling clock gating. 344 + * See the definition of L3SQCREG1 in BSpec. 
345 + */ 346 + intel_gt_mcr_read_any(to_gt(i915), GEN8_L3SQCREG1); 347 + udelay(1); 348 + intel_uncore_write(&i915->uncore, GEN7_MISCCPCTL, misccpctl); 349 + } 350 + 351 + static void icl_init_clock_gating(struct drm_i915_private *i915) 352 + { 353 + /* Wa_1409120013:icl,ehl */ 354 + intel_uncore_write(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 355 + DPFC_CHICKEN_COMP_DUMMY_PIXEL); 356 + 357 + /*Wa_14010594013:icl, ehl */ 358 + intel_uncore_rmw(&i915->uncore, GEN8_CHICKEN_DCPR_1, 359 + 0, ICL_DELAY_PMRSP); 360 + } 361 + 362 + static void gen12lp_init_clock_gating(struct drm_i915_private *i915) 363 + { 364 + /* Wa_1409120013 */ 365 + if (DISPLAY_VER(i915) == 12) 366 + intel_uncore_write(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 367 + DPFC_CHICKEN_COMP_DUMMY_PIXEL); 368 + 369 + /* Wa_14013723622:tgl,rkl,dg1,adl-s */ 370 + if (DISPLAY_VER(i915) == 12) 371 + intel_uncore_rmw(&i915->uncore, CLKREQ_POLICY, 372 + CLKREQ_POLICY_MEM_UP_OVRD, 0); 373 + } 374 + 375 + static void adlp_init_clock_gating(struct drm_i915_private *i915) 376 + { 377 + gen12lp_init_clock_gating(i915); 378 + 379 + /* Wa_22011091694:adlp */ 380 + intel_de_rmw(i915, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS); 381 + 382 + /* Bspec/49189 Initialize Sequence */ 383 + intel_de_rmw(i915, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0); 384 + } 385 + 386 + static void xehpsdv_init_clock_gating(struct drm_i915_private *i915) 387 + { 388 + /* Wa_22010146351:xehpsdv */ 389 + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) 390 + intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS); 391 + } 392 + 393 + static void dg2_init_clock_gating(struct drm_i915_private *i915) 394 + { 395 + /* Wa_22010954014:dg2 */ 396 + intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, 397 + SGSI_SIDECLK_DIS); 398 + 399 + /* 400 + * Wa_14010733611:dg2_g10 401 + * Wa_22010146351:dg2_g10 402 + */ 403 + if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) 404 + intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 
0, 405 + SGR_DIS | SGGI_DIS); 406 + } 407 + 408 + static void pvc_init_clock_gating(struct drm_i915_private *i915) 409 + { 410 + /* Wa_14012385139:pvc */ 411 + if (IS_PVC_BD_STEP(i915, STEP_A0, STEP_B0)) 412 + intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS); 413 + 414 + /* Wa_22010954014:pvc */ 415 + if (IS_PVC_BD_STEP(i915, STEP_A0, STEP_B0)) 416 + intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS); 417 + } 418 + 419 + static void cnp_init_clock_gating(struct drm_i915_private *i915) 420 + { 421 + if (!HAS_PCH_CNP(i915)) 422 + return; 423 + 424 + /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */ 425 + intel_uncore_rmw(&i915->uncore, SOUTH_DSPCLK_GATE_D, 0, CNP_PWM_CGE_GATING_DISABLE); 426 + } 427 + 428 + static void cfl_init_clock_gating(struct drm_i915_private *i915) 429 + { 430 + cnp_init_clock_gating(i915); 431 + gen9_init_clock_gating(i915); 432 + 433 + /* WAC6entrylatency:cfl */ 434 + intel_uncore_rmw(&i915->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN); 435 + 436 + /* 437 + * WaFbcTurnOffFbcWatermark:cfl 438 + * Display WA #0562: cfl 439 + */ 440 + intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); 441 + 442 + /* 443 + * WaFbcNukeOnHostModify:cfl 444 + * Display WA #0873: cfl 445 + */ 446 + intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 447 + 0, DPFC_NUKE_ON_ANY_MODIFICATION); 448 + } 449 + 450 + static void kbl_init_clock_gating(struct drm_i915_private *i915) 451 + { 452 + gen9_init_clock_gating(i915); 453 + 454 + /* WAC6entrylatency:kbl */ 455 + intel_uncore_rmw(&i915->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN); 456 + 457 + /* WaDisableSDEUnitClockGating:kbl */ 458 + if (IS_KBL_GRAPHICS_STEP(i915, 0, STEP_C0)) 459 + intel_uncore_rmw(&i915->uncore, GEN8_UCGCTL6, 460 + 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 461 + 462 + /* WaDisableGamClockGating:kbl */ 463 + if (IS_KBL_GRAPHICS_STEP(i915, 0, STEP_C0)) 464 + intel_uncore_rmw(&i915->uncore, GEN6_UCGCTL1, 465 + 0, 
GEN6_GAMUNIT_CLOCK_GATE_DISABLE); 466 + 467 + /* 468 + * WaFbcTurnOffFbcWatermark:kbl 469 + * Display WA #0562: kbl 470 + */ 471 + intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); 472 + 473 + /* 474 + * WaFbcNukeOnHostModify:kbl 475 + * Display WA #0873: kbl 476 + */ 477 + intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 478 + 0, DPFC_NUKE_ON_ANY_MODIFICATION); 479 + } 480 + 481 + static void skl_init_clock_gating(struct drm_i915_private *i915) 482 + { 483 + gen9_init_clock_gating(i915); 484 + 485 + /* WaDisableDopClockGating:skl */ 486 + intel_uncore_rmw(&i915->uncore, GEN7_MISCCPCTL, 487 + GEN7_DOP_CLOCK_GATE_ENABLE, 0); 488 + 489 + /* WAC6entrylatency:skl */ 490 + intel_uncore_rmw(&i915->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN); 491 + 492 + /* 493 + * WaFbcTurnOffFbcWatermark:skl 494 + * Display WA #0562: skl 495 + */ 496 + intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); 497 + 498 + /* 499 + * WaFbcNukeOnHostModify:skl 500 + * Display WA #0873: skl 501 + */ 502 + intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 503 + 0, DPFC_NUKE_ON_ANY_MODIFICATION); 504 + 505 + /* 506 + * WaFbcHighMemBwCorruptionAvoidance:skl 507 + * Display WA #0883: skl 508 + */ 509 + intel_uncore_rmw(&i915->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0); 510 + } 511 + 512 + static void bdw_init_clock_gating(struct drm_i915_private *i915) 513 + { 514 + enum pipe pipe; 515 + 516 + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ 517 + intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(PIPE_A), 0, HSW_FBCQ_DIS); 518 + 519 + /* WaSwitchSolVfFArbitrationPriority:bdw */ 520 + intel_uncore_rmw(&i915->uncore, GAM_ECOCHK, 0, HSW_ECOCHK_ARB_PRIO_SOL); 521 + 522 + /* WaPsrDPAMaskVBlankInSRD:bdw */ 523 + intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, DPA_MASK_VBLANK_SRD); 524 + 525 + for_each_pipe(i915, pipe) { 526 + /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 527 + intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe), 
528 + 0, BDW_DPRS_MASK_VBLANK_SRD); 529 + } 530 + 531 + /* WaVSRefCountFullforceMissDisable:bdw */ 532 + /* WaDSRefCountFullforceMissDisable:bdw */ 533 + intel_uncore_rmw(&i915->uncore, GEN7_FF_THREAD_MODE, 534 + GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME, 0); 535 + 536 + intel_uncore_write(&i915->uncore, RING_PSMI_CTL(RENDER_RING_BASE), 537 + _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 538 + 539 + /* WaDisableSDEUnitClockGating:bdw */ 540 + intel_uncore_rmw(&i915->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 541 + 542 + /* WaProgramL3SqcReg1Default:bdw */ 543 + gen8_set_l3sqc_credits(i915, 30, 2); 544 + 545 + /* WaKVMNotificationOnConfigChange:bdw */ 546 + intel_uncore_rmw(&i915->uncore, CHICKEN_PAR2_1, 547 + 0, KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); 548 + 549 + lpt_init_clock_gating(i915); 550 + 551 + /* WaDisableDopClockGating:bdw 552 + * 553 + * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP 554 + * clock gating. 555 + */ 556 + intel_uncore_rmw(&i915->uncore, GEN6_UCGCTL1, 0, GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); 557 + } 558 + 559 + static void hsw_init_clock_gating(struct drm_i915_private *i915) 560 + { 561 + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ 562 + intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(PIPE_A), 0, HSW_FBCQ_DIS); 563 + 564 + /* This is required by WaCatErrorRejectionIssue:hsw */ 565 + intel_uncore_rmw(&i915->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 566 + 0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 567 + 568 + /* WaSwitchSolVfFArbitrationPriority:hsw */ 569 + intel_uncore_rmw(&i915->uncore, GAM_ECOCHK, 0, HSW_ECOCHK_ARB_PRIO_SOL); 570 + 571 + lpt_init_clock_gating(i915); 572 + } 573 + 574 + static void ivb_init_clock_gating(struct drm_i915_private *i915) 575 + { 576 + intel_uncore_write(&i915->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 577 + 578 + /* WaFbcAsynchFlipDisableFbcQueue:ivb */ 579 + intel_uncore_rmw(&i915->uncore, ILK_DISPLAY_CHICKEN1, 0, ILK_FBCQ_DIS); 580 + 581 + /* 
WaDisableBackToBackFlipFix:ivb */ 582 + intel_uncore_write(&i915->uncore, IVB_CHICKEN3, 583 + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 584 + CHICKEN3_DGMG_DONE_FIX_DISABLE); 585 + 586 + if (IS_IVB_GT1(i915)) 587 + intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2, 588 + _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 589 + else { 590 + /* must write both registers */ 591 + intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2, 592 + _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 593 + intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2_GT2, 594 + _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 595 + } 596 + 597 + /* 598 + * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 599 + * This implements the WaDisableRCZUnitClockGating:ivb workaround. 600 + */ 601 + intel_uncore_write(&i915->uncore, GEN6_UCGCTL2, 602 + GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 603 + 604 + /* This is required by WaCatErrorRejectionIssue:ivb */ 605 + intel_uncore_rmw(&i915->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 606 + 0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 607 + 608 + g4x_disable_trickle_feed(i915); 609 + 610 + intel_uncore_rmw(&i915->uncore, GEN6_MBCUNIT_SNPCR, GEN6_MBC_SNPCR_MASK, 611 + GEN6_MBC_SNPCR_MED); 612 + 613 + if (!HAS_PCH_NOP(i915)) 614 + cpt_init_clock_gating(i915); 615 + 616 + gen6_check_mch_setup(i915); 617 + } 618 + 619 + static void vlv_init_clock_gating(struct drm_i915_private *i915) 620 + { 621 + /* WaDisableBackToBackFlipFix:vlv */ 622 + intel_uncore_write(&i915->uncore, IVB_CHICKEN3, 623 + CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 624 + CHICKEN3_DGMG_DONE_FIX_DISABLE); 625 + 626 + /* WaDisableDopClockGating:vlv */ 627 + intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2, 628 + _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 629 + 630 + /* This is required by WaCatErrorRejectionIssue:vlv */ 631 + intel_uncore_rmw(&i915->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 632 + 0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 633 + 634 + /* 635 + * According to the spec, bit 13 (RCZUNIT) must be set on 
IVB. 636 + * This implements the WaDisableRCZUnitClockGating:vlv workaround. 637 + */ 638 + intel_uncore_write(&i915->uncore, GEN6_UCGCTL2, 639 + GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 640 + 641 + /* WaDisableL3Bank2xClockGate:vlv 642 + * Disabling L3 clock gating- MMIO 940c[25] = 1 643 + * Set bit 25, to disable L3_BANK_2x_CLK_GATING */ 644 + intel_uncore_rmw(&i915->uncore, GEN7_UCGCTL4, 0, GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 645 + 646 + /* 647 + * WaDisableVLVClockGating_VBIIssue:vlv 648 + * Disable clock gating on th GCFG unit to prevent a delay 649 + * in the reporting of vblank events. 650 + */ 651 + intel_uncore_write(&i915->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS); 652 + } 653 + 654 + static void chv_init_clock_gating(struct drm_i915_private *i915) 655 + { 656 + /* WaVSRefCountFullforceMissDisable:chv */ 657 + /* WaDSRefCountFullforceMissDisable:chv */ 658 + intel_uncore_rmw(&i915->uncore, GEN7_FF_THREAD_MODE, 659 + GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME, 0); 660 + 661 + /* WaDisableSemaphoreAndSyncFlipWait:chv */ 662 + intel_uncore_write(&i915->uncore, RING_PSMI_CTL(RENDER_RING_BASE), 663 + _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 664 + 665 + /* WaDisableCSUnitClockGating:chv */ 666 + intel_uncore_rmw(&i915->uncore, GEN6_UCGCTL1, 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE); 667 + 668 + /* WaDisableSDEUnitClockGating:chv */ 669 + intel_uncore_rmw(&i915->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 670 + 671 + /* 672 + * WaProgramL3SqcReg1Default:chv 673 + * See gfxspecs/Related Documents/Performance Guide/ 674 + * LSQC Setting Recommendations. 
675 + */ 676 + gen8_set_l3sqc_credits(i915, 38, 2); 677 + } 678 + 679 + static void g4x_init_clock_gating(struct drm_i915_private *i915) 680 + { 681 + u32 dspclk_gate; 682 + 683 + intel_uncore_write(&i915->uncore, RENCLK_GATE_D1, 0); 684 + intel_uncore_write(&i915->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 685 + GS_UNIT_CLOCK_GATE_DISABLE | 686 + CL_UNIT_CLOCK_GATE_DISABLE); 687 + intel_uncore_write(&i915->uncore, RAMCLK_GATE_D, 0); 688 + dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | 689 + OVRUNIT_CLOCK_GATE_DISABLE | 690 + OVCUNIT_CLOCK_GATE_DISABLE; 691 + if (IS_GM45(i915)) 692 + dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 693 + intel_uncore_write(&i915->uncore, DSPCLK_GATE_D(i915), dspclk_gate); 694 + 695 + g4x_disable_trickle_feed(i915); 696 + } 697 + 698 + static void i965gm_init_clock_gating(struct drm_i915_private *i915) 699 + { 700 + struct intel_uncore *uncore = &i915->uncore; 701 + 702 + intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 703 + intel_uncore_write(uncore, RENCLK_GATE_D2, 0); 704 + intel_uncore_write(uncore, DSPCLK_GATE_D(i915), 0); 705 + intel_uncore_write(uncore, RAMCLK_GATE_D, 0); 706 + intel_uncore_write16(uncore, DEUC, 0); 707 + intel_uncore_write(uncore, 708 + MI_ARB_STATE, 709 + _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 710 + } 711 + 712 + static void i965g_init_clock_gating(struct drm_i915_private *i915) 713 + { 714 + intel_uncore_write(&i915->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 715 + I965_RCC_CLOCK_GATE_DISABLE | 716 + I965_RCPB_CLOCK_GATE_DISABLE | 717 + I965_ISC_CLOCK_GATE_DISABLE | 718 + I965_FBC_CLOCK_GATE_DISABLE); 719 + intel_uncore_write(&i915->uncore, RENCLK_GATE_D2, 0); 720 + intel_uncore_write(&i915->uncore, MI_ARB_STATE, 721 + _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 722 + } 723 + 724 + static void gen3_init_clock_gating(struct drm_i915_private *i915) 725 + { 726 + u32 dstate = intel_uncore_read(&i915->uncore, D_STATE); 727 + 728 + dstate |= 
DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 729 + DSTATE_DOT_CLOCK_GATING; 730 + intel_uncore_write(&i915->uncore, D_STATE, dstate); 731 + 732 + if (IS_PINEVIEW(i915)) 733 + intel_uncore_write(&i915->uncore, ECOSKPD(RENDER_RING_BASE), 734 + _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); 735 + 736 + /* IIR "flip pending" means done if this bit is set */ 737 + intel_uncore_write(&i915->uncore, ECOSKPD(RENDER_RING_BASE), 738 + _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); 739 + 740 + /* interrupts should cause a wake up from C3 */ 741 + intel_uncore_write(&i915->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); 742 + 743 + /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 744 + intel_uncore_write(&i915->uncore, MI_ARB_STATE, 745 + _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); 746 + 747 + intel_uncore_write(&i915->uncore, MI_ARB_STATE, 748 + _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 749 + } 750 + 751 + static void i85x_init_clock_gating(struct drm_i915_private *i915) 752 + { 753 + intel_uncore_write(&i915->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 754 + 755 + /* interrupts should cause a wake up from C3 */ 756 + intel_uncore_write(&i915->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | 757 + _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); 758 + 759 + intel_uncore_write(&i915->uncore, MEM_MODE, 760 + _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE)); 761 + 762 + /* 763 + * Have FBC ignore 3D activity since we use software 764 + * render tracking, and otherwise a pure 3D workload 765 + * (even if it just renders a single frame and then does 766 + * abosultely nothing) would not allow FBC to recompress 767 + * until a 2D blit occurs. 
768 + */ 769 + intel_uncore_write(&i915->uncore, SCPD0, 770 + _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D)); 771 + } 772 + 773 + static void i830_init_clock_gating(struct drm_i915_private *i915) 774 + { 775 + intel_uncore_write(&i915->uncore, MEM_MODE, 776 + _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | 777 + _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); 778 + } 779 + 780 + void intel_clock_gating_init(struct drm_i915_private *i915) 781 + { 782 + i915->clock_gating_funcs->init_clock_gating(i915); 783 + } 784 + 785 + static void nop_init_clock_gating(struct drm_i915_private *i915) 786 + { 787 + drm_dbg_kms(&i915->drm, 788 + "No clock gating settings or workarounds applied.\n"); 789 + } 790 + 791 + #define CG_FUNCS(platform) \ 792 + static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = { \ 793 + .init_clock_gating = platform##_init_clock_gating, \ 794 + } 795 + 796 + CG_FUNCS(pvc); 797 + CG_FUNCS(dg2); 798 + CG_FUNCS(xehpsdv); 799 + CG_FUNCS(adlp); 800 + CG_FUNCS(gen12lp); 801 + CG_FUNCS(icl); 802 + CG_FUNCS(cfl); 803 + CG_FUNCS(skl); 804 + CG_FUNCS(kbl); 805 + CG_FUNCS(bxt); 806 + CG_FUNCS(glk); 807 + CG_FUNCS(bdw); 808 + CG_FUNCS(chv); 809 + CG_FUNCS(hsw); 810 + CG_FUNCS(ivb); 811 + CG_FUNCS(vlv); 812 + CG_FUNCS(gen6); 813 + CG_FUNCS(ilk); 814 + CG_FUNCS(g4x); 815 + CG_FUNCS(i965gm); 816 + CG_FUNCS(i965g); 817 + CG_FUNCS(gen3); 818 + CG_FUNCS(i85x); 819 + CG_FUNCS(i830); 820 + CG_FUNCS(nop); 821 + #undef CG_FUNCS 822 + 823 + /** 824 + * intel_clock_gating_hooks_init - setup the clock gating hooks 825 + * @i915: device private 826 + * 827 + * Setup the hooks that configure which clocks of a given platform can be 828 + * gated and also apply various GT and display specific workarounds for these 829 + * platforms. Note that some GT specific workarounds are applied separately 830 + * when GPU contexts or batchbuffers start their execution. 
831 + */ 832 + void intel_clock_gating_hooks_init(struct drm_i915_private *i915) 833 + { 834 + if (IS_METEORLAKE(i915)) 835 + i915->clock_gating_funcs = &nop_clock_gating_funcs; 836 + else if (IS_PONTEVECCHIO(i915)) 837 + i915->clock_gating_funcs = &pvc_clock_gating_funcs; 838 + else if (IS_DG2(i915)) 839 + i915->clock_gating_funcs = &dg2_clock_gating_funcs; 840 + else if (IS_XEHPSDV(i915)) 841 + i915->clock_gating_funcs = &xehpsdv_clock_gating_funcs; 842 + else if (IS_ALDERLAKE_P(i915)) 843 + i915->clock_gating_funcs = &adlp_clock_gating_funcs; 844 + else if (GRAPHICS_VER(i915) == 12) 845 + i915->clock_gating_funcs = &gen12lp_clock_gating_funcs; 846 + else if (GRAPHICS_VER(i915) == 11) 847 + i915->clock_gating_funcs = &icl_clock_gating_funcs; 848 + else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) 849 + i915->clock_gating_funcs = &cfl_clock_gating_funcs; 850 + else if (IS_SKYLAKE(i915)) 851 + i915->clock_gating_funcs = &skl_clock_gating_funcs; 852 + else if (IS_KABYLAKE(i915)) 853 + i915->clock_gating_funcs = &kbl_clock_gating_funcs; 854 + else if (IS_BROXTON(i915)) 855 + i915->clock_gating_funcs = &bxt_clock_gating_funcs; 856 + else if (IS_GEMINILAKE(i915)) 857 + i915->clock_gating_funcs = &glk_clock_gating_funcs; 858 + else if (IS_BROADWELL(i915)) 859 + i915->clock_gating_funcs = &bdw_clock_gating_funcs; 860 + else if (IS_CHERRYVIEW(i915)) 861 + i915->clock_gating_funcs = &chv_clock_gating_funcs; 862 + else if (IS_HASWELL(i915)) 863 + i915->clock_gating_funcs = &hsw_clock_gating_funcs; 864 + else if (IS_IVYBRIDGE(i915)) 865 + i915->clock_gating_funcs = &ivb_clock_gating_funcs; 866 + else if (IS_VALLEYVIEW(i915)) 867 + i915->clock_gating_funcs = &vlv_clock_gating_funcs; 868 + else if (GRAPHICS_VER(i915) == 6) 869 + i915->clock_gating_funcs = &gen6_clock_gating_funcs; 870 + else if (GRAPHICS_VER(i915) == 5) 871 + i915->clock_gating_funcs = &ilk_clock_gating_funcs; 872 + else if (IS_G4X(i915)) 873 + i915->clock_gating_funcs = &g4x_clock_gating_funcs; 874 + else 
if (IS_I965GM(i915)) 875 + i915->clock_gating_funcs = &i965gm_clock_gating_funcs; 876 + else if (IS_I965G(i915)) 877 + i915->clock_gating_funcs = &i965g_clock_gating_funcs; 878 + else if (GRAPHICS_VER(i915) == 3) 879 + i915->clock_gating_funcs = &gen3_clock_gating_funcs; 880 + else if (IS_I85X(i915) || IS_I865G(i915)) 881 + i915->clock_gating_funcs = &i85x_clock_gating_funcs; 882 + else if (GRAPHICS_VER(i915) == 2) 883 + i915->clock_gating_funcs = &i830_clock_gating_funcs; 884 + else { 885 + MISSING_CASE(INTEL_DEVID(i915)); 886 + i915->clock_gating_funcs = &nop_clock_gating_funcs; 887 + } 888 + }
+14
drivers/gpu/drm/i915/intel_clock_gating.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2019 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_CLOCK_GATING_H__ 7 + #define __INTEL_CLOCK_GATING_H__ 8 + 9 + struct drm_i915_private; 10 + 11 + void intel_clock_gating_init(struct drm_i915_private *i915); 12 + void intel_clock_gating_hooks_init(struct drm_i915_private *i915); 13 + 14 + #endif /* __INTEL_CLOCK_GATING_H__ */
-1
drivers/gpu/drm/i915/intel_device_info.h
··· 193 193 func(has_hotplug); \ 194 194 func(has_hti); \ 195 195 func(has_ipc); \ 196 - func(has_modular_fia); \ 197 196 func(has_overlay); \ 198 197 func(has_psr); \ 199 198 func(has_psr_hw_tracking); \
+4
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
··· 7 7 #include "display/intel_backlight_regs.h" 8 8 #include "display/intel_display_types.h" 9 9 #include "display/intel_dmc_regs.h" 10 + #include "display/intel_dp_aux_regs.h" 10 11 #include "display/intel_dpio_phy.h" 12 + #include "display/intel_fdi_regs.h" 11 13 #include "display/intel_lvds_regs.h" 14 + #include "display/intel_psr_regs.h" 15 + #include "display/skl_watermark_regs.h" 12 16 #include "display/vlv_dsi_pll_regs.h" 13 17 #include "gt/intel_gt_regs.h" 14 18 #include "gvt/gvt.h"
-885
drivers/gpu/drm/i915/intel_pm.c
··· 1 - /* 2 - * Copyright © 2012 Intel Corporation 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice (including the next 12 - * paragraph) shall be included in all copies or substantial portions of the 13 - * Software. 14 - * 15 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 - * IN THE SOFTWARE. 
22 - * 23 - * Authors: 24 - * Eugeni Dodonov <eugeni.dodonov@intel.com> 25 - * 26 - */ 27 - 28 - #include "display/intel_de.h" 29 - #include "display/intel_display.h" 30 - #include "display/intel_display_trace.h" 31 - #include "display/skl_watermark.h" 32 - 33 - #include "gt/intel_engine_regs.h" 34 - #include "gt/intel_gt.h" 35 - #include "gt/intel_gt_mcr.h" 36 - #include "gt/intel_gt_regs.h" 37 - 38 - #include "i915_drv.h" 39 - #include "intel_mchbar_regs.h" 40 - #include "intel_pm.h" 41 - #include "vlv_sideband.h" 42 - 43 - struct drm_i915_clock_gating_funcs { 44 - void (*init_clock_gating)(struct drm_i915_private *i915); 45 - }; 46 - 47 - static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) 48 - { 49 - if (HAS_LLC(dev_priv)) { 50 - /* 51 - * WaCompressedResourceDisplayNewHashMode:skl,kbl 52 - * Display WA #0390: skl,kbl 53 - * 54 - * Must match Sampler, Pixel Back End, and Media. See 55 - * WaCompressedResourceSamplerPbeMediaNewHashMode. 56 - */ 57 - intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PAR1_1, 0, SKL_DE_COMPRESSED_HASH_MODE); 58 - } 59 - 60 - /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ 61 - intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PAR1_1, 0, SKL_EDP_PSR_FIX_RDWRAP); 62 - 63 - /* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */ 64 - intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, 0, MASK_WAKEMEM); 65 - 66 - /* 67 - * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl 68 - * Display WA #0859: skl,bxt,kbl,glk,cfl 69 - */ 70 - intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, 0, DISP_FBC_MEMORY_WAKE); 71 - } 72 - 73 - static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) 74 - { 75 - gen9_init_clock_gating(dev_priv); 76 - 77 - /* WaDisableSDEUnitClockGating:bxt */ 78 - intel_uncore_rmw(&dev_priv->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 79 - 80 - /* 81 - * FIXME: 82 - * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only. 
83 - */ 84 - intel_uncore_rmw(&dev_priv->uncore, GEN8_UCGCTL6, 0, GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); 85 - 86 - /* 87 - * Wa: Backlight PWM may stop in the asserted state, causing backlight 88 - * to stay fully on. 89 - */ 90 - intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) | 91 - PWM1_GATING_DIS | PWM2_GATING_DIS); 92 - 93 - /* 94 - * Lower the display internal timeout. 95 - * This is needed to avoid any hard hangs when DSI port PLL 96 - * is off and a MMIO access is attempted by any privilege 97 - * application, using batch buffers or any other means. 98 - */ 99 - intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950)); 100 - 101 - /* 102 - * WaFbcTurnOffFbcWatermark:bxt 103 - * Display WA #0562: bxt 104 - */ 105 - intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); 106 - 107 - /* 108 - * WaFbcHighMemBwCorruptionAvoidance:bxt 109 - * Display WA #0883: bxt 110 - */ 111 - intel_uncore_rmw(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0); 112 - } 113 - 114 - static void glk_init_clock_gating(struct drm_i915_private *dev_priv) 115 - { 116 - gen9_init_clock_gating(dev_priv); 117 - 118 - /* 119 - * WaDisablePWMClockGating:glk 120 - * Backlight PWM may stop in the asserted state, causing backlight 121 - * to stay fully on. 122 - */ 123 - intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) | 124 - PWM1_GATING_DIS | PWM2_GATING_DIS); 125 - } 126 - 127 - static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) 128 - { 129 - /* 130 - * On Ibex Peak and Cougar Point, we need to disable clock 131 - * gating for the panel power sequencer or it will fail to 132 - * start up when no ports are active. 
133 - */ 134 - intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 135 - } 136 - 137 - static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) 138 - { 139 - enum pipe pipe; 140 - 141 - for_each_pipe(dev_priv, pipe) { 142 - intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(pipe), 0, DISP_TRICKLE_FEED_DISABLE); 143 - 144 - intel_uncore_rmw(&dev_priv->uncore, DSPSURF(pipe), 0, 0); 145 - intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe)); 146 - } 147 - } 148 - 149 - static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) 150 - { 151 - u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 152 - 153 - /* 154 - * Required for FBC 155 - * WaFbcDisableDpfcClockGating:ilk 156 - */ 157 - dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | 158 - ILK_DPFCUNIT_CLOCK_GATE_DISABLE | 159 - ILK_DPFDUNIT_CLOCK_GATE_ENABLE; 160 - 161 - intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0, 162 - MARIUNIT_CLOCK_GATE_DISABLE | 163 - SVSMUNIT_CLOCK_GATE_DISABLE); 164 - intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1, 165 - VFMUNIT_CLOCK_GATE_DISABLE); 166 - 167 - /* 168 - * According to the spec the following bits should be set in 169 - * order to enable memory self-refresh 170 - * The bit 22/21 of 0x42004 171 - * The bit 5 of 0x42020 172 - * The bit 15 of 0x45000 173 - */ 174 - intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 175 - (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | 176 - ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 177 - dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; 178 - intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, 179 - (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | 180 - DISP_FBC_WM_DIS)); 181 - 182 - /* 183 - * Based on the document from hardware guys the following bits 184 - * should be set unconditionally in order to enable FBC. 185 - * The bit 22 of 0x42000 186 - * The bit 22 of 0x42004 187 - * The bit 7,8,9 of 0x42020. 
188 - */ 189 - if (IS_IRONLAKE_M(dev_priv)) { 190 - /* WaFbcAsynchFlipDisableFbcQueue:ilk */ 191 - intel_uncore_rmw(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1, 0, ILK_FBCQ_DIS); 192 - intel_uncore_rmw(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 0, ILK_DPARB_GATE); 193 - } 194 - 195 - intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate); 196 - 197 - intel_uncore_rmw(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 0, ILK_ELPIN_409_SELECT); 198 - 199 - g4x_disable_trickle_feed(dev_priv); 200 - 201 - ibx_init_clock_gating(dev_priv); 202 - } 203 - 204 - static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) 205 - { 206 - enum pipe pipe; 207 - u32 val; 208 - 209 - /* 210 - * On Ibex Peak and Cougar Point, we need to disable clock 211 - * gating for the panel power sequencer or it will fail to 212 - * start up when no ports are active. 213 - */ 214 - intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | 215 - PCH_DPLUNIT_CLOCK_GATE_DISABLE | 216 - PCH_CPUNIT_CLOCK_GATE_DISABLE); 217 - intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN2, 0, DPLS_EDP_PPS_FIX_DIS); 218 - /* The below fixes the weird display corruption, a few pixels shifted 219 - * downward, on (only) LVDS of some HP laptops with IVY. 
220 - */ 221 - for_each_pipe(dev_priv, pipe) { 222 - val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe)); 223 - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 224 - val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 225 - if (dev_priv->display.vbt.fdi_rx_polarity_inverted) 226 - val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 227 - val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; 228 - val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; 229 - intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val); 230 - } 231 - /* WADP0ClockGatingDisable */ 232 - for_each_pipe(dev_priv, pipe) { 233 - intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe), 234 - TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 235 - } 236 - } 237 - 238 - static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) 239 - { 240 - u32 tmp; 241 - 242 - tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD); 243 - if (REG_FIELD_GET(SSKPD_WM0_MASK_SNB, tmp) != 12) 244 - drm_dbg_kms(&dev_priv->drm, 245 - "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", 246 - tmp); 247 - } 248 - 249 - static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) 250 - { 251 - u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 252 - 253 - intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate); 254 - 255 - intel_uncore_rmw(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 0, ILK_ELPIN_409_SELECT); 256 - 257 - intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, 258 - intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | 259 - GEN6_BLBUNIT_CLOCK_GATE_DISABLE | 260 - GEN6_CSUNIT_CLOCK_GATE_DISABLE); 261 - 262 - /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 263 - * gating disable must be set. Failure to set it results in 264 - * flickering pixels due to Z write ordering failures after 265 - * some amount of runtime in the Mesa "fire" demo, and Unigine 266 - * Sanctuary and Tropics, and apparently anything else with 267 - * alpha test or pixel discard. 
268 - * 269 - * According to the spec, bit 11 (RCCUNIT) must also be set, 270 - * but we didn't debug actual testcases to find it out. 271 - * 272 - * WaDisableRCCUnitClockGating:snb 273 - * WaDisableRCPBUnitClockGating:snb 274 - */ 275 - intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2, 276 - GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 277 - GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 278 - 279 - /* 280 - * According to the spec the following bits should be 281 - * set in order to enable memory self-refresh and fbc: 282 - * The bit21 and bit22 of 0x42000 283 - * The bit21 and bit22 of 0x42004 284 - * The bit5 and bit7 of 0x42020 285 - * The bit14 of 0x70180 286 - * The bit14 of 0x71180 287 - * 288 - * WaFbcAsynchFlipDisableFbcQueue:snb 289 - */ 290 - intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1, 291 - intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) | 292 - ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 293 - intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, 294 - intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | 295 - ILK_DPARB_GATE | ILK_VSDPFD_FULL); 296 - intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, 297 - intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) | 298 - ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 299 - ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 300 - 301 - g4x_disable_trickle_feed(dev_priv); 302 - 303 - cpt_init_clock_gating(dev_priv); 304 - 305 - gen6_check_mch_setup(dev_priv); 306 - } 307 - 308 - static void lpt_init_clock_gating(struct drm_i915_private *dev_priv) 309 - { 310 - /* 311 - * TODO: this bit should only be enabled when really needed, then 312 - * disabled when not needed anymore in order to save power. 
313 - */ 314 - if (HAS_PCH_LPT_LP(dev_priv)) 315 - intel_uncore_rmw(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, 316 - 0, PCH_LP_PARTITION_LEVEL_DISABLE); 317 - 318 - /* WADPOClockGatingDisable:hsw */ 319 - intel_uncore_rmw(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A), 320 - 0, TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 321 - } 322 - 323 - static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, 324 - int general_prio_credits, 325 - int high_prio_credits) 326 - { 327 - u32 misccpctl; 328 - u32 val; 329 - 330 - /* WaTempDisableDOPClkGating:bdw */ 331 - misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL, 332 - GEN7_DOP_CLOCK_GATE_ENABLE, 0); 333 - 334 - val = intel_gt_mcr_read_any(to_gt(dev_priv), GEN8_L3SQCREG1); 335 - val &= ~L3_PRIO_CREDITS_MASK; 336 - val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits); 337 - val |= L3_HIGH_PRIO_CREDITS(high_prio_credits); 338 - intel_gt_mcr_multicast_write(to_gt(dev_priv), GEN8_L3SQCREG1, val); 339 - 340 - /* 341 - * Wait at least 100 clocks before re-enabling clock gating. 342 - * See the definition of L3SQCREG1 in BSpec. 
343 - */ 344 - intel_gt_mcr_read_any(to_gt(dev_priv), GEN8_L3SQCREG1); 345 - udelay(1); 346 - intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl); 347 - } 348 - 349 - static void icl_init_clock_gating(struct drm_i915_private *dev_priv) 350 - { 351 - /* Wa_1409120013:icl,ehl */ 352 - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 353 - DPFC_CHICKEN_COMP_DUMMY_PIXEL); 354 - 355 - /*Wa_14010594013:icl, ehl */ 356 - intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, 357 - 0, ICL_DELAY_PMRSP); 358 - } 359 - 360 - static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) 361 - { 362 - /* Wa_1409120013 */ 363 - if (DISPLAY_VER(dev_priv) == 12) 364 - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 365 - DPFC_CHICKEN_COMP_DUMMY_PIXEL); 366 - 367 - /* Wa_14013723622:tgl,rkl,dg1,adl-s */ 368 - if (DISPLAY_VER(dev_priv) == 12) 369 - intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY, 370 - CLKREQ_POLICY_MEM_UP_OVRD, 0); 371 - } 372 - 373 - static void adlp_init_clock_gating(struct drm_i915_private *dev_priv) 374 - { 375 - gen12lp_init_clock_gating(dev_priv); 376 - 377 - /* Wa_22011091694:adlp */ 378 - intel_de_rmw(dev_priv, GEN9_CLKGATE_DIS_5, 0, DPCE_GATING_DIS); 379 - 380 - /* Bspec/49189 Initialize Sequence */ 381 - intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0); 382 - } 383 - 384 - static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv) 385 - { 386 - /* Wa_22010146351:xehpsdv */ 387 - if (IS_XEHPSDV_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0)) 388 - intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS); 389 - } 390 - 391 - static void dg2_init_clock_gating(struct drm_i915_private *i915) 392 - { 393 - /* Wa_22010954014:dg2 */ 394 - intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, 395 - SGSI_SIDECLK_DIS); 396 - 397 - /* 398 - * Wa_14010733611:dg2_g10 399 - * Wa_22010146351:dg2_g10 400 - */ 401 - if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, 
STEP_B0)) 402 - intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, 403 - SGR_DIS | SGGI_DIS); 404 - } 405 - 406 - static void pvc_init_clock_gating(struct drm_i915_private *dev_priv) 407 - { 408 - /* Wa_14012385139:pvc */ 409 - if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0)) 410 - intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS); 411 - 412 - /* Wa_22010954014:pvc */ 413 - if (IS_PVC_BD_STEP(dev_priv, STEP_A0, STEP_B0)) 414 - intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS); 415 - } 416 - 417 - static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) 418 - { 419 - if (!HAS_PCH_CNP(dev_priv)) 420 - return; 421 - 422 - /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */ 423 - intel_uncore_rmw(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, 0, CNP_PWM_CGE_GATING_DISABLE); 424 - } 425 - 426 - static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) 427 - { 428 - cnp_init_clock_gating(dev_priv); 429 - gen9_init_clock_gating(dev_priv); 430 - 431 - /* WAC6entrylatency:cfl */ 432 - intel_uncore_rmw(&dev_priv->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN); 433 - 434 - /* 435 - * WaFbcTurnOffFbcWatermark:cfl 436 - * Display WA #0562: cfl 437 - */ 438 - intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); 439 - 440 - /* 441 - * WaFbcNukeOnHostModify:cfl 442 - * Display WA #0873: cfl 443 - */ 444 - intel_uncore_rmw(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 445 - 0, DPFC_NUKE_ON_ANY_MODIFICATION); 446 - } 447 - 448 - static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) 449 - { 450 - gen9_init_clock_gating(dev_priv); 451 - 452 - /* WAC6entrylatency:kbl */ 453 - intel_uncore_rmw(&dev_priv->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN); 454 - 455 - /* WaDisableSDEUnitClockGating:kbl */ 456 - if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0)) 457 - intel_uncore_rmw(&dev_priv->uncore, GEN8_UCGCTL6, 458 - 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 459 - 460 - /* 
WaDisableGamClockGating:kbl */ 461 - if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0)) 462 - intel_uncore_rmw(&dev_priv->uncore, GEN6_UCGCTL1, 463 - 0, GEN6_GAMUNIT_CLOCK_GATE_DISABLE); 464 - 465 - /* 466 - * WaFbcTurnOffFbcWatermark:kbl 467 - * Display WA #0562: kbl 468 - */ 469 - intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); 470 - 471 - /* 472 - * WaFbcNukeOnHostModify:kbl 473 - * Display WA #0873: kbl 474 - */ 475 - intel_uncore_rmw(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 476 - 0, DPFC_NUKE_ON_ANY_MODIFICATION); 477 - } 478 - 479 - static void skl_init_clock_gating(struct drm_i915_private *dev_priv) 480 - { 481 - gen9_init_clock_gating(dev_priv); 482 - 483 - /* WaDisableDopClockGating:skl */ 484 - intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL, 485 - GEN7_DOP_CLOCK_GATE_ENABLE, 0); 486 - 487 - /* WAC6entrylatency:skl */ 488 - intel_uncore_rmw(&dev_priv->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN); 489 - 490 - /* 491 - * WaFbcTurnOffFbcWatermark:skl 492 - * Display WA #0562: skl 493 - */ 494 - intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, 0, DISP_FBC_WM_DIS); 495 - 496 - /* 497 - * WaFbcNukeOnHostModify:skl 498 - * Display WA #0873: skl 499 - */ 500 - intel_uncore_rmw(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 501 - 0, DPFC_NUKE_ON_ANY_MODIFICATION); 502 - 503 - /* 504 - * WaFbcHighMemBwCorruptionAvoidance:skl 505 - * Display WA #0883: skl 506 - */ 507 - intel_uncore_rmw(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 0, DPFC_DISABLE_DUMMY0); 508 - } 509 - 510 - static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) 511 - { 512 - enum pipe pipe; 513 - 514 - /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ 515 - intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), 0, HSW_FBCQ_DIS); 516 - 517 - /* WaSwitchSolVfFArbitrationPriority:bdw */ 518 - intel_uncore_rmw(&dev_priv->uncore, GAM_ECOCHK, 0, HSW_ECOCHK_ARB_PRIO_SOL); 519 - 520 - /* WaPsrDPAMaskVBlankInSRD:bdw */ 521 - 
intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PAR1_1, 0, DPA_MASK_VBLANK_SRD); 522 - 523 - for_each_pipe(dev_priv, pipe) { 524 - /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 525 - intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), 526 - 0, BDW_DPRS_MASK_VBLANK_SRD); 527 - } 528 - 529 - /* WaVSRefCountFullforceMissDisable:bdw */ 530 - /* WaDSRefCountFullforceMissDisable:bdw */ 531 - intel_uncore_rmw(&dev_priv->uncore, GEN7_FF_THREAD_MODE, 532 - GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME, 0); 533 - 534 - intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE), 535 - _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 536 - 537 - /* WaDisableSDEUnitClockGating:bdw */ 538 - intel_uncore_rmw(&dev_priv->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 539 - 540 - /* WaProgramL3SqcReg1Default:bdw */ 541 - gen8_set_l3sqc_credits(dev_priv, 30, 2); 542 - 543 - /* WaKVMNotificationOnConfigChange:bdw */ 544 - intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PAR2_1, 545 - 0, KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); 546 - 547 - lpt_init_clock_gating(dev_priv); 548 - 549 - /* WaDisableDopClockGating:bdw 550 - * 551 - * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP 552 - * clock gating. 
553 - */ 554 - intel_uncore_rmw(&dev_priv->uncore, GEN6_UCGCTL1, 0, GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); 555 - } 556 - 557 - static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) 558 - { 559 - /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ 560 - intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), 0, HSW_FBCQ_DIS); 561 - 562 - /* This is required by WaCatErrorRejectionIssue:hsw */ 563 - intel_uncore_rmw(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 564 - 0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 565 - 566 - /* WaSwitchSolVfFArbitrationPriority:hsw */ 567 - intel_uncore_rmw(&dev_priv->uncore, GAM_ECOCHK, 0, HSW_ECOCHK_ARB_PRIO_SOL); 568 - 569 - lpt_init_clock_gating(dev_priv); 570 - } 571 - 572 - static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) 573 - { 574 - intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 575 - 576 - /* WaFbcAsynchFlipDisableFbcQueue:ivb */ 577 - intel_uncore_rmw(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1, 0, ILK_FBCQ_DIS); 578 - 579 - /* WaDisableBackToBackFlipFix:ivb */ 580 - intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3, 581 - CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 582 - CHICKEN3_DGMG_DONE_FIX_DISABLE); 583 - 584 - if (IS_IVB_GT1(dev_priv)) 585 - intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2, 586 - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 587 - else { 588 - /* must write both registers */ 589 - intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2, 590 - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 591 - intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2, 592 - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 593 - } 594 - 595 - /* 596 - * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 597 - * This implements the WaDisableRCZUnitClockGating:ivb workaround. 
598 - */ 599 - intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2, 600 - GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 601 - 602 - /* This is required by WaCatErrorRejectionIssue:ivb */ 603 - intel_uncore_rmw(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 604 - 0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 605 - 606 - g4x_disable_trickle_feed(dev_priv); 607 - 608 - intel_uncore_rmw(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, GEN6_MBC_SNPCR_MASK, 609 - GEN6_MBC_SNPCR_MED); 610 - 611 - if (!HAS_PCH_NOP(dev_priv)) 612 - cpt_init_clock_gating(dev_priv); 613 - 614 - gen6_check_mch_setup(dev_priv); 615 - } 616 - 617 - static void vlv_init_clock_gating(struct drm_i915_private *dev_priv) 618 - { 619 - /* WaDisableBackToBackFlipFix:vlv */ 620 - intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3, 621 - CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 622 - CHICKEN3_DGMG_DONE_FIX_DISABLE); 623 - 624 - /* WaDisableDopClockGating:vlv */ 625 - intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2, 626 - _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 627 - 628 - /* This is required by WaCatErrorRejectionIssue:vlv */ 629 - intel_uncore_rmw(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 630 - 0, GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 631 - 632 - /* 633 - * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 634 - * This implements the WaDisableRCZUnitClockGating:vlv workaround. 635 - */ 636 - intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2, 637 - GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 638 - 639 - /* WaDisableL3Bank2xClockGate:vlv 640 - * Disabling L3 clock gating- MMIO 940c[25] = 1 641 - * Set bit 25, to disable L3_BANK_2x_CLK_GATING */ 642 - intel_uncore_rmw(&dev_priv->uncore, GEN7_UCGCTL4, 0, GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 643 - 644 - /* 645 - * WaDisableVLVClockGating_VBIIssue:vlv 646 - * Disable clock gating on th GCFG unit to prevent a delay 647 - * in the reporting of vblank events. 
648 - */ 649 - intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS); 650 - } 651 - 652 - static void chv_init_clock_gating(struct drm_i915_private *dev_priv) 653 - { 654 - /* WaVSRefCountFullforceMissDisable:chv */ 655 - /* WaDSRefCountFullforceMissDisable:chv */ 656 - intel_uncore_rmw(&dev_priv->uncore, GEN7_FF_THREAD_MODE, 657 - GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME, 0); 658 - 659 - /* WaDisableSemaphoreAndSyncFlipWait:chv */ 660 - intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE), 661 - _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 662 - 663 - /* WaDisableCSUnitClockGating:chv */ 664 - intel_uncore_rmw(&dev_priv->uncore, GEN6_UCGCTL1, 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE); 665 - 666 - /* WaDisableSDEUnitClockGating:chv */ 667 - intel_uncore_rmw(&dev_priv->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 668 - 669 - /* 670 - * WaProgramL3SqcReg1Default:chv 671 - * See gfxspecs/Related Documents/Performance Guide/ 672 - * LSQC Setting Recommendations. 
673 - */ 674 - gen8_set_l3sqc_credits(dev_priv, 38, 2); 675 - } 676 - 677 - static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) 678 - { 679 - u32 dspclk_gate; 680 - 681 - intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0); 682 - intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 683 - GS_UNIT_CLOCK_GATE_DISABLE | 684 - CL_UNIT_CLOCK_GATE_DISABLE); 685 - intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0); 686 - dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | 687 - OVRUNIT_CLOCK_GATE_DISABLE | 688 - OVCUNIT_CLOCK_GATE_DISABLE; 689 - if (IS_GM45(dev_priv)) 690 - dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 691 - intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D(dev_priv), dspclk_gate); 692 - 693 - g4x_disable_trickle_feed(dev_priv); 694 - } 695 - 696 - static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv) 697 - { 698 - struct intel_uncore *uncore = &dev_priv->uncore; 699 - 700 - intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 701 - intel_uncore_write(uncore, RENCLK_GATE_D2, 0); 702 - intel_uncore_write(uncore, DSPCLK_GATE_D(dev_priv), 0); 703 - intel_uncore_write(uncore, RAMCLK_GATE_D, 0); 704 - intel_uncore_write16(uncore, DEUC, 0); 705 - intel_uncore_write(uncore, 706 - MI_ARB_STATE, 707 - _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 708 - } 709 - 710 - static void i965g_init_clock_gating(struct drm_i915_private *dev_priv) 711 - { 712 - intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 713 - I965_RCC_CLOCK_GATE_DISABLE | 714 - I965_RCPB_CLOCK_GATE_DISABLE | 715 - I965_ISC_CLOCK_GATE_DISABLE | 716 - I965_FBC_CLOCK_GATE_DISABLE); 717 - intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0); 718 - intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, 719 - _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 720 - } 721 - 722 - static void gen3_init_clock_gating(struct drm_i915_private *dev_priv) 723 - { 724 - u32 dstate 
= intel_uncore_read(&dev_priv->uncore, D_STATE); 725 - 726 - dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 727 - DSTATE_DOT_CLOCK_GATING; 728 - intel_uncore_write(&dev_priv->uncore, D_STATE, dstate); 729 - 730 - if (IS_PINEVIEW(dev_priv)) 731 - intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE), 732 - _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); 733 - 734 - /* IIR "flip pending" means done if this bit is set */ 735 - intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE), 736 - _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); 737 - 738 - /* interrupts should cause a wake up from C3 */ 739 - intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); 740 - 741 - /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 742 - intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); 743 - 744 - intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, 745 - _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 746 - } 747 - 748 - static void i85x_init_clock_gating(struct drm_i915_private *dev_priv) 749 - { 750 - intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 751 - 752 - /* interrupts should cause a wake up from C3 */ 753 - intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | 754 - _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); 755 - 756 - intel_uncore_write(&dev_priv->uncore, MEM_MODE, 757 - _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE)); 758 - 759 - /* 760 - * Have FBC ignore 3D activity since we use software 761 - * render tracking, and otherwise a pure 3D workload 762 - * (even if it just renders a single frame and then does 763 - * abosultely nothing) would not allow FBC to recompress 764 - * until a 2D blit occurs. 
765 - */ 766 - intel_uncore_write(&dev_priv->uncore, SCPD0, 767 - _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D)); 768 - } 769 - 770 - static void i830_init_clock_gating(struct drm_i915_private *dev_priv) 771 - { 772 - intel_uncore_write(&dev_priv->uncore, MEM_MODE, 773 - _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | 774 - _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); 775 - } 776 - 777 - void intel_init_clock_gating(struct drm_i915_private *dev_priv) 778 - { 779 - dev_priv->clock_gating_funcs->init_clock_gating(dev_priv); 780 - } 781 - 782 - static void nop_init_clock_gating(struct drm_i915_private *dev_priv) 783 - { 784 - drm_dbg_kms(&dev_priv->drm, 785 - "No clock gating settings or workarounds applied.\n"); 786 - } 787 - 788 - #define CG_FUNCS(platform) \ 789 - static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = { \ 790 - .init_clock_gating = platform##_init_clock_gating, \ 791 - } 792 - 793 - CG_FUNCS(pvc); 794 - CG_FUNCS(dg2); 795 - CG_FUNCS(xehpsdv); 796 - CG_FUNCS(adlp); 797 - CG_FUNCS(gen12lp); 798 - CG_FUNCS(icl); 799 - CG_FUNCS(cfl); 800 - CG_FUNCS(skl); 801 - CG_FUNCS(kbl); 802 - CG_FUNCS(bxt); 803 - CG_FUNCS(glk); 804 - CG_FUNCS(bdw); 805 - CG_FUNCS(chv); 806 - CG_FUNCS(hsw); 807 - CG_FUNCS(ivb); 808 - CG_FUNCS(vlv); 809 - CG_FUNCS(gen6); 810 - CG_FUNCS(ilk); 811 - CG_FUNCS(g4x); 812 - CG_FUNCS(i965gm); 813 - CG_FUNCS(i965g); 814 - CG_FUNCS(gen3); 815 - CG_FUNCS(i85x); 816 - CG_FUNCS(i830); 817 - CG_FUNCS(nop); 818 - #undef CG_FUNCS 819 - 820 - /** 821 - * intel_init_clock_gating_hooks - setup the clock gating hooks 822 - * @dev_priv: device private 823 - * 824 - * Setup the hooks that configure which clocks of a given platform can be 825 - * gated and also apply various GT and display specific workarounds for these 826 - * platforms. Note that some GT specific workarounds are applied separately 827 - * when GPU contexts or batchbuffers start their execution. 
828 - */ 829 - void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) 830 - { 831 - if (IS_METEORLAKE(dev_priv)) 832 - dev_priv->clock_gating_funcs = &nop_clock_gating_funcs; 833 - else if (IS_PONTEVECCHIO(dev_priv)) 834 - dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs; 835 - else if (IS_DG2(dev_priv)) 836 - dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs; 837 - else if (IS_XEHPSDV(dev_priv)) 838 - dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs; 839 - else if (IS_ALDERLAKE_P(dev_priv)) 840 - dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs; 841 - else if (GRAPHICS_VER(dev_priv) == 12) 842 - dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs; 843 - else if (GRAPHICS_VER(dev_priv) == 11) 844 - dev_priv->clock_gating_funcs = &icl_clock_gating_funcs; 845 - else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) 846 - dev_priv->clock_gating_funcs = &cfl_clock_gating_funcs; 847 - else if (IS_SKYLAKE(dev_priv)) 848 - dev_priv->clock_gating_funcs = &skl_clock_gating_funcs; 849 - else if (IS_KABYLAKE(dev_priv)) 850 - dev_priv->clock_gating_funcs = &kbl_clock_gating_funcs; 851 - else if (IS_BROXTON(dev_priv)) 852 - dev_priv->clock_gating_funcs = &bxt_clock_gating_funcs; 853 - else if (IS_GEMINILAKE(dev_priv)) 854 - dev_priv->clock_gating_funcs = &glk_clock_gating_funcs; 855 - else if (IS_BROADWELL(dev_priv)) 856 - dev_priv->clock_gating_funcs = &bdw_clock_gating_funcs; 857 - else if (IS_CHERRYVIEW(dev_priv)) 858 - dev_priv->clock_gating_funcs = &chv_clock_gating_funcs; 859 - else if (IS_HASWELL(dev_priv)) 860 - dev_priv->clock_gating_funcs = &hsw_clock_gating_funcs; 861 - else if (IS_IVYBRIDGE(dev_priv)) 862 - dev_priv->clock_gating_funcs = &ivb_clock_gating_funcs; 863 - else if (IS_VALLEYVIEW(dev_priv)) 864 - dev_priv->clock_gating_funcs = &vlv_clock_gating_funcs; 865 - else if (GRAPHICS_VER(dev_priv) == 6) 866 - dev_priv->clock_gating_funcs = &gen6_clock_gating_funcs; 867 - else if (GRAPHICS_VER(dev_priv) == 5) 
868 - dev_priv->clock_gating_funcs = &ilk_clock_gating_funcs; 869 - else if (IS_G4X(dev_priv)) 870 - dev_priv->clock_gating_funcs = &g4x_clock_gating_funcs; 871 - else if (IS_I965GM(dev_priv)) 872 - dev_priv->clock_gating_funcs = &i965gm_clock_gating_funcs; 873 - else if (IS_I965G(dev_priv)) 874 - dev_priv->clock_gating_funcs = &i965g_clock_gating_funcs; 875 - else if (GRAPHICS_VER(dev_priv) == 3) 876 - dev_priv->clock_gating_funcs = &gen3_clock_gating_funcs; 877 - else if (IS_I85X(dev_priv) || IS_I865G(dev_priv)) 878 - dev_priv->clock_gating_funcs = &i85x_clock_gating_funcs; 879 - else if (GRAPHICS_VER(dev_priv) == 2) 880 - dev_priv->clock_gating_funcs = &i830_clock_gating_funcs; 881 - else { 882 - MISSING_CASE(INTEL_DEVID(dev_priv)); 883 - dev_priv->clock_gating_funcs = &nop_clock_gating_funcs; 884 - } 885 - }
-18
drivers/gpu/drm/i915/intel_pm.h
··· 1 - /* SPDX-License-Identifier: MIT */ 2 - /* 3 - * Copyright © 2019 Intel Corporation 4 - */ 5 - 6 - #ifndef __INTEL_PM_H__ 7 - #define __INTEL_PM_H__ 8 - 9 - #include <linux/types.h> 10 - 11 - struct drm_i915_private; 12 - struct intel_crtc_state; 13 - struct intel_plane_state; 14 - 15 - void intel_init_clock_gating(struct drm_i915_private *dev_priv); 16 - void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); 17 - 18 - #endif /* __INTEL_PM_H__ */
+1 -1
drivers/gpu/drm/i915/intel_wakeref.h
··· 105 105 } 106 106 107 107 /** 108 - * intel_wakeref_get_if_in_use: Acquire the wakeref 108 + * intel_wakeref_get_if_active: Acquire the wakeref 109 109 * @wf: the wakeref 110 110 * 111 111 * Acquire a hold on the wakeref, but only if the wakeref is already
+3
drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
··· 18 18 enum pxp_status { 19 19 PXP_STATUS_SUCCESS = 0x0, 20 20 PXP_STATUS_ERROR_API_VERSION = 0x1002, 21 + PXP_STATUS_NOT_READY = 0x100e, 22 + PXP_STATUS_PLATFCONFIG_KF1_NOVERIF = 0x101a, 23 + PXP_STATUS_PLATFCONFIG_KF1_BAD = 0x101f, 21 24 PXP_STATUS_OP_NOT_PERMITTED = 0x4013 22 25 }; 23 26
+1 -1
drivers/gpu/drm/i915/pxp/intel_pxp_session.c
··· 74 74 75 75 ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true); 76 76 if (ret) { 77 - drm_err(&gt->i915->drm, "arb session failed to go in play\n"); 77 + drm_dbg(&gt->i915->drm, "arb session failed to go in play\n"); 78 78 return ret; 79 79 } 80 80 drm_dbg(&gt->i915->drm, "PXP ARB session is alive\n");
+63 -14
drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
··· 19 19 #include "intel_pxp_tee.h" 20 20 #include "intel_pxp_types.h" 21 21 22 + static bool 23 + is_fw_err_platform_config(u32 type) 24 + { 25 + switch (type) { 26 + case PXP_STATUS_ERROR_API_VERSION: 27 + case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF: 28 + case PXP_STATUS_PLATFCONFIG_KF1_BAD: 29 + return true; 30 + default: 31 + break; 32 + } 33 + return false; 34 + } 35 + 36 + static const char * 37 + fw_err_to_string(u32 type) 38 + { 39 + switch (type) { 40 + case PXP_STATUS_ERROR_API_VERSION: 41 + return "ERR_API_VERSION"; 42 + case PXP_STATUS_NOT_READY: 43 + return "ERR_NOT_READY"; 44 + case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF: 45 + case PXP_STATUS_PLATFCONFIG_KF1_BAD: 46 + return "ERR_PLATFORM_CONFIG"; 47 + default: 48 + break; 49 + } 50 + return NULL; 51 + } 52 + 22 53 static int intel_pxp_tee_io_message(struct intel_pxp *pxp, 23 54 void *msg_in, u32 msg_in_size, 24 55 void *msg_out, u32 msg_out_max_size, ··· 338 307 &msg_out, sizeof(msg_out), 339 308 NULL); 340 309 341 - if (ret) 342 - drm_err(&i915->drm, "Failed to send tee msg ret=[%d]\n", ret); 343 - else if (msg_out.header.status == PXP_STATUS_ERROR_API_VERSION) 344 - drm_dbg(&i915->drm, "PXP firmware version unsupported, requested: " 345 - "CMD-ID-[0x%08x] on API-Ver-[0x%08x]\n", 346 - msg_in.header.command_id, msg_in.header.api_version); 347 - else if (msg_out.header.status != 0x0) 348 - drm_warn(&i915->drm, "PXP firmware failed arb session init request ret=[0x%08x]\n", 349 - msg_out.header.status); 310 + if (ret) { 311 + drm_err(&i915->drm, "Failed to send tee msg init arb session, ret=[%d]\n", ret); 312 + } else if (msg_out.header.status != 0) { 313 + if (is_fw_err_platform_config(msg_out.header.status)) { 314 + drm_info_once(&i915->drm, 315 + "PXP init-arb-session-%d failed due to BIOS/SOC:0x%08x:%s\n", 316 + arb_session_id, msg_out.header.status, 317 + fw_err_to_string(msg_out.header.status)); 318 + } else { 319 + drm_dbg(&i915->drm, "PXP init-arb-session--%d failed 0x%08x:%st:\n", 320 + 
arb_session_id, msg_out.header.status, 321 + fw_err_to_string(msg_out.header.status)); 322 + drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n", 323 + msg_in.header.command_id, msg_in.header.api_version); 324 + } 325 + } 350 326 351 327 return ret; 352 328 } ··· 385 347 if ((ret || msg_out.header.status != 0x0) && ++trials < 3) 386 348 goto try_again; 387 349 388 - if (ret) 389 - drm_err(&i915->drm, "Failed to send tee msg for inv-stream-key-%d, ret=[%d]\n", 350 + if (ret) { 351 + drm_err(&i915->drm, "Failed to send tee msg for inv-stream-key-%u, ret=[%d]\n", 390 352 session_id, ret); 391 - else if (msg_out.header.status != 0x0) 392 - drm_warn(&i915->drm, "PXP firmware failed inv-stream-key-%d with status 0x%08x\n", 393 - session_id, msg_out.header.status); 353 + } else if (msg_out.header.status != 0) { 354 + if (is_fw_err_platform_config(msg_out.header.status)) { 355 + drm_info_once(&i915->drm, 356 + "PXP inv-stream-key-%u failed due to BIOS/SOC :0x%08x:%s\n", 357 + session_id, msg_out.header.status, 358 + fw_err_to_string(msg_out.header.status)); 359 + } else { 360 + drm_dbg(&i915->drm, "PXP inv-stream-key-%u failed 0x%08x:%s:\n", 361 + session_id, msg_out.header.status, 362 + fw_err_to_string(msg_out.header.status)); 363 + drm_dbg(&i915->drm, " cmd-detail: ID=[0x%08x],API-Ver-[0x%08x]\n", 364 + msg_in.header.command_id, msg_in.header.api_version); 365 + } 366 + } 394 367 }
+2 -2
drivers/gpu/drm/i915/vlv_suspend.c
··· 12 12 #include "i915_reg.h" 13 13 #include "i915_trace.h" 14 14 #include "i915_utils.h" 15 - #include "intel_pm.h" 15 + #include "intel_clock_gating.h" 16 16 #include "vlv_suspend.h" 17 17 18 18 #include "gt/intel_gt_regs.h" ··· 451 451 vlv_check_no_gt_access(dev_priv); 452 452 453 453 if (rpm_resume) 454 - intel_init_clock_gating(dev_priv); 454 + intel_clock_gating_init(dev_priv); 455 455 456 456 return ret; 457 457 }
+13
include/drm/display/drm_dp_helper.h
··· 194 194 DP_DSC_SLICE_WIDTH_MULTIPLIER; 195 195 } 196 196 197 + /** 198 + * drm_dp_dsc_sink_supports_format() - check if sink supports DSC with given output format 199 + * @dsc_dpcd : DSC-capability DPCDs of the sink 200 + * @output_format: output_format which is to be checked 201 + * 202 + * Returns true if the sink supports DSC with the given output_format, false otherwise. 203 + */ 204 + static inline bool 205 + drm_dp_dsc_sink_supports_format(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], u8 output_format) 206 + { 207 + return dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & output_format; 208 + } 209 + 197 210 /* Forward Error Correction Support on DP 1.4 */ 198 211 static inline bool 199 212 drm_dp_sink_supports_fec(const u8 fec_capable)