Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2023-03-23' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Core Changes:
- drm: Add SDP Error Detection Configuration Register (Arun)

Driver Changes:
- Meteor Lake enabling and fixes (RK, Jose, Madhumitha)
- Lock the fbdev obj before vma pin (Tejas)
- DSC fixes (Stanislav)
- Fixes and clean-up on opregion code (Imre)
- More wm/vblank stuff (Ville)
- More general display code organization (Jani)
- DP Fixes (Stanislav, Ville)
- Introduce flags to ignore long HPD and link training failures, \
for handling spurious issues on CI (Vinod)
- Plane cleanups and extra registers (Ville)
- Update audio keepalive clock values (Clint)
- Rename find_section to bdb_find_section (Maarten)
- DP SDP CRC16 for 128b132b link layer (Arun)
- Fix various issues with noarm register writes (Ville)
- Fix a few TypeC / MST issues (Imre)
- Create GSC submission targeting HDCP and PXP usages on MTL+ (Suraj)
- Enable HDCP2.x via GSC CS (Suraj)

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZBy56qc9C00tCLOY@intel.com

+3327 -1472
+3
drivers/gpu/drm/i915/Makefile
··· 195 195 i915-y += \ 196 196 gt/uc/intel_gsc_fw.o \ 197 197 gt/uc/intel_gsc_uc.o \ 198 + gt/uc/intel_gsc_uc_heci_cmd_submit.o\ 198 199 gt/uc/intel_guc.o \ 199 200 gt/uc/intel_guc_ads.o \ 200 201 gt/uc/intel_guc_capture.o \ ··· 256 255 display/intel_frontbuffer.o \ 257 256 display/intel_global_state.o \ 258 257 display/intel_hdcp.o \ 258 + display/intel_hdcp_gsc.o \ 259 259 display/intel_hotplug.o \ 260 260 display/intel_hti.o \ 261 261 display/intel_lpe_audio.o \ ··· 269 267 display/intel_psr.o \ 270 268 display/intel_quirks.o \ 271 269 display/intel_sprite.o \ 270 + display/intel_sprite_uapi.o \ 272 271 display/intel_tc.o \ 273 272 display/intel_vblank.o \ 274 273 display/intel_vga.o \
+37
drivers/gpu/drm/i915/display/hsw_ips.c
··· 267 267 crtc_state->ips_enabled = true; 268 268 } 269 269 } 270 + 271 + static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused) 272 + { 273 + struct drm_i915_private *i915 = m->private; 274 + intel_wakeref_t wakeref; 275 + 276 + if (!HAS_IPS(i915)) 277 + return -ENODEV; 278 + 279 + wakeref = intel_runtime_pm_get(&i915->runtime_pm); 280 + 281 + seq_printf(m, "Enabled by kernel parameter: %s\n", 282 + str_yes_no(i915->params.enable_ips)); 283 + 284 + if (DISPLAY_VER(i915) >= 8) { 285 + seq_puts(m, "Currently: unknown\n"); 286 + } else { 287 + if (intel_de_read(i915, IPS_CTL) & IPS_ENABLE) 288 + seq_puts(m, "Currently: enabled\n"); 289 + else 290 + seq_puts(m, "Currently: disabled\n"); 291 + } 292 + 293 + intel_runtime_pm_put(&i915->runtime_pm, wakeref); 294 + 295 + return 0; 296 + } 297 + 298 + DEFINE_SHOW_ATTRIBUTE(hsw_ips_debugfs_status); 299 + 300 + void hsw_ips_debugfs_register(struct drm_i915_private *i915) 301 + { 302 + struct drm_minor *minor = i915->drm.primary; 303 + 304 + debugfs_create_file("i915_ips_status", 0444, minor->debugfs_root, 305 + i915, &hsw_ips_debugfs_status_fops); 306 + }
+2
drivers/gpu/drm/i915/display/hsw_ips.h
··· 8 8 9 9 #include <linux/types.h> 10 10 11 + struct drm_i915_private; 11 12 struct intel_atomic_state; 12 13 struct intel_crtc; 13 14 struct intel_crtc_state; ··· 23 22 int hsw_ips_compute_config(struct intel_atomic_state *state, 24 23 struct intel_crtc *crtc); 25 24 void hsw_ips_get_config(struct intel_crtc_state *crtc_state); 25 + void hsw_ips_debugfs_register(struct drm_i915_private *i915); 26 26 27 27 #endif /* __HSW_IPS_H__ */
+1 -1
drivers/gpu/drm/i915/display/icl_dsi.c
··· 1500 1500 1501 1501 gen11_dsi_get_timings(encoder, pipe_config); 1502 1502 pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); 1503 - pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); 1503 + pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc); 1504 1504 1505 1505 /* Get the details on which TE should be enabled */ 1506 1506 if (is_cmd_mode(intel_dsi))
+59 -1
drivers/gpu/drm/i915/display/intel_atomic_plane.c
··· 32 32 */ 33 33 34 34 #include <drm/drm_atomic_helper.h> 35 + #include <drm/drm_blend.h> 35 36 #include <drm/drm_fourcc.h> 36 37 37 38 #include "i915_config.h" ··· 43 42 #include "intel_display_types.h" 44 43 #include "intel_fb.h" 45 44 #include "intel_fb_pin.h" 46 - #include "intel_sprite.h" 47 45 #include "skl_scaler.h" 48 46 #include "skl_watermark.h" 49 47 ··· 936 936 937 937 /* final plane coordinates will be relative to the plane's pipe */ 938 938 drm_rect_translate(dst, -clip->x1, -clip->y1); 939 + 940 + return 0; 941 + } 942 + 943 + int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) 944 + { 945 + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 946 + const struct drm_framebuffer *fb = plane_state->hw.fb; 947 + struct drm_rect *src = &plane_state->uapi.src; 948 + u32 src_x, src_y, src_w, src_h, hsub, vsub; 949 + bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation); 950 + 951 + /* 952 + * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS 953 + * abuses hsub/vsub so we can't use them here. But as they 954 + * are limited to 32bpp RGB formats we don't actually need 955 + * to check anything. 956 + */ 957 + if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || 958 + fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) 959 + return 0; 960 + 961 + /* 962 + * Hardware doesn't handle subpixel coordinates. 963 + * Adjust to (macro)pixel boundary, but be careful not to 964 + * increase the source viewport size, because that could 965 + * push the downscaling factor out of bounds. 
966 + */ 967 + src_x = src->x1 >> 16; 968 + src_w = drm_rect_width(src) >> 16; 969 + src_y = src->y1 >> 16; 970 + src_h = drm_rect_height(src) >> 16; 971 + 972 + drm_rect_init(src, src_x << 16, src_y << 16, 973 + src_w << 16, src_h << 16); 974 + 975 + if (fb->format->format == DRM_FORMAT_RGB565 && rotated) { 976 + hsub = 2; 977 + vsub = 2; 978 + } else { 979 + hsub = fb->format->hsub; 980 + vsub = fb->format->vsub; 981 + } 982 + 983 + if (rotated) 984 + hsub = vsub = max(hsub, vsub); 985 + 986 + if (src_x % hsub || src_w % hsub) { 987 + drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n", 988 + src_x, src_w, hsub, str_yes_no(rotated)); 989 + return -EINVAL; 990 + } 991 + 992 + if (src_y % vsub || src_h % vsub) { 993 + drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n", 994 + src_y, src_h, vsub, str_yes_no(rotated)); 995 + return -EINVAL; 996 + } 939 997 940 998 return 0; 941 999 }
+1
drivers/gpu/drm/i915/display/intel_atomic_plane.h
··· 62 62 struct intel_crtc_state *crtc_state, 63 63 int min_scale, int max_scale, 64 64 bool can_position); 65 + int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); 65 66 void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, 66 67 struct intel_plane_state *plane_state); 67 68 void intel_plane_helper_add(struct intel_plane *plane);
+1 -5
drivers/gpu/drm/i915/display/intel_audio.c
··· 983 983 984 984 static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts) 985 985 { 986 - if (refclk == 24000) 987 - aud_ts->m = 12; 988 - else 989 - aud_ts->m = 15; 990 - 986 + aud_ts->m = 60; 991 987 aud_ts->n = cdclk * aud_ts->m / 24000; 992 988 } 993 989
+23 -23
drivers/gpu/drm/i915/display/intel_bios.c
··· 141 141 }; 142 142 143 143 static const void * 144 - find_section(struct drm_i915_private *i915, 145 - enum bdb_block_id section_id) 144 + bdb_find_section(struct drm_i915_private *i915, 145 + enum bdb_block_id section_id) 146 146 { 147 147 struct bdb_block_entry *entry; 148 148 ··· 201 201 const struct bdb_lvds_lfp_data_ptrs *ptrs; 202 202 size_t size; 203 203 204 - ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); 204 + ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS); 205 205 if (!ptrs) 206 206 return 0; 207 207 ··· 630 630 { 631 631 const struct bdb_lvds_options *lvds_options; 632 632 633 - lvds_options = find_section(i915, BDB_LVDS_OPTIONS); 633 + lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS); 634 634 if (!lvds_options) 635 635 return -1; 636 636 ··· 671 671 672 672 dump_pnp_id(i915, edid_id, "EDID"); 673 673 674 - ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); 674 + ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS); 675 675 if (!ptrs) 676 676 return -1; 677 677 678 - data = find_section(i915, BDB_LVDS_LFP_DATA); 678 + data = bdb_find_section(i915, BDB_LVDS_LFP_DATA); 679 679 if (!data) 680 680 return -1; 681 681 ··· 791 791 int panel_type = panel->vbt.panel_type; 792 792 int drrs_mode; 793 793 794 - lvds_options = find_section(i915, BDB_LVDS_OPTIONS); 794 + lvds_options = bdb_find_section(i915, BDB_LVDS_OPTIONS); 795 795 if (!lvds_options) 796 796 return; 797 797 ··· 881 881 const struct lvds_pnp_id *pnp_id; 882 882 int panel_type = panel->vbt.panel_type; 883 883 884 - ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS); 884 + ptrs = bdb_find_section(i915, BDB_LVDS_LFP_DATA_PTRS); 885 885 if (!ptrs) 886 886 return; 887 887 888 - data = find_section(i915, BDB_LVDS_LFP_DATA); 888 + data = bdb_find_section(i915, BDB_LVDS_LFP_DATA); 889 889 if (!data) 890 890 return; 891 891 ··· 932 932 if (i915->display.vbt.version < 229) 933 933 return; 934 934 935 - generic_dtd = find_section(i915, BDB_GENERIC_DTD); 935 + generic_dtd = 
bdb_find_section(i915, BDB_GENERIC_DTD); 936 936 if (!generic_dtd) 937 937 return; 938 938 ··· 1011 1011 int panel_type = panel->vbt.panel_type; 1012 1012 u16 level; 1013 1013 1014 - backlight_data = find_section(i915, BDB_LVDS_BACKLIGHT); 1014 + backlight_data = bdb_find_section(i915, BDB_LVDS_BACKLIGHT); 1015 1015 if (!backlight_data) 1016 1016 return; 1017 1017 ··· 1119 1119 if (index == -1) { 1120 1120 const struct bdb_sdvo_lvds_options *sdvo_lvds_options; 1121 1121 1122 - sdvo_lvds_options = find_section(i915, BDB_SDVO_LVDS_OPTIONS); 1122 + sdvo_lvds_options = bdb_find_section(i915, BDB_SDVO_LVDS_OPTIONS); 1123 1123 if (!sdvo_lvds_options) 1124 1124 return; 1125 1125 1126 1126 index = sdvo_lvds_options->panel_type; 1127 1127 } 1128 1128 1129 - dtds = find_section(i915, BDB_SDVO_PANEL_DTDS); 1129 + dtds = bdb_find_section(i915, BDB_SDVO_PANEL_DTDS); 1130 1130 if (!dtds) 1131 1131 return; 1132 1132 ··· 1162 1162 { 1163 1163 const struct bdb_general_features *general; 1164 1164 1165 - general = find_section(i915, BDB_GENERAL_FEATURES); 1165 + general = bdb_find_section(i915, BDB_GENERAL_FEATURES); 1166 1166 if (!general) 1167 1167 return; 1168 1168 ··· 1285 1285 { 1286 1286 const struct bdb_driver_features *driver; 1287 1287 1288 - driver = find_section(i915, BDB_DRIVER_FEATURES); 1288 + driver = bdb_find_section(i915, BDB_DRIVER_FEATURES); 1289 1289 if (!driver) 1290 1290 return; 1291 1291 ··· 1322 1322 { 1323 1323 const struct bdb_driver_features *driver; 1324 1324 1325 - driver = find_section(i915, BDB_DRIVER_FEATURES); 1325 + driver = bdb_find_section(i915, BDB_DRIVER_FEATURES); 1326 1326 if (!driver) 1327 1327 return; 1328 1328 ··· 1362 1362 if (i915->display.vbt.version < 228) 1363 1363 return; 1364 1364 1365 - power = find_section(i915, BDB_LFP_POWER); 1365 + power = bdb_find_section(i915, BDB_LFP_POWER); 1366 1366 if (!power) 1367 1367 return; 1368 1368 ··· 1402 1402 const struct edp_fast_link_params *edp_link_params; 1403 1403 int panel_type = 
panel->vbt.panel_type; 1404 1404 1405 - edp = find_section(i915, BDB_EDP); 1405 + edp = bdb_find_section(i915, BDB_EDP); 1406 1406 if (!edp) 1407 1407 return; 1408 1408 ··· 1532 1532 const struct psr_table *psr_table; 1533 1533 int panel_type = panel->vbt.panel_type; 1534 1534 1535 - psr = find_section(i915, BDB_PSR); 1535 + psr = bdb_find_section(i915, BDB_PSR); 1536 1536 if (!psr) { 1537 1537 drm_dbg_kms(&i915->drm, "No PSR BDB found.\n"); 1538 1538 return; ··· 1693 1693 /* Parse #52 for panel index used from panel_type already 1694 1694 * parsed 1695 1695 */ 1696 - start = find_section(i915, BDB_MIPI_CONFIG); 1696 + start = bdb_find_section(i915, BDB_MIPI_CONFIG); 1697 1697 if (!start) { 1698 1698 drm_dbg_kms(&i915->drm, "No MIPI config BDB found"); 1699 1699 return; ··· 2005 2005 if (panel->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID) 2006 2006 return; 2007 2007 2008 - sequence = find_section(i915, BDB_MIPI_SEQUENCE); 2008 + sequence = bdb_find_section(i915, BDB_MIPI_SEQUENCE); 2009 2009 if (!sequence) { 2010 2010 drm_dbg_kms(&i915->drm, 2011 2011 "No MIPI Sequence found, parsing complete\n"); ··· 2086 2086 if (i915->display.vbt.version < 198) 2087 2087 return; 2088 2088 2089 - params = find_section(i915, BDB_COMPRESSION_PARAMETERS); 2089 + params = bdb_find_section(i915, BDB_COMPRESSION_PARAMETERS); 2090 2090 if (params) { 2091 2091 /* Sanity checks */ 2092 2092 if (params->entry_size != sizeof(params->data[0])) { ··· 2792 2792 u16 block_size; 2793 2793 int bus_pin; 2794 2794 2795 - defs = find_section(i915, BDB_GENERAL_DEFINITIONS); 2795 + defs = bdb_find_section(i915, BDB_GENERAL_DEFINITIONS); 2796 2796 if (!defs) { 2797 2797 drm_dbg_kms(&i915->drm, 2798 2798 "No general definition block is found, no devices defined.\n");
+97 -4
drivers/gpu/drm/i915/display/intel_color.c
··· 47 47 */ 48 48 void (*color_commit_arm)(const struct intel_crtc_state *crtc_state); 49 49 /* 50 + * Perform any extra tasks needed after all the 51 + * double buffered registers have been latched. 52 + */ 53 + void (*color_post_update)(const struct intel_crtc_state *crtc_state); 54 + /* 50 55 * Load LUTs (and other single buffered color management 51 56 * registers). Will (hopefully) be called during the vblank 52 57 * following the latching of any double buffered registers ··· 619 614 620 615 static void icl_color_commit_noarm(const struct intel_crtc_state *crtc_state) 621 616 { 617 + /* 618 + * Despite Wa_1406463849, ICL no longer suffers from the SKL 619 + * DC5/PSR CSC black screen issue (see skl_color_commit_noarm()). 620 + * Possibly due to the extra sticky CSC arming 621 + * (see icl_color_post_update()). 622 + * 623 + * On TGL+ all CSC arming issues have been properly fixed. 624 + */ 622 625 icl_load_csc_matrix(crtc_state); 626 + } 627 + 628 + static void skl_color_commit_noarm(const struct intel_crtc_state *crtc_state) 629 + { 630 + /* 631 + * Possibly related to display WA #1184, SKL CSC loses the latched 632 + * CSC coeff/offset register values if the CSC registers are disarmed 633 + * between DC5 exit and PSR exit. This will cause the plane(s) to 634 + * output all black (until CSC_MODE is rearmed and properly latched). 635 + * Once PSR exit (and proper register latching) has occurred the 636 + * danger is over. Thus when PSR is enabled the CSC coeff/offset 637 + * register programming will be peformed from skl_color_commit_arm() 638 + * which is called after PSR exit. 
639 + */ 640 + if (!crtc_state->has_psr) 641 + ilk_load_csc_matrix(crtc_state); 623 642 } 624 643 625 644 static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state) ··· 688 659 enum pipe pipe = crtc->pipe; 689 660 u32 val = 0; 690 661 662 + if (crtc_state->has_psr) 663 + ilk_load_csc_matrix(crtc_state); 664 + 691 665 /* 692 666 * We don't (yet) allow userspace to control the pipe background color, 693 667 * so force it to black, but apply pipe gamma and CSC appropriately ··· 707 675 708 676 intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), 709 677 crtc_state->csc_mode); 678 + } 679 + 680 + static void icl_color_commit_arm(const struct intel_crtc_state *crtc_state) 681 + { 682 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 683 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 684 + enum pipe pipe = crtc->pipe; 685 + 686 + /* 687 + * We don't (yet) allow userspace to control the pipe background color, 688 + * so force it to black. 689 + */ 690 + intel_de_write(i915, SKL_BOTTOM_COLOR(pipe), 0); 691 + 692 + intel_de_write(i915, GAMMA_MODE(crtc->pipe), 693 + crtc_state->gamma_mode); 694 + 695 + intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), 696 + crtc_state->csc_mode); 697 + } 698 + 699 + static void icl_color_post_update(const struct intel_crtc_state *crtc_state) 700 + { 701 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 702 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 703 + 704 + /* 705 + * Despite Wa_1406463849, ICL CSC is no longer disarmed by 706 + * coeff/offset register *writes*. Instead, once CSC_MODE 707 + * is armed it stays armed, even after it has been latched. 708 + * Afterwards the coeff/offset registers become effectively 709 + * self-arming. That self-arming must be disabled before the 710 + * next icl_color_commit_noarm() tries to write the next set 711 + * of coeff/offset registers. Fortunately register *reads* 712 + * do still disarm the CSC. 
Naturally this must not be done 713 + * until the previously written CSC registers have actually 714 + * been latched. 715 + * 716 + * TGL+ no longer need this workaround. 717 + */ 718 + intel_de_read_fw(i915, PIPE_CSC_PREOFF_HI(crtc->pipe)); 710 719 } 711 720 712 721 static struct drm_property_blob * ··· 1447 1374 struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 1448 1375 1449 1376 i915->display.funcs.color->color_commit_arm(crtc_state); 1377 + } 1378 + 1379 + void intel_color_post_update(const struct intel_crtc_state *crtc_state) 1380 + { 1381 + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 1382 + 1383 + if (i915->display.funcs.color->color_post_update) 1384 + i915->display.funcs.color->color_post_update(crtc_state); 1450 1385 } 1451 1386 1452 1387 void intel_color_prepare_commit(struct intel_crtc_state *crtc_state) ··· 3153 3072 .lut_equal = i9xx_lut_equal, 3154 3073 }; 3155 3074 3075 + static const struct intel_color_funcs tgl_color_funcs = { 3076 + .color_check = icl_color_check, 3077 + .color_commit_noarm = icl_color_commit_noarm, 3078 + .color_commit_arm = icl_color_commit_arm, 3079 + .load_luts = icl_load_luts, 3080 + .read_luts = icl_read_luts, 3081 + .lut_equal = icl_lut_equal, 3082 + }; 3083 + 3156 3084 static const struct intel_color_funcs icl_color_funcs = { 3157 3085 .color_check = icl_color_check, 3158 3086 .color_commit_noarm = icl_color_commit_noarm, 3159 - .color_commit_arm = skl_color_commit_arm, 3087 + .color_commit_arm = icl_color_commit_arm, 3088 + .color_post_update = icl_color_post_update, 3160 3089 .load_luts = icl_load_luts, 3161 3090 .read_luts = icl_read_luts, 3162 3091 .lut_equal = icl_lut_equal, ··· 3174 3083 3175 3084 static const struct intel_color_funcs glk_color_funcs = { 3176 3085 .color_check = glk_color_check, 3177 - .color_commit_noarm = ilk_color_commit_noarm, 3086 + .color_commit_noarm = skl_color_commit_noarm, 3178 3087 .color_commit_arm = skl_color_commit_arm, 3179 3088 .load_luts 
= glk_load_luts, 3180 3089 .read_luts = glk_read_luts, ··· 3183 3092 3184 3093 static const struct intel_color_funcs skl_color_funcs = { 3185 3094 .color_check = ivb_color_check, 3186 - .color_commit_noarm = ilk_color_commit_noarm, 3095 + .color_commit_noarm = skl_color_commit_noarm, 3187 3096 .color_commit_arm = skl_color_commit_arm, 3188 3097 .load_luts = bdw_load_luts, 3189 3098 .read_luts = bdw_read_luts, ··· 3279 3188 else 3280 3189 i915->display.funcs.color = &i9xx_color_funcs; 3281 3190 } else { 3282 - if (DISPLAY_VER(i915) >= 11) 3191 + if (DISPLAY_VER(i915) >= 12) 3192 + i915->display.funcs.color = &tgl_color_funcs; 3193 + else if (DISPLAY_VER(i915) == 11) 3283 3194 i915->display.funcs.color = &icl_color_funcs; 3284 3195 else if (DISPLAY_VER(i915) == 10) 3285 3196 i915->display.funcs.color = &glk_color_funcs;
+1
drivers/gpu/drm/i915/display/intel_color.h
··· 21 21 void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state); 22 22 void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state); 23 23 void intel_color_commit_arm(const struct intel_crtc_state *crtc_state); 24 + void intel_color_post_update(const struct intel_crtc_state *crtc_state); 24 25 void intel_color_load_luts(const struct intel_crtc_state *crtc_state); 25 26 void intel_color_get_config(struct intel_crtc_state *crtc_state); 26 27 bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
+9 -1
drivers/gpu/drm/i915/display/intel_crtc.c
··· 212 212 213 213 static int intel_crtc_late_register(struct drm_crtc *crtc) 214 214 { 215 - intel_crtc_debugfs_add(crtc); 215 + intel_crtc_debugfs_add(to_intel_crtc(crtc)); 216 216 return 0; 217 217 } 218 218 ··· 685 685 * vblank start instead of vmax vblank start. 686 686 */ 687 687 intel_vrr_send_push(new_crtc_state); 688 + 689 + /* 690 + * Seamless M/N update may need to update frame timings. 691 + * 692 + * FIXME Should be synchronized with the start of vblank somehow... 693 + */ 694 + if (new_crtc_state->seamless_m_n && intel_crtc_needs_fastset(new_crtc_state)) 695 + intel_crtc_update_active_timings(new_crtc_state); 688 696 689 697 local_irq_enable(); 690 698
-1
drivers/gpu/drm/i915/display/intel_cursor.c
··· 21 21 #include "intel_fb_pin.h" 22 22 #include "intel_frontbuffer.h" 23 23 #include "intel_psr.h" 24 - #include "intel_sprite.h" 25 24 #include "skl_watermark.h" 26 25 27 26 /* Cursor formats */
+53 -8
drivers/gpu/drm/i915/display/intel_ddi.c
··· 65 65 #include "intel_psr.h" 66 66 #include "intel_quirks.h" 67 67 #include "intel_snps_phy.h" 68 - #include "intel_sprite.h" 69 68 #include "intel_tc.h" 70 69 #include "intel_vdsc.h" 71 70 #include "intel_vdsc_regs.h" ··· 2519 2520 { 2520 2521 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2521 2522 2523 + if (HAS_DP20(dev_priv)) 2524 + intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder), 2525 + crtc_state); 2526 + 2522 2527 if (DISPLAY_VER(dev_priv) >= 12) 2523 2528 tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); 2524 2529 else ··· 2621 2618 2622 2619 if (intel_crtc_has_dp_encoder(crtc_state)) 2623 2620 intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), 2624 - DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK, 2625 - DP_TP_CTL_LINK_TRAIN_PAT1); 2621 + DP_TP_CTL_ENABLE, 0); 2626 2622 2627 2623 /* Disable FEC in DP Sink */ 2628 2624 intel_ddi_disable_fec_state(encoder, crtc_state); ··· 3142 3140 wait = true; 3143 3141 } 3144 3142 3145 - dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); 3146 - dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1; 3143 + dp_tp_ctl &= ~DP_TP_CTL_ENABLE; 3147 3144 intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), dp_tp_ctl); 3148 3145 intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); 3149 3146 ··· 3544 3543 intel_ddi_get_config(encoder, crtc_state); 3545 3544 } 3546 3545 3546 + static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll) 3547 + { 3548 + return pll->info->id == DPLL_ID_ICL_TBTPLL; 3549 + } 3550 + 3551 + static enum icl_port_dpll_id 3552 + icl_ddi_tc_port_pll_type(struct intel_encoder *encoder, 3553 + const struct intel_crtc_state *crtc_state) 3554 + { 3555 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 3556 + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; 3557 + 3558 + if (drm_WARN_ON(&i915->drm, !pll)) 3559 + return ICL_PORT_DPLL_DEFAULT; 3560 + 3561 + if (icl_ddi_tc_pll_is_tbt(pll)) 3562 + return 
ICL_PORT_DPLL_DEFAULT; 3563 + else 3564 + return ICL_PORT_DPLL_MG_PHY; 3565 + } 3566 + 3567 + enum icl_port_dpll_id 3568 + intel_ddi_port_pll_type(struct intel_encoder *encoder, 3569 + const struct intel_crtc_state *crtc_state) 3570 + { 3571 + if (!encoder->port_pll_type) 3572 + return ICL_PORT_DPLL_DEFAULT; 3573 + 3574 + return encoder->port_pll_type(encoder, crtc_state); 3575 + } 3576 + 3547 3577 static void icl_ddi_tc_get_clock(struct intel_encoder *encoder, 3548 3578 struct intel_crtc_state *crtc_state, 3549 3579 struct intel_shared_dpll *pll) ··· 3587 3555 if (drm_WARN_ON(&i915->drm, !pll)) 3588 3556 return; 3589 3557 3590 - if (pll->info->id == DPLL_ID_ICL_TBTPLL) 3558 + if (icl_ddi_tc_pll_is_tbt(pll)) 3591 3559 port_dpll_id = ICL_PORT_DPLL_DEFAULT; 3592 3560 else 3593 3561 port_dpll_id = ICL_PORT_DPLL_MG_PHY; ··· 3600 3568 3601 3569 icl_set_active_port_dpll(crtc_state, port_dpll_id); 3602 3570 3603 - if (crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL) 3571 + if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll)) 3604 3572 crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port); 3605 3573 else 3606 3574 crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll, ··· 3642 3610 enum phy phy = intel_port_to_phy(i915, encoder->port); 3643 3611 3644 3612 if (intel_phy_is_tc(i915, phy)) 3645 - intel_tc_port_sanitize_mode(enc_to_dig_port(encoder)); 3613 + intel_tc_port_sanitize_mode(enc_to_dig_port(encoder), 3614 + crtc_state); 3646 3615 3647 3616 if (crtc_state && intel_crtc_has_dp_encoder(crtc_state)) 3648 3617 intel_dp_sync_state(encoder, crtc_state); ··· 4437 4404 encoder->enable_clock = jsl_ddi_tc_enable_clock; 4438 4405 encoder->disable_clock = jsl_ddi_tc_disable_clock; 4439 4406 encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled; 4407 + encoder->port_pll_type = icl_ddi_tc_port_pll_type; 4440 4408 encoder->get_config = icl_ddi_combo_get_config; 4441 4409 } else { 4442 4410 encoder->enable_clock = 
icl_ddi_combo_enable_clock; ··· 4450 4416 encoder->enable_clock = icl_ddi_tc_enable_clock; 4451 4417 encoder->disable_clock = icl_ddi_tc_disable_clock; 4452 4418 encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled; 4419 + encoder->port_pll_type = icl_ddi_tc_port_pll_type; 4453 4420 encoder->get_config = icl_ddi_tc_get_config; 4454 4421 } else { 4455 4422 encoder->enable_clock = icl_ddi_combo_enable_clock; ··· 4530 4495 bool is_legacy = 4531 4496 !intel_bios_encoder_supports_typec_usb(devdata) && 4532 4497 !intel_bios_encoder_supports_tbt(devdata); 4498 + 4499 + if (!is_legacy && init_hdmi) { 4500 + is_legacy = !init_dp; 4501 + 4502 + drm_dbg_kms(&dev_priv->drm, 4503 + "VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n", 4504 + port_name(port), 4505 + str_yes_no(init_dp), 4506 + is_legacy ? "legacy" : "non-legacy"); 4507 + } 4533 4508 4534 4509 intel_tc_port_init(dig_port, is_legacy); 4535 4510
+3
drivers/gpu/drm/i915/display/intel_ddi.h
··· 40 40 const struct intel_crtc_state *crtc_state); 41 41 void hsw_ddi_disable_clock(struct intel_encoder *encoder); 42 42 bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder); 43 + enum icl_port_dpll_id 44 + intel_ddi_port_pll_type(struct intel_encoder *encoder, 45 + const struct intel_crtc_state *crtc_state); 43 46 void hsw_ddi_get_config(struct intel_encoder *encoder, 44 47 struct intel_crtc_state *crtc_state); 45 48 struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder);
+61 -96
drivers/gpu/drm/i915/display/intel_display.c
··· 111 111 #include "intel_quirks.h" 112 112 #include "intel_sdvo.h" 113 113 #include "intel_snps_phy.h" 114 - #include "intel_sprite.h" 115 114 #include "intel_tc.h" 116 115 #include "intel_tv.h" 117 116 #include "intel_vblank.h" ··· 130 131 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); 131 132 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); 132 133 static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); 133 - static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state); 134 + static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); 134 135 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); 135 136 136 137 /* returns HPLL frequency in kHz */ ··· 1115 1116 if (needs_cursorclk_wa(old_crtc_state) && 1116 1117 !needs_cursorclk_wa(new_crtc_state)) 1117 1118 icl_wa_cursorclkgating(dev_priv, pipe, false); 1119 + 1120 + if (intel_crtc_needs_color_update(new_crtc_state)) 1121 + intel_color_post_update(new_crtc_state); 1118 1122 } 1119 1123 1120 1124 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, ··· 1795 1793 1796 1794 intel_set_pipe_src_size(new_crtc_state); 1797 1795 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 1798 - bdw_set_pipemisc(new_crtc_state); 1796 + bdw_set_pipe_misc(new_crtc_state); 1799 1797 1800 1798 if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) && 1801 1799 !transcoder_is_dsi(cpu_transcoder)) ··· 2140 2138 i9xx_configure_cpu_transcoder(new_crtc_state); 2141 2139 2142 2140 intel_set_pipe_src_size(new_crtc_state); 2141 + 2142 + intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0); 2143 2143 2144 2144 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 2145 2145 intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY); ··· 3078 3074 } 3079 3075 3080 3076 static enum intel_output_format 3081 - bdw_get_pipemisc_output_format(struct intel_crtc *crtc) 3077 + 
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc) 3082 3078 { 3083 3079 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3084 3080 u32 tmp; 3085 3081 3086 - tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); 3082 + tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 3087 3083 3088 - if (tmp & PIPEMISC_YUV420_ENABLE) { 3084 + if (tmp & PIPE_MISC_YUV420_ENABLE) { 3089 3085 /* We support 4:2:0 in full blend mode only */ 3090 3086 drm_WARN_ON(&dev_priv->drm, 3091 - (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); 3087 + (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0); 3092 3088 3093 3089 return INTEL_OUTPUT_FORMAT_YCBCR420; 3094 - } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { 3090 + } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) { 3095 3091 return INTEL_OUTPUT_FORMAT_YCBCR444; 3096 3092 } else { 3097 3093 return INTEL_OUTPUT_FORMAT_RGB; ··· 3334 3330 intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); 3335 3331 } 3336 3332 3337 - static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) 3333 + static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state) 3338 3334 { 3339 3335 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3340 3336 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); ··· 3342 3338 3343 3339 switch (crtc_state->pipe_bpp) { 3344 3340 case 18: 3345 - val |= PIPEMISC_BPC_6; 3341 + val |= PIPE_MISC_BPC_6; 3346 3342 break; 3347 3343 case 24: 3348 - val |= PIPEMISC_BPC_8; 3344 + val |= PIPE_MISC_BPC_8; 3349 3345 break; 3350 3346 case 30: 3351 - val |= PIPEMISC_BPC_10; 3347 + val |= PIPE_MISC_BPC_10; 3352 3348 break; 3353 3349 case 36: 3354 3350 /* Port output 12BPC defined for ADLP+ */ 3355 3351 if (DISPLAY_VER(dev_priv) > 12) 3356 - val |= PIPEMISC_BPC_12_ADLP; 3352 + val |= PIPE_MISC_BPC_12_ADLP; 3357 3353 break; 3358 3354 default: 3359 3355 MISSING_CASE(crtc_state->pipe_bpp); ··· 3361 3357 } 3362 3358 3363 3359 if (crtc_state->dither) 3364 - val |= PIPEMISC_DITHER_ENABLE 
| PIPEMISC_DITHER_TYPE_SP; 3360 + val |= PIPE_MISC_DITHER_ENABLE | PIPE_MISC_DITHER_TYPE_SP; 3365 3361 3366 3362 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 3367 3363 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) 3368 - val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; 3364 + val |= PIPE_MISC_OUTPUT_COLORSPACE_YUV; 3369 3365 3370 3366 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 3371 - val |= PIPEMISC_YUV420_ENABLE | 3372 - PIPEMISC_YUV420_MODE_FULL_BLEND; 3367 + val |= PIPE_MISC_YUV420_ENABLE | 3368 + PIPE_MISC_YUV420_MODE_FULL_BLEND; 3373 3369 3374 3370 if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state)) 3375 - val |= PIPEMISC_HDR_MODE_PRECISION; 3371 + val |= PIPE_MISC_HDR_MODE_PRECISION; 3376 3372 3377 3373 if (DISPLAY_VER(dev_priv) >= 12) 3378 - val |= PIPEMISC_PIXEL_ROUNDING_TRUNC; 3374 + val |= PIPE_MISC_PIXEL_ROUNDING_TRUNC; 3379 3375 3380 - intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val); 3376 + intel_de_write(dev_priv, PIPE_MISC(crtc->pipe), val); 3381 3377 } 3382 3378 3383 - int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) 3379 + int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc) 3384 3380 { 3385 3381 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3386 3382 u32 tmp; 3387 3383 3388 - tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); 3384 + tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); 3389 3385 3390 - switch (tmp & PIPEMISC_BPC_MASK) { 3391 - case PIPEMISC_BPC_6: 3386 + switch (tmp & PIPE_MISC_BPC_MASK) { 3387 + case PIPE_MISC_BPC_6: 3392 3388 return 18; 3393 - case PIPEMISC_BPC_8: 3389 + case PIPE_MISC_BPC_8: 3394 3390 return 24; 3395 - case PIPEMISC_BPC_10: 3391 + case PIPE_MISC_BPC_10: 3396 3392 return 30; 3397 3393 /* 3398 3394 * PORT OUTPUT 12 BPC defined for ADLP+. ··· 3404 3400 * on older platforms, need to find a workaround for 12 BPC 3405 3401 * MIPI DSI HW readout. 
3406 3402 */ 3407 - case PIPEMISC_BPC_12_ADLP: 3403 + case PIPE_MISC_BPC_12_ADLP: 3408 3404 if (DISPLAY_VER(dev_priv) > 12) 3409 3405 return 36; 3410 3406 fallthrough; ··· 3985 3981 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; 3986 3982 } else { 3987 3983 pipe_config->output_format = 3988 - bdw_get_pipemisc_output_format(crtc); 3984 + bdw_get_pipe_misc_output_format(crtc); 3989 3985 } 3990 3986 3991 3987 pipe_config->gamma_mode = intel_de_read(dev_priv, ··· 5083 5079 * only fields that are know to not cause problems are preserved. */ 5084 5080 5085 5081 saved_state->uapi = crtc_state->uapi; 5082 + saved_state->inherited = crtc_state->inherited; 5086 5083 saved_state->scaler_state = crtc_state->scaler_state; 5087 5084 saved_state->shared_dpll = crtc_state->shared_dpll; 5088 5085 saved_state->dpll_hw_state = crtc_state->dpll_hw_state; ··· 5907 5902 } 5908 5903 5909 5904 return 0; 5910 - } 5911 - 5912 - void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) 5913 - { 5914 - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 5915 - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5916 - struct drm_display_mode adjusted_mode; 5917 - 5918 - drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode); 5919 - 5920 - if (crtc_state->vrr.enable) { 5921 - adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax; 5922 - adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax; 5923 - adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state); 5924 - crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state); 5925 - } 5926 - 5927 - drm_calc_timestamping_constants(&crtc->base, &adjusted_mode); 5928 - 5929 - crtc->mode_flags = crtc_state->mode_flags; 5930 - 5931 - /* 5932 - * The scanline counter increments at the leading edge of hsync. 5933 - * 5934 - * On most platforms it starts counting from vtotal-1 on the 5935 - * first active line. 
That means the scanline counter value is 5936 - * always one less than what we would expect. Ie. just after 5937 - * start of vblank, which also occurs at start of hsync (on the 5938 - * last active line), the scanline counter will read vblank_start-1. 5939 - * 5940 - * On gen2 the scanline counter starts counting from 1 instead 5941 - * of vtotal-1, so we have to subtract one (or rather add vtotal-1 5942 - * to keep the value positive), instead of adding one. 5943 - * 5944 - * On HSW+ the behaviour of the scanline counter depends on the output 5945 - * type. For DP ports it behaves like most other platforms, but on HDMI 5946 - * there's an extra 1 line difference. So we need to add two instead of 5947 - * one to the value. 5948 - * 5949 - * On VLV/CHV DSI the scanline counter would appear to increment 5950 - * approx. 1/3 of a scanline before start of vblank. Unfortunately 5951 - * that means we can't tell whether we're in vblank or not while 5952 - * we're on that particular line. We must still set scanline_offset 5953 - * to 1 so that the vblank timestamps come out correct when we query 5954 - * the scanline counter from within the vblank interrupt handler. 5955 - * However if queried just before the start of vblank we'll get an 5956 - * answer that's slightly in the future. 
5957 - */ 5958 - if (DISPLAY_VER(dev_priv) == 2) { 5959 - int vtotal; 5960 - 5961 - vtotal = adjusted_mode.crtc_vtotal; 5962 - if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 5963 - vtotal /= 2; 5964 - 5965 - crtc->scanline_offset = vtotal - 1; 5966 - } else if (HAS_DDI(dev_priv) && 5967 - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 5968 - crtc->scanline_offset = 2; 5969 - } else { 5970 - crtc->scanline_offset = 1; 5971 - } 5972 5905 } 5973 5906 5974 5907 /* ··· 6913 6970 intel_color_commit_arm(new_crtc_state); 6914 6971 6915 6972 if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) 6916 - bdw_set_pipemisc(new_crtc_state); 6973 + bdw_set_pipe_misc(new_crtc_state); 6917 6974 6918 6975 if (intel_crtc_needs_fastset(new_crtc_state)) 6919 6976 intel_pipe_fastset(old_crtc_state, new_crtc_state); ··· 6988 7045 } 6989 7046 6990 7047 intel_fbc_update(state, crtc); 7048 + 7049 + drm_WARN_ON(&i915->drm, !intel_display_power_is_enabled(i915, POWER_DOMAIN_DC_OFF)); 6991 7050 6992 7051 if (!modeset && 6993 7052 intel_crtc_needs_color_update(new_crtc_state)) ··· 7358 7413 drm_atomic_helper_wait_for_dependencies(&state->base); 7359 7414 drm_dp_mst_atomic_wait_for_dependencies(&state->base); 7360 7415 7361 - if (state->modeset) 7362 - wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 7416 + /* 7417 + * During full modesets we write a lot of registers, wait 7418 + * for PLLs, etc. Doing that while DC states are enabled 7419 + * is not a good idea. 7420 + * 7421 + * During fastsets and other updates we also need to 7422 + * disable DC states due to the following scenario: 7423 + * 1. DC5 exit and PSR exit happen 7424 + * 2. Some or all _noarm() registers are written 7425 + * 3. Due to some long delay PSR is re-entered 7426 + * 4. DC5 entry -> DMC saves the already written new 7427 + * _noarm() registers and the old not yet written 7428 + * _arm() registers 7429 + * 5. 
DC5 exit -> DMC restores a mixture of old and 7430 + * new register values and arms the update 7431 + * 6. PSR exit -> hardware latches a mixture of old and 7432 + * new register values -> corrupted frame, or worse 7433 + * 7. New _arm() registers are finally written 7434 + * 8. Hardware finally latches a complete set of new 7435 + * register values, and subsequent frames will be OK again 7436 + */ 7437 + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DC_OFF); 7363 7438 7364 7439 intel_atomic_prepare_plane_clear_colors(state); 7365 7440 ··· 7528 7563 * the culprit. 7529 7564 */ 7530 7565 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); 7531 - intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); 7532 7566 } 7567 + intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, wakeref); 7533 7568 intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); 7534 7569 7535 7570 /* ··· 8851 8886 if (!HAS_DISPLAY(i915)) 8852 8887 return; 8853 8888 8854 - intel_display_debugfs_register(i915); 8855 - 8856 8889 /* Must be done after probing outputs */ 8857 8890 intel_opregion_register(i915); 8858 8891 intel_acpi_video_register(i915); 8859 8892 8860 8893 intel_audio_init(i915); 8894 + 8895 + intel_display_debugfs_register(i915); 8861 8896 8862 8897 /* 8863 8898 * Some ports require correctly set-up hpd registers for
+1 -2
drivers/gpu/drm/i915/display/intel_display.h
··· 422 422 bool intel_pipe_config_compare(const struct intel_crtc_state *current_config, 423 423 const struct intel_crtc_state *pipe_config, 424 424 bool fastset); 425 - void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state); 426 425 427 426 void intel_plane_destroy(struct drm_plane *plane); 428 427 void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); ··· 510 511 struct intel_crtc_state *crtc_state); 511 512 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state); 512 513 513 - int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); 514 + int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc); 514 515 unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state); 515 516 516 517 bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
+18 -1
drivers/gpu/drm/i915/display/intel_display_core.h
··· 183 183 * blocked behind the non-DP one. 184 184 */ 185 185 struct workqueue_struct *dp_wq; 186 + 187 + /* 188 + * Flag to track if long HPDs need not to be processed 189 + * 190 + * Some panels generate long HPDs while keep connected to the port. 191 + * This can cause issues with CI tests results. In CI systems we 192 + * don't expect to disconnect the panels and could ignore the long 193 + * HPDs generated from the faulty panels. This flag can be used as 194 + * cue to ignore the long HPDs and can be set / unset using debugfs. 195 + */ 196 + bool ignore_long_hpd; 186 197 }; 187 198 188 199 struct intel_vbt_data { ··· 395 384 } gmbus; 396 385 397 386 struct { 398 - struct i915_hdcp_comp_master *master; 387 + struct i915_hdcp_master *master; 399 388 bool comp_added; 400 389 390 + /* 391 + * HDCP message struct for allocation of memory which can be 392 + * reused when sending message to gsc cs. 393 + * this is only populated post Meteorlake 394 + */ 395 + struct intel_hdcp_gsc_message *hdcp_message; 401 396 /* Mutex to protect the above hdcp component related values. */ 402 397 struct mutex comp_mutex; 403 398 } hdcp;
+30 -323
drivers/gpu/drm/i915/display/intel_display_debugfs.c
··· 8 8 #include <drm/drm_debugfs.h> 9 9 #include <drm/drm_fourcc.h> 10 10 11 + #include "hsw_ips.h" 11 12 #include "i915_debugfs.h" 12 13 #include "i915_irq.h" 13 14 #include "i915_reg.h" ··· 45 44 46 45 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 47 46 dev_priv->display.fb_tracking.flip_bits); 48 - 49 - return 0; 50 - } 51 - 52 - static int i915_ips_status(struct seq_file *m, void *unused) 53 - { 54 - struct drm_i915_private *dev_priv = node_to_i915(m->private); 55 - intel_wakeref_t wakeref; 56 - 57 - if (!HAS_IPS(dev_priv)) 58 - return -ENODEV; 59 - 60 - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 61 - 62 - seq_printf(m, "Enabled by kernel parameter: %s\n", 63 - str_yes_no(dev_priv->params.enable_ips)); 64 - 65 - if (DISPLAY_VER(dev_priv) >= 8) { 66 - seq_puts(m, "Currently: unknown\n"); 67 - } else { 68 - if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE) 69 - seq_puts(m, "Currently: enabled\n"); 70 - else 71 - seq_puts(m, "Currently: disabled\n"); 72 - } 73 - 74 - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 75 47 76 48 return 0; 77 49 } ··· 141 167 142 168 return 0; 143 169 } 144 - 145 - static int i915_psr_sink_status_show(struct seq_file *m, void *data) 146 - { 147 - u8 val; 148 - static const char * const sink_status[] = { 149 - "inactive", 150 - "transition to active, capture and display", 151 - "active, display from RFB", 152 - "active, capture and display on sink device timings", 153 - "transition to inactive, capture and display, timing re-sync", 154 - "reserved", 155 - "reserved", 156 - "sink internal error", 157 - }; 158 - struct drm_connector *connector = m->private; 159 - struct intel_dp *intel_dp = 160 - intel_attached_dp(to_intel_connector(connector)); 161 - int ret; 162 - 163 - if (!CAN_PSR(intel_dp)) { 164 - seq_puts(m, "PSR Unsupported\n"); 165 - return -ENODEV; 166 - } 167 - 168 - if (connector->status != connector_status_connected) 169 - return -ENODEV; 170 - 171 - ret = drm_dp_dpcd_readb(&intel_dp->aux, 
DP_PSR_STATUS, &val); 172 - 173 - if (ret == 1) { 174 - const char *str = "unknown"; 175 - 176 - val &= DP_PSR_SINK_STATE_MASK; 177 - if (val < ARRAY_SIZE(sink_status)) 178 - str = sink_status[val]; 179 - seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); 180 - } else { 181 - return ret; 182 - } 183 - 184 - return 0; 185 - } 186 - DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); 187 - 188 - static void 189 - psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) 190 - { 191 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 192 - const char *status = "unknown"; 193 - u32 val, status_val; 194 - 195 - if (intel_dp->psr.psr2_enabled) { 196 - static const char * const live_status[] = { 197 - "IDLE", 198 - "CAPTURE", 199 - "CAPTURE_FS", 200 - "SLEEP", 201 - "BUFON_FW", 202 - "ML_UP", 203 - "SU_STANDBY", 204 - "FAST_SLEEP", 205 - "DEEP_SLEEP", 206 - "BUF_ON", 207 - "TG_ON" 208 - }; 209 - val = intel_de_read(dev_priv, 210 - EDP_PSR2_STATUS(intel_dp->psr.transcoder)); 211 - status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); 212 - if (status_val < ARRAY_SIZE(live_status)) 213 - status = live_status[status_val]; 214 - } else { 215 - static const char * const live_status[] = { 216 - "IDLE", 217 - "SRDONACK", 218 - "SRDENT", 219 - "BUFOFF", 220 - "BUFON", 221 - "AUXACK", 222 - "SRDOFFACK", 223 - "SRDENT_ON", 224 - }; 225 - val = intel_de_read(dev_priv, 226 - EDP_PSR_STATUS(intel_dp->psr.transcoder)); 227 - status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> 228 - EDP_PSR_STATUS_STATE_SHIFT; 229 - if (status_val < ARRAY_SIZE(live_status)) 230 - status = live_status[status_val]; 231 - } 232 - 233 - seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); 234 - } 235 - 236 - static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) 237 - { 238 - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 239 - struct intel_psr *psr = &intel_dp->psr; 240 - intel_wakeref_t wakeref; 241 - const char *status; 242 - bool enabled; 243 
- u32 val; 244 - 245 - seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support)); 246 - if (psr->sink_support) 247 - seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]); 248 - seq_puts(m, "\n"); 249 - 250 - if (!psr->sink_support) 251 - return 0; 252 - 253 - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 254 - mutex_lock(&psr->lock); 255 - 256 - if (psr->enabled) 257 - status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled"; 258 - else 259 - status = "disabled"; 260 - seq_printf(m, "PSR mode: %s\n", status); 261 - 262 - if (!psr->enabled) { 263 - seq_printf(m, "PSR sink not reliable: %s\n", 264 - str_yes_no(psr->sink_not_reliable)); 265 - 266 - goto unlock; 267 - } 268 - 269 - if (psr->psr2_enabled) { 270 - val = intel_de_read(dev_priv, 271 - EDP_PSR2_CTL(intel_dp->psr.transcoder)); 272 - enabled = val & EDP_PSR2_ENABLE; 273 - } else { 274 - val = intel_de_read(dev_priv, 275 - EDP_PSR_CTL(intel_dp->psr.transcoder)); 276 - enabled = val & EDP_PSR_ENABLE; 277 - } 278 - seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", 279 - str_enabled_disabled(enabled), val); 280 - psr_source_status(intel_dp, m); 281 - seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", 282 - psr->busy_frontbuffer_bits); 283 - 284 - /* 285 - * SKL+ Perf counter is reset to 0 everytime DC state is entered 286 - */ 287 - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 288 - val = intel_de_read(dev_priv, 289 - EDP_PSR_PERF_CNT(intel_dp->psr.transcoder)); 290 - val &= EDP_PSR_PERF_CNT_MASK; 291 - seq_printf(m, "Performance counter: %u\n", val); 292 - } 293 - 294 - if (psr->debug & I915_PSR_DEBUG_IRQ) { 295 - seq_printf(m, "Last attempted entry at: %lld\n", 296 - psr->last_entry_attempt); 297 - seq_printf(m, "Last exit at: %lld\n", psr->last_exit); 298 - } 299 - 300 - if (psr->psr2_enabled) { 301 - u32 su_frames_val[3]; 302 - int frame; 303 - 304 - /* 305 - * Reading all 3 registers before hand to minimize crossing a 306 - * frame boundary between register reads 307 - */ 308 - for 
(frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { 309 - val = intel_de_read(dev_priv, 310 - PSR2_SU_STATUS(intel_dp->psr.transcoder, frame)); 311 - su_frames_val[frame / 3] = val; 312 - } 313 - 314 - seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); 315 - 316 - for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { 317 - u32 su_blocks; 318 - 319 - su_blocks = su_frames_val[frame / 3] & 320 - PSR2_SU_STATUS_MASK(frame); 321 - su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); 322 - seq_printf(m, "%d\t%d\n", frame, su_blocks); 323 - } 324 - 325 - seq_printf(m, "PSR2 selective fetch: %s\n", 326 - str_enabled_disabled(psr->psr2_sel_fetch_enabled)); 327 - } 328 - 329 - unlock: 330 - mutex_unlock(&psr->lock); 331 - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 332 - 333 - return 0; 334 - } 335 - 336 - static int i915_edp_psr_status(struct seq_file *m, void *data) 337 - { 338 - struct drm_i915_private *dev_priv = node_to_i915(m->private); 339 - struct intel_dp *intel_dp = NULL; 340 - struct intel_encoder *encoder; 341 - 342 - if (!HAS_PSR(dev_priv)) 343 - return -ENODEV; 344 - 345 - /* Find the first EDP which supports PSR */ 346 - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 347 - intel_dp = enc_to_intel_dp(encoder); 348 - break; 349 - } 350 - 351 - if (!intel_dp) 352 - return -ENODEV; 353 - 354 - return intel_psr_status(m, intel_dp); 355 - } 356 - 357 - static int 358 - i915_edp_psr_debug_set(void *data, u64 val) 359 - { 360 - struct drm_i915_private *dev_priv = data; 361 - struct intel_encoder *encoder; 362 - intel_wakeref_t wakeref; 363 - int ret = -ENODEV; 364 - 365 - if (!HAS_PSR(dev_priv)) 366 - return ret; 367 - 368 - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 369 - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 370 - 371 - drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); 372 - 373 - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 374 - 375 - // TODO: split to each transcoder's PSR debug 
state 376 - ret = intel_psr_debug_set(intel_dp, val); 377 - 378 - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 379 - } 380 - 381 - return ret; 382 - } 383 - 384 - static int 385 - i915_edp_psr_debug_get(void *data, u64 *val) 386 - { 387 - struct drm_i915_private *dev_priv = data; 388 - struct intel_encoder *encoder; 389 - 390 - if (!HAS_PSR(dev_priv)) 391 - return -ENODEV; 392 - 393 - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 394 - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 395 - 396 - // TODO: split to each transcoder's PSR debug state 397 - *val = READ_ONCE(intel_dp->psr.debug); 398 - return 0; 399 - } 400 - 401 - return -ENODEV; 402 - } 403 - 404 - DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, 405 - i915_edp_psr_debug_get, i915_edp_psr_debug_set, 406 - "%llu\n"); 407 170 408 171 static int i915_power_domain_info(struct seq_file *m, void *unused) 409 172 { ··· 542 831 .write = crtc_updates_write 543 832 }; 544 833 545 - static void crtc_updates_add(struct drm_crtc *crtc) 834 + static void crtc_updates_add(struct intel_crtc *crtc) 546 835 { 547 - debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry, 548 - to_intel_crtc(crtc), &crtc_updates_fops); 836 + debugfs_create_file("i915_update_info", 0644, crtc->base.debugfs_entry, 837 + crtc, &crtc_updates_fops); 549 838 } 550 839 551 840 #else ··· 555 844 { 556 845 } 557 846 558 - static void crtc_updates_add(struct drm_crtc *crtc) 847 + static void crtc_updates_add(struct intel_crtc *crtc) 559 848 { 560 849 } 561 850 #endif ··· 1053 1342 1054 1343 static const struct drm_info_list intel_display_debugfs_list[] = { 1055 1344 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 1056 - {"i915_ips_status", i915_ips_status, 0}, 1057 1345 {"i915_sr_status", i915_sr_status, 0}, 1058 1346 {"i915_opregion", i915_opregion, 0}, 1059 1347 {"i915_vbt", i915_vbt, 0}, 1060 1348 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 1061 - {"i915_edp_psr_status", 
i915_edp_psr_status, 0}, 1062 1349 {"i915_power_domain_info", i915_power_domain_info, 0}, 1063 1350 {"i915_display_info", i915_display_info, 0}, 1064 1351 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, ··· 1073 1364 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 1074 1365 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 1075 1366 {"i915_dp_test_active", &i915_displayport_test_active_fops}, 1076 - {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}, 1077 1367 }; 1078 1368 1079 1369 void intel_display_debugfs_register(struct drm_i915_private *i915) ··· 1092 1384 ARRAY_SIZE(intel_display_debugfs_list), 1093 1385 minor->debugfs_root, minor); 1094 1386 1387 + hsw_ips_debugfs_register(i915); 1095 1388 intel_dmc_debugfs_register(i915); 1096 1389 intel_fbc_debugfs_register(i915); 1097 1390 intel_hpd_debugfs_register(i915); 1391 + intel_psr_debugfs_register(i915); 1098 1392 intel_wm_debugfs_register(i915); 1099 1393 } 1100 1394 ··· 1148 1438 return ret; 1149 1439 } 1150 1440 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); 1151 - 1152 - static int i915_psr_status_show(struct seq_file *m, void *data) 1153 - { 1154 - struct drm_connector *connector = m->private; 1155 - struct intel_dp *intel_dp = 1156 - intel_attached_dp(to_intel_connector(connector)); 1157 - 1158 - return intel_psr_status(m, intel_dp); 1159 - } 1160 - DEFINE_SHOW_ATTRIBUTE(i915_psr_status); 1161 1441 1162 1442 static int i915_lpsp_capability_show(struct seq_file *m, void *data) 1163 1443 { ··· 1366 1666 */ 1367 1667 static int i915_current_bpc_show(struct seq_file *m, void *data) 1368 1668 { 1369 - struct intel_crtc *crtc = to_intel_crtc(m->private); 1669 + struct intel_crtc *crtc = m->private; 1370 1670 struct intel_crtc_state *crtc_state; 1371 1671 int ret; 1372 1672 ··· 1382 1682 return ret; 1383 1683 } 1384 1684 DEFINE_SHOW_ATTRIBUTE(i915_current_bpc); 1685 + 1686 + /* Pipe may differ from crtc index if pipes are fused off */ 1687 + static int intel_crtc_pipe_show(struct seq_file 
*m, void *unused) 1688 + { 1689 + struct intel_crtc *crtc = m->private; 1690 + 1691 + seq_printf(m, "%c\n", pipe_name(crtc->pipe)); 1692 + 1693 + return 0; 1694 + } 1695 + DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe); 1385 1696 1386 1697 /** 1387 1698 * intel_connector_debugfs_add - add i915 specific connector debugfs files ··· 1412 1701 return; 1413 1702 1414 1703 intel_drrs_connector_debugfs_add(intel_connector); 1704 + intel_psr_connector_debugfs_add(intel_connector); 1415 1705 1416 - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1706 + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 1417 1707 debugfs_create_file("i915_panel_timings", S_IRUGO, root, 1418 1708 connector, &i915_panel_fops); 1419 - debugfs_create_file("i915_psr_sink_status", S_IRUGO, root, 1420 - connector, &i915_psr_sink_status_fops); 1421 - } 1422 - 1423 - if (HAS_PSR(dev_priv) && 1424 - connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1425 - debugfs_create_file("i915_psr_status", 0444, root, 1426 - connector, &i915_psr_status_fops); 1427 - } 1428 1709 1429 1710 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 1430 1711 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || ··· 1451 1748 * 1452 1749 * Failure to add debugfs entries should generally be ignored. 
1453 1750 */ 1454 - void intel_crtc_debugfs_add(struct drm_crtc *crtc) 1751 + void intel_crtc_debugfs_add(struct intel_crtc *crtc) 1455 1752 { 1456 - if (!crtc->debugfs_entry) 1753 + struct dentry *root = crtc->base.debugfs_entry; 1754 + 1755 + if (!root) 1457 1756 return; 1458 1757 1459 1758 crtc_updates_add(crtc); 1460 - intel_drrs_crtc_debugfs_add(to_intel_crtc(crtc)); 1461 - intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc)); 1759 + intel_drrs_crtc_debugfs_add(crtc); 1760 + intel_fbc_crtc_debugfs_add(crtc); 1462 1761 1463 - debugfs_create_file("i915_current_bpc", 0444, crtc->debugfs_entry, crtc, 1762 + debugfs_create_file("i915_current_bpc", 0444, root, crtc, 1464 1763 &i915_current_bpc_fops); 1764 + debugfs_create_file("i915_pipe", 0444, root, crtc, 1765 + &intel_crtc_pipe_fops); 1465 1766 }
+3 -3
drivers/gpu/drm/i915/display/intel_display_debugfs.h
··· 6 6 #ifndef __INTEL_DISPLAY_DEBUGFS_H__ 7 7 #define __INTEL_DISPLAY_DEBUGFS_H__ 8 8 9 - struct drm_crtc; 10 9 struct drm_i915_private; 11 10 struct intel_connector; 11 + struct intel_crtc; 12 12 13 13 #ifdef CONFIG_DEBUG_FS 14 14 void intel_display_debugfs_register(struct drm_i915_private *i915); 15 15 void intel_connector_debugfs_add(struct intel_connector *connector); 16 - void intel_crtc_debugfs_add(struct drm_crtc *crtc); 16 + void intel_crtc_debugfs_add(struct intel_crtc *crtc); 17 17 #else 18 18 static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {} 19 19 static inline void intel_connector_debugfs_add(struct intel_connector *connector) {} 20 - static inline void intel_crtc_debugfs_add(struct drm_crtc *crtc) {} 20 + static inline void intel_crtc_debugfs_add(struct intel_crtc *crtc) {} 21 21 #endif 22 22 23 23 #endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
+8
drivers/gpu/drm/i915/display/intel_display_power.c
··· 1625 1625 intel_power_well_enable(dev_priv, well); 1626 1626 mutex_unlock(&power_domains->lock); 1627 1627 1628 + if (DISPLAY_VER(dev_priv) == 14) 1629 + intel_de_rmw(dev_priv, DC_STATE_EN, 1630 + HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH, 0); 1631 + 1628 1632 /* 4. Enable CDCLK. */ 1629 1633 intel_cdclk_init_hw(dev_priv); 1630 1634 ··· 1681 1677 1682 1678 /* 3. Disable CD clock */ 1683 1679 intel_cdclk_uninit_hw(dev_priv); 1680 + 1681 + if (DISPLAY_VER(dev_priv) == 14) 1682 + intel_de_rmw(dev_priv, DC_STATE_EN, 0, 1683 + HOLD_PHY_PG1_LATCH | HOLD_PHY_CLKREQ_PG1_LATCH); 1684 1684 1685 1685 /* 1686 1686 * 4. Disable Power Well 1 (PG1).
+7 -1
drivers/gpu/drm/i915/display/intel_display_types.h
··· 43 43 #include <drm/drm_rect.h> 44 44 #include <drm/drm_vblank.h> 45 45 #include <drm/drm_vblank_work.h> 46 - #include <drm/i915_mei_hdcp_interface.h> 46 + #include <drm/i915_hdcp_interface.h> 47 47 #include <media/cec-notifier.h> 48 48 49 49 #include "i915_vma.h" ··· 255 255 * Returns whether the port clock is enabled or not. 256 256 */ 257 257 bool (*is_clock_enabled)(struct intel_encoder *encoder); 258 + /* 259 + * Returns the PLL type the port uses. 260 + */ 261 + enum icl_port_dpll_id (*port_pll_type)(struct intel_encoder *encoder, 262 + const struct intel_crtc_state *crtc_state); 258 263 const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder, 259 264 const struct intel_crtc_state *crtc_state, 260 265 int *n_entries); ··· 1788 1783 bool tc_legacy_port:1; 1789 1784 char tc_port_name[8]; 1790 1785 enum tc_port_mode tc_mode; 1786 + enum tc_port_mode tc_init_mode; 1791 1787 enum phy_fia tc_phy_fia; 1792 1788 u8 tc_phy_fia_idx; 1793 1789
+29 -7
drivers/gpu/drm/i915/display/intel_dmc.c
··· 89 89 __stringify(major) "_" \ 90 90 __stringify(minor) ".bin" 91 91 92 + #define XELPDP_DMC_MAX_FW_SIZE 0x7000 92 93 #define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000 93 - 94 94 #define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE 95 + 96 + #define MTL_DMC_PATH DMC_PATH(mtl) 97 + MODULE_FIRMWARE(MTL_DMC_PATH); 95 98 96 99 #define DG2_DMC_PATH DMC_LEGACY_PATH(dg2, 2, 08) 97 100 MODULE_FIRMWARE(DG2_DMC_PATH); ··· 427 424 } 428 425 } 429 426 430 - static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) 427 + static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) 431 428 { 432 429 enum pipe pipe; 433 430 434 - if (DISPLAY_VER(i915) < 13) 435 - return; 436 - 437 431 /* 438 - * Wa_16015201720:adl-p,dg2, mtl 432 + * Wa_16015201720:adl-p,dg2 439 433 * The WA requires clock gating to be disabled all the time 440 434 * for pipe A and B. 441 435 * For pipe C and D clock gating needs to be disabled only ··· 446 446 for (pipe = PIPE_C; pipe <= PIPE_D; pipe++) 447 447 intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe), 448 448 PIPEDMC_GATING_DIS, 0); 449 + } 450 + 451 + static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915) 452 + { 453 + /* 454 + * Wa_16015201720 455 + * The WA requires clock gating to be disabled all the time 456 + * for pipe A and B. 
457 + */ 458 + intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, 459 + MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B); 460 + } 461 + 462 + static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) 463 + { 464 + if (DISPLAY_VER(i915) >= 14 && enable) 465 + mtl_pipedmc_clock_gating_wa(i915); 466 + else if (DISPLAY_VER(i915) == 13) 467 + adlp_pipedmc_clock_gating_wa(i915, enable); 449 468 } 450 469 451 470 void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) ··· 998 979 999 980 INIT_WORK(&dmc->work, dmc_load_work_fn); 1000 981 1001 - if (IS_DG2(i915)) { 982 + if (IS_METEORLAKE(i915)) { 983 + dmc->fw_path = MTL_DMC_PATH; 984 + dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE; 985 + } else if (IS_DG2(i915)) { 1002 986 dmc->fw_path = DG2_DMC_PATH; 1003 987 dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; 1004 988 } else if (IS_ALDERLAKE_P(i915)) {
+27 -12
drivers/gpu/drm/i915/display/intel_dp.c
··· 687 687 /* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */ 688 688 if (DISPLAY_VER(i915) >= 13) { 689 689 bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1); 690 + 691 + /* 692 + * According to BSpec, 27 is the max DSC output bpp, 693 + * 8 is the min DSC output bpp 694 + */ 695 + bits_per_pixel = clamp_t(u32, bits_per_pixel, 8, 27); 690 696 } else { 691 697 /* Find the nearest match in the array of known BPPs from VESA */ 692 698 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { ··· 722 716 * (LinkSymbolClock)* 8 * (TimeSlots / 64) 723 717 * for SST -> TimeSlots is 64(i.e all TimeSlots that are available) 724 718 * for MST -> TimeSlots has to be calculated, based on mode requirements 719 + * 720 + * Due to FEC overhead, the available bw is reduced to 97.2261%. 721 + * To support the given mode: 722 + * Bandwidth required should be <= Available link Bandwidth * FEC Overhead 723 + * =>ModeClock * bits_per_pixel <= Available Link Bandwidth * FEC Overhead 724 + * =>bits_per_pixel <= Available link Bandwidth * FEC Overhead / ModeClock 725 + * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock) * 8 (TimeSlots / 64) / 726 + * (ModeClock / FEC Overhead) 727 + * =>bits_per_pixel <= (NumberOfLanes * LinkSymbolClock * TimeSlots) / 728 + * (ModeClock / FEC Overhead * 8) 725 729 */ 726 - bits_per_pixel = DIV_ROUND_UP((link_clock * lane_count) * timeslots, 727 - intel_dp_mode_to_fec_clock(mode_clock) * 8); 730 + bits_per_pixel = ((link_clock * lane_count) * timeslots) / 731 + (intel_dp_mode_to_fec_clock(mode_clock) * 8); 728 732 729 733 drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots " 730 734 "total bw %u pixel clock %u\n", ··· 786 770 else 787 771 min_slice_count = DIV_ROUND_UP(mode_clock, 788 772 DP_DSC_MAX_ENC_THROUGHPUT_1); 773 + 774 + /* 775 + * Due to some DSC engine BW limitations, we need to enable second 776 + * slice and VDSC engine, whenever we approach close enough to max CDCLK 777 + */ 778 + if (mode_clock >= 
((i915->display.cdclk.max_cdclk_freq * 85) / 100)) 779 + min_slice_count = max_t(u8, min_slice_count, 2); 789 780 790 781 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); 791 782 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { ··· 1620 1597 * is greater than the maximum Cdclock and if slice count is even 1621 1598 * then we need to use 2 VDSC instances. 1622 1599 */ 1623 - if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq || 1624 - pipe_config->bigjoiner_pipes) { 1625 - if (pipe_config->dsc.slice_count > 1) { 1626 - pipe_config->dsc.dsc_split = true; 1627 - } else { 1628 - drm_dbg_kms(&dev_priv->drm, 1629 - "Cannot split stream to use 2 VDSC instances\n"); 1630 - return -EINVAL; 1631 - } 1632 - } 1600 + if (pipe_config->bigjoiner_pipes || pipe_config->dsc.slice_count > 1) 1601 + pipe_config->dsc.dsc_split = true; 1633 1602 1634 1603 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 1635 1604 if (ret < 0) {
+13 -2
drivers/gpu/drm/i915/display/intel_dp_aux.c
··· 205 205 for (i = 0; i < ARRAY_SIZE(ch_data); i++) 206 206 ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); 207 207 208 - if (is_tc_port) 208 + if (is_tc_port) { 209 209 intel_tc_port_lock(dig_port); 210 + /* 211 + * Abort transfers on a disconnected port as required by 212 + * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX 213 + * timeouts that would otherwise happen. 214 + * TODO: abort the transfer on non-TC ports as well. 215 + */ 216 + if (!intel_tc_port_connected_locked(&dig_port->base)) { 217 + ret = -ENXIO; 218 + goto out_unlock; 219 + } 220 + } 210 221 211 222 aux_domain = intel_aux_power_domain(dig_port); 212 223 ··· 378 367 379 368 intel_pps_unlock(intel_dp, pps_wakeref); 380 369 intel_display_power_put_async(i915, aux_domain, aux_wakeref); 381 - 370 + out_unlock: 382 371 if (is_tc_port) 383 372 intel_tc_port_unlock(dig_port); 384 373
+18 -6
drivers/gpu/drm/i915/display/intel_fbdev.c
··· 210 210 bool prealloc = false; 211 211 void __iomem *vaddr; 212 212 struct drm_i915_gem_object *obj; 213 + struct i915_gem_ww_ctx ww; 213 214 int ret; 214 215 215 216 mutex_lock(&ifbdev->hpd_lock); ··· 284 283 info->fix.smem_len = vma->size; 285 284 } 286 285 287 - vaddr = i915_vma_pin_iomap(vma); 288 - if (IS_ERR(vaddr)) { 289 - drm_err(&dev_priv->drm, 290 - "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr); 291 - ret = PTR_ERR(vaddr); 292 - goto out_unpin; 286 + for_i915_gem_ww(&ww, ret, false) { 287 + ret = i915_gem_object_lock(vma->obj, &ww); 288 + 289 + if (ret) 290 + continue; 291 + 292 + vaddr = i915_vma_pin_iomap(vma); 293 + if (IS_ERR(vaddr)) { 294 + drm_err(&dev_priv->drm, 295 + "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr); 296 + ret = PTR_ERR(vaddr); 297 + continue; 298 + } 293 299 } 300 + 301 + if (ret) 302 + goto out_unpin; 303 + 294 304 info->screen_base = vaddr; 295 305 info->screen_size = vma->size; 296 306
+1 -3
drivers/gpu/drm/i915/display/intel_fdi.c
··· 845 845 intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); 846 846 847 847 /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ 848 - intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), 849 - DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK, 850 - DP_TP_CTL_LINK_TRAIN_PAT1); 848 + intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0); 851 849 intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E)); 852 850 853 851 intel_wait_ddi_buf_idle(dev_priv, PORT_E);
+89 -69
drivers/gpu/drm/i915/display/intel_hdcp.c
··· 23 23 #include "intel_display_power_well.h" 24 24 #include "intel_display_types.h" 25 25 #include "intel_hdcp.h" 26 + #include "intel_hdcp_gsc.h" 26 27 #include "intel_hdcp_regs.h" 27 28 #include "intel_pcode.h" 28 29 ··· 204 203 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 205 204 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 206 205 struct intel_hdcp *hdcp = &connector->hdcp; 206 + struct intel_gt *gt = dev_priv->media_gt; 207 + struct intel_gsc_uc *gsc = &gt->uc.gsc; 207 208 bool capable = false; 208 209 209 210 /* I915 support for HDCP2.2 */ 210 211 if (!hdcp->hdcp2_supported) 211 212 return false; 212 213 213 - /* MEI interface is solid */ 214 + /* If MTL+ make sure gsc is loaded and proxy is setup */ 215 + if (intel_hdcp_gsc_cs_required(dev_priv)) 216 + if (!intel_uc_fw_is_running(&gsc->fw)) 217 + return false; 218 + 219 + /* MEI/GSC interface is solid depending on which is used */ 214 220 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 215 221 if (!dev_priv->display.hdcp.comp_added || !dev_priv->display.hdcp.master) { 216 222 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); ··· 1150 1142 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1151 1143 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1152 1144 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1153 - struct i915_hdcp_comp_master *comp; 1145 + struct i915_hdcp_master *arbiter; 1154 1146 int ret; 1155 1147 1156 1148 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 1157 - comp = dev_priv->display.hdcp.master; 1149 + arbiter = dev_priv->display.hdcp.master; 1158 1150 1159 - if (!comp || !comp->ops) { 1151 + if (!arbiter || !arbiter->ops) { 1160 1152 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1161 1153 return -EINVAL; 1162 1154 } 1163 1155 1164 - ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data); 1156 + ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, 
ake_data); 1165 1157 if (ret) 1166 1158 drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n", 1167 1159 ret); ··· 1180 1172 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1181 1173 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1182 1174 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1183 - struct i915_hdcp_comp_master *comp; 1175 + struct i915_hdcp_master *arbiter; 1184 1176 int ret; 1185 1177 1186 1178 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 1187 - comp = dev_priv->display.hdcp.master; 1179 + arbiter = dev_priv->display.hdcp.master; 1188 1180 1189 - if (!comp || !comp->ops) { 1181 + if (!arbiter || !arbiter->ops) { 1190 1182 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1191 1183 return -EINVAL; 1192 1184 } 1193 1185 1194 - ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data, 1186 + ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data, 1195 1187 rx_cert, paired, 1196 1188 ek_pub_km, msg_sz); 1197 1189 if (ret < 0) ··· 1208 1200 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1209 1201 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1210 1202 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1211 - struct i915_hdcp_comp_master *comp; 1203 + struct i915_hdcp_master *arbiter; 1212 1204 int ret; 1213 1205 1214 1206 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 1215 - comp = dev_priv->display.hdcp.master; 1207 + arbiter = dev_priv->display.hdcp.master; 1216 1208 1217 - if (!comp || !comp->ops) { 1209 + if (!arbiter || !arbiter->ops) { 1218 1210 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1219 1211 return -EINVAL; 1220 1212 } 1221 1213 1222 - ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime); 1214 + ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime); 1223 1215 if (ret < 0) 1224 1216 drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. 
%d\n", ret); 1225 1217 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); ··· 1234 1226 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1235 1227 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1236 1228 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1237 - struct i915_hdcp_comp_master *comp; 1229 + struct i915_hdcp_master *arbiter; 1238 1230 int ret; 1239 1231 1240 1232 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 1241 - comp = dev_priv->display.hdcp.master; 1233 + arbiter = dev_priv->display.hdcp.master; 1242 1234 1243 - if (!comp || !comp->ops) { 1235 + if (!arbiter || !arbiter->ops) { 1244 1236 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1245 1237 return -EINVAL; 1246 1238 } 1247 1239 1248 - ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info); 1240 + ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info); 1249 1241 if (ret < 0) 1250 1242 drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n", 1251 1243 ret); ··· 1261 1253 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1262 1254 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1263 1255 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1264 - struct i915_hdcp_comp_master *comp; 1256 + struct i915_hdcp_master *arbiter; 1265 1257 int ret; 1266 1258 1267 1259 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 1268 - comp = dev_priv->display.hdcp.master; 1260 + arbiter = dev_priv->display.hdcp.master; 1269 1261 1270 - if (!comp || !comp->ops) { 1262 + if (!arbiter || !arbiter->ops) { 1271 1263 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1272 1264 return -EINVAL; 1273 1265 } 1274 1266 1275 - ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init); 1267 + ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init); 1276 1268 if (ret < 0) 1277 1269 drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. 
%d\n", 1278 1270 ret); ··· 1288 1280 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1289 1281 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1290 1282 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1291 - struct i915_hdcp_comp_master *comp; 1283 + struct i915_hdcp_master *arbiter; 1292 1284 int ret; 1293 1285 1294 1286 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 1295 - comp = dev_priv->display.hdcp.master; 1287 + arbiter = dev_priv->display.hdcp.master; 1296 1288 1297 - if (!comp || !comp->ops) { 1289 + if (!arbiter || !arbiter->ops) { 1298 1290 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1299 1291 return -EINVAL; 1300 1292 } 1301 1293 1302 - ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime); 1294 + ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime); 1303 1295 if (ret < 0) 1304 1296 drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n", 1305 1297 ret); ··· 1314 1306 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1315 1307 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1316 1308 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1317 - struct i915_hdcp_comp_master *comp; 1309 + struct i915_hdcp_master *arbiter; 1318 1310 int ret; 1319 1311 1320 1312 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 1321 - comp = dev_priv->display.hdcp.master; 1313 + arbiter = dev_priv->display.hdcp.master; 1322 1314 1323 - if (!comp || !comp->ops) { 1315 + if (!arbiter || !arbiter->ops) { 1324 1316 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1325 1317 return -EINVAL; 1326 1318 } 1327 1319 1328 - ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data); 1320 + ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data); 1329 1321 if (ret < 0) 1330 1322 drm_dbg_kms(&dev_priv->drm, "Get session key failed. 
%d\n", 1331 1323 ret); ··· 1343 1335 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1344 1336 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1345 1337 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1346 - struct i915_hdcp_comp_master *comp; 1338 + struct i915_hdcp_master *arbiter; 1347 1339 int ret; 1348 1340 1349 1341 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 1350 - comp = dev_priv->display.hdcp.master; 1342 + arbiter = dev_priv->display.hdcp.master; 1351 1343 1352 - if (!comp || !comp->ops) { 1344 + if (!arbiter || !arbiter->ops) { 1353 1345 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1354 1346 return -EINVAL; 1355 1347 } 1356 1348 1357 - ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data, 1358 - rep_topology, 1359 - rep_send_ack); 1349 + ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev, 1350 + data, 1351 + rep_topology, 1352 + rep_send_ack); 1360 1353 if (ret < 0) 1361 1354 drm_dbg_kms(&dev_priv->drm, 1362 1355 "Verify rep topology failed. 
%d\n", ret); ··· 1373 1364 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1374 1365 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1375 1366 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1376 - struct i915_hdcp_comp_master *comp; 1367 + struct i915_hdcp_master *arbiter; 1377 1368 int ret; 1378 1369 1379 1370 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 1380 - comp = dev_priv->display.hdcp.master; 1371 + arbiter = dev_priv->display.hdcp.master; 1381 1372 1382 - if (!comp || !comp->ops) { 1373 + if (!arbiter || !arbiter->ops) { 1383 1374 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1384 1375 return -EINVAL; 1385 1376 } 1386 1377 1387 - ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready); 1378 + ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready); 1388 1379 if (ret < 0) 1389 1380 drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret); 1390 1381 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); ··· 1397 1388 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1398 1389 struct hdcp_port_data *data = &dig_port->hdcp_port_data; 1399 1390 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1400 - struct i915_hdcp_comp_master *comp; 1391 + struct i915_hdcp_master *arbiter; 1401 1392 int ret; 1402 1393 1403 1394 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 1404 - comp = dev_priv->display.hdcp.master; 1395 + arbiter = dev_priv->display.hdcp.master; 1405 1396 1406 - if (!comp || !comp->ops) { 1397 + if (!arbiter || !arbiter->ops) { 1407 1398 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1408 1399 return -EINVAL; 1409 1400 } 1410 1401 1411 - ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data); 1402 + ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data); 1412 1403 if (ret < 0) 1413 1404 drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. 
%d\n", 1414 1405 ret); ··· 1417 1408 return ret; 1418 1409 } 1419 1410 1420 - static int hdcp2_close_mei_session(struct intel_connector *connector) 1411 + static int hdcp2_close_session(struct intel_connector *connector) 1421 1412 { 1422 1413 struct intel_digital_port *dig_port = intel_attached_dig_port(connector); 1423 1414 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1424 - struct i915_hdcp_comp_master *comp; 1415 + struct i915_hdcp_master *arbiter; 1425 1416 int ret; 1426 1417 1427 1418 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 1428 - comp = dev_priv->display.hdcp.master; 1419 + arbiter = dev_priv->display.hdcp.master; 1429 1420 1430 - if (!comp || !comp->ops) { 1421 + if (!arbiter || !arbiter->ops) { 1431 1422 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1432 1423 return -EINVAL; 1433 1424 } 1434 1425 1435 - ret = comp->ops->close_hdcp_session(comp->mei_dev, 1426 + ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev, 1436 1427 &dig_port->hdcp_port_data); 1437 1428 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 1438 1429 ··· 1441 1432 1442 1433 static int hdcp2_deauthenticate_port(struct intel_connector *connector) 1443 1434 { 1444 - return hdcp2_close_mei_session(connector); 1435 + return hdcp2_close_session(connector); 1445 1436 } 1446 1437 1447 1438 /* Authentication flow starts from here */ ··· 2151 2142 2152 2143 drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n"); 2153 2144 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 2154 - dev_priv->display.hdcp.master = (struct i915_hdcp_comp_master *)data; 2155 - dev_priv->display.hdcp.master->mei_dev = mei_kdev; 2145 + dev_priv->display.hdcp.master = (struct i915_hdcp_master *)data; 2146 + dev_priv->display.hdcp.master->hdcp_dev = mei_kdev; 2156 2147 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 2157 2148 2158 2149 return 0; ··· 2169 2160 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 2170 2161 } 2171 2162 2172 - static const struct component_ops i915_hdcp_component_ops = 
{ 2163 + static const struct component_ops i915_hdcp_ops = { 2173 2164 .bind = i915_hdcp_component_bind, 2174 2165 .unbind = i915_hdcp_component_unbind, 2175 2166 }; 2176 2167 2177 - static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port) 2168 + static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port) 2178 2169 { 2179 2170 switch (port) { 2180 2171 case PORT_A: 2181 - return MEI_DDI_A; 2172 + return HDCP_DDI_A; 2182 2173 case PORT_B ... PORT_F: 2183 - return (enum mei_fw_ddi)port; 2174 + return (enum hdcp_ddi)port; 2184 2175 default: 2185 - return MEI_DDI_INVALID_PORT; 2176 + return HDCP_DDI_INVALID_PORT; 2186 2177 } 2187 2178 } 2188 2179 2189 - static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) 2180 + static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder) 2190 2181 { 2191 2182 switch (cpu_transcoder) { 2192 2183 case TRANSCODER_A ... TRANSCODER_D: 2193 - return (enum mei_fw_tc)(cpu_transcoder | 0x10); 2184 + return (enum hdcp_transcoder)(cpu_transcoder | 0x10); 2194 2185 default: /* eDP, DSI TRANSCODERS are non HDCP capable */ 2195 - return MEI_INVALID_TRANSCODER; 2186 + return HDCP_INVALID_TRANSCODER; 2196 2187 } 2197 2188 } 2198 2189 ··· 2206 2197 enum port port = dig_port->base.port; 2207 2198 2208 2199 if (DISPLAY_VER(dev_priv) < 12) 2209 - data->fw_ddi = intel_get_mei_fw_ddi_index(port); 2200 + data->hdcp_ddi = intel_get_hdcp_ddi_index(port); 2210 2201 else 2211 2202 /* 2212 - * As per ME FW API expectation, for GEN 12+, fw_ddi is filled 2203 + * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled 2213 2204 * with zero(INVALID PORT index). 
2214 2205 */ 2215 - data->fw_ddi = MEI_DDI_INVALID_PORT; 2206 + data->hdcp_ddi = HDCP_DDI_INVALID_PORT; 2216 2207 2217 2208 /* 2218 - * As associated transcoder is set and modified at modeset, here fw_tc 2209 + * As associated transcoder is set and modified at modeset, here hdcp_transcoder 2219 2210 * is initialized to zero (invalid transcoder index). This will be 2220 2211 * retained for <Gen12 forever. 2221 2212 */ 2222 - data->fw_tc = MEI_INVALID_TRANSCODER; 2213 + data->hdcp_transcoder = HDCP_INVALID_TRANSCODER; 2223 2214 2224 2215 data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; 2225 2216 data->protocol = (u8)shim->protocol; ··· 2241 2232 2242 2233 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv) 2243 2234 { 2235 + if (intel_hdcp_gsc_cs_required(dev_priv)) 2236 + return true; 2237 + 2244 2238 if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) 2245 2239 return false; 2246 2240 ··· 2265 2253 2266 2254 dev_priv->display.hdcp.comp_added = true; 2267 2255 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 2268 - ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops, 2269 - I915_COMPONENT_HDCP); 2256 + if (intel_hdcp_gsc_cs_required(dev_priv)) 2257 + ret = intel_hdcp_gsc_init(dev_priv); 2258 + else 2259 + ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_ops, 2260 + I915_COMPONENT_HDCP); 2261 + 2270 2262 if (ret < 0) { 2271 - drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n", 2263 + drm_dbg_kms(&dev_priv->drm, "Failed at fw component add(%d)\n", 2272 2264 ret); 2273 2265 mutex_lock(&dev_priv->display.hdcp.comp_mutex); 2274 2266 dev_priv->display.hdcp.comp_added = false; ··· 2363 2347 } 2364 2348 2365 2349 if (DISPLAY_VER(dev_priv) >= 12) 2366 - dig_port->hdcp_port_data.fw_tc = intel_get_mei_fw_tc(hdcp->cpu_transcoder); 2350 + dig_port->hdcp_port_data.hdcp_transcoder = 2351 + intel_get_hdcp_transcoder(hdcp->cpu_transcoder); 2367 2352 2368 2353 /* 2369 2354 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup ··· 
2499 2482 dev_priv->display.hdcp.comp_added = false; 2500 2483 mutex_unlock(&dev_priv->display.hdcp.comp_mutex); 2501 2484 2502 - component_del(dev_priv->drm.dev, &i915_hdcp_component_ops); 2485 + if (intel_hdcp_gsc_cs_required(dev_priv)) 2486 + intel_hdcp_gsc_fini(dev_priv); 2487 + else 2488 + component_del(dev_priv->drm.dev, &i915_hdcp_ops); 2503 2489 } 2504 2490 2505 2491 void intel_hdcp_cleanup(struct intel_connector *connector)
+831
drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2023, Intel Corporation. 4 + */ 5 + 6 + #include <drm/i915_hdcp_interface.h> 7 + 8 + #include "display/intel_hdcp_gsc.h" 9 + #include "gem/i915_gem_region.h" 10 + #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h" 11 + #include "i915_drv.h" 12 + #include "i915_utils.h" 13 + 14 + bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915) 15 + { 16 + return DISPLAY_VER(i915) >= 14; 17 + } 18 + 19 + static int 20 + gsc_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data, 21 + struct hdcp2_ake_init *ake_data) 22 + { 23 + struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } }; 24 + struct wired_cmd_initiate_hdcp2_session_out 25 + session_init_out = { { 0 } }; 26 + struct drm_i915_private *i915; 27 + ssize_t byte; 28 + 29 + if (!dev || !data || !ake_data) 30 + return -EINVAL; 31 + 32 + i915 = kdev_to_i915(dev); 33 + if (!i915) { 34 + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); 35 + return -ENODEV; 36 + } 37 + 38 + session_init_in.header.api_version = HDCP_API_VERSION; 39 + session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION; 40 + session_init_in.header.status = FW_HDCP_STATUS_SUCCESS; 41 + session_init_in.header.buffer_len = 42 + WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN; 43 + 44 + session_init_in.port.integrated_port_type = data->port_type; 45 + session_init_in.port.physical_port = (u8)data->hdcp_ddi; 46 + session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 47 + session_init_in.protocol = data->protocol; 48 + 49 + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_init_in, 50 + sizeof(session_init_in), 51 + (u8 *)&session_init_out, 52 + sizeof(session_init_out)); 53 + if (byte < 0) { 54 + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); 55 + return byte; 56 + } 57 + 58 + if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { 59 + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. 
Status: 0x%X\n", 60 + WIRED_INITIATE_HDCP2_SESSION, 61 + session_init_out.header.status); 62 + return -EIO; 63 + } 64 + 65 + ake_data->msg_id = HDCP_2_2_AKE_INIT; 66 + ake_data->tx_caps = session_init_out.tx_caps; 67 + memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN); 68 + 69 + return 0; 70 + } 71 + 72 + static int 73 + gsc_hdcp_verify_receiver_cert_prepare_km(struct device *dev, 74 + struct hdcp_port_data *data, 75 + struct hdcp2_ake_send_cert *rx_cert, 76 + bool *km_stored, 77 + struct hdcp2_ake_no_stored_km 78 + *ek_pub_km, 79 + size_t *msg_sz) 80 + { 81 + struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } }; 82 + struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } }; 83 + struct drm_i915_private *i915; 84 + ssize_t byte; 85 + 86 + if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz) 87 + return -EINVAL; 88 + 89 + i915 = kdev_to_i915(dev); 90 + if (!i915) { 91 + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); 92 + return -ENODEV; 93 + } 94 + 95 + verify_rxcert_in.header.api_version = HDCP_API_VERSION; 96 + verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT; 97 + verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS; 98 + verify_rxcert_in.header.buffer_len = 99 + WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN; 100 + 101 + verify_rxcert_in.port.integrated_port_type = data->port_type; 102 + verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi; 103 + verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 104 + 105 + verify_rxcert_in.cert_rx = rx_cert->cert_rx; 106 + memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN); 107 + memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN); 108 + 109 + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_rxcert_in, 110 + sizeof(verify_rxcert_in), 111 + (u8 *)&verify_rxcert_out, 112 + sizeof(verify_rxcert_out)); 113 + if (byte < 0) { 114 + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed: 
%zd\n", byte); 115 + return byte; 116 + } 117 + 118 + if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) { 119 + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", 120 + WIRED_VERIFY_RECEIVER_CERT, 121 + verify_rxcert_out.header.status); 122 + return -EIO; 123 + } 124 + 125 + *km_stored = !!verify_rxcert_out.km_stored; 126 + if (verify_rxcert_out.km_stored) { 127 + ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM; 128 + *msg_sz = sizeof(struct hdcp2_ake_stored_km); 129 + } else { 130 + ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM; 131 + *msg_sz = sizeof(struct hdcp2_ake_no_stored_km); 132 + } 133 + 134 + memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff, 135 + sizeof(verify_rxcert_out.ekm_buff)); 136 + 137 + return 0; 138 + } 139 + 140 + static int 141 + gsc_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data, 142 + struct hdcp2_ake_send_hprime *rx_hprime) 143 + { 144 + struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } }; 145 + struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } }; 146 + struct drm_i915_private *i915; 147 + ssize_t byte; 148 + 149 + if (!dev || !data || !rx_hprime) 150 + return -EINVAL; 151 + 152 + i915 = kdev_to_i915(dev); 153 + if (!i915) { 154 + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); 155 + return -ENODEV; 156 + } 157 + 158 + send_hprime_in.header.api_version = HDCP_API_VERSION; 159 + send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME; 160 + send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS; 161 + send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN; 162 + 163 + send_hprime_in.port.integrated_port_type = data->port_type; 164 + send_hprime_in.port.physical_port = (u8)data->hdcp_ddi; 165 + send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 166 + 167 + memcpy(send_hprime_in.h_prime, rx_hprime->h_prime, 168 + HDCP_2_2_H_PRIME_LEN); 169 + 170 + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&send_hprime_in, 171 + 
sizeof(send_hprime_in), 172 + (u8 *)&send_hprime_out, 173 + sizeof(send_hprime_out)); 174 + if (byte < 0) { 175 + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); 176 + return byte; 177 + } 178 + 179 + if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { 180 + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. Status: 0x%X\n", 181 + WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status); 182 + return -EIO; 183 + } 184 + 185 + return 0; 186 + } 187 + 188 + static int 189 + gsc_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data, 190 + struct hdcp2_ake_send_pairing_info *pairing_info) 191 + { 192 + struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } }; 193 + struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } }; 194 + struct drm_i915_private *i915; 195 + ssize_t byte; 196 + 197 + if (!dev || !data || !pairing_info) 198 + return -EINVAL; 199 + 200 + i915 = kdev_to_i915(dev); 201 + if (!i915) { 202 + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); 203 + return -ENODEV; 204 + } 205 + 206 + pairing_info_in.header.api_version = HDCP_API_VERSION; 207 + pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO; 208 + pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS; 209 + pairing_info_in.header.buffer_len = 210 + WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN; 211 + 212 + pairing_info_in.port.integrated_port_type = data->port_type; 213 + pairing_info_in.port.physical_port = (u8)data->hdcp_ddi; 214 + pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 215 + 216 + memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km, 217 + HDCP_2_2_E_KH_KM_LEN); 218 + 219 + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&pairing_info_in, 220 + sizeof(pairing_info_in), 221 + (u8 *)&pairing_info_out, 222 + sizeof(pairing_info_out)); 223 + if (byte < 0) { 224 + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. 
%zd\n", byte); 225 + return byte; 226 + } 227 + 228 + if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) { 229 + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. Status: 0x%X\n", 230 + WIRED_AKE_SEND_PAIRING_INFO, 231 + pairing_info_out.header.status); 232 + return -EIO; 233 + } 234 + 235 + return 0; 236 + } 237 + 238 + static int 239 + gsc_hdcp_initiate_locality_check(struct device *dev, 240 + struct hdcp_port_data *data, 241 + struct hdcp2_lc_init *lc_init_data) 242 + { 243 + struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } }; 244 + struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } }; 245 + struct drm_i915_private *i915; 246 + ssize_t byte; 247 + 248 + if (!dev || !data || !lc_init_data) 249 + return -EINVAL; 250 + 251 + i915 = kdev_to_i915(dev); 252 + if (!i915) { 253 + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); 254 + return -ENODEV; 255 + } 256 + 257 + lc_init_in.header.api_version = HDCP_API_VERSION; 258 + lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK; 259 + lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS; 260 + lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN; 261 + 262 + lc_init_in.port.integrated_port_type = data->port_type; 263 + lc_init_in.port.physical_port = (u8)data->hdcp_ddi; 264 + lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 265 + 266 + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&lc_init_in, sizeof(lc_init_in), 267 + (u8 *)&lc_init_out, sizeof(lc_init_out)); 268 + if (byte < 0) { 269 + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); 270 + return byte; 271 + } 272 + 273 + if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { 274 + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X Failed. 
status: 0x%X\n", 275 + WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status); 276 + return -EIO; 277 + } 278 + 279 + lc_init_data->msg_id = HDCP_2_2_LC_INIT; 280 + memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN); 281 + 282 + return 0; 283 + } 284 + 285 + static int 286 + gsc_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data, 287 + struct hdcp2_lc_send_lprime *rx_lprime) 288 + { 289 + struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } }; 290 + struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } }; 291 + struct drm_i915_private *i915; 292 + ssize_t byte; 293 + 294 + if (!dev || !data || !rx_lprime) 295 + return -EINVAL; 296 + 297 + i915 = kdev_to_i915(dev); 298 + if (!i915) { 299 + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); 300 + return -ENODEV; 301 + } 302 + 303 + verify_lprime_in.header.api_version = HDCP_API_VERSION; 304 + verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY; 305 + verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS; 306 + verify_lprime_in.header.buffer_len = 307 + WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN; 308 + 309 + verify_lprime_in.port.integrated_port_type = data->port_type; 310 + verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi; 311 + verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 312 + 313 + memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime, 314 + HDCP_2_2_L_PRIME_LEN); 315 + 316 + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_lprime_in, 317 + sizeof(verify_lprime_in), 318 + (u8 *)&verify_lprime_out, 319 + sizeof(verify_lprime_out)); 320 + if (byte < 0) { 321 + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); 322 + return byte; 323 + } 324 + 325 + if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { 326 + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", 327 + WIRED_VALIDATE_LOCALITY, 328 + verify_lprime_out.header.status); 329 + return -EIO; 330 + } 331 + 332 + return 0; 333 + } 334 + 335 + static int gsc_hdcp_get_session_key(struct device *dev, 336 + struct hdcp_port_data *data, 337 + struct hdcp2_ske_send_eks *ske_data) 338 + { 339 + struct wired_cmd_get_session_key_in get_skey_in = { { 0 } }; 340 + struct wired_cmd_get_session_key_out get_skey_out = { { 0 } }; 341 + struct drm_i915_private *i915; 342 + ssize_t byte; 343 + 344 + if (!dev || !data || !ske_data) 345 + return -EINVAL; 346 + 347 + i915 = kdev_to_i915(dev); 348 + if (!i915) { 349 + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); 350 + return -ENODEV; 351 + } 352 + 353 + get_skey_in.header.api_version = HDCP_API_VERSION; 354 + get_skey_in.header.command_id = WIRED_GET_SESSION_KEY; 355 + get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS; 356 + get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN; 357 + 358 + get_skey_in.port.integrated_port_type = data->port_type; 359 + get_skey_in.port.physical_port = (u8)data->hdcp_ddi; 360 + get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 361 + 362 + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&get_skey_in, sizeof(get_skey_in), 363 + (u8 *)&get_skey_out, sizeof(get_skey_out)); 364 + if (byte < 0) { 365 + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); 366 + return byte; 367 + } 368 + 369 + if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) { 370 + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. 
status: 0x%X\n", 371 + WIRED_GET_SESSION_KEY, get_skey_out.header.status); 372 + return -EIO; 373 + } 374 + 375 + ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS; 376 + memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks, 377 + HDCP_2_2_E_DKEY_KS_LEN); 378 + memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN); 379 + 380 + return 0; 381 + } 382 + 383 + static int 384 + gsc_hdcp_repeater_check_flow_prepare_ack(struct device *dev, 385 + struct hdcp_port_data *data, 386 + struct hdcp2_rep_send_receiverid_list 387 + *rep_topology, 388 + struct hdcp2_rep_send_ack 389 + *rep_send_ack) 390 + { 391 + struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } }; 392 + struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } }; 393 + struct drm_i915_private *i915; 394 + ssize_t byte; 395 + 396 + if (!dev || !rep_topology || !rep_send_ack || !data) 397 + return -EINVAL; 398 + 399 + i915 = kdev_to_i915(dev); 400 + if (!i915) { 401 + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); 402 + return -ENODEV; 403 + } 404 + 405 + verify_repeater_in.header.api_version = HDCP_API_VERSION; 406 + verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER; 407 + verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS; 408 + verify_repeater_in.header.buffer_len = 409 + WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN; 410 + 411 + verify_repeater_in.port.integrated_port_type = data->port_type; 412 + verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi; 413 + verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 414 + 415 + memcpy(verify_repeater_in.rx_info, rep_topology->rx_info, 416 + HDCP_2_2_RXINFO_LEN); 417 + memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v, 418 + HDCP_2_2_SEQ_NUM_LEN); 419 + memcpy(verify_repeater_in.v_prime, rep_topology->v_prime, 420 + HDCP_2_2_V_PRIME_HALF_LEN); 421 + memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids, 422 + HDCP_2_2_RECEIVER_IDS_MAX_LEN); 423 + 424 + byte = 
intel_hdcp_gsc_msg_send(i915, (u8 *)&verify_repeater_in, 425 + sizeof(verify_repeater_in), 426 + (u8 *)&verify_repeater_out, 427 + sizeof(verify_repeater_out)); 428 + if (byte < 0) { 429 + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); 430 + return byte; 431 + } 432 + 433 + if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) { 434 + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", 435 + WIRED_VERIFY_REPEATER, 436 + verify_repeater_out.header.status); 437 + return -EIO; 438 + } 439 + 440 + memcpy(rep_send_ack->v, verify_repeater_out.v, 441 + HDCP_2_2_V_PRIME_HALF_LEN); 442 + rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK; 443 + 444 + return 0; 445 + } 446 + 447 + static int gsc_hdcp_verify_mprime(struct device *dev, 448 + struct hdcp_port_data *data, 449 + struct hdcp2_rep_stream_ready *stream_ready) 450 + { 451 + struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in; 452 + struct wired_cmd_repeater_auth_stream_req_out 453 + verify_mprime_out = { { 0 } }; 454 + struct drm_i915_private *i915; 455 + ssize_t byte; 456 + size_t cmd_size; 457 + 458 + if (!dev || !stream_ready || !data) 459 + return -EINVAL; 460 + 461 + i915 = kdev_to_i915(dev); 462 + if (!i915) { 463 + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); 464 + return -ENODEV; 465 + } 466 + 467 + cmd_size = struct_size(verify_mprime_in, streams, data->k); 468 + if (cmd_size == SIZE_MAX) 469 + return -EINVAL; 470 + 471 + verify_mprime_in = kzalloc(cmd_size, GFP_KERNEL); 472 + if (!verify_mprime_in) 473 + return -ENOMEM; 474 + 475 + verify_mprime_in->header.api_version = HDCP_API_VERSION; 476 + verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ; 477 + verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS; 478 + verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header); 479 + 480 + verify_mprime_in->port.integrated_port_type = data->port_type; 481 + verify_mprime_in->port.physical_port = 
(u8)data->hdcp_ddi; 482 + verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder; 483 + 484 + memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN); 485 + drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m); 486 + 487 + memcpy(verify_mprime_in->streams, data->streams, 488 + array_size(data->k, sizeof(*data->streams))); 489 + 490 + verify_mprime_in->k = cpu_to_be16(data->k); 491 + 492 + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)verify_mprime_in, cmd_size, 493 + (u8 *)&verify_mprime_out, 494 + sizeof(verify_mprime_out)); 495 + kfree(verify_mprime_in); 496 + if (byte < 0) { 497 + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); 498 + return byte; 499 + } 500 + 501 + if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { 502 + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", 503 + WIRED_REPEATER_AUTH_STREAM_REQ, 504 + verify_mprime_out.header.status); 505 + return -EIO; 506 + } 507 + 508 + return 0; 509 + } 510 + 511 + static int gsc_hdcp_enable_authentication(struct device *dev, 512 + struct hdcp_port_data *data) 513 + { 514 + struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } }; 515 + struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } }; 516 + struct drm_i915_private *i915; 517 + ssize_t byte; 518 + 519 + if (!dev || !data) 520 + return -EINVAL; 521 + 522 + i915 = kdev_to_i915(dev); 523 + if (!i915) { 524 + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); 525 + return -ENODEV; 526 + } 527 + 528 + enable_auth_in.header.api_version = HDCP_API_VERSION; 529 + enable_auth_in.header.command_id = WIRED_ENABLE_AUTH; 530 + enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS; 531 + enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN; 532 + 533 + enable_auth_in.port.integrated_port_type = data->port_type; 534 + enable_auth_in.port.physical_port = (u8)data->hdcp_ddi; 535 + enable_auth_in.port.attached_transcoder = 
(u8)data->hdcp_transcoder; 536 + enable_auth_in.stream_type = data->streams[0].stream_type; 537 + 538 + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&enable_auth_in, 539 + sizeof(enable_auth_in), 540 + (u8 *)&enable_auth_out, 541 + sizeof(enable_auth_out)); 542 + if (byte < 0) { 543 + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. %zd\n", byte); 544 + return byte; 545 + } 546 + 547 + if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) { 548 + drm_dbg_kms(&i915->drm, "FW cmd 0x%08X failed. status: 0x%X\n", 549 + WIRED_ENABLE_AUTH, enable_auth_out.header.status); 550 + return -EIO; 551 + } 552 + 553 + return 0; 554 + } 555 + 556 + static int 557 + gsc_hdcp_close_session(struct device *dev, struct hdcp_port_data *data) 558 + { 559 + struct wired_cmd_close_session_in session_close_in = { { 0 } }; 560 + struct wired_cmd_close_session_out session_close_out = { { 0 } }; 561 + struct drm_i915_private *i915; 562 + ssize_t byte; 563 + 564 + if (!dev || !data) 565 + return -EINVAL; 566 + 567 + i915 = kdev_to_i915(dev); 568 + if (!i915) { 569 + dev_err(dev, "DRM not initialized, aborting HDCP.\n"); 570 + return -ENODEV; 571 + } 572 + 573 + session_close_in.header.api_version = HDCP_API_VERSION; 574 + session_close_in.header.command_id = WIRED_CLOSE_SESSION; 575 + session_close_in.header.status = FW_HDCP_STATUS_SUCCESS; 576 + session_close_in.header.buffer_len = 577 + WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN; 578 + 579 + session_close_in.port.integrated_port_type = data->port_type; 580 + session_close_in.port.physical_port = (u8)data->hdcp_ddi; 581 + session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 582 + 583 + byte = intel_hdcp_gsc_msg_send(i915, (u8 *)&session_close_in, 584 + sizeof(session_close_in), 585 + (u8 *)&session_close_out, 586 + sizeof(session_close_out)); 587 + if (byte < 0) { 588 + drm_dbg_kms(&i915->drm, "intel_hdcp_gsc_msg_send failed. 
%zd\n", byte); 589 + return byte; 590 + } 591 + 592 + if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) { 593 + drm_dbg_kms(&i915->drm, "Session Close Failed. status: 0x%X\n", 594 + session_close_out.header.status); 595 + return -EIO; 596 + } 597 + 598 + return 0; 599 + } 600 + 601 + static const struct i915_hdcp_ops gsc_hdcp_ops = { 602 + .initiate_hdcp2_session = gsc_hdcp_initiate_session, 603 + .verify_receiver_cert_prepare_km = 604 + gsc_hdcp_verify_receiver_cert_prepare_km, 605 + .verify_hprime = gsc_hdcp_verify_hprime, 606 + .store_pairing_info = gsc_hdcp_store_pairing_info, 607 + .initiate_locality_check = gsc_hdcp_initiate_locality_check, 608 + .verify_lprime = gsc_hdcp_verify_lprime, 609 + .get_session_key = gsc_hdcp_get_session_key, 610 + .repeater_check_flow_prepare_ack = 611 + gsc_hdcp_repeater_check_flow_prepare_ack, 612 + .verify_mprime = gsc_hdcp_verify_mprime, 613 + .enable_hdcp_authentication = gsc_hdcp_enable_authentication, 614 + .close_hdcp_session = gsc_hdcp_close_session, 615 + }; 616 + 617 + /*This function helps allocate memory for the command that we will send to gsc cs */ 618 + static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915, 619 + struct intel_hdcp_gsc_message *hdcp_message) 620 + { 621 + struct intel_gt *gt = i915->media_gt; 622 + struct drm_i915_gem_object *obj = NULL; 623 + struct i915_vma *vma = NULL; 624 + void *cmd; 625 + int err; 626 + 627 + /* allocate object of one page for HDCP command memory and store it */ 628 + obj = i915_gem_object_create_shmem(i915, PAGE_SIZE); 629 + 630 + if (IS_ERR(obj)) { 631 + drm_err(&i915->drm, "Failed to allocate HDCP streaming command!\n"); 632 + return PTR_ERR(obj); 633 + } 634 + 635 + cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true)); 636 + if (IS_ERR(cmd)) { 637 + drm_err(&i915->drm, "Failed to map gsc message page!\n"); 638 + err = PTR_ERR(cmd); 639 + goto out_unpin; 640 + } 641 + 642 + vma = i915_vma_instance(obj, 
&gt->ggtt->vm, NULL); 643 + if (IS_ERR(vma)) { 644 + err = PTR_ERR(vma); 645 + goto out_unmap; 646 + } 647 + 648 + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); 649 + if (err) 650 + goto out_unmap; 651 + 652 + memset(cmd, 0, obj->base.size); 653 + 654 + hdcp_message->hdcp_cmd = cmd; 655 + hdcp_message->vma = vma; 656 + 657 + return 0; 658 + 659 + out_unmap: 660 + i915_gem_object_unpin_map(obj); 661 + out_unpin: 662 + i915_gem_object_put(obj); 663 + return err; 664 + } 665 + 666 + static int intel_hdcp_gsc_hdcp2_init(struct drm_i915_private *i915) 667 + { 668 + struct intel_hdcp_gsc_message *hdcp_message; 669 + int ret; 670 + 671 + hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL); 672 + 673 + if (!hdcp_message) 674 + return -ENOMEM; 675 + 676 + /* 677 + * NOTE: No need to lock the comp mutex here as it is already 678 + * going to be taken before this function called 679 + */ 680 + i915->display.hdcp.hdcp_message = hdcp_message; 681 + ret = intel_hdcp_gsc_initialize_message(i915, hdcp_message); 682 + 683 + if (ret) 684 + drm_err(&i915->drm, "Could not initialize hdcp_message\n"); 685 + 686 + return ret; 687 + } 688 + 689 + static void intel_hdcp_gsc_free_message(struct drm_i915_private *i915) 690 + { 691 + struct intel_hdcp_gsc_message *hdcp_message = 692 + i915->display.hdcp.hdcp_message; 693 + 694 + i915_vma_unpin_and_release(&hdcp_message->vma, I915_VMA_RELEASE_MAP); 695 + kfree(hdcp_message); 696 + } 697 + 698 + int intel_hdcp_gsc_init(struct drm_i915_private *i915) 699 + { 700 + struct i915_hdcp_master *data; 701 + int ret; 702 + 703 + data = kzalloc(sizeof(struct i915_hdcp_master), GFP_KERNEL); 704 + if (!data) 705 + return -ENOMEM; 706 + 707 + mutex_lock(&i915->display.hdcp.comp_mutex); 708 + i915->display.hdcp.master = data; 709 + i915->display.hdcp.master->hdcp_dev = i915->drm.dev; 710 + i915->display.hdcp.master->ops = &gsc_hdcp_ops; 711 + ret = intel_hdcp_gsc_hdcp2_init(i915); 712 + mutex_unlock(&i915->display.hdcp.comp_mutex); 713 + 714 + return 
ret; 715 + } 716 + 717 + void intel_hdcp_gsc_fini(struct drm_i915_private *i915) 718 + { 719 + intel_hdcp_gsc_free_message(i915); 720 + kfree(i915->display.hdcp.master); 721 + } 722 + 723 + static int intel_gsc_send_sync(struct drm_i915_private *i915, 724 + struct intel_gsc_mtl_header *header, u64 addr, 725 + size_t msg_out_len) 726 + { 727 + struct intel_gt *gt = i915->media_gt; 728 + int ret; 729 + 730 + header->flags = 0; 731 + ret = intel_gsc_uc_heci_cmd_submit_packet(&gt->uc.gsc, addr, 732 + header->message_size, 733 + addr, 734 + msg_out_len + sizeof(*header)); 735 + if (ret) { 736 + drm_err(&i915->drm, "failed to send gsc HDCP msg (%d)\n", ret); 737 + return ret; 738 + } 739 + 740 + /* 741 + * Checking validity marker for memory sanity 742 + */ 743 + if (header->validity_marker != GSC_HECI_VALIDITY_MARKER) { 744 + drm_err(&i915->drm, "invalid validity marker\n"); 745 + return -EINVAL; 746 + } 747 + 748 + if (header->status != 0) { 749 + drm_err(&i915->drm, "header status indicates error %d\n", 750 + header->status); 751 + return -EINVAL; 752 + } 753 + 754 + if (header->flags & GSC_OUTFLAG_MSG_PENDING) 755 + return -EAGAIN; 756 + 757 + return 0; 758 + } 759 + 760 + /* 761 + * This function can now be used for sending requests and will also handle 762 + * receipt of reply messages hence no different function of message retrieval 763 + * is required. 
We will initialize intel_hdcp_gsc_message structure then add 764 + * gsc cs memory header as stated in specs after which the normal HDCP payload 765 + * will follow 766 + */ 767 + ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, 768 + size_t msg_in_len, u8 *msg_out, 769 + size_t msg_out_len) 770 + { 771 + struct intel_gt *gt = i915->media_gt; 772 + struct intel_gsc_mtl_header *header; 773 + const size_t max_msg_size = PAGE_SIZE - sizeof(*header); 774 + struct intel_hdcp_gsc_message *hdcp_message; 775 + u64 addr, host_session_id; 776 + u32 reply_size, msg_size; 777 + int ret, tries = 0; 778 + 779 + if (!intel_uc_uses_gsc_uc(&gt->uc)) 780 + return -ENODEV; 781 + 782 + if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) 783 + return -ENOSPC; 784 + 785 + hdcp_message = i915->display.hdcp.hdcp_message; 786 + header = hdcp_message->hdcp_cmd; 787 + addr = i915_ggtt_offset(hdcp_message->vma); 788 + 789 + msg_size = msg_in_len + sizeof(*header); 790 + memset(header, 0, msg_size); 791 + get_random_bytes(&host_session_id, sizeof(u64)); 792 + intel_gsc_uc_heci_cmd_emit_mtl_header(header, HECI_MEADDRESS_HDCP, 793 + msg_size, host_session_id); 794 + memcpy(hdcp_message->hdcp_cmd + sizeof(*header), msg_in, msg_in_len); 795 + 796 + /* 797 + * Keep sending request in case the pending bit is set no need to add 798 + * message handle as we are using same address hence loc. of header is 799 + * same and it will contain the message handle. 
we will send the message 800 + * 20 times each message 50 ms apart 801 + */ 802 + do { 803 + ret = intel_gsc_send_sync(i915, header, addr, msg_out_len); 804 + 805 + /* Only try again if gsc says so */ 806 + if (ret != -EAGAIN) 807 + break; 808 + 809 + msleep(50); 810 + 811 + } while (++tries < 20); 812 + 813 + if (ret) 814 + goto err; 815 + 816 + /* we use the same mem for the reply, so header is in the same loc */ 817 + reply_size = header->message_size - sizeof(*header); 818 + if (reply_size > msg_out_len) { 819 + drm_warn(&i915->drm, "caller with insufficient HDCP reply size %u (%d)\n", 820 + reply_size, (u32)msg_out_len); 821 + reply_size = msg_out_len; 822 + } else if (reply_size != msg_out_len) { 823 + drm_dbg_kms(&i915->drm, "caller unexpected HCDP reply size %u (%d)\n", 824 + reply_size, (u32)msg_out_len); 825 + } 826 + 827 + memcpy(msg_out, hdcp_message->hdcp_cmd + sizeof(*header), msg_out_len); 828 + 829 + err: 830 + return ret; 831 + }
+26
drivers/gpu/drm/i915/display/intel_hdcp_gsc.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_HDCP_GSC_H__ 7 + #define __INTEL_HDCP_GSC_H__ 8 + 9 + #include <linux/err.h> 10 + #include <linux/types.h> 11 + 12 + struct drm_i915_private; 13 + 14 + struct intel_hdcp_gsc_message { 15 + struct i915_vma *vma; 16 + void *hdcp_cmd; 17 + }; 18 + 19 + bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915); 20 + ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, 21 + size_t msg_in_len, u8 *msg_out, 22 + size_t msg_out_len); 23 + int intel_hdcp_gsc_init(struct drm_i915_private *i915); 24 + void intel_hdcp_gsc_fini(struct drm_i915_private *i915); 25 + 26 + #endif /* __INTEL_HDCP_GCS_H__ */
+9
drivers/gpu/drm/i915/display/intel_hotplug.c
··· 389 389 390 390 spin_unlock_irq(&dev_priv->irq_lock); 391 391 392 + /* Skip calling encode hotplug handlers if ignore long HPD set*/ 393 + if (dev_priv->display.hotplug.ignore_long_hpd) { 394 + drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n"); 395 + mutex_unlock(&dev_priv->drm.mode_config.mutex); 396 + return; 397 + } 398 + 392 399 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 393 400 for_each_intel_connector_iter(connector, &conn_iter) { 394 401 enum hpd_pin pin; ··· 947 940 i915, &i915_hpd_storm_ctl_fops); 948 941 debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, 949 942 i915, &i915_hpd_short_storm_ctl_fops); 943 + debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, 944 + &i915->display.hotplug.ignore_long_hpd); 950 945 }
+1
drivers/gpu/drm/i915/display/intel_modeset_setup.c
··· 26 26 #include "intel_fifo_underrun.h" 27 27 #include "intel_modeset_setup.h" 28 28 #include "intel_pch_display.h" 29 + #include "intel_vblank.h" 29 30 #include "intel_wm.h" 30 31 #include "skl_watermark.h" 31 32
+35 -11
drivers/gpu/drm/i915/display/intel_opregion.c
··· 1159 1159 intel_opregion_resume(i915); 1160 1160 } 1161 1161 1162 - void intel_opregion_resume(struct drm_i915_private *i915) 1162 + static void intel_opregion_resume_display(struct drm_i915_private *i915) 1163 1163 { 1164 1164 struct intel_opregion *opregion = &i915->display.opregion; 1165 - 1166 - if (!opregion->header) 1167 - return; 1168 1165 1169 1166 if (opregion->acpi) { 1170 1167 intel_didl_outputs(i915); ··· 1183 1186 1184 1187 /* Some platforms abuse the _DSM to enable MUX */ 1185 1188 intel_dsm_get_bios_data_funcs_supported(i915); 1189 + } 1190 + 1191 + void intel_opregion_resume(struct drm_i915_private *i915) 1192 + { 1193 + struct intel_opregion *opregion = &i915->display.opregion; 1194 + 1195 + if (!opregion->header) 1196 + return; 1197 + 1198 + if (HAS_DISPLAY(i915)) 1199 + intel_opregion_resume_display(i915); 1186 1200 1187 1201 intel_opregion_notify_adapter(i915, PCI_D0); 1202 + } 1203 + 1204 + static void intel_opregion_suspend_display(struct drm_i915_private *i915) 1205 + { 1206 + struct intel_opregion *opregion = &i915->display.opregion; 1207 + 1208 + if (opregion->asle) 1209 + opregion->asle->ardy = ASLE_ARDY_NOT_READY; 1210 + 1211 + cancel_work_sync(&i915->display.opregion.asle_work); 1212 + 1213 + if (opregion->acpi) 1214 + opregion->acpi->drdy = 0; 1188 1215 } 1189 1216 1190 1217 void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) ··· 1220 1199 1221 1200 intel_opregion_notify_adapter(i915, state); 1222 1201 1223 - if (opregion->asle) 1224 - opregion->asle->ardy = ASLE_ARDY_NOT_READY; 1225 - 1226 - cancel_work_sync(&i915->display.opregion.asle_work); 1227 - 1228 - if (opregion->acpi) 1229 - opregion->acpi->drdy = 0; 1202 + if (HAS_DISPLAY(i915)) 1203 + intel_opregion_suspend_display(i915); 1230 1204 } 1231 1205 1232 1206 void intel_opregion_unregister(struct drm_i915_private *i915) ··· 1237 1221 unregister_acpi_notifier(&opregion->acpi_notifier); 1238 1222 opregion->acpi_notifier.notifier_call = NULL; 1239 1223 } 
1224 + } 1225 + 1226 + void intel_opregion_cleanup(struct drm_i915_private *i915) 1227 + { 1228 + struct intel_opregion *opregion = &i915->display.opregion; 1229 + 1230 + if (!opregion->header) 1231 + return; 1240 1232 1241 1233 /* just clear all opregion memory pointers now */ 1242 1234 memunmap(opregion->header);
+5
drivers/gpu/drm/i915/display/intel_opregion.h
··· 60 60 #ifdef CONFIG_ACPI 61 61 62 62 int intel_opregion_setup(struct drm_i915_private *dev_priv); 63 + void intel_opregion_cleanup(struct drm_i915_private *i915); 63 64 64 65 void intel_opregion_register(struct drm_i915_private *dev_priv); 65 66 void intel_opregion_unregister(struct drm_i915_private *dev_priv); ··· 84 83 static inline int intel_opregion_setup(struct drm_i915_private *dev_priv) 85 84 { 86 85 return 0; 86 + } 87 + 88 + static inline void intel_opregion_cleanup(struct drm_i915_private *i915) 89 + { 87 90 } 88 91 89 92 static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
+299
drivers/gpu/drm/i915/display/intel_psr.c
··· 2644 2644 break; 2645 2645 } 2646 2646 } 2647 + 2648 + static void 2649 + psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) 2650 + { 2651 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2652 + const char *status = "unknown"; 2653 + u32 val, status_val; 2654 + 2655 + if (intel_dp->psr.psr2_enabled) { 2656 + static const char * const live_status[] = { 2657 + "IDLE", 2658 + "CAPTURE", 2659 + "CAPTURE_FS", 2660 + "SLEEP", 2661 + "BUFON_FW", 2662 + "ML_UP", 2663 + "SU_STANDBY", 2664 + "FAST_SLEEP", 2665 + "DEEP_SLEEP", 2666 + "BUF_ON", 2667 + "TG_ON" 2668 + }; 2669 + val = intel_de_read(dev_priv, 2670 + EDP_PSR2_STATUS(intel_dp->psr.transcoder)); 2671 + status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); 2672 + if (status_val < ARRAY_SIZE(live_status)) 2673 + status = live_status[status_val]; 2674 + } else { 2675 + static const char * const live_status[] = { 2676 + "IDLE", 2677 + "SRDONACK", 2678 + "SRDENT", 2679 + "BUFOFF", 2680 + "BUFON", 2681 + "AUXACK", 2682 + "SRDOFFACK", 2683 + "SRDENT_ON", 2684 + }; 2685 + val = intel_de_read(dev_priv, 2686 + EDP_PSR_STATUS(intel_dp->psr.transcoder)); 2687 + status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> 2688 + EDP_PSR_STATUS_STATE_SHIFT; 2689 + if (status_val < ARRAY_SIZE(live_status)) 2690 + status = live_status[status_val]; 2691 + } 2692 + 2693 + seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val); 2694 + } 2695 + 2696 + static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) 2697 + { 2698 + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2699 + struct intel_psr *psr = &intel_dp->psr; 2700 + intel_wakeref_t wakeref; 2701 + const char *status; 2702 + bool enabled; 2703 + u32 val; 2704 + 2705 + seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support)); 2706 + if (psr->sink_support) 2707 + seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]); 2708 + seq_puts(m, "\n"); 2709 + 2710 + if (!psr->sink_support) 2711 + return 0; 2712 + 2713 + wakeref 
= intel_runtime_pm_get(&dev_priv->runtime_pm); 2714 + mutex_lock(&psr->lock); 2715 + 2716 + if (psr->enabled) 2717 + status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled"; 2718 + else 2719 + status = "disabled"; 2720 + seq_printf(m, "PSR mode: %s\n", status); 2721 + 2722 + if (!psr->enabled) { 2723 + seq_printf(m, "PSR sink not reliable: %s\n", 2724 + str_yes_no(psr->sink_not_reliable)); 2725 + 2726 + goto unlock; 2727 + } 2728 + 2729 + if (psr->psr2_enabled) { 2730 + val = intel_de_read(dev_priv, 2731 + EDP_PSR2_CTL(intel_dp->psr.transcoder)); 2732 + enabled = val & EDP_PSR2_ENABLE; 2733 + } else { 2734 + val = intel_de_read(dev_priv, 2735 + EDP_PSR_CTL(intel_dp->psr.transcoder)); 2736 + enabled = val & EDP_PSR_ENABLE; 2737 + } 2738 + seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", 2739 + str_enabled_disabled(enabled), val); 2740 + psr_source_status(intel_dp, m); 2741 + seq_printf(m, "Busy frontbuffer bits: 0x%08x\n", 2742 + psr->busy_frontbuffer_bits); 2743 + 2744 + /* 2745 + * SKL+ Perf counter is reset to 0 everytime DC state is entered 2746 + */ 2747 + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2748 + val = intel_de_read(dev_priv, 2749 + EDP_PSR_PERF_CNT(intel_dp->psr.transcoder)); 2750 + val &= EDP_PSR_PERF_CNT_MASK; 2751 + seq_printf(m, "Performance counter: %u\n", val); 2752 + } 2753 + 2754 + if (psr->debug & I915_PSR_DEBUG_IRQ) { 2755 + seq_printf(m, "Last attempted entry at: %lld\n", 2756 + psr->last_entry_attempt); 2757 + seq_printf(m, "Last exit at: %lld\n", psr->last_exit); 2758 + } 2759 + 2760 + if (psr->psr2_enabled) { 2761 + u32 su_frames_val[3]; 2762 + int frame; 2763 + 2764 + /* 2765 + * Reading all 3 registers before hand to minimize crossing a 2766 + * frame boundary between register reads 2767 + */ 2768 + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { 2769 + val = intel_de_read(dev_priv, 2770 + PSR2_SU_STATUS(intel_dp->psr.transcoder, frame)); 2771 + su_frames_val[frame / 3] = val; 2772 + } 2773 + 2774 + 
seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); 2775 + 2776 + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) { 2777 + u32 su_blocks; 2778 + 2779 + su_blocks = su_frames_val[frame / 3] & 2780 + PSR2_SU_STATUS_MASK(frame); 2781 + su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame); 2782 + seq_printf(m, "%d\t%d\n", frame, su_blocks); 2783 + } 2784 + 2785 + seq_printf(m, "PSR2 selective fetch: %s\n", 2786 + str_enabled_disabled(psr->psr2_sel_fetch_enabled)); 2787 + } 2788 + 2789 + unlock: 2790 + mutex_unlock(&psr->lock); 2791 + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2792 + 2793 + return 0; 2794 + } 2795 + 2796 + static int i915_edp_psr_status_show(struct seq_file *m, void *data) 2797 + { 2798 + struct drm_i915_private *dev_priv = m->private; 2799 + struct intel_dp *intel_dp = NULL; 2800 + struct intel_encoder *encoder; 2801 + 2802 + if (!HAS_PSR(dev_priv)) 2803 + return -ENODEV; 2804 + 2805 + /* Find the first EDP which supports PSR */ 2806 + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 2807 + intel_dp = enc_to_intel_dp(encoder); 2808 + break; 2809 + } 2810 + 2811 + if (!intel_dp) 2812 + return -ENODEV; 2813 + 2814 + return intel_psr_status(m, intel_dp); 2815 + } 2816 + DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status); 2817 + 2818 + static int 2819 + i915_edp_psr_debug_set(void *data, u64 val) 2820 + { 2821 + struct drm_i915_private *dev_priv = data; 2822 + struct intel_encoder *encoder; 2823 + intel_wakeref_t wakeref; 2824 + int ret = -ENODEV; 2825 + 2826 + if (!HAS_PSR(dev_priv)) 2827 + return ret; 2828 + 2829 + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 2830 + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2831 + 2832 + drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val); 2833 + 2834 + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2835 + 2836 + // TODO: split to each transcoder's PSR debug state 2837 + ret = intel_psr_debug_set(intel_dp, val); 2838 + 2839 + 
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2840 + } 2841 + 2842 + return ret; 2843 + } 2844 + 2845 + static int 2846 + i915_edp_psr_debug_get(void *data, u64 *val) 2847 + { 2848 + struct drm_i915_private *dev_priv = data; 2849 + struct intel_encoder *encoder; 2850 + 2851 + if (!HAS_PSR(dev_priv)) 2852 + return -ENODEV; 2853 + 2854 + for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 2855 + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2856 + 2857 + // TODO: split to each transcoder's PSR debug state 2858 + *val = READ_ONCE(intel_dp->psr.debug); 2859 + return 0; 2860 + } 2861 + 2862 + return -ENODEV; 2863 + } 2864 + 2865 + DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, 2866 + i915_edp_psr_debug_get, i915_edp_psr_debug_set, 2867 + "%llu\n"); 2868 + 2869 + void intel_psr_debugfs_register(struct drm_i915_private *i915) 2870 + { 2871 + struct drm_minor *minor = i915->drm.primary; 2872 + 2873 + debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root, 2874 + i915, &i915_edp_psr_debug_fops); 2875 + 2876 + debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root, 2877 + i915, &i915_edp_psr_status_fops); 2878 + } 2879 + 2880 + static int i915_psr_sink_status_show(struct seq_file *m, void *data) 2881 + { 2882 + struct intel_connector *connector = m->private; 2883 + struct intel_dp *intel_dp = intel_attached_dp(connector); 2884 + static const char * const sink_status[] = { 2885 + "inactive", 2886 + "transition to active, capture and display", 2887 + "active, display from RFB", 2888 + "active, capture and display on sink device timings", 2889 + "transition to inactive, capture and display, timing re-sync", 2890 + "reserved", 2891 + "reserved", 2892 + "sink internal error", 2893 + }; 2894 + const char *str; 2895 + int ret; 2896 + u8 val; 2897 + 2898 + if (!CAN_PSR(intel_dp)) { 2899 + seq_puts(m, "PSR Unsupported\n"); 2900 + return -ENODEV; 2901 + } 2902 + 2903 + if (connector->base.status != connector_status_connected) 2904 + 
return -ENODEV; 2905 + 2906 + ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val); 2907 + if (ret != 1) 2908 + return ret < 0 ? ret : -EIO; 2909 + 2910 + val &= DP_PSR_SINK_STATE_MASK; 2911 + if (val < ARRAY_SIZE(sink_status)) 2912 + str = sink_status[val]; 2913 + else 2914 + str = "unknown"; 2915 + 2916 + seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str); 2917 + 2918 + return 0; 2919 + } 2920 + DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status); 2921 + 2922 + static int i915_psr_status_show(struct seq_file *m, void *data) 2923 + { 2924 + struct intel_connector *connector = m->private; 2925 + struct intel_dp *intel_dp = intel_attached_dp(connector); 2926 + 2927 + return intel_psr_status(m, intel_dp); 2928 + } 2929 + DEFINE_SHOW_ATTRIBUTE(i915_psr_status); 2930 + 2931 + void intel_psr_connector_debugfs_add(struct intel_connector *connector) 2932 + { 2933 + struct drm_i915_private *i915 = to_i915(connector->base.dev); 2934 + struct dentry *root = connector->base.debugfs_entry; 2935 + 2936 + if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 2937 + return; 2938 + 2939 + debugfs_create_file("i915_psr_sink_status", 0444, root, 2940 + connector, &i915_psr_sink_status_fops); 2941 + 2942 + if (HAS_PSR(i915)) 2943 + debugfs_create_file("i915_psr_status", 0444, root, 2944 + connector, &i915_psr_status_fops); 2945 + }
+3
drivers/gpu/drm/i915/display/intel_psr.h
··· 13 13 struct drm_connector_state; 14 14 struct drm_i915_private; 15 15 struct intel_atomic_state; 16 + struct intel_connector; 16 17 struct intel_crtc; 17 18 struct intel_crtc_state; 18 19 struct intel_dp; ··· 62 61 63 62 void intel_psr_lock(const struct intel_crtc_state *crtc_state); 64 63 void intel_psr_unlock(const struct intel_crtc_state *crtc_state); 64 + void intel_psr_connector_debugfs_add(struct intel_connector *connector); 65 + void intel_psr_debugfs_register(struct drm_i915_private *i915); 65 66 66 67 #endif /* __INTEL_PSR_H__ */
-183
drivers/gpu/drm/i915/display/intel_sprite.c
··· 32 32 33 33 #include <linux/string_helpers.h> 34 34 35 - #include <drm/drm_atomic.h> 36 35 #include <drm/drm_atomic_helper.h> 37 36 #include <drm/drm_blend.h> 38 37 #include <drm/drm_color_mgmt.h> 39 - #include <drm/drm_crtc.h> 40 - #include <drm/drm_damage_helper.h> 41 38 #include <drm/drm_fourcc.h> 42 39 #include <drm/drm_rect.h> 43 40 44 41 #include "i915_drv.h" 45 42 #include "i915_reg.h" 46 - #include "i915_vgpu.h" 47 43 #include "i9xx_plane.h" 48 44 #include "intel_atomic_plane.h" 49 - #include "intel_crtc.h" 50 45 #include "intel_de.h" 51 46 #include "intel_display_types.h" 52 47 #include "intel_fb.h" 53 - #include "intel_frontbuffer.h" 54 48 #include "intel_sprite.h" 55 - #include "intel_vrr.h" 56 - 57 - int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) 58 - { 59 - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); 60 - const struct drm_framebuffer *fb = plane_state->hw.fb; 61 - struct drm_rect *src = &plane_state->uapi.src; 62 - u32 src_x, src_y, src_w, src_h, hsub, vsub; 63 - bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation); 64 - 65 - /* 66 - * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS 67 - * abuses hsub/vsub so we can't use them here. But as they 68 - * are limited to 32bpp RGB formats we don't actually need 69 - * to check anything. 70 - */ 71 - if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || 72 - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) 73 - return 0; 74 - 75 - /* 76 - * Hardware doesn't handle subpixel coordinates. 77 - * Adjust to (macro)pixel boundary, but be careful not to 78 - * increase the source viewport size, because that could 79 - * push the downscaling factor out of bounds. 
80 - */ 81 - src_x = src->x1 >> 16; 82 - src_w = drm_rect_width(src) >> 16; 83 - src_y = src->y1 >> 16; 84 - src_h = drm_rect_height(src) >> 16; 85 - 86 - drm_rect_init(src, src_x << 16, src_y << 16, 87 - src_w << 16, src_h << 16); 88 - 89 - if (fb->format->format == DRM_FORMAT_RGB565 && rotated) { 90 - hsub = 2; 91 - vsub = 2; 92 - } else { 93 - hsub = fb->format->hsub; 94 - vsub = fb->format->vsub; 95 - } 96 - 97 - if (rotated) 98 - hsub = vsub = max(hsub, vsub); 99 - 100 - if (src_x % hsub || src_w % hsub) { 101 - drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n", 102 - src_x, src_w, hsub, str_yes_no(rotated)); 103 - return -EINVAL; 104 - } 105 - 106 - if (src_y % vsub || src_h % vsub) { 107 - drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n", 108 - src_y, src_h, vsub, str_yes_no(rotated)); 109 - return -EINVAL; 110 - } 111 - 112 - return 0; 113 - } 114 49 115 50 static void i9xx_plane_linear_gamma(u16 gamma[8]) 116 51 { ··· 1382 1447 plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state); 1383 1448 1384 1449 return 0; 1385 - } 1386 - 1387 - static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv) 1388 - { 1389 - return DISPLAY_VER(dev_priv) >= 9; 1390 - } 1391 - 1392 - static void intel_plane_set_ckey(struct intel_plane_state *plane_state, 1393 - const struct drm_intel_sprite_colorkey *set) 1394 - { 1395 - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 1396 - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 1397 - struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 1398 - 1399 - *key = *set; 1400 - 1401 - /* 1402 - * We want src key enabled on the 1403 - * sprite and not on the primary. 1404 - */ 1405 - if (plane->id == PLANE_PRIMARY && 1406 - set->flags & I915_SET_COLORKEY_SOURCE) 1407 - key->flags = 0; 1408 - 1409 - /* 1410 - * On SKL+ we want dst key enabled on 1411 - * the primary and not on the sprite. 
1412 - */ 1413 - if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY && 1414 - set->flags & I915_SET_COLORKEY_DESTINATION) 1415 - key->flags = 0; 1416 - } 1417 - 1418 - int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, 1419 - struct drm_file *file_priv) 1420 - { 1421 - struct drm_i915_private *dev_priv = to_i915(dev); 1422 - struct drm_intel_sprite_colorkey *set = data; 1423 - struct drm_plane *plane; 1424 - struct drm_plane_state *plane_state; 1425 - struct drm_atomic_state *state; 1426 - struct drm_modeset_acquire_ctx ctx; 1427 - int ret = 0; 1428 - 1429 - /* ignore the pointless "none" flag */ 1430 - set->flags &= ~I915_SET_COLORKEY_NONE; 1431 - 1432 - if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) 1433 - return -EINVAL; 1434 - 1435 - /* Make sure we don't try to enable both src & dest simultaneously */ 1436 - if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) 1437 - return -EINVAL; 1438 - 1439 - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 1440 - set->flags & I915_SET_COLORKEY_DESTINATION) 1441 - return -EINVAL; 1442 - 1443 - plane = drm_plane_find(dev, file_priv, set->plane_id); 1444 - if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) 1445 - return -ENOENT; 1446 - 1447 - /* 1448 - * SKL+ only plane 2 can do destination keying against plane 1. 1449 - * Also multiple planes can't do destination keying on the same 1450 - * pipe simultaneously. 
1451 - */ 1452 - if (DISPLAY_VER(dev_priv) >= 9 && 1453 - to_intel_plane(plane)->id >= PLANE_SPRITE1 && 1454 - set->flags & I915_SET_COLORKEY_DESTINATION) 1455 - return -EINVAL; 1456 - 1457 - drm_modeset_acquire_init(&ctx, 0); 1458 - 1459 - state = drm_atomic_state_alloc(plane->dev); 1460 - if (!state) { 1461 - ret = -ENOMEM; 1462 - goto out; 1463 - } 1464 - state->acquire_ctx = &ctx; 1465 - 1466 - while (1) { 1467 - plane_state = drm_atomic_get_plane_state(state, plane); 1468 - ret = PTR_ERR_OR_ZERO(plane_state); 1469 - if (!ret) 1470 - intel_plane_set_ckey(to_intel_plane_state(plane_state), set); 1471 - 1472 - /* 1473 - * On some platforms we have to configure 1474 - * the dst colorkey on the primary plane. 1475 - */ 1476 - if (!ret && has_dst_key_in_primary_plane(dev_priv)) { 1477 - struct intel_crtc *crtc = 1478 - intel_crtc_for_pipe(dev_priv, 1479 - to_intel_plane(plane)->pipe); 1480 - 1481 - plane_state = drm_atomic_get_plane_state(state, 1482 - crtc->base.primary); 1483 - ret = PTR_ERR_OR_ZERO(plane_state); 1484 - if (!ret) 1485 - intel_plane_set_ckey(to_intel_plane_state(plane_state), set); 1486 - } 1487 - 1488 - if (!ret) 1489 - ret = drm_atomic_commit(state); 1490 - 1491 - if (ret != -EDEADLK) 1492 - break; 1493 - 1494 - drm_atomic_state_clear(state); 1495 - drm_modeset_backoff(&ctx); 1496 - } 1497 - 1498 - drm_atomic_state_put(state); 1499 - out: 1500 - drm_modeset_drop_locks(&ctx); 1501 - drm_modeset_acquire_fini(&ctx); 1502 - return ret; 1503 1450 } 1504 1451 1505 1452 static const u32 g4x_sprite_formats[] = {
+127
drivers/gpu/drm/i915/display/intel_sprite_uapi.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "i915_drv.h" 7 + #include "intel_crtc.h" 8 + #include "intel_display_types.h" 9 + #include "intel_sprite_uapi.h" 10 + 11 + static bool has_dst_key_in_primary_plane(struct drm_i915_private *dev_priv) 12 + { 13 + return DISPLAY_VER(dev_priv) >= 9; 14 + } 15 + 16 + static void intel_plane_set_ckey(struct intel_plane_state *plane_state, 17 + const struct drm_intel_sprite_colorkey *set) 18 + { 19 + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); 20 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 21 + struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 22 + 23 + *key = *set; 24 + 25 + /* 26 + * We want src key enabled on the 27 + * sprite and not on the primary. 28 + */ 29 + if (plane->id == PLANE_PRIMARY && 30 + set->flags & I915_SET_COLORKEY_SOURCE) 31 + key->flags = 0; 32 + 33 + /* 34 + * On SKL+ we want dst key enabled on 35 + * the primary and not on the sprite. 
36 + */ 37 + if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_PRIMARY && 38 + set->flags & I915_SET_COLORKEY_DESTINATION) 39 + key->flags = 0; 40 + } 41 + 42 + int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, 43 + struct drm_file *file_priv) 44 + { 45 + struct drm_i915_private *dev_priv = to_i915(dev); 46 + struct drm_intel_sprite_colorkey *set = data; 47 + struct drm_plane *plane; 48 + struct drm_plane_state *plane_state; 49 + struct drm_atomic_state *state; 50 + struct drm_modeset_acquire_ctx ctx; 51 + int ret = 0; 52 + 53 + /* ignore the pointless "none" flag */ 54 + set->flags &= ~I915_SET_COLORKEY_NONE; 55 + 56 + if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) 57 + return -EINVAL; 58 + 59 + /* Make sure we don't try to enable both src & dest simultaneously */ 60 + if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) 61 + return -EINVAL; 62 + 63 + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 64 + set->flags & I915_SET_COLORKEY_DESTINATION) 65 + return -EINVAL; 66 + 67 + plane = drm_plane_find(dev, file_priv, set->plane_id); 68 + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) 69 + return -ENOENT; 70 + 71 + /* 72 + * SKL+ only plane 2 can do destination keying against plane 1. 73 + * Also multiple planes can't do destination keying on the same 74 + * pipe simultaneously. 
75 + */ 76 + if (DISPLAY_VER(dev_priv) >= 9 && 77 + to_intel_plane(plane)->id >= PLANE_SPRITE1 && 78 + set->flags & I915_SET_COLORKEY_DESTINATION) 79 + return -EINVAL; 80 + 81 + drm_modeset_acquire_init(&ctx, 0); 82 + 83 + state = drm_atomic_state_alloc(plane->dev); 84 + if (!state) { 85 + ret = -ENOMEM; 86 + goto out; 87 + } 88 + state->acquire_ctx = &ctx; 89 + 90 + while (1) { 91 + plane_state = drm_atomic_get_plane_state(state, plane); 92 + ret = PTR_ERR_OR_ZERO(plane_state); 93 + if (!ret) 94 + intel_plane_set_ckey(to_intel_plane_state(plane_state), set); 95 + 96 + /* 97 + * On some platforms we have to configure 98 + * the dst colorkey on the primary plane. 99 + */ 100 + if (!ret && has_dst_key_in_primary_plane(dev_priv)) { 101 + struct intel_crtc *crtc = 102 + intel_crtc_for_pipe(dev_priv, 103 + to_intel_plane(plane)->pipe); 104 + 105 + plane_state = drm_atomic_get_plane_state(state, 106 + crtc->base.primary); 107 + ret = PTR_ERR_OR_ZERO(plane_state); 108 + if (!ret) 109 + intel_plane_set_ckey(to_intel_plane_state(plane_state), set); 110 + } 111 + 112 + if (!ret) 113 + ret = drm_atomic_commit(state); 114 + 115 + if (ret != -EDEADLK) 116 + break; 117 + 118 + drm_atomic_state_clear(state); 119 + drm_modeset_backoff(&ctx); 120 + } 121 + 122 + drm_atomic_state_put(state); 123 + out: 124 + drm_modeset_drop_locks(&ctx); 125 + drm_modeset_acquire_fini(&ctx); 126 + return ret; 127 + }
+15
drivers/gpu/drm/i915/display/intel_sprite_uapi.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_SPRITE_UAPI_H__ 7 + #define __INTEL_SPRITE_UAPI_H__ 8 + 9 + struct drm_device; 10 + struct drm_file; 11 + 12 + int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, 13 + struct drm_file *file_priv); 14 + 15 + #endif /* __INTEL_SPRITE_UAPI_H__ */
+259 -67
drivers/gpu/drm/i915/display/intel_tc.c
··· 5 5 6 6 #include "i915_drv.h" 7 7 #include "i915_reg.h" 8 + #include "intel_ddi.h" 8 9 #include "intel_de.h" 9 10 #include "intel_display.h" 10 11 #include "intel_display_power_map.h" ··· 117 116 tc_cold_get_power_domain(dig_port, 118 117 dig_port->tc_mode)); 119 118 drm_WARN_ON(&i915->drm, !enabled); 119 + } 120 + 121 + static enum intel_display_power_domain 122 + tc_port_power_domain(struct intel_digital_port *dig_port) 123 + { 124 + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 125 + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); 126 + 127 + return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1; 128 + } 129 + 130 + static void 131 + assert_tc_port_power_enabled(struct intel_digital_port *dig_port) 132 + { 133 + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 134 + 135 + drm_WARN_ON(&i915->drm, 136 + !intel_display_power_is_enabled(i915, tc_port_power_domain(dig_port))); 120 137 } 121 138 122 139 u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) ··· 437 418 val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); 438 419 if (val == 0xffffffff) { 439 420 drm_dbg_kms(&i915->drm, 440 - "Port %s: PHY in TCCOLD, assume safe mode\n", 421 + "Port %s: PHY in TCCOLD, assume not owned\n", 441 422 dig_port->tc_port_name); 442 - return true; 423 + return false; 443 424 } 444 425 445 426 return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); ··· 483 464 u32 live_status_mask; 484 465 int max_lanes; 485 466 486 - if (!tc_phy_status_complete(dig_port)) { 467 + if (!tc_phy_status_complete(dig_port) && 468 + !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port)) { 487 469 drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n", 488 470 dig_port->tc_port_name); 489 471 goto out_set_tbt_alt_mode; ··· 559 539 } 560 540 } 561 541 562 - static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port) 542 + static bool tc_phy_is_ready_and_owned(struct intel_digital_port 
*dig_port, 543 + bool phy_is_ready, bool phy_is_owned) 563 544 { 564 545 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 565 546 566 - if (!tc_phy_status_complete(dig_port)) { 567 - drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n", 568 - dig_port->tc_port_name); 569 - return dig_port->tc_mode == TC_PORT_TBT_ALT; 547 + drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready); 548 + 549 + return phy_is_ready && phy_is_owned; 550 + } 551 + 552 + static bool tc_phy_is_connected(struct intel_digital_port *dig_port, 553 + enum icl_port_dpll_id port_pll_type) 554 + { 555 + struct intel_encoder *encoder = &dig_port->base; 556 + struct drm_i915_private *i915 = to_i915(encoder->base.dev); 557 + bool phy_is_ready = tc_phy_status_complete(dig_port); 558 + bool phy_is_owned = tc_phy_is_owned(dig_port); 559 + bool is_connected; 560 + 561 + if (tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned)) 562 + is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY; 563 + else 564 + is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT; 565 + 566 + drm_dbg_kms(&i915->drm, 567 + "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n", 568 + dig_port->tc_port_name, 569 + str_yes_no(is_connected), 570 + str_yes_no(phy_is_ready), 571 + str_yes_no(phy_is_owned), 572 + port_pll_type == ICL_PORT_DPLL_DEFAULT ? 
"tbt" : "non-tbt"); 573 + 574 + return is_connected; 575 + } 576 + 577 + static void tc_phy_wait_for_ready(struct intel_digital_port *dig_port) 578 + { 579 + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 580 + 581 + if (wait_for(tc_phy_status_complete(dig_port), 100)) 582 + drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n", 583 + dig_port->tc_port_name); 584 + } 585 + 586 + static enum tc_port_mode 587 + hpd_mask_to_tc_mode(u32 live_status_mask) 588 + { 589 + if (live_status_mask) 590 + return fls(live_status_mask) - 1; 591 + 592 + return TC_PORT_DISCONNECTED; 593 + } 594 + 595 + static enum tc_port_mode 596 + tc_phy_hpd_live_mode(struct intel_digital_port *dig_port) 597 + { 598 + u32 live_status_mask = tc_port_live_status_mask(dig_port); 599 + 600 + return hpd_mask_to_tc_mode(live_status_mask); 601 + } 602 + 603 + static enum tc_port_mode 604 + get_tc_mode_in_phy_owned_state(struct intel_digital_port *dig_port, 605 + enum tc_port_mode live_mode) 606 + { 607 + switch (live_mode) { 608 + case TC_PORT_LEGACY: 609 + case TC_PORT_DP_ALT: 610 + return live_mode; 611 + default: 612 + MISSING_CASE(live_mode); 613 + fallthrough; 614 + case TC_PORT_TBT_ALT: 615 + case TC_PORT_DISCONNECTED: 616 + if (dig_port->tc_legacy_port) 617 + return TC_PORT_LEGACY; 618 + else 619 + return TC_PORT_DP_ALT; 570 620 } 621 + } 571 622 572 - /* On ADL-P the PHY complete flag is set in TBT mode as well. 
*/ 573 - if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT) 574 - return true; 575 - 576 - if (!tc_phy_is_owned(dig_port)) { 577 - drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n", 578 - dig_port->tc_port_name); 579 - 580 - return false; 623 + static enum tc_port_mode 624 + get_tc_mode_in_phy_not_owned_state(struct intel_digital_port *dig_port, 625 + enum tc_port_mode live_mode) 626 + { 627 + switch (live_mode) { 628 + case TC_PORT_LEGACY: 629 + return TC_PORT_DISCONNECTED; 630 + case TC_PORT_DP_ALT: 631 + case TC_PORT_TBT_ALT: 632 + return TC_PORT_TBT_ALT; 633 + default: 634 + MISSING_CASE(live_mode); 635 + fallthrough; 636 + case TC_PORT_DISCONNECTED: 637 + if (dig_port->tc_legacy_port) 638 + return TC_PORT_DISCONNECTED; 639 + else 640 + return TC_PORT_TBT_ALT; 581 641 } 582 - 583 - return dig_port->tc_mode == TC_PORT_DP_ALT || 584 - dig_port->tc_mode == TC_PORT_LEGACY; 585 642 } 586 643 587 644 static enum tc_port_mode 588 645 intel_tc_port_get_current_mode(struct intel_digital_port *dig_port) 589 646 { 590 647 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 591 - u32 live_status_mask = tc_port_live_status_mask(dig_port); 648 + enum tc_port_mode live_mode = tc_phy_hpd_live_mode(dig_port); 649 + bool phy_is_ready; 650 + bool phy_is_owned; 592 651 enum tc_port_mode mode; 593 652 594 - if (!tc_phy_is_owned(dig_port) || 595 - drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port))) 596 - return TC_PORT_TBT_ALT; 653 + /* 654 + * For legacy ports the IOM firmware initializes the PHY during boot-up 655 + * and system resume whether or not a sink is connected. Wait here for 656 + * the initialization to get ready. 657 + */ 658 + if (dig_port->tc_legacy_port) 659 + tc_phy_wait_for_ready(dig_port); 597 660 598 - mode = dig_port->tc_legacy_port ? 
TC_PORT_LEGACY : TC_PORT_DP_ALT; 599 - if (live_status_mask) { 600 - enum tc_port_mode live_mode = fls(live_status_mask) - 1; 661 + phy_is_ready = tc_phy_status_complete(dig_port); 662 + phy_is_owned = tc_phy_is_owned(dig_port); 601 663 602 - if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT)) 603 - mode = live_mode; 664 + if (!tc_phy_is_ready_and_owned(dig_port, phy_is_ready, phy_is_owned)) { 665 + mode = get_tc_mode_in_phy_not_owned_state(dig_port, live_mode); 666 + } else { 667 + drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT); 668 + mode = get_tc_mode_in_phy_owned_state(dig_port, live_mode); 604 669 } 605 670 671 + drm_dbg_kms(&i915->drm, 672 + "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n", 673 + dig_port->tc_port_name, 674 + tc_port_mode_name(mode), 675 + str_yes_no(phy_is_ready), 676 + str_yes_no(phy_is_owned), 677 + tc_port_mode_name(live_mode)); 678 + 606 679 return mode; 680 + } 681 + 682 + static enum tc_port_mode default_tc_mode(struct intel_digital_port *dig_port) 683 + { 684 + if (dig_port->tc_legacy_port) 685 + return TC_PORT_LEGACY; 686 + 687 + return TC_PORT_TBT_ALT; 688 + } 689 + 690 + static enum tc_port_mode 691 + hpd_mask_to_target_mode(struct intel_digital_port *dig_port, u32 live_status_mask) 692 + { 693 + enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask); 694 + 695 + if (mode != TC_PORT_DISCONNECTED) 696 + return mode; 697 + 698 + return default_tc_mode(dig_port); 607 699 } 608 700 609 701 static enum tc_port_mode ··· 723 591 { 724 592 u32 live_status_mask = tc_port_live_status_mask(dig_port); 725 593 726 - if (live_status_mask) 727 - return fls(live_status_mask) - 1; 728 - 729 - return TC_PORT_TBT_ALT; 594 + return hpd_mask_to_target_mode(dig_port, live_status_mask); 730 595 } 731 596 732 597 static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, ··· 789 660 tc_cold_unblock(dig_port, domain, wref); 790 661 } 791 662 792 - static void 793 - intel_tc_port_link_init_refcount(struct 
intel_digital_port *dig_port, 794 - int refcount) 663 + static void __intel_tc_port_get_link(struct intel_digital_port *dig_port) 795 664 { 796 - dig_port->tc_link_refcount = refcount; 665 + dig_port->tc_link_refcount++; 666 + } 667 + 668 + static void __intel_tc_port_put_link(struct intel_digital_port *dig_port) 669 + { 670 + dig_port->tc_link_refcount--; 671 + } 672 + 673 + static bool tc_port_is_enabled(struct intel_digital_port *dig_port) 674 + { 675 + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 676 + 677 + assert_tc_port_power_enabled(dig_port); 678 + 679 + return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) & 680 + DDI_BUF_CTL_ENABLE; 797 681 } 798 682 799 683 /** ··· 821 679 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 822 680 intel_wakeref_t tc_cold_wref; 823 681 enum intel_display_power_domain domain; 682 + bool update_mode = false; 824 683 825 684 mutex_lock(&dig_port->tc_lock); 826 685 ··· 832 689 tc_cold_wref = tc_cold_block(dig_port, &domain); 833 690 834 691 dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); 692 + /* 693 + * Save the initial mode for the state check in 694 + * intel_tc_port_sanitize_mode(). 695 + */ 696 + dig_port->tc_init_mode = dig_port->tc_mode; 697 + if (dig_port->tc_mode != TC_PORT_DISCONNECTED) 698 + dig_port->tc_lock_wakeref = 699 + tc_cold_block(dig_port, &dig_port->tc_lock_power_domain); 700 + 701 + /* 702 + * The PHY needs to be connected for AUX to work during HW readout and 703 + * MST topology resume, but the PHY mode can only be changed if the 704 + * port is disabled. 705 + * 706 + * An exception is the case where BIOS leaves the PHY incorrectly 707 + * disconnected on an enabled legacy port. Work around that by 708 + * connecting the PHY even though the port is enabled. This doesn't 709 + * cause a problem as the PHY ownership state is ignored by the 710 + * IOM/TCSS firmware (only display can own the PHY in that case). 
711 + */ 712 + if (!tc_port_is_enabled(dig_port)) { 713 + update_mode = true; 714 + } else if (dig_port->tc_mode == TC_PORT_DISCONNECTED) { 715 + drm_WARN_ON(&i915->drm, !dig_port->tc_legacy_port); 716 + drm_err(&i915->drm, 717 + "Port %s: PHY disconnected on enabled port, connecting it\n", 718 + dig_port->tc_port_name); 719 + update_mode = true; 720 + } 721 + 722 + if (update_mode) 723 + intel_tc_port_update_mode(dig_port, 1, false); 724 + 835 725 /* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */ 836 - intel_tc_port_link_init_refcount(dig_port, 1); 837 - dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain); 726 + __intel_tc_port_get_link(dig_port); 838 727 839 728 tc_cold_unblock(dig_port, domain, tc_cold_wref); 840 729 841 - drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n", 842 - dig_port->tc_port_name, 843 - tc_port_mode_name(dig_port->tc_mode)); 844 - 845 730 mutex_unlock(&dig_port->tc_lock); 731 + } 732 + 733 + static bool tc_port_has_active_links(struct intel_digital_port *dig_port, 734 + const struct intel_crtc_state *crtc_state) 735 + { 736 + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 737 + enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT; 738 + int active_links = 0; 739 + 740 + if (dig_port->dp.is_mst) { 741 + /* TODO: get the PLL type for MST, once HW readout is done for it. 
*/ 742 + active_links = intel_dp_mst_encoder_active_links(dig_port); 743 + } else if (crtc_state && crtc_state->hw.active) { 744 + pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state); 745 + active_links = 1; 746 + } 747 + 748 + if (active_links && !tc_phy_is_connected(dig_port, pll_type)) 749 + drm_err(&i915->drm, 750 + "Port %s: PHY disconnected with %d active link(s)\n", 751 + dig_port->tc_port_name, active_links); 752 + 753 + return active_links; 846 754 } 847 755 848 756 /** 849 757 * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode 850 758 * @dig_port: digital port 759 + * @crtc_state: atomic state of CRTC connected to @dig_port 851 760 * 852 761 * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver 853 762 * loading and system resume: 854 763 * If the encoder is enabled keep the TypeC mode/PHY connected state locked until 855 764 * the encoder is disabled. 856 765 * If the encoder is disabled make sure the PHY is disconnected. 766 + * @crtc_state is valid if @dig_port is enabled, NULL otherwise. 
857 767 */ 858 - void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port) 768 + void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, 769 + const struct intel_crtc_state *crtc_state) 859 770 { 860 771 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 861 - struct intel_encoder *encoder = &dig_port->base; 862 - int active_links = 0; 863 772 864 773 mutex_lock(&dig_port->tc_lock); 865 774 866 - if (dig_port->dp.is_mst) 867 - active_links = intel_dp_mst_encoder_active_links(dig_port); 868 - else if (encoder->base.crtc) 869 - active_links = to_intel_crtc(encoder->base.crtc)->active; 870 - 871 775 drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1); 872 - intel_tc_port_link_init_refcount(dig_port, active_links); 873 - 874 - if (active_links) { 875 - if (!icl_tc_phy_is_connected(dig_port)) 876 - drm_dbg_kms(&i915->drm, 877 - "Port %s: PHY disconnected with %d active link(s)\n", 878 - dig_port->tc_port_name, active_links); 879 - } else { 776 + if (!tc_port_has_active_links(dig_port, crtc_state)) { 880 777 /* 881 778 * TBT-alt is the default mode in any case the PHY ownership is not 882 779 * held (regardless of the sink's connected live state), so 883 780 * we'll just switch to disconnected mode from it here without 884 781 * a note. 
885 782 */ 886 - if (dig_port->tc_mode != TC_PORT_TBT_ALT) 783 + if (dig_port->tc_init_mode != TC_PORT_TBT_ALT && 784 + dig_port->tc_init_mode != TC_PORT_DISCONNECTED) 887 785 drm_dbg_kms(&i915->drm, 888 786 "Port %s: PHY left in %s mode on disabled port, disconnecting it\n", 889 787 dig_port->tc_port_name, 890 - tc_port_mode_name(dig_port->tc_mode)); 788 + tc_port_mode_name(dig_port->tc_init_mode)); 891 789 icl_tc_phy_disconnect(dig_port); 790 + __intel_tc_port_put_link(dig_port); 892 791 893 792 tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain, 894 793 fetch_and_zero(&dig_port->tc_lock_wakeref)); ··· 953 768 * connected ports are usable, and avoids exposing to the users objects they 954 769 * can't really use. 955 770 */ 771 + bool intel_tc_port_connected_locked(struct intel_encoder *encoder) 772 + { 773 + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 774 + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 775 + 776 + drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port)); 777 + 778 + return tc_port_live_status_mask(dig_port) & BIT(dig_port->tc_mode); 779 + } 780 + 956 781 bool intel_tc_port_connected(struct intel_encoder *encoder) 957 782 { 958 783 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 959 784 bool is_connected; 960 785 961 786 intel_tc_port_lock(dig_port); 962 - 963 - is_connected = tc_port_live_status_mask(dig_port) & 964 - BIT(dig_port->tc_mode); 965 - 787 + is_connected = intel_tc_port_connected_locked(encoder); 966 788 intel_tc_port_unlock(dig_port); 967 789 968 790 return is_connected; ··· 1049 857 int required_lanes) 1050 858 { 1051 859 __intel_tc_port_lock(dig_port, required_lanes); 1052 - dig_port->tc_link_refcount++; 860 + __intel_tc_port_get_link(dig_port); 1053 861 intel_tc_port_unlock(dig_port); 1054 862 } 1055 863 1056 864 void intel_tc_port_put_link(struct intel_digital_port *dig_port) 1057 865 { 1058 866 intel_tc_port_lock(dig_port); 1059 - --dig_port->tc_link_refcount; 867 + 
__intel_tc_port_put_link(dig_port); 1060 868 intel_tc_port_unlock(dig_port); 1061 869 1062 870 /*
+4 -1
drivers/gpu/drm/i915/display/intel_tc.h
··· 9 9 #include <linux/mutex.h> 10 10 #include <linux/types.h> 11 11 12 + struct intel_crtc_state; 12 13 struct intel_digital_port; 13 14 struct intel_encoder; 14 15 ··· 18 17 bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port); 19 18 20 19 bool intel_tc_port_connected(struct intel_encoder *encoder); 20 + bool intel_tc_port_connected_locked(struct intel_encoder *encoder); 21 21 22 22 u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); 23 23 u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port); ··· 27 25 int required_lanes); 28 26 29 27 void intel_tc_port_init_mode(struct intel_digital_port *dig_port); 30 - void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port); 28 + void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, 29 + const struct intel_crtc_state *crtc_state); 31 30 void intel_tc_port_lock(struct intel_digital_port *dig_port); 32 31 void intel_tc_port_unlock(struct intel_digital_port *dig_port); 33 32 void intel_tc_port_flush_work(struct intel_digital_port *dig_port);
+92
drivers/gpu/drm/i915/display/intel_vblank.c
··· 8 8 #include "intel_de.h" 9 9 #include "intel_display_types.h" 10 10 #include "intel_vblank.h" 11 + #include "intel_vrr.h" 11 12 12 13 /* 13 14 * This timing diagram depicts the video signal in and ··· 439 438 void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) 440 439 { 441 440 wait_for_pipe_scanline_moving(crtc, true); 441 + } 442 + 443 + static int intel_crtc_scanline_offset(const struct intel_crtc_state *crtc_state) 444 + { 445 + struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); 446 + const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; 447 + 448 + /* 449 + * The scanline counter increments at the leading edge of hsync. 450 + * 451 + * On most platforms it starts counting from vtotal-1 on the 452 + * first active line. That means the scanline counter value is 453 + * always one less than what we would expect. Ie. just after 454 + * start of vblank, which also occurs at start of hsync (on the 455 + * last active line), the scanline counter will read vblank_start-1. 456 + * 457 + * On gen2 the scanline counter starts counting from 1 instead 458 + * of vtotal-1, so we have to subtract one (or rather add vtotal-1 459 + * to keep the value positive), instead of adding one. 460 + * 461 + * On HSW+ the behaviour of the scanline counter depends on the output 462 + * type. For DP ports it behaves like most other platforms, but on HDMI 463 + * there's an extra 1 line difference. So we need to add two instead of 464 + * one to the value. 465 + * 466 + * On VLV/CHV DSI the scanline counter would appear to increment 467 + * approx. 1/3 of a scanline before start of vblank. Unfortunately 468 + * that means we can't tell whether we're in vblank or not while 469 + * we're on that particular line. We must still set scanline_offset 470 + * to 1 so that the vblank timestamps come out correct when we query 471 + * the scanline counter from within the vblank interrupt handler. 
472 + * However if queried just before the start of vblank we'll get an 473 + * answer that's slightly in the future. 474 + */ 475 + if (DISPLAY_VER(i915) == 2) { 476 + int vtotal; 477 + 478 + vtotal = adjusted_mode->crtc_vtotal; 479 + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 480 + vtotal /= 2; 481 + 482 + return vtotal - 1; 483 + } else if (HAS_DDI(i915) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { 484 + return 2; 485 + } else { 486 + return 1; 487 + } 488 + } 489 + 490 + void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) 491 + { 492 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 493 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 494 + struct drm_display_mode adjusted_mode; 495 + int vmax_vblank_start = 0; 496 + unsigned long irqflags; 497 + 498 + drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode); 499 + 500 + if (crtc_state->vrr.enable) { 501 + adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax; 502 + adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax; 503 + adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state); 504 + vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state); 505 + } 506 + 507 + /* 508 + * Belts and suspenders locking to guarantee everyone sees 100% 509 + * consistent state during fastset seamless refresh rate changes. 510 + * 511 + * vblank_time_lock takes care of all drm_vblank.c stuff, and 512 + * uncore.lock takes care of __intel_get_crtc_scanline() which 513 + * may get called elsewhere as well. 514 + * 515 + * TODO maybe just protect everything (including 516 + * __intel_get_crtc_scanline()) with vblank_time_lock? 517 + * Need to audit everything to make sure it's safe. 
518 + */ 519 + spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags); 520 + spin_lock(&i915->uncore.lock); 521 + 522 + drm_calc_timestamping_constants(&crtc->base, &adjusted_mode); 523 + 524 + crtc->vmax_vblank_start = vmax_vblank_start; 525 + 526 + crtc->mode_flags = crtc_state->mode_flags; 527 + 528 + crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state); 529 + 530 + spin_unlock(&i915->uncore.lock); 531 + spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags); 442 532 }
+2
drivers/gpu/drm/i915/display/intel_vblank.h
··· 11 11 12 12 struct drm_crtc; 13 13 struct intel_crtc; 14 + struct intel_crtc_state; 14 15 15 16 u32 i915_get_vblank_counter(struct drm_crtc *crtc); 16 17 u32 g4x_get_vblank_counter(struct drm_crtc *crtc); ··· 20 19 int intel_get_crtc_scanline(struct intel_crtc *crtc); 21 20 void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc); 22 21 void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc); 22 + void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state); 23 23 24 24 #endif /* __INTEL_VBLANK_H__ */
-1
drivers/gpu/drm/i915/display/skl_universal_plane.c
··· 17 17 #include "intel_fb.h" 18 18 #include "intel_fbc.h" 19 19 #include "intel_psr.h" 20 - #include "intel_sprite.h" 21 20 #include "skl_scaler.h" 22 21 #include "skl_universal_plane.h" 23 22 #include "skl_watermark.h"
+141 -15
drivers/gpu/drm/i915/display/skl_watermark.c
··· 12 12 #include "intel_atomic.h" 13 13 #include "intel_atomic_plane.h" 14 14 #include "intel_bw.h" 15 + #include "intel_crtc.h" 15 16 #include "intel_de.h" 16 17 #include "intel_display.h" 17 18 #include "intel_display_power.h" ··· 705 704 const struct skl_wm_level *result_prev, 706 705 struct skl_wm_level *result /* out */); 707 706 707 + static unsigned int skl_wm_latency(struct drm_i915_private *i915, int level, 708 + const struct skl_wm_params *wp) 709 + { 710 + unsigned int latency = i915->display.wm.skl_latency[level]; 711 + 712 + if (latency == 0) 713 + return 0; 714 + 715 + /* 716 + * WaIncreaseLatencyIPCEnabled: kbl,cfl 717 + * Display WA #1141: kbl,cfl 718 + */ 719 + if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) && 720 + skl_watermark_ipc_enabled(i915)) 721 + latency += 4; 722 + 723 + if (skl_needs_memory_bw_wa(i915) && wp && wp->x_tiled) 724 + latency += 15; 725 + 726 + return latency; 727 + } 728 + 708 729 static unsigned int 709 730 skl_cursor_allocation(const struct intel_crtc_state *crtc_state, 710 731 int num_active) ··· 746 723 drm_WARN_ON(&i915->drm, ret); 747 724 748 725 for (level = 0; level < i915->display.wm.num_levels; level++) { 749 - unsigned int latency = i915->display.wm.skl_latency[level]; 726 + unsigned int latency = skl_wm_latency(i915, level, &wp); 750 727 751 728 skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); 752 729 if (wm.min_ddb_alloc == U16_MAX) ··· 1862 1839 return; 1863 1840 } 1864 1841 1865 - /* 1866 - * WaIncreaseLatencyIPCEnabled: kbl,cfl 1867 - * Display WA #1141: kbl,cfl 1868 - */ 1869 - if ((IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) && 1870 - skl_watermark_ipc_enabled(i915)) 1871 - latency += 4; 1872 - 1873 - if (skl_needs_memory_bw_wa(i915) && wp->x_tiled) 1874 - latency += 15; 1875 - 1876 1842 method1 = skl_wm_method1(i915, wp->plane_pixel_rate, 1877 1843 wp->cpp, latency, wp->dbuf_block_size); 1878 1844 method2 = 
skl_wm_method2(wp->plane_pixel_rate, ··· 1988 1976 1989 1977 for (level = 0; level < i915->display.wm.num_levels; level++) { 1990 1978 struct skl_wm_level *result = &levels[level]; 1991 - unsigned int latency = i915->display.wm.skl_latency[level]; 1979 + unsigned int latency = skl_wm_latency(i915, level, wm_params); 1992 1980 1993 1981 skl_compute_plane_wm(crtc_state, plane, level, latency, 1994 1982 wm_params, result_prev, result); ··· 2008 1996 unsigned int latency = 0; 2009 1997 2010 1998 if (i915->display.sagv.block_time_us) 2011 - latency = i915->display.sagv.block_time_us + i915->display.wm.skl_latency[0]; 1999 + latency = i915->display.sagv.block_time_us + 2000 + skl_wm_latency(i915, 0, wm_params); 2012 2001 2013 2002 skl_compute_plane_wm(crtc_state, plane, 0, latency, 2014 2003 wm_params, &levels[0], ··· 2201 2188 return 0; 2202 2189 } 2203 2190 2191 + static bool 2192 + skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state, 2193 + int wm0_lines, int latency) 2194 + { 2195 + const struct drm_display_mode *adjusted_mode = 2196 + &crtc_state->hw.adjusted_mode; 2197 + 2198 + /* FIXME missing scaler and DSC pre-fill time */ 2199 + return crtc_state->framestart_delay + 2200 + intel_usecs_to_scanlines(adjusted_mode, latency) + 2201 + wm0_lines > 2202 + adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start; 2203 + } 2204 + 2205 + static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state) 2206 + { 2207 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2208 + enum plane_id plane_id; 2209 + int wm0_lines = 0; 2210 + 2211 + for_each_plane_id_on_crtc(crtc, plane_id) { 2212 + const struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id]; 2213 + 2214 + /* FIXME what about !skl_wm_has_lines() platforms? 
*/ 2215 + wm0_lines = max_t(int, wm0_lines, wm->wm[0].lines); 2216 + } 2217 + 2218 + return wm0_lines; 2219 + } 2220 + 2221 + static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state, 2222 + int wm0_lines) 2223 + { 2224 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2225 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2226 + int level; 2227 + 2228 + for (level = i915->display.wm.num_levels - 1; level >= 0; level--) { 2229 + int latency; 2230 + 2231 + /* FIXME should we care about the latency w/a's? */ 2232 + latency = skl_wm_latency(i915, level, NULL); 2233 + if (latency == 0) 2234 + continue; 2235 + 2236 + /* FIXME is it correct to use 0 latency for wm0 here? */ 2237 + if (level == 0) 2238 + latency = 0; 2239 + 2240 + if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency)) 2241 + return level; 2242 + } 2243 + 2244 + return -EINVAL; 2245 + } 2246 + 2247 + static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state) 2248 + { 2249 + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 2250 + struct drm_i915_private *i915 = to_i915(crtc->base.dev); 2251 + int wm0_lines, level; 2252 + 2253 + if (!crtc_state->hw.active) 2254 + return 0; 2255 + 2256 + wm0_lines = skl_max_wm0_lines(crtc_state); 2257 + 2258 + level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines); 2259 + if (level < 0) 2260 + return level; 2261 + 2262 + /* 2263 + * FIXME PSR needs to toggle LATENCY_REPORTING_REMOVED_PIPE_* 2264 + * based on whether we're limited by the vblank duration. 2265 + * 2266 + * FIXME also related to skl+ w/a 1136 (also unimplemented as of 2267 + * now) perhaps? 
2268 + */ 2269 + 2270 + for (level++; level < i915->display.wm.num_levels; level++) { 2271 + enum plane_id plane_id; 2272 + 2273 + for_each_plane_id_on_crtc(crtc, plane_id) { 2274 + struct skl_plane_wm *wm = 2275 + &crtc_state->wm.skl.optimal.planes[plane_id]; 2276 + 2277 + /* 2278 + * FIXME just clear enable or flag the entire 2279 + * thing as bad via min_ddb_alloc=U16_MAX? 2280 + */ 2281 + wm->wm[level].enable = false; 2282 + wm->uv_wm[level].enable = false; 2283 + } 2284 + } 2285 + 2286 + if (DISPLAY_VER(i915) >= 12 && 2287 + i915->display.sagv.block_time_us && 2288 + skl_is_vblank_too_short(crtc_state, wm0_lines, 2289 + i915->display.sagv.block_time_us)) { 2290 + enum plane_id plane_id; 2291 + 2292 + for_each_plane_id_on_crtc(crtc, plane_id) { 2293 + struct skl_plane_wm *wm = 2294 + &crtc_state->wm.skl.optimal.planes[plane_id]; 2295 + 2296 + wm->sagv.wm0.enable = false; 2297 + wm->sagv.trans_wm.enable = false; 2298 + } 2299 + } 2300 + 2301 + return 0; 2302 + } 2303 + 2204 2304 static int skl_build_pipe_wm(struct intel_atomic_state *state, 2205 2305 struct intel_crtc *crtc) 2206 2306 { ··· 2343 2217 2344 2218 crtc_state->wm.skl.optimal = crtc_state->wm.skl.raw; 2345 2219 2346 - return 0; 2220 + return skl_wm_check_vblank(crtc_state); 2347 2221 } 2348 2222 2349 2223 static void skl_ddb_entry_write(struct drm_i915_private *i915,
+1 -1
drivers/gpu/drm/i915/display/vlv_dsi.c
··· 1072 1072 bpp = mipi_dsi_pixel_format_to_bpp( 1073 1073 pixel_format_from_register_bits(fmt)); 1074 1074 1075 - pipe_config->pipe_bpp = bdw_get_pipemisc_bpp(crtc); 1075 + pipe_config->pipe_bpp = bdw_get_pipe_misc_bpp(crtc); 1076 1076 1077 1077 /* Enable Frame time stamo based scanline reporting */ 1078 1078 pipe_config->mode_flags |=
+2
drivers/gpu/drm/i915/gt/intel_gpu_commands.h
··· 440 440 #define GSC_FW_LOAD GSC_INSTR(1, 0, 2) 441 441 #define HECI1_FW_LIMIT_VALID (1 << 31) 442 442 443 + #define GSC_HECI_CMD_PKT GSC_INSTR(0, 0, 6) 444 + 443 445 /* 444 446 * Used to convert any address to canonical form. 445 447 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
+109
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "gt/intel_engine_pm.h" 7 + #include "gt/intel_gpu_commands.h" 8 + #include "gt/intel_gt.h" 9 + #include "gt/intel_ring.h" 10 + #include "intel_gsc_uc_heci_cmd_submit.h" 11 + 12 + struct gsc_heci_pkt { 13 + u64 addr_in; 14 + u32 size_in; 15 + u64 addr_out; 16 + u32 size_out; 17 + }; 18 + 19 + static int emit_gsc_heci_pkt(struct i915_request *rq, struct gsc_heci_pkt *pkt) 20 + { 21 + u32 *cs; 22 + 23 + cs = intel_ring_begin(rq, 8); 24 + if (IS_ERR(cs)) 25 + return PTR_ERR(cs); 26 + 27 + *cs++ = GSC_HECI_CMD_PKT; 28 + *cs++ = lower_32_bits(pkt->addr_in); 29 + *cs++ = upper_32_bits(pkt->addr_in); 30 + *cs++ = pkt->size_in; 31 + *cs++ = lower_32_bits(pkt->addr_out); 32 + *cs++ = upper_32_bits(pkt->addr_out); 33 + *cs++ = pkt->size_out; 34 + *cs++ = 0; 35 + 36 + intel_ring_advance(rq, cs); 37 + 38 + return 0; 39 + } 40 + 41 + int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc, u64 addr_in, 42 + u32 size_in, u64 addr_out, 43 + u32 size_out) 44 + { 45 + struct intel_context *ce = gsc->ce; 46 + struct i915_request *rq; 47 + struct gsc_heci_pkt pkt = { 48 + .addr_in = addr_in, 49 + .size_in = size_in, 50 + .addr_out = addr_out, 51 + .size_out = size_out 52 + }; 53 + int err; 54 + 55 + if (!ce) 56 + return -ENODEV; 57 + 58 + rq = i915_request_create(ce); 59 + if (IS_ERR(rq)) 60 + return PTR_ERR(rq); 61 + 62 + if (ce->engine->emit_init_breadcrumb) { 63 + err = ce->engine->emit_init_breadcrumb(rq); 64 + if (err) 65 + goto out_rq; 66 + } 67 + 68 + err = emit_gsc_heci_pkt(rq, &pkt); 69 + 70 + if (err) 71 + goto out_rq; 72 + 73 + err = ce->engine->emit_flush(rq, 0); 74 + 75 + out_rq: 76 + i915_request_get(rq); 77 + 78 + if (unlikely(err)) 79 + i915_request_set_error_once(rq, err); 80 + 81 + i915_request_add(rq); 82 + 83 + if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0) 84 + err = -ETIME; 85 + 86 + i915_request_put(rq); 87 + 88 + if (err) 89 
+ drm_err(&gsc_uc_to_gt(gsc)->i915->drm, 90 + "Request submission for GSC heci cmd failed (%d)\n", 91 + err); 92 + 93 + return err; 94 + } 95 + 96 + void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header, 97 + u8 heci_client_id, u32 message_size, 98 + u64 host_session_id) 99 + { 100 + host_session_id &= ~HOST_SESSION_MASK; 101 + if (heci_client_id == HECI_MEADDRESS_PXP) 102 + host_session_id |= HOST_SESSION_PXP_SINGLE; 103 + 104 + header->validity_marker = GSC_HECI_VALIDITY_MARKER; 105 + header->heci_client_id = heci_client_id; 106 + header->host_session_handle = host_session_id; 107 + header->header_version = MTL_GSC_HEADER_VERSION; 108 + header->message_size = message_size; 109 + }
+61
drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_ 7 + #define _INTEL_GSC_UC_HECI_CMD_SUBMIT_H_ 8 + 9 + #include <linux/types.h> 10 + 11 + struct intel_gsc_uc; 12 + struct intel_gsc_mtl_header { 13 + u32 validity_marker; 14 + #define GSC_HECI_VALIDITY_MARKER 0xA578875A 15 + 16 + u8 heci_client_id; 17 + #define HECI_MEADDRESS_PXP 17 18 + #define HECI_MEADDRESS_HDCP 18 19 + 20 + u8 reserved1; 21 + 22 + u16 header_version; 23 + #define MTL_GSC_HEADER_VERSION 1 24 + 25 + /* 26 + * FW allows host to decide host_session handle 27 + * as it sees fit. 28 + * For intertracebility reserving select bits(60-63) 29 + * to differentiate caller-target subsystem 30 + * 0000 - HDCP 31 + * 0001 - PXP Single Session 32 + */ 33 + u64 host_session_handle; 34 + #define HOST_SESSION_MASK REG_GENMASK64(63, 60) 35 + #define HOST_SESSION_PXP_SINGLE BIT_ULL(60) 36 + u64 gsc_message_handle; 37 + 38 + u32 message_size; /* lower 20 bits only, upper 12 are reserved */ 39 + 40 + /* 41 + * Flags mask: 42 + * Bit 0: Pending 43 + * Bit 1: Session Cleanup; 44 + * Bits 2-15: Flags 45 + * Bits 16-31: Extension Size 46 + * According to internal spec flags are either input or output 47 + * we distinguish the flags using OUTFLAG or INFLAG 48 + */ 49 + u32 flags; 50 + #define GSC_OUTFLAG_MSG_PENDING 1 51 + 52 + u32 status; 53 + } __packed; 54 + 55 + int intel_gsc_uc_heci_cmd_submit_packet(struct intel_gsc_uc *gsc, 56 + u64 addr_in, u32 size_in, 57 + u64 addr_out, u32 size_out); 58 + void intel_gsc_uc_heci_cmd_emit_mtl_header(struct intel_gsc_mtl_header *header, 59 + u8 heci_client_id, u32 message_size, 60 + u64 host_session_id); 61 + #endif
+5 -1
drivers/gpu/drm/i915/i915_driver.c
··· 535 535 536 536 ret = i915_pcode_init(dev_priv); 537 537 if (ret) 538 - goto err_msi; 538 + goto err_opregion; 539 539 540 540 /* 541 541 * Fill the dram structure to get the system dram info. This will be ··· 556 556 557 557 return 0; 558 558 559 + err_opregion: 560 + intel_opregion_cleanup(dev_priv); 559 561 err_msi: 560 562 if (pdev->msi_enabled) 561 563 pci_disable_msi(pdev); ··· 582 580 struct pci_dev *root_pdev; 583 581 584 582 i915_perf_fini(dev_priv); 583 + 584 + intel_opregion_cleanup(dev_priv); 585 585 586 586 if (pdev->msi_enabled) 587 587 pci_disable_msi(pdev);
+63 -24
drivers/gpu/drm/i915/i915_reg.h
··· 1794 1794 * GEN9 clock gating regs 1795 1795 */ 1796 1796 #define GEN9_CLKGATE_DIS_0 _MMIO(0x46530) 1797 - #define DARBF_GATING_DIS (1 << 27) 1798 - #define PWM2_GATING_DIS (1 << 14) 1799 - #define PWM1_GATING_DIS (1 << 13) 1797 + #define DARBF_GATING_DIS REG_BIT(27) 1798 + #define MTL_PIPEDMC_GATING_DIS_A REG_BIT(15) 1799 + #define MTL_PIPEDMC_GATING_DIS_B REG_BIT(14) 1800 + #define PWM2_GATING_DIS REG_BIT(14) 1801 + #define PWM1_GATING_DIS REG_BIT(13) 1800 1802 1801 1803 #define GEN9_CLKGATE_DIS_3 _MMIO(0x46538) 1802 1804 #define TGL_VRH_GATING_DIS REG_BIT(31) ··· 3497 3495 3498 3496 #define _PIPE_MISC_A 0x70030 3499 3497 #define _PIPE_MISC_B 0x71030 3500 - #define PIPEMISC_YUV420_ENABLE REG_BIT(27) /* glk+ */ 3501 - #define PIPEMISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */ 3502 - #define PIPEMISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */ 3503 - #define PIPEMISC_OUTPUT_COLORSPACE_YUV REG_BIT(11) 3504 - #define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ 3498 + #define PIPE_MISC_YUV420_ENABLE REG_BIT(27) /* glk+ */ 3499 + #define PIPE_MISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */ 3500 + #define PIPE_MISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */ 3501 + #define PIPE_MISC_OUTPUT_COLORSPACE_YUV REG_BIT(11) 3502 + #define PIPE_MISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ 3505 3503 /* 3506 3504 * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with 3507 3505 * valid values of: 6, 8, 10 BPC. 3508 3506 * ADLP+, the bits 5-7 represent PORT OUTPUT BPC with valid values of: 3509 3507 * 6, 8, 10, 12 BPC. 
3510 3508 */ 3511 - #define PIPEMISC_BPC_MASK REG_GENMASK(7, 5) 3512 - #define PIPEMISC_BPC_8 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 0) 3513 - #define PIPEMISC_BPC_10 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 1) 3514 - #define PIPEMISC_BPC_6 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 2) 3515 - #define PIPEMISC_BPC_12_ADLP REG_FIELD_PREP(PIPEMISC_BPC_MASK, 4) /* adlp+ */ 3516 - #define PIPEMISC_DITHER_ENABLE REG_BIT(4) 3517 - #define PIPEMISC_DITHER_TYPE_MASK REG_GENMASK(3, 2) 3518 - #define PIPEMISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 0) 3519 - #define PIPEMISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 1) 3520 - #define PIPEMISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 2) 3521 - #define PIPEMISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 3) 3522 - #define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A) 3509 + #define PIPE_MISC_BPC_MASK REG_GENMASK(7, 5) 3510 + #define PIPE_MISC_BPC_8 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 0) 3511 + #define PIPE_MISC_BPC_10 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 1) 3512 + #define PIPE_MISC_BPC_6 REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 2) 3513 + #define PIPE_MISC_BPC_12_ADLP REG_FIELD_PREP(PIPE_MISC_BPC_MASK, 4) /* adlp+ */ 3514 + #define PIPE_MISC_DITHER_ENABLE REG_BIT(4) 3515 + #define PIPE_MISC_DITHER_TYPE_MASK REG_GENMASK(3, 2) 3516 + #define PIPE_MISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 0) 3517 + #define PIPE_MISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 1) 3518 + #define PIPE_MISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 2) 3519 + #define PIPE_MISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPE_MISC_DITHER_TYPE_MASK, 3) 3520 + #define PIPE_MISC(pipe) _MMIO_PIPE(pipe, _PIPE_MISC_A, _PIPE_MISC_B) 3523 3521 3524 3522 #define _PIPE_MISC2_A 0x7002C 3525 3523 #define _PIPE_MISC2_B 0x7102C 3526 3524 #define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24) 3527 3525 #define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN 
REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80) 3528 3526 #define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20) 3529 - #define PIPE_MISC2(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC2_A) 3527 + #define PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK REG_GENMASK(2, 0) /* tgl+ */ 3528 + #define PIPE_MISC2_FLIP_INFO_PLANE_SEL(plane_id) REG_FIELD_PREP(PIPE_MISC2_FLIP_INFO_PLANE_SEL_MASK, (plane_id)) 3529 + #define PIPE_MISC2(pipe) _MMIO_PIPE(pipe, _PIPE_MISC2_A, _PIPE_MISC2_B) 3530 3530 3531 3531 /* Skylake+ pipe bottom (background) color */ 3532 3532 #define _SKL_BOTTOM_COLOR_A 0x70034 ··· 4394 4390 #define SP_CONST_ALPHA_ENABLE REG_BIT(31) 4395 4391 #define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0) 4396 4392 #define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha)) 4393 + #define _SPASURFLIVE (VLV_DISPLAY_BASE + 0x721ac) 4397 4394 #define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0) 4398 4395 #define SP_CONTRAST_MASK REG_GENMASK(26, 18) 4399 4396 #define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */ ··· 4418 4413 #define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0) 4419 4414 #define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4) 4420 4415 #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) 4416 + #define _SPBSURFLIVE (VLV_DISPLAY_BASE + 0x722ac) 4421 4417 #define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0) 4422 4418 #define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4) 4423 4419 #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0) ··· 4439 4433 #define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL) 4440 4434 #define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF) 4441 4435 #define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA) 4436 + #define SPSURFLIVE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURFLIVE, _SPBSURFLIVE) 4442 4437 #define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, 
_SPBCLRC0) 4443 4438 #define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1) 4444 4439 #define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */ ··· 4591 4584 #define _PLANE_KEYVAL_2_A 0x70294 4592 4585 #define _PLANE_KEYMSK_1_A 0x70198 4593 4586 #define _PLANE_KEYMSK_2_A 0x70298 4594 - #define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31) 4587 + #define PLANE_KEYMSK_ALPHA_ENABLE REG_BIT(31) 4595 4588 #define _PLANE_KEYMAX_1_A 0x701a0 4596 4589 #define _PLANE_KEYMAX_2_A 0x702a0 4597 - #define PLANE_KEYMAX_ALPHA(a) ((a) << 24) 4590 + #define PLANE_KEYMAX_ALPHA_MASK REG_GENMASK(31, 24) 4591 + #define PLANE_KEYMAX_ALPHA(a) REG_FIELD_PREP(PLANE_KEYMAX_ALPHA_MASK, (a)) 4592 + #define _PLANE_SURFLIVE_1_A 0x701ac 4593 + #define _PLANE_SURFLIVE_2_A 0x702ac 4598 4594 #define _PLANE_CC_VAL_1_A 0x701b4 4599 4595 #define _PLANE_CC_VAL_2_A 0x702b4 4600 4596 #define _PLANE_AUX_DIST_1_A 0x701c0 ··· 4781 4771 #define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B) 4782 4772 #define PLANE_KEYMAX(pipe, plane) \ 4783 4773 _MMIO_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe)) 4774 + 4775 + #define _PLANE_SURFLIVE_1_B 0x711ac 4776 + #define _PLANE_SURFLIVE_2_B 0x712ac 4777 + #define _PLANE_SURFLIVE_1(pipe) _PIPE(pipe, _PLANE_SURFLIVE_1_A, _PLANE_SURFLIVE_1_B) 4778 + #define _PLANE_SURFLIVE_2(pipe) _PIPE(pipe, _PLANE_SURFLIVE_2_A, _PLANE_SURFLIVE_2_B) 4779 + #define PLANE_SURFLIVE(pipe, plane) \ 4780 + _MMIO_PLANE(plane, _PLANE_SURFLIVE_1(pipe), _PLANE_SURFLIVE_2(pipe)) 4784 4781 4785 4782 #define _PLANE_BUF_CFG_1_B 0x7127c 4786 4783 #define _PLANE_BUF_CFG_2_B 0x7137c ··· 7249 7232 #define DC_STATE_DISABLE 0 7250 7233 #define DC_STATE_EN_DC3CO REG_BIT(30) 7251 7234 #define DC_STATE_DC3CO_STATUS REG_BIT(29) 7235 + #define HOLD_PHY_CLKREQ_PG1_LATCH REG_BIT(21) 7236 + #define HOLD_PHY_PG1_LATCH REG_BIT(20) 7252 7237 #define DC_STATE_EN_UPTO_DC5 (1 << 0) 7253 7238 #define 
DC_STATE_EN_DC9 (1 << 3) 7254 7239 #define DC_STATE_EN_UPTO_DC6 (2 << 0) ··· 7560 7541 #define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT 12 7561 7542 #define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK (0xf << 12) 7562 7543 7544 + /* g4x+, except vlv/chv! */ 7563 7545 #define _PIPE_FRMTMSTMP_A 0x70048 7546 + #define _PIPE_FRMTMSTMP_B 0x71048 7564 7547 #define PIPE_FRMTMSTMP(pipe) \ 7565 - _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A) 7548 + _MMIO_PIPE(pipe, _PIPE_FRMTMSTMP_A, _PIPE_FRMTMSTMP_B) 7549 + 7550 + /* g4x+, except vlv/chv! */ 7551 + #define _PIPE_FLIPTMSTMP_A 0x7004C 7552 + #define _PIPE_FLIPTMSTMP_B 0x7104C 7553 + #define PIPE_FLIPTMSTMP(pipe) \ 7554 + _MMIO_PIPE(pipe, _PIPE_FLIPTMSTMP_A, _PIPE_FLIPTMSTMP_B) 7555 + 7556 + /* tgl+ */ 7557 + #define _PIPE_FLIPDONETMSTMP_A 0x70054 7558 + #define _PIPE_FLIPDONETMSTMP_B 0x71054 7559 + #define PIPE_FLIPDONETIMSTMP(pipe) \ 7560 + _MMIO_PIPE(pipe, _PIPE_FLIPDONETMSTMP_A, _PIPE_FLIPDONETMSTMP_B) 7561 + 7562 + #define _VLV_PIPE_MSA_MISC_A 0x70048 7563 + #define VLV_PIPE_MSA_MISC(pipe) \ 7564 + _MMIO_PIPE2(pipe, _VLV_PIPE_MSA_MISC_A) 7565 + #define VLV_MSA_MISC1_HW_ENABLE REG_BIT(31) 7566 + #define VLV_MSA_MISC1_SW_S3D_MASK REG_GENMASK(2, 0) /* MSA MISC1 3:1 */ 7566 7567 7567 7568 #define GGC _MMIO(0x108040) 7568 7569 #define GMS_MASK REG_GENMASK(15, 8)
+3 -3
drivers/gpu/drm/i915/intel_gvt_mmio_table.c
··· 789 789 MMIO_RING_D(RING_REG); 790 790 #undef RING_REG 791 791 792 - MMIO_D(PIPEMISC(PIPE_A)); 793 - MMIO_D(PIPEMISC(PIPE_B)); 794 - MMIO_D(PIPEMISC(PIPE_C)); 792 + MMIO_D(PIPE_MISC(PIPE_A)); 793 + MMIO_D(PIPE_MISC(PIPE_B)); 794 + MMIO_D(PIPE_MISC(PIPE_C)); 795 795 MMIO_D(_MMIO(0x1c1d0)); 796 796 MMIO_D(GEN6_MBCUNIT_SNPCR); 797 797 MMIO_D(GEN7_MISCCPCTL);
+51 -54
drivers/misc/mei/hdcp/mei_hdcp.c
··· 23 23 #include <linux/component.h> 24 24 #include <drm/drm_connector.h> 25 25 #include <drm/i915_component.h> 26 - #include <drm/i915_mei_hdcp_interface.h> 26 + #include <drm/i915_hdcp_interface.h> 27 27 28 28 #include "mei_hdcp.h" 29 29 ··· 52 52 53 53 session_init_in.header.api_version = HDCP_API_VERSION; 54 54 session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION; 55 - session_init_in.header.status = ME_HDCP_STATUS_SUCCESS; 55 + session_init_in.header.status = FW_HDCP_STATUS_SUCCESS; 56 56 session_init_in.header.buffer_len = 57 57 WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN; 58 58 59 59 session_init_in.port.integrated_port_type = data->port_type; 60 - session_init_in.port.physical_port = (u8)data->fw_ddi; 61 - session_init_in.port.attached_transcoder = (u8)data->fw_tc; 60 + session_init_in.port.physical_port = (u8)data->hdcp_ddi; 61 + session_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 62 62 session_init_in.protocol = data->protocol; 63 63 64 64 byte = mei_cldev_send(cldev, (u8 *)&session_init_in, ··· 75 75 return byte; 76 76 } 77 77 78 - if (session_init_out.header.status != ME_HDCP_STATUS_SUCCESS) { 78 + if (session_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { 79 79 dev_dbg(dev, "ME cmd 0x%08X Failed. 
Status: 0x%X\n", 80 80 WIRED_INITIATE_HDCP2_SESSION, 81 81 session_init_out.header.status); ··· 122 122 123 123 verify_rxcert_in.header.api_version = HDCP_API_VERSION; 124 124 verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT; 125 - verify_rxcert_in.header.status = ME_HDCP_STATUS_SUCCESS; 125 + verify_rxcert_in.header.status = FW_HDCP_STATUS_SUCCESS; 126 126 verify_rxcert_in.header.buffer_len = 127 127 WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN; 128 128 129 129 verify_rxcert_in.port.integrated_port_type = data->port_type; 130 - verify_rxcert_in.port.physical_port = (u8)data->fw_ddi; 131 - verify_rxcert_in.port.attached_transcoder = (u8)data->fw_tc; 130 + verify_rxcert_in.port.physical_port = (u8)data->hdcp_ddi; 131 + verify_rxcert_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 132 132 133 133 verify_rxcert_in.cert_rx = rx_cert->cert_rx; 134 134 memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN); ··· 148 148 return byte; 149 149 } 150 150 151 - if (verify_rxcert_out.header.status != ME_HDCP_STATUS_SUCCESS) { 151 + if (verify_rxcert_out.header.status != FW_HDCP_STATUS_SUCCESS) { 152 152 dev_dbg(dev, "ME cmd 0x%08X Failed. 
Status: 0x%X\n", 153 153 WIRED_VERIFY_RECEIVER_CERT, 154 154 verify_rxcert_out.header.status); ··· 194 194 195 195 send_hprime_in.header.api_version = HDCP_API_VERSION; 196 196 send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME; 197 - send_hprime_in.header.status = ME_HDCP_STATUS_SUCCESS; 197 + send_hprime_in.header.status = FW_HDCP_STATUS_SUCCESS; 198 198 send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN; 199 199 200 200 send_hprime_in.port.integrated_port_type = data->port_type; 201 - send_hprime_in.port.physical_port = (u8)data->fw_ddi; 202 - send_hprime_in.port.attached_transcoder = (u8)data->fw_tc; 201 + send_hprime_in.port.physical_port = (u8)data->hdcp_ddi; 202 + send_hprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 203 203 204 204 memcpy(send_hprime_in.h_prime, rx_hprime->h_prime, 205 205 HDCP_2_2_H_PRIME_LEN); ··· 218 218 return byte; 219 219 } 220 220 221 - if (send_hprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { 221 + if (send_hprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { 222 222 dev_dbg(dev, "ME cmd 0x%08X Failed. 
Status: 0x%X\n", 223 223 WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status); 224 224 return -EIO; ··· 251 251 252 252 pairing_info_in.header.api_version = HDCP_API_VERSION; 253 253 pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO; 254 - pairing_info_in.header.status = ME_HDCP_STATUS_SUCCESS; 254 + pairing_info_in.header.status = FW_HDCP_STATUS_SUCCESS; 255 255 pairing_info_in.header.buffer_len = 256 256 WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN; 257 257 258 258 pairing_info_in.port.integrated_port_type = data->port_type; 259 - pairing_info_in.port.physical_port = (u8)data->fw_ddi; 260 - pairing_info_in.port.attached_transcoder = (u8)data->fw_tc; 259 + pairing_info_in.port.physical_port = (u8)data->hdcp_ddi; 260 + pairing_info_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 261 261 262 262 memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km, 263 263 HDCP_2_2_E_KH_KM_LEN); ··· 276 276 return byte; 277 277 } 278 278 279 - if (pairing_info_out.header.status != ME_HDCP_STATUS_SUCCESS) { 279 + if (pairing_info_out.header.status != FW_HDCP_STATUS_SUCCESS) { 280 280 dev_dbg(dev, "ME cmd 0x%08X failed. 
Status: 0x%X\n", 281 281 WIRED_AKE_SEND_PAIRING_INFO, 282 282 pairing_info_out.header.status); ··· 311 311 312 312 lc_init_in.header.api_version = HDCP_API_VERSION; 313 313 lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK; 314 - lc_init_in.header.status = ME_HDCP_STATUS_SUCCESS; 314 + lc_init_in.header.status = FW_HDCP_STATUS_SUCCESS; 315 315 lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN; 316 316 317 317 lc_init_in.port.integrated_port_type = data->port_type; 318 - lc_init_in.port.physical_port = (u8)data->fw_ddi; 319 - lc_init_in.port.attached_transcoder = (u8)data->fw_tc; 318 + lc_init_in.port.physical_port = (u8)data->hdcp_ddi; 319 + lc_init_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 320 320 321 321 byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in)); 322 322 if (byte < 0) { ··· 330 330 return byte; 331 331 } 332 332 333 - if (lc_init_out.header.status != ME_HDCP_STATUS_SUCCESS) { 333 + if (lc_init_out.header.status != FW_HDCP_STATUS_SUCCESS) { 334 334 dev_dbg(dev, "ME cmd 0x%08X Failed. 
status: 0x%X\n", 335 335 WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status); 336 336 return -EIO; ··· 366 366 367 367 verify_lprime_in.header.api_version = HDCP_API_VERSION; 368 368 verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY; 369 - verify_lprime_in.header.status = ME_HDCP_STATUS_SUCCESS; 369 + verify_lprime_in.header.status = FW_HDCP_STATUS_SUCCESS; 370 370 verify_lprime_in.header.buffer_len = 371 371 WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN; 372 372 373 373 verify_lprime_in.port.integrated_port_type = data->port_type; 374 - verify_lprime_in.port.physical_port = (u8)data->fw_ddi; 375 - verify_lprime_in.port.attached_transcoder = (u8)data->fw_tc; 374 + verify_lprime_in.port.physical_port = (u8)data->hdcp_ddi; 375 + verify_lprime_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 376 376 377 377 memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime, 378 378 HDCP_2_2_L_PRIME_LEN); ··· 391 391 return byte; 392 392 } 393 393 394 - if (verify_lprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { 394 + if (verify_lprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { 395 395 dev_dbg(dev, "ME cmd 0x%08X failed. 
status: 0x%X\n", 396 396 WIRED_VALIDATE_LOCALITY, 397 397 verify_lprime_out.header.status); ··· 425 425 426 426 get_skey_in.header.api_version = HDCP_API_VERSION; 427 427 get_skey_in.header.command_id = WIRED_GET_SESSION_KEY; 428 - get_skey_in.header.status = ME_HDCP_STATUS_SUCCESS; 428 + get_skey_in.header.status = FW_HDCP_STATUS_SUCCESS; 429 429 get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN; 430 430 431 431 get_skey_in.port.integrated_port_type = data->port_type; 432 - get_skey_in.port.physical_port = (u8)data->fw_ddi; 433 - get_skey_in.port.attached_transcoder = (u8)data->fw_tc; 432 + get_skey_in.port.physical_port = (u8)data->hdcp_ddi; 433 + get_skey_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 434 434 435 435 byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in)); 436 436 if (byte < 0) { ··· 445 445 return byte; 446 446 } 447 447 448 - if (get_skey_out.header.status != ME_HDCP_STATUS_SUCCESS) { 448 + if (get_skey_out.header.status != FW_HDCP_STATUS_SUCCESS) { 449 449 dev_dbg(dev, "ME cmd 0x%08X failed. 
status: 0x%X\n", 450 450 WIRED_GET_SESSION_KEY, get_skey_out.header.status); 451 451 return -EIO; ··· 489 489 490 490 verify_repeater_in.header.api_version = HDCP_API_VERSION; 491 491 verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER; 492 - verify_repeater_in.header.status = ME_HDCP_STATUS_SUCCESS; 492 + verify_repeater_in.header.status = FW_HDCP_STATUS_SUCCESS; 493 493 verify_repeater_in.header.buffer_len = 494 494 WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN; 495 495 496 496 verify_repeater_in.port.integrated_port_type = data->port_type; 497 - verify_repeater_in.port.physical_port = (u8)data->fw_ddi; 498 - verify_repeater_in.port.attached_transcoder = (u8)data->fw_tc; 497 + verify_repeater_in.port.physical_port = (u8)data->hdcp_ddi; 498 + verify_repeater_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 499 499 500 500 memcpy(verify_repeater_in.rx_info, rep_topology->rx_info, 501 501 HDCP_2_2_RXINFO_LEN); ··· 520 520 return byte; 521 521 } 522 522 523 - if (verify_repeater_out.header.status != ME_HDCP_STATUS_SUCCESS) { 523 + if (verify_repeater_out.header.status != FW_HDCP_STATUS_SUCCESS) { 524 524 dev_dbg(dev, "ME cmd 0x%08X failed. 
status: 0x%X\n", 525 525 WIRED_VERIFY_REPEATER, 526 526 verify_repeater_out.header.status); ··· 568 568 569 569 verify_mprime_in->header.api_version = HDCP_API_VERSION; 570 570 verify_mprime_in->header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ; 571 - verify_mprime_in->header.status = ME_HDCP_STATUS_SUCCESS; 571 + verify_mprime_in->header.status = FW_HDCP_STATUS_SUCCESS; 572 572 verify_mprime_in->header.buffer_len = cmd_size - sizeof(verify_mprime_in->header); 573 573 574 574 verify_mprime_in->port.integrated_port_type = data->port_type; 575 - verify_mprime_in->port.physical_port = (u8)data->fw_ddi; 576 - verify_mprime_in->port.attached_transcoder = (u8)data->fw_tc; 575 + verify_mprime_in->port.physical_port = (u8)data->hdcp_ddi; 576 + verify_mprime_in->port.attached_transcoder = (u8)data->hdcp_transcoder; 577 577 578 578 memcpy(verify_mprime_in->m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN); 579 579 drm_hdcp_cpu_to_be24(verify_mprime_in->seq_num_m, data->seq_num_m); ··· 597 597 return byte; 598 598 } 599 599 600 - if (verify_mprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { 600 + if (verify_mprime_out.header.status != FW_HDCP_STATUS_SUCCESS) { 601 601 dev_dbg(dev, "ME cmd 0x%08X failed. 
status: 0x%X\n", 602 602 WIRED_REPEATER_AUTH_STREAM_REQ, 603 603 verify_mprime_out.header.status); ··· 630 630 631 631 enable_auth_in.header.api_version = HDCP_API_VERSION; 632 632 enable_auth_in.header.command_id = WIRED_ENABLE_AUTH; 633 - enable_auth_in.header.status = ME_HDCP_STATUS_SUCCESS; 633 + enable_auth_in.header.status = FW_HDCP_STATUS_SUCCESS; 634 634 enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN; 635 635 636 636 enable_auth_in.port.integrated_port_type = data->port_type; 637 - enable_auth_in.port.physical_port = (u8)data->fw_ddi; 638 - enable_auth_in.port.attached_transcoder = (u8)data->fw_tc; 637 + enable_auth_in.port.physical_port = (u8)data->hdcp_ddi; 638 + enable_auth_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 639 639 enable_auth_in.stream_type = data->streams[0].stream_type; 640 640 641 641 byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in, ··· 652 652 return byte; 653 653 } 654 654 655 - if (enable_auth_out.header.status != ME_HDCP_STATUS_SUCCESS) { 655 + if (enable_auth_out.header.status != FW_HDCP_STATUS_SUCCESS) { 656 656 dev_dbg(dev, "ME cmd 0x%08X failed. 
status: 0x%X\n", 657 657 WIRED_ENABLE_AUTH, enable_auth_out.header.status); 658 658 return -EIO; ··· 684 684 685 685 session_close_in.header.api_version = HDCP_API_VERSION; 686 686 session_close_in.header.command_id = WIRED_CLOSE_SESSION; 687 - session_close_in.header.status = ME_HDCP_STATUS_SUCCESS; 687 + session_close_in.header.status = FW_HDCP_STATUS_SUCCESS; 688 688 session_close_in.header.buffer_len = 689 689 WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN; 690 690 691 691 session_close_in.port.integrated_port_type = data->port_type; 692 - session_close_in.port.physical_port = (u8)data->fw_ddi; 693 - session_close_in.port.attached_transcoder = (u8)data->fw_tc; 692 + session_close_in.port.physical_port = (u8)data->hdcp_ddi; 693 + session_close_in.port.attached_transcoder = (u8)data->hdcp_transcoder; 694 694 695 695 byte = mei_cldev_send(cldev, (u8 *)&session_close_in, 696 696 sizeof(session_close_in)); ··· 706 706 return byte; 707 707 } 708 708 709 - if (session_close_out.header.status != ME_HDCP_STATUS_SUCCESS) { 709 + if (session_close_out.header.status != FW_HDCP_STATUS_SUCCESS) { 710 710 dev_dbg(dev, "Session Close Failed. 
status: 0x%X\n", 711 711 session_close_out.header.status); 712 712 return -EIO; ··· 715 715 return 0; 716 716 } 717 717 718 - static const struct i915_hdcp_component_ops mei_hdcp_ops = { 718 + static const struct i915_hdcp_ops mei_hdcp_ops = { 719 719 .owner = THIS_MODULE, 720 720 .initiate_hdcp2_session = mei_hdcp_initiate_session, 721 721 .verify_receiver_cert_prepare_km = ··· 735 735 static int mei_component_master_bind(struct device *dev) 736 736 { 737 737 struct mei_cl_device *cldev = to_mei_cl_device(dev); 738 - struct i915_hdcp_comp_master *comp_master = 739 - mei_cldev_get_drvdata(cldev); 738 + struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev); 740 739 int ret; 741 740 742 741 dev_dbg(dev, "%s\n", __func__); 743 742 comp_master->ops = &mei_hdcp_ops; 744 - comp_master->mei_dev = dev; 743 + comp_master->hdcp_dev = dev; 745 744 ret = component_bind_all(dev, comp_master); 746 745 if (ret < 0) 747 746 return ret; ··· 751 752 static void mei_component_master_unbind(struct device *dev) 752 753 { 753 754 struct mei_cl_device *cldev = to_mei_cl_device(dev); 754 - struct i915_hdcp_comp_master *comp_master = 755 - mei_cldev_get_drvdata(cldev); 755 + struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev); 756 756 757 757 dev_dbg(dev, "%s\n", __func__); 758 758 component_unbind_all(dev, comp_master); ··· 799 801 static int mei_hdcp_probe(struct mei_cl_device *cldev, 800 802 const struct mei_cl_device_id *id) 801 803 { 802 - struct i915_hdcp_comp_master *comp_master; 804 + struct i915_hdcp_master *comp_master; 803 805 struct component_match *master_match; 804 806 int ret; 805 807 ··· 844 846 845 847 static void mei_hdcp_remove(struct mei_cl_device *cldev) 846 848 { 847 - struct i915_hdcp_comp_master *comp_master = 848 - mei_cldev_get_drvdata(cldev); 849 + struct i915_hdcp_master *comp_master = mei_cldev_get_drvdata(cldev); 849 850 int ret; 850 851 851 852 component_master_del(&cldev->dev, &mei_component_master_ops);
-354
drivers/misc/mei/hdcp/mei_hdcp.h
··· 11 11 12 12 #include <drm/display/drm_hdcp.h> 13 13 14 - /* me_hdcp_status: Enumeration of all HDCP Status Codes */ 15 - enum me_hdcp_status { 16 - ME_HDCP_STATUS_SUCCESS = 0x0000, 17 - 18 - /* WiDi Generic Status Codes */ 19 - ME_HDCP_STATUS_INTERNAL_ERROR = 0x1000, 20 - ME_HDCP_STATUS_UNKNOWN_ERROR = 0x1001, 21 - ME_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002, 22 - ME_HDCP_STATUS_INVALID_FUNCTION = 0x1003, 23 - ME_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004, 24 - ME_HDCP_STATUS_INVALID_PARAMS = 0x1005, 25 - ME_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006, 26 - 27 - /* WiDi Status Codes */ 28 - ME_HDCP_INVALID_SESSION_STATE = 0x6000, 29 - ME_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001, 30 - ME_HDCP_SRM_INVALID_LENGTH = 0x6002, 31 - ME_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003, 32 - ME_HDCP_SRM_VERIFICATION_FAILED = 0x6004, 33 - ME_HDCP_SRM_VERSION_TOO_OLD = 0x6005, 34 - ME_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006, 35 - ME_HDCP_RX_REVOKED = 0x6007, 36 - ME_HDCP_H_VERIFICATION_FAILED = 0x6008, 37 - ME_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009, 38 - ME_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A, 39 - ME_HDCP_V_VERIFICATION_FAILED = 0x600B, 40 - ME_HDCP_L_VERIFICATION_FAILED = 0x600C, 41 - ME_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D, 42 - ME_HDCP_BASE_KEY_RESET_FAILED = 0x600E, 43 - ME_HDCP_NONCE_GENERATION_FAILED = 0x600F, 44 - ME_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010, 45 - ME_HDCP_STATUS_INVALID_CS_ICV = 0x6011, 46 - ME_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012, 47 - ME_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013, 48 - ME_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014, 49 - ME_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015, 50 - 51 - /* New status for HDCP 2.1 */ 52 - ME_HDCP_STATUS_MISMATCH_IN_M = 0x6016, 53 - 54 - /* New status code for HDCP 2.2 Rx */ 55 - ME_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017, 56 - ME_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018, 57 - ME_HDCP_RX_NEEDS_PROVISIONING = 0x6019, 58 - ME_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020, 59 - ME_HDCP_STATUS_INVALID_STREAM_ID = 0x6021, 60 - 
ME_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022, 61 - ME_HDCP_FAIL_NOT_EXPECTED = 0x6023, 62 - ME_HDCP_FAIL_HDCP_OFF = 0x6024, 63 - ME_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025, 64 - ME_HDCP_FAIL_AES_ECB_FAILURE = 0x6026, 65 - ME_HDCP_FEATURE_NOT_SUPPORTED = 0x6027, 66 - ME_HDCP_DMA_READ_ERROR = 0x6028, 67 - ME_HDCP_DMA_WRITE_ERROR = 0x6029, 68 - ME_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030, 69 - ME_HDCP_H264_PARSING_ERROR = 0x6031, 70 - ME_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032, 71 - ME_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033, 72 - ME_HDCP_TX_ACTIVE_ERROR = 0x6034, 73 - ME_HDCP_MODE_CHANGE_ERROR = 0x6035, 74 - ME_HDCP_STREAM_TYPE_ERROR = 0x6036, 75 - ME_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037, 76 - 77 - ME_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038, 78 - ME_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039, 79 - ME_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a, 80 - ME_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b, 81 - ME_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c, 82 - ME_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d, 83 - 84 - /* hdcp capable bit is not set in rx_caps(error is unique to DP) */ 85 - ME_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041, 86 - 87 - ME_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042, 88 - }; 89 - 90 - #define HDCP_API_VERSION 0x00010000 91 - 92 - #define HDCP_M_LEN 16 93 - #define HDCP_KH_LEN 16 94 - 95 - /* Payload Buffer size(Excluding Header) for CMDs and corresponding response */ 96 - /* Wired_Tx_AKE */ 97 - #define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1) 98 - #define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3) 99 - 100 - #define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3) 101 - #define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16) 102 - #define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128) 103 - 104 - #define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32) 105 - #define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4) 106 - 107 - #define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16) 108 - #define 
WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4) 109 - 110 - #define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4) 111 - #define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4) 112 - 113 - /* Wired_Tx_LC */ 114 - #define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4) 115 - #define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8) 116 - 117 - #define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32) 118 - #define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4) 119 - 120 - /* Wired_Tx_SKE */ 121 - #define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4) 122 - #define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8) 123 - 124 - /* Wired_Tx_SKE */ 125 - #define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1) 126 - #define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4) 127 - 128 - /* Wired_Tx_Repeater */ 129 - #define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155) 130 - #define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16) 131 - 132 - #define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \ 133 - 32 + 2 + 2) 134 - 135 - #define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4) 136 - 137 - /* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */ 138 - enum hdcp_command_id { 139 - _WIDI_COMMAND_BASE = 0x00030000, 140 - WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE, 141 - HDCP_GET_SRM_STATUS, 142 - HDCP_SEND_SRM_FRAGMENT, 143 - 144 - /* The wired HDCP Tx commands */ 145 - _WIRED_COMMAND_BASE = 0x00031000, 146 - WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE, 147 - WIRED_VERIFY_RECEIVER_CERT, 148 - WIRED_AKE_SEND_HPRIME, 149 - WIRED_AKE_SEND_PAIRING_INFO, 150 - WIRED_INIT_LOCALITY_CHECK, 151 - WIRED_VALIDATE_LOCALITY, 152 - WIRED_GET_SESSION_KEY, 153 - WIRED_ENABLE_AUTH, 154 - WIRED_VERIFY_REPEATER, 155 - WIRED_REPEATER_AUTH_STREAM_REQ, 156 - WIRED_CLOSE_SESSION, 157 - 158 - _WIRED_COMMANDS_COUNT, 159 - }; 160 - 161 - union encrypted_buff { 162 - u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN]; 163 - u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN]; 164 - struct { 165 - u8 e_kh_km[HDCP_KH_LEN]; 166 - 
u8 m[HDCP_M_LEN]; 167 - } __packed; 168 - }; 169 - 170 - /* HDCP HECI message header. All header values are little endian. */ 171 - struct hdcp_cmd_header { 172 - u32 api_version; 173 - u32 command_id; 174 - enum me_hdcp_status status; 175 - /* Length of the HECI message (excluding the header) */ 176 - u32 buffer_len; 177 - } __packed; 178 - 179 - /* Empty command request or response. No data follows the header. */ 180 - struct hdcp_cmd_no_data { 181 - struct hdcp_cmd_header header; 182 - } __packed; 183 - 184 - /* Uniquely identifies the hdcp port being addressed for a given command. */ 185 - struct hdcp_port_id { 186 - u8 integrated_port_type; 187 - /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */ 188 - u8 physical_port; 189 - /* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */ 190 - u8 attached_transcoder; 191 - u8 reserved; 192 - } __packed; 193 - 194 - /* 195 - * Data structures for integrated wired HDCP2 Tx in 196 - * support of the AKE protocol 197 - */ 198 - /* HECI struct for integrated wired HDCP Tx session initiation. */ 199 - struct wired_cmd_initiate_hdcp2_session_in { 200 - struct hdcp_cmd_header header; 201 - struct hdcp_port_id port; 202 - u8 protocol; /* for HDMI vs DP */ 203 - } __packed; 204 - 205 - struct wired_cmd_initiate_hdcp2_session_out { 206 - struct hdcp_cmd_header header; 207 - struct hdcp_port_id port; 208 - u8 r_tx[HDCP_2_2_RTX_LEN]; 209 - struct hdcp2_tx_caps tx_caps; 210 - } __packed; 211 - 212 - /* HECI struct for ending an integrated wired HDCP Tx session. */ 213 - struct wired_cmd_close_session_in { 214 - struct hdcp_cmd_header header; 215 - struct hdcp_port_id port; 216 - } __packed; 217 - 218 - struct wired_cmd_close_session_out { 219 - struct hdcp_cmd_header header; 220 - struct hdcp_port_id port; 221 - } __packed; 222 - 223 - /* HECI struct for integrated wired HDCP Tx Rx Cert verification. 
*/ 224 - struct wired_cmd_verify_receiver_cert_in { 225 - struct hdcp_cmd_header header; 226 - struct hdcp_port_id port; 227 - struct hdcp2_cert_rx cert_rx; 228 - u8 r_rx[HDCP_2_2_RRX_LEN]; 229 - u8 rx_caps[HDCP_2_2_RXCAPS_LEN]; 230 - } __packed; 231 - 232 - struct wired_cmd_verify_receiver_cert_out { 233 - struct hdcp_cmd_header header; 234 - struct hdcp_port_id port; 235 - u8 km_stored; 236 - u8 reserved[3]; 237 - union encrypted_buff ekm_buff; 238 - } __packed; 239 - 240 - /* HECI struct for verification of Rx's Hprime in a HDCP Tx session */ 241 - struct wired_cmd_ake_send_hprime_in { 242 - struct hdcp_cmd_header header; 243 - struct hdcp_port_id port; 244 - u8 h_prime[HDCP_2_2_H_PRIME_LEN]; 245 - } __packed; 246 - 247 - struct wired_cmd_ake_send_hprime_out { 248 - struct hdcp_cmd_header header; 249 - struct hdcp_port_id port; 250 - } __packed; 251 - 252 - /* 253 - * HECI struct for sending in AKE pairing data generated by the Rx in an 254 - * integrated wired HDCP Tx session. 255 - */ 256 - struct wired_cmd_ake_send_pairing_info_in { 257 - struct hdcp_cmd_header header; 258 - struct hdcp_port_id port; 259 - u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN]; 260 - } __packed; 261 - 262 - struct wired_cmd_ake_send_pairing_info_out { 263 - struct hdcp_cmd_header header; 264 - struct hdcp_port_id port; 265 - } __packed; 266 - 267 - /* Data structures for integrated wired HDCP2 Tx in support of the LC protocol*/ 268 - /* 269 - * HECI struct for initiating locality check with an 270 - * integrated wired HDCP Tx session. 271 - */ 272 - struct wired_cmd_init_locality_check_in { 273 - struct hdcp_cmd_header header; 274 - struct hdcp_port_id port; 275 - } __packed; 276 - 277 - struct wired_cmd_init_locality_check_out { 278 - struct hdcp_cmd_header header; 279 - struct hdcp_port_id port; 280 - u8 r_n[HDCP_2_2_RN_LEN]; 281 - } __packed; 282 - 283 - /* 284 - * HECI struct for validating an Rx's LPrime value in an 285 - * integrated wired HDCP Tx session. 
286 - */ 287 - struct wired_cmd_validate_locality_in { 288 - struct hdcp_cmd_header header; 289 - struct hdcp_port_id port; 290 - u8 l_prime[HDCP_2_2_L_PRIME_LEN]; 291 - } __packed; 292 - 293 - struct wired_cmd_validate_locality_out { 294 - struct hdcp_cmd_header header; 295 - struct hdcp_port_id port; 296 - } __packed; 297 - 298 - /* 299 - * Data structures for integrated wired HDCP2 Tx in support of the 300 - * SKE protocol 301 - */ 302 - /* HECI struct for creating session key */ 303 - struct wired_cmd_get_session_key_in { 304 - struct hdcp_cmd_header header; 305 - struct hdcp_port_id port; 306 - } __packed; 307 - 308 - struct wired_cmd_get_session_key_out { 309 - struct hdcp_cmd_header header; 310 - struct hdcp_port_id port; 311 - u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN]; 312 - u8 r_iv[HDCP_2_2_RIV_LEN]; 313 - } __packed; 314 - 315 - /* HECI struct for the Tx enable authentication command */ 316 - struct wired_cmd_enable_auth_in { 317 - struct hdcp_cmd_header header; 318 - struct hdcp_port_id port; 319 - u8 stream_type; 320 - } __packed; 321 - 322 - struct wired_cmd_enable_auth_out { 323 - struct hdcp_cmd_header header; 324 - struct hdcp_port_id port; 325 - } __packed; 326 - 327 - /* 328 - * Data structures for integrated wired HDCP2 Tx in support of 329 - * the repeater protocols 330 - */ 331 - /* 332 - * HECI struct for verifying the downstream repeater's HDCP topology in an 333 - * integrated wired HDCP Tx session. 
334 - */ 335 - struct wired_cmd_verify_repeater_in { 336 - struct hdcp_cmd_header header; 337 - struct hdcp_port_id port; 338 - u8 rx_info[HDCP_2_2_RXINFO_LEN]; 339 - u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN]; 340 - u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN]; 341 - u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN]; 342 - } __packed; 343 - 344 - struct wired_cmd_verify_repeater_out { 345 - struct hdcp_cmd_header header; 346 - struct hdcp_port_id port; 347 - u8 content_type_supported; 348 - u8 v[HDCP_2_2_V_PRIME_HALF_LEN]; 349 - } __packed; 350 - 351 - /* 352 - * HECI struct in support of stream management in an 353 - * integrated wired HDCP Tx session. 354 - */ 355 - struct wired_cmd_repeater_auth_stream_req_in { 356 - struct hdcp_cmd_header header; 357 - struct hdcp_port_id port; 358 - u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN]; 359 - u8 m_prime[HDCP_2_2_MPRIME_LEN]; 360 - __be16 k; 361 - struct hdcp2_streamid_type streams[]; 362 - } __packed; 363 - 364 - struct wired_cmd_repeater_auth_stream_req_out { 365 - struct hdcp_cmd_header header; 366 - struct hdcp_port_id port; 367 - } __packed; 368 14 #endif /* __MEI_HDCP_H__ */
+3
include/drm/display/drm_dp.h
··· 692 692 # define DP_FEC_LANE_2_SELECT (2 << 4) 693 693 # define DP_FEC_LANE_3_SELECT (3 << 4) 694 694 695 + #define DP_SDP_ERROR_DETECTION_CONFIGURATION 0x121 /* DP 2.0 E11 */ 696 + #define DP_SDP_CRC16_128B132B_EN BIT(0) 697 + 695 698 #define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */ 696 699 # define DP_AUX_FRAME_SYNC_VALID (1 << 0) 697 700
+539
include/drm/i915_hdcp_interface.h
··· 1 + /* SPDX-License-Identifier: (GPL-2.0+) */ 2 + /* 3 + * Copyright © 2017-2019 Intel Corporation 4 + * 5 + * Authors: 6 + * Ramalingam C <ramalingam.c@intel.com> 7 + */ 8 + 9 + #ifndef _I915_HDCP_INTERFACE_H_ 10 + #define _I915_HDCP_INTERFACE_H_ 11 + 12 + #include <linux/mutex.h> 13 + #include <linux/device.h> 14 + #include <drm/display/drm_hdcp.h> 15 + 16 + /** 17 + * enum hdcp_port_type - HDCP port implementation type defined by ME/GSC FW 18 + * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type 19 + * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port 20 + * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON 21 + * (HDMI 2.0) solution 22 + * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3) 23 + * solution 24 + */ 25 + enum hdcp_port_type { 26 + HDCP_PORT_TYPE_INVALID, 27 + HDCP_PORT_TYPE_INTEGRATED, 28 + HDCP_PORT_TYPE_LSPCON, 29 + HDCP_PORT_TYPE_CPDP 30 + }; 31 + 32 + /** 33 + * enum hdcp_wired_protocol - HDCP adaptation used on the port 34 + * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol 35 + * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port 36 + * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port 37 + */ 38 + enum hdcp_wired_protocol { 39 + HDCP_PROTOCOL_INVALID, 40 + HDCP_PROTOCOL_HDMI, 41 + HDCP_PROTOCOL_DP 42 + }; 43 + 44 + enum hdcp_ddi { 45 + HDCP_DDI_INVALID_PORT = 0x0, 46 + 47 + HDCP_DDI_B = 1, 48 + HDCP_DDI_C, 49 + HDCP_DDI_D, 50 + HDCP_DDI_E, 51 + HDCP_DDI_F, 52 + HDCP_DDI_A = 7, 53 + HDCP_DDI_RANGE_END = HDCP_DDI_A, 54 + }; 55 + 56 + /** 57 + * enum hdcp_tc - ME/GSC Firmware defined index for transcoders 58 + * @HDCP_INVALID_TRANSCODER: Index for Invalid transcoder 59 + * @HDCP_TRANSCODER_EDP: Index for EDP Transcoder 60 + * @HDCP_TRANSCODER_DSI0: Index for DSI0 Transcoder 61 + * @HDCP_TRANSCODER_DSI1: Index for DSI1 Transcoder 62 + * @HDCP_TRANSCODER_A: Index for Transcoder A 63 + * @HDCP_TRANSCODER_B: Index for Transcoder B 64 + * @HDCP_TRANSCODER_C: Index for 
Transcoder C 65 + * @HDCP_TRANSCODER_D: Index for Transcoder D 66 + */ 67 + enum hdcp_transcoder { 68 + HDCP_INVALID_TRANSCODER = 0x00, 69 + HDCP_TRANSCODER_EDP, 70 + HDCP_TRANSCODER_DSI0, 71 + HDCP_TRANSCODER_DSI1, 72 + HDCP_TRANSCODER_A = 0x10, 73 + HDCP_TRANSCODER_B, 74 + HDCP_TRANSCODER_C, 75 + HDCP_TRANSCODER_D 76 + }; 77 + 78 + /** 79 + * struct hdcp_port_data - intel specific HDCP port data 80 + * @hdcp_ddi: ddi index as per ME/GSC FW 81 + * @hdcp_transcoder: transcoder index as per ME/GSC FW 82 + * @port_type: HDCP port type as per ME/GSC FW classification 83 + * @protocol: HDCP adaptation as per ME/GSC FW 84 + * @k: No of streams transmitted on a port. Only on DP MST this is != 1 85 + * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated. 86 + * Initialized to 0 on AKE_INIT. Incremented after every successful 87 + * transmission of RepeaterAuth_Stream_Manage message. When it rolls 88 + * over re-Auth has to be triggered. 89 + * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the 90 + * streams 91 + */ 92 + struct hdcp_port_data { 93 + enum hdcp_ddi hdcp_ddi; 94 + enum hdcp_transcoder hdcp_transcoder; 95 + u8 port_type; 96 + u8 protocol; 97 + u16 k; 98 + u32 seq_num_m; 99 + struct hdcp2_streamid_type *streams; 100 + }; 101 + 102 + /** 103 + * struct i915_hdcp_ops- ops for HDCP2.2 services. 104 + * @owner: Module providing the ops 105 + * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session. 106 + * And Prepare AKE_Init. 
107 + * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate 108 + * AKE_Send_Cert and prepare 109 + AKE_Stored_Km/AKE_No_Stored_Km 110 + * @verify_hprime: Verify AKE_Send_H_prime 111 + * @store_pairing_info: Store pairing info received 112 + * @initiate_locality_check: Prepare LC_Init 113 + * @verify_lprime: Verify lprime 114 + * @get_session_key: Prepare SKE_Send_Eks 115 + * @repeater_check_flow_prepare_ack: Validate the Downstream topology 116 + * and prepare rep_ack 117 + * @verify_mprime: Verify mprime 118 + * @enable_hdcp_authentication: Mark a port as authenticated. 119 + * @close_hdcp_session: Close the Wired HDCP Tx session per port. 120 + * This also disables the authenticated state of the port. 121 + */ 122 + struct i915_hdcp_ops { 123 + /** 124 + * @owner: hdcp module 125 + */ 126 + struct module *owner; 127 + 128 + int (*initiate_hdcp2_session)(struct device *dev, 129 + struct hdcp_port_data *data, 130 + struct hdcp2_ake_init *ake_data); 131 + int (*verify_receiver_cert_prepare_km)(struct device *dev, 132 + struct hdcp_port_data *data, 133 + struct hdcp2_ake_send_cert 134 + *rx_cert, 135 + bool *km_stored, 136 + struct hdcp2_ake_no_stored_km 137 + *ek_pub_km, 138 + size_t *msg_sz); 139 + int (*verify_hprime)(struct device *dev, 140 + struct hdcp_port_data *data, 141 + struct hdcp2_ake_send_hprime *rx_hprime); 142 + int (*store_pairing_info)(struct device *dev, 143 + struct hdcp_port_data *data, 144 + struct hdcp2_ake_send_pairing_info 145 + *pairing_info); 146 + int (*initiate_locality_check)(struct device *dev, 147 + struct hdcp_port_data *data, 148 + struct hdcp2_lc_init *lc_init_data); 149 + int (*verify_lprime)(struct device *dev, 150 + struct hdcp_port_data *data, 151 + struct hdcp2_lc_send_lprime *rx_lprime); 152 + int (*get_session_key)(struct device *dev, 153 + struct hdcp_port_data *data, 154 + struct hdcp2_ske_send_eks *ske_data); 155 + int (*repeater_check_flow_prepare_ack)(struct device *dev, 156 + struct hdcp_port_data *data, 
157 + struct hdcp2_rep_send_receiverid_list 158 + *rep_topology, 159 + struct hdcp2_rep_send_ack 160 + *rep_send_ack); 161 + int (*verify_mprime)(struct device *dev, 162 + struct hdcp_port_data *data, 163 + struct hdcp2_rep_stream_ready *stream_ready); 164 + int (*enable_hdcp_authentication)(struct device *dev, 165 + struct hdcp_port_data *data); 166 + int (*close_hdcp_session)(struct device *dev, 167 + struct hdcp_port_data *data); 168 + }; 169 + 170 + /** 171 + * struct i915_hdcp_master - Used for communication between i915 172 + * and hdcp drivers for the HDCP2.2 services 173 + * @hdcp_dev: device that provide the HDCP2.2 service from MEI Bus. 174 + * @hdcp_ops: Ops implemented by hdcp driver or intel_hdcp_gsc , used by i915 driver. 175 + */ 176 + struct i915_hdcp_master { 177 + struct device *hdcp_dev; 178 + const struct i915_hdcp_ops *ops; 179 + 180 + /* To protect the above members. */ 181 + struct mutex mutex; 182 + }; 183 + 184 + /* fw_hdcp_status: Enumeration of all HDCP Status Codes */ 185 + enum fw_hdcp_status { 186 + FW_HDCP_STATUS_SUCCESS = 0x0000, 187 + 188 + /* WiDi Generic Status Codes */ 189 + FW_HDCP_STATUS_INTERNAL_ERROR = 0x1000, 190 + FW_HDCP_STATUS_UNKNOWN_ERROR = 0x1001, 191 + FW_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002, 192 + FW_HDCP_STATUS_INVALID_FUNCTION = 0x1003, 193 + FW_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004, 194 + FW_HDCP_STATUS_INVALID_PARAMS = 0x1005, 195 + FW_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006, 196 + 197 + /* WiDi Status Codes */ 198 + FW_HDCP_INVALID_SESSION_STATE = 0x6000, 199 + FW_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001, 200 + FW_HDCP_SRM_INVALID_LENGTH = 0x6002, 201 + FW_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003, 202 + FW_HDCP_SRM_VERIFICATION_FAILED = 0x6004, 203 + FW_HDCP_SRM_VERSION_TOO_OLD = 0x6005, 204 + FW_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006, 205 + FW_HDCP_RX_REVOKED = 0x6007, 206 + FW_HDCP_H_VERIFICATION_FAILED = 0x6008, 207 + FW_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009, 208 + 
FW_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A, 209 + FW_HDCP_V_VERIFICATION_FAILED = 0x600B, 210 + FW_HDCP_L_VERIFICATION_FAILED = 0x600C, 211 + FW_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D, 212 + FW_HDCP_BASE_KEY_RESET_FAILED = 0x600E, 213 + FW_HDCP_NONCE_GENERATION_FAILED = 0x600F, 214 + FW_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010, 215 + FW_HDCP_STATUS_INVALID_CS_ICV = 0x6011, 216 + FW_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012, 217 + FW_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013, 218 + FW_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014, 219 + FW_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015, 220 + 221 + /* New status for HDCP 2.1 */ 222 + FW_HDCP_STATUS_MISMATCH_IN_M = 0x6016, 223 + 224 + /* New status code for HDCP 2.2 Rx */ 225 + FW_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017, 226 + FW_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018, 227 + FW_HDCP_RX_NEEDS_PROVISIONING = 0x6019, 228 + FW_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020, 229 + FW_HDCP_STATUS_INVALID_STREAM_ID = 0x6021, 230 + FW_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022, 231 + FW_HDCP_FAIL_NOT_EXPECTED = 0x6023, 232 + FW_HDCP_FAIL_HDCP_OFF = 0x6024, 233 + FW_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025, 234 + FW_HDCP_FAIL_AES_ECB_FAILURE = 0x6026, 235 + FW_HDCP_FEATURE_NOT_SUPPORTED = 0x6027, 236 + FW_HDCP_DMA_READ_ERROR = 0x6028, 237 + FW_HDCP_DMA_WRITE_ERROR = 0x6029, 238 + FW_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030, 239 + FW_HDCP_H264_PARSING_ERROR = 0x6031, 240 + FW_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032, 241 + FW_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033, 242 + FW_HDCP_TX_ACTIVE_ERROR = 0x6034, 243 + FW_HDCP_MODE_CHANGE_ERROR = 0x6035, 244 + FW_HDCP_STREAM_TYPE_ERROR = 0x6036, 245 + FW_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037, 246 + 247 + FW_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038, 248 + FW_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039, 249 + FW_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a, 250 + FW_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b, 251 + FW_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c, 252 + FW_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d, 253 + 
254 + /* hdcp capable bit is not set in rx_caps(error is unique to DP) */ 255 + FW_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041, 256 + 257 + FW_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042, 258 + }; 259 + 260 + #define HDCP_API_VERSION 0x00010000 261 + 262 + #define HDCP_M_LEN 16 263 + #define HDCP_KH_LEN 16 264 + 265 + /* Payload Buffer size(Excluding Header) for CMDs and corresponding response */ 266 + /* Wired_Tx_AKE */ 267 + #define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1) 268 + #define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3) 269 + 270 + #define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3) 271 + #define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16) 272 + #define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128) 273 + 274 + #define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32) 275 + #define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4) 276 + 277 + #define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16) 278 + #define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4) 279 + 280 + #define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4) 281 + #define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4) 282 + 283 + /* Wired_Tx_LC */ 284 + #define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4) 285 + #define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8) 286 + 287 + #define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32) 288 + #define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4) 289 + 290 + /* Wired_Tx_SKE */ 291 + #define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4) 292 + #define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8) 293 + 294 + /* Wired_Tx_SKE */ 295 + #define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1) 296 + #define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4) 297 + 298 + /* Wired_Tx_Repeater */ 299 + #define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155) 300 + #define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16) 301 + 302 + #define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \ 303 + 32 + 2 + 2) 
304 + 305 + #define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4) 306 + 307 + /* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */ 308 + enum hdcp_command_id { 309 + _WIDI_COMMAND_BASE = 0x00030000, 310 + WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE, 311 + HDCP_GET_SRM_STATUS, 312 + HDCP_SEND_SRM_FRAGMENT, 313 + 314 + /* The wired HDCP Tx commands */ 315 + _WIRED_COMMAND_BASE = 0x00031000, 316 + WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE, 317 + WIRED_VERIFY_RECEIVER_CERT, 318 + WIRED_AKE_SEND_HPRIME, 319 + WIRED_AKE_SEND_PAIRING_INFO, 320 + WIRED_INIT_LOCALITY_CHECK, 321 + WIRED_VALIDATE_LOCALITY, 322 + WIRED_GET_SESSION_KEY, 323 + WIRED_ENABLE_AUTH, 324 + WIRED_VERIFY_REPEATER, 325 + WIRED_REPEATER_AUTH_STREAM_REQ, 326 + WIRED_CLOSE_SESSION, 327 + 328 + _WIRED_COMMANDS_COUNT, 329 + }; 330 + 331 + union encrypted_buff { 332 + u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN]; 333 + u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN]; 334 + struct { 335 + u8 e_kh_km[HDCP_KH_LEN]; 336 + u8 m[HDCP_M_LEN]; 337 + } __packed; 338 + }; 339 + 340 + /* HDCP HECI message header. All header values are little endian. */ 341 + struct hdcp_cmd_header { 342 + u32 api_version; 343 + u32 command_id; 344 + enum fw_hdcp_status status; 345 + /* Length of the HECI message (excluding the header) */ 346 + u32 buffer_len; 347 + } __packed; 348 + 349 + /* Empty command request or response. No data follows the header. */ 350 + struct hdcp_cmd_no_data { 351 + struct hdcp_cmd_header header; 352 + } __packed; 353 + 354 + /* Uniquely identifies the hdcp port being addressed for a given command. */ 355 + struct hdcp_port_id { 356 + u8 integrated_port_type; 357 + /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */ 358 + u8 physical_port; 359 + /* attached_transcoder is for Gen11.5+. 
Set to zero for <Gen11.5 */ 360 + u8 attached_transcoder; 361 + u8 reserved; 362 + } __packed; 363 + 364 + /* 365 + * Data structures for integrated wired HDCP2 Tx in 366 + * support of the AKE protocol 367 + */ 368 + /* HECI struct for integrated wired HDCP Tx session initiation. */ 369 + struct wired_cmd_initiate_hdcp2_session_in { 370 + struct hdcp_cmd_header header; 371 + struct hdcp_port_id port; 372 + u8 protocol; /* for HDMI vs DP */ 373 + } __packed; 374 + 375 + struct wired_cmd_initiate_hdcp2_session_out { 376 + struct hdcp_cmd_header header; 377 + struct hdcp_port_id port; 378 + u8 r_tx[HDCP_2_2_RTX_LEN]; 379 + struct hdcp2_tx_caps tx_caps; 380 + } __packed; 381 + 382 + /* HECI struct for ending an integrated wired HDCP Tx session. */ 383 + struct wired_cmd_close_session_in { 384 + struct hdcp_cmd_header header; 385 + struct hdcp_port_id port; 386 + } __packed; 387 + 388 + struct wired_cmd_close_session_out { 389 + struct hdcp_cmd_header header; 390 + struct hdcp_port_id port; 391 + } __packed; 392 + 393 + /* HECI struct for integrated wired HDCP Tx Rx Cert verification. 
*/ 394 + struct wired_cmd_verify_receiver_cert_in { 395 + struct hdcp_cmd_header header; 396 + struct hdcp_port_id port; 397 + struct hdcp2_cert_rx cert_rx; 398 + u8 r_rx[HDCP_2_2_RRX_LEN]; 399 + u8 rx_caps[HDCP_2_2_RXCAPS_LEN]; 400 + } __packed; 401 + 402 + struct wired_cmd_verify_receiver_cert_out { 403 + struct hdcp_cmd_header header; 404 + struct hdcp_port_id port; 405 + u8 km_stored; 406 + u8 reserved[3]; 407 + union encrypted_buff ekm_buff; 408 + } __packed; 409 + 410 + /* HECI struct for verification of Rx's Hprime in a HDCP Tx session */ 411 + struct wired_cmd_ake_send_hprime_in { 412 + struct hdcp_cmd_header header; 413 + struct hdcp_port_id port; 414 + u8 h_prime[HDCP_2_2_H_PRIME_LEN]; 415 + } __packed; 416 + 417 + struct wired_cmd_ake_send_hprime_out { 418 + struct hdcp_cmd_header header; 419 + struct hdcp_port_id port; 420 + } __packed; 421 + 422 + /* 423 + * HECI struct for sending in AKE pairing data generated by the Rx in an 424 + * integrated wired HDCP Tx session. 425 + */ 426 + struct wired_cmd_ake_send_pairing_info_in { 427 + struct hdcp_cmd_header header; 428 + struct hdcp_port_id port; 429 + u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN]; 430 + } __packed; 431 + 432 + struct wired_cmd_ake_send_pairing_info_out { 433 + struct hdcp_cmd_header header; 434 + struct hdcp_port_id port; 435 + } __packed; 436 + 437 + /* Data structures for integrated wired HDCP2 Tx in support of the LC protocol*/ 438 + /* 439 + * HECI struct for initiating locality check with an 440 + * integrated wired HDCP Tx session. 441 + */ 442 + struct wired_cmd_init_locality_check_in { 443 + struct hdcp_cmd_header header; 444 + struct hdcp_port_id port; 445 + } __packed; 446 + 447 + struct wired_cmd_init_locality_check_out { 448 + struct hdcp_cmd_header header; 449 + struct hdcp_port_id port; 450 + u8 r_n[HDCP_2_2_RN_LEN]; 451 + } __packed; 452 + 453 + /* 454 + * HECI struct for validating an Rx's LPrime value in an 455 + * integrated wired HDCP Tx session. 
456 + */ 457 + struct wired_cmd_validate_locality_in { 458 + struct hdcp_cmd_header header; 459 + struct hdcp_port_id port; 460 + u8 l_prime[HDCP_2_2_L_PRIME_LEN]; 461 + } __packed; 462 + 463 + struct wired_cmd_validate_locality_out { 464 + struct hdcp_cmd_header header; 465 + struct hdcp_port_id port; 466 + } __packed; 467 + 468 + /* 469 + * Data structures for integrated wired HDCP2 Tx in support of the 470 + * SKE protocol 471 + */ 472 + /* HECI struct for creating session key */ 473 + struct wired_cmd_get_session_key_in { 474 + struct hdcp_cmd_header header; 475 + struct hdcp_port_id port; 476 + } __packed; 477 + 478 + struct wired_cmd_get_session_key_out { 479 + struct hdcp_cmd_header header; 480 + struct hdcp_port_id port; 481 + u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN]; 482 + u8 r_iv[HDCP_2_2_RIV_LEN]; 483 + } __packed; 484 + 485 + /* HECI struct for the Tx enable authentication command */ 486 + struct wired_cmd_enable_auth_in { 487 + struct hdcp_cmd_header header; 488 + struct hdcp_port_id port; 489 + u8 stream_type; 490 + } __packed; 491 + 492 + struct wired_cmd_enable_auth_out { 493 + struct hdcp_cmd_header header; 494 + struct hdcp_port_id port; 495 + } __packed; 496 + 497 + /* 498 + * Data structures for integrated wired HDCP2 Tx in support of 499 + * the repeater protocols 500 + */ 501 + /* 502 + * HECI struct for verifying the downstream repeater's HDCP topology in an 503 + * integrated wired HDCP Tx session. 
504 + */ 505 + struct wired_cmd_verify_repeater_in { 506 + struct hdcp_cmd_header header; 507 + struct hdcp_port_id port; 508 + u8 rx_info[HDCP_2_2_RXINFO_LEN]; 509 + u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN]; 510 + u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN]; 511 + u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN]; 512 + } __packed; 513 + 514 + struct wired_cmd_verify_repeater_out { 515 + struct hdcp_cmd_header header; 516 + struct hdcp_port_id port; 517 + u8 content_type_supported; 518 + u8 v[HDCP_2_2_V_PRIME_HALF_LEN]; 519 + } __packed; 520 + 521 + /* 522 + * HECI struct in support of stream management in an 523 + * integrated wired HDCP Tx session. 524 + */ 525 + struct wired_cmd_repeater_auth_stream_req_in { 526 + struct hdcp_cmd_header header; 527 + struct hdcp_port_id port; 528 + u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN]; 529 + u8 m_prime[HDCP_2_2_MPRIME_LEN]; 530 + __be16 k; 531 + struct hdcp2_streamid_type streams[]; 532 + } __packed; 533 + 534 + struct wired_cmd_repeater_auth_stream_req_out { 535 + struct hdcp_cmd_header header; 536 + struct hdcp_port_id port; 537 + } __packed; 538 + 539 + #endif /* _I915_HDCP_INTERFACE_H_ */
-184
include/drm/i915_mei_hdcp_interface.h
/* SPDX-License-Identifier: (GPL-2.0+) */
/*
 * Copyright © 2017-2019 Intel Corporation
 *
 * Authors:
 * Ramalingam C <ramalingam.c@intel.com>
 */

#ifndef _I915_MEI_HDCP_INTERFACE_H_
#define _I915_MEI_HDCP_INTERFACE_H_

#include <linux/mutex.h>
#include <linux/device.h>
#include <drm/display/drm_hdcp.h>

/**
 * enum hdcp_port_type - HDCP port implementation type defined by ME FW
 * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type
 * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port
 * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON
 *			   (HDMI 2.0) solution
 * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3)
 *			 solution
 */
enum hdcp_port_type {
	HDCP_PORT_TYPE_INVALID,
	HDCP_PORT_TYPE_INTEGRATED,
	HDCP_PORT_TYPE_LSPCON,
	HDCP_PORT_TYPE_CPDP
};

/**
 * enum hdcp_wired_protocol - HDCP adaptation used on the port
 * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol
 * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port
 * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port
 */
enum hdcp_wired_protocol {
	HDCP_PROTOCOL_INVALID,
	HDCP_PROTOCOL_HDMI,
	HDCP_PROTOCOL_DP
};

/**
 * enum mei_fw_ddi - ME Firmware defined index for the DDI ports
 * @MEI_DDI_INVALID_PORT: Index for an invalid DDI port
 * @MEI_DDI_B: Index for DDI B
 * @MEI_DDI_C: Index for DDI C
 * @MEI_DDI_D: Index for DDI D
 * @MEI_DDI_E: Index for DDI E
 * @MEI_DDI_F: Index for DDI F
 * @MEI_DDI_A: Index for DDI A
 * @MEI_DDI_RANGE_END: Sentinel equal to the highest valid index (DDI A)
 *
 * NOTE(review): these values are the ME FW's port numbering, which does not
 * match the driver's own DDI enumeration (A maps to 7, B starts at 1) —
 * confirm against the ME FW interface spec before reuse.
 */
enum mei_fw_ddi {
	MEI_DDI_INVALID_PORT = 0x0,

	MEI_DDI_B = 1,
	MEI_DDI_C,
	MEI_DDI_D,
	MEI_DDI_E,
	MEI_DDI_F,
	MEI_DDI_A = 7,
	MEI_DDI_RANGE_END = MEI_DDI_A,
};

/**
 * enum mei_fw_tc - ME Firmware defined index for transcoders
 * @MEI_INVALID_TRANSCODER: Index for Invalid transcoder
 * @MEI_TRANSCODER_EDP: Index for EDP Transcoder
 * @MEI_TRANSCODER_DSI0: Index for DSI0 Transcoder
 * @MEI_TRANSCODER_DSI1: Index for DSI1 Transcoder
 * @MEI_TRANSCODER_A: Index for Transcoder A
 * @MEI_TRANSCODER_B: Index for Transcoder B
 * @MEI_TRANSCODER_C: Index for Transcoder C
 * @MEI_TRANSCODER_D: Index for Transcoder D
 *
 * Note the gap: the ordinary transcoders start at 0x10 while the
 * EDP/DSI special cases follow directly after the invalid index.
 */
enum mei_fw_tc {
	MEI_INVALID_TRANSCODER = 0x00,
	MEI_TRANSCODER_EDP,
	MEI_TRANSCODER_DSI0,
	MEI_TRANSCODER_DSI1,
	MEI_TRANSCODER_A = 0x10,
	MEI_TRANSCODER_B,
	MEI_TRANSCODER_C,
	MEI_TRANSCODER_D
};

/**
 * struct hdcp_port_data - intel specific HDCP port data
 * @fw_ddi: ddi index as per ME FW
 * @fw_tc: transcoder index as per ME FW
 * @port_type: HDCP port type as per ME FW classification
 *	       (see &enum hdcp_port_type)
 * @protocol: HDCP adaptation as per ME FW (see &enum hdcp_wired_protocol)
 * @k: No of streams transmitted on a port. Only on DP MST this is != 1
 * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated.
 *	       Initialized to 0 on AKE_INIT. Incremented after every successful
 *	       transmission of RepeaterAuth_Stream_Manage message. When it rolls
 *	       over re-Auth has to be triggered.
 * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the
 *	     streams
 */
struct hdcp_port_data {
	enum mei_fw_ddi fw_ddi;
	enum mei_fw_tc fw_tc;
	u8 port_type;
	u8 protocol;
	u16 k;
	u32 seq_num_m;
	struct hdcp2_streamid_type *streams;
};

/**
 * struct i915_hdcp_component_ops - ops for HDCP2.2 services.
 * @owner: Module providing the ops
 * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session.
 *			    And Prepare AKE_Init.
 * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate
 *				     AKE_Send_Cert and prepare
 *				     AKE_Stored_Km/AKE_No_Stored_Km
 * @verify_hprime: Verify AKE_Send_H_prime
 * @store_pairing_info: Store pairing info received
 * @initiate_locality_check: Prepare LC_Init
 * @verify_lprime: Verify lprime
 * @get_session_key: Prepare SKE_Send_Eks
 * @repeater_check_flow_prepare_ack: Validate the Downstream topology
 *				     and prepare rep_ack
 * @verify_mprime: Verify mprime
 * @enable_hdcp_authentication:  Mark a port as authenticated.
 * @close_hdcp_session: Close the Wired HDCP Tx session per port.
 *			This also disables the authenticated state of the port.
 *
 * All ops take the providing device (the MEI bus device), the per-port
 * &struct hdcp_port_data, and return 0 on success or a negative errno.
 */
struct i915_hdcp_component_ops {
	/**
	 * @owner: mei_hdcp module
	 */
	struct module *owner;

	int (*initiate_hdcp2_session)(struct device *dev,
				      struct hdcp_port_data *data,
				      struct hdcp2_ake_init *ake_data);
	int (*verify_receiver_cert_prepare_km)(struct device *dev,
					       struct hdcp_port_data *data,
					       struct hdcp2_ake_send_cert
					       *rx_cert,
					       bool *km_stored,
					       struct hdcp2_ake_no_stored_km
					       *ek_pub_km,
					       size_t *msg_sz);
	int (*verify_hprime)(struct device *dev,
			     struct hdcp_port_data *data,
			     struct hdcp2_ake_send_hprime *rx_hprime);
	int (*store_pairing_info)(struct device *dev,
				  struct hdcp_port_data *data,
				  struct hdcp2_ake_send_pairing_info
				  *pairing_info);
	int (*initiate_locality_check)(struct device *dev,
				       struct hdcp_port_data *data,
				       struct hdcp2_lc_init *lc_init_data);
	int (*verify_lprime)(struct device *dev,
			     struct hdcp_port_data *data,
			     struct hdcp2_lc_send_lprime *rx_lprime);
	int (*get_session_key)(struct device *dev,
			       struct hdcp_port_data *data,
			       struct hdcp2_ske_send_eks *ske_data);
	int (*repeater_check_flow_prepare_ack)(struct device *dev,
					       struct hdcp_port_data *data,
					       struct hdcp2_rep_send_receiverid_list
					       *rep_topology,
					       struct hdcp2_rep_send_ack
					       *rep_send_ack);
	int (*verify_mprime)(struct device *dev,
			     struct hdcp_port_data *data,
			     struct hdcp2_rep_stream_ready *stream_ready);
	int (*enable_hdcp_authentication)(struct device *dev,
					  struct hdcp_port_data *data);
	int (*close_hdcp_session)(struct device *dev,
				  struct hdcp_port_data *data);
};

/**
 * struct i915_hdcp_comp_master - Used for communication between i915
 * and mei_hdcp drivers for the HDCP2.2 services
 * @mei_dev: device that provide the HDCP2.2 service from MEI Bus.
 * @ops: Ops implemented by mei_hdcp driver, used by i915 driver.
 * @mutex: Protects @mei_dev and @ops against concurrent bind/unbind
 *	   of the component.
 */
struct i915_hdcp_comp_master {
	struct device *mei_dev;
	const struct i915_hdcp_component_ops *ops;

	/* To protect the above members. */
	struct mutex mutex;
};

#endif /* _I915_MEI_HDCP_INTERFACE_H_ */