Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/display: HDCP Locality check using DMUB Fused IO

[Why]
HDCP locality check has strict timing requirements, currently broken
due to reliance on msleep which does not guarantee accuracy.
This change moves the write-poll-read sequence into DMUB using the new
generic Fused IO interface, which greatly improves timing accuracy.
New flow is enabled using DCN resource capability bit (none for now),
or using a debug flag.

[How]
* Extended mod_hdcp_config with new function for requesting DMUB
to execute a sequence of fused I2C/AUX commands and synchronously
wait until an outbox reply arrives or a timeout expires.
* If the timeout expires, send an abort to DMUB.
* Update HDCP to use the DMUB for locality check if supported.
* Add DC_HDCP_LC_FORCE_FW_ENABLE and DC_HDCP_LC_ENABLE_SW_FALLBACK.
* Make DC_HDCP_LC_FORCE_FW_ENABLE enable the new flow regardless of resource capabilities.
* Make DC_HDCP_LC_ENABLE_SW_FALLBACK enable fallback to the old SW flow.
* Clean up makefile source file listings for easier updates.

Reviewed-by: Alvin Lee <alvin.lee2@amd.com>
Signed-off-by: Dominik Kaszewski <dominik.kaszewski@amd.com>
Signed-off-by: Roman Li <roman.li@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Dominik Kaszewski and committed by
Alex Deucher
ce801e5d d01a7306

+616 -62
+138 -10
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 115 115 #include "modules/inc/mod_freesync.h" 116 116 #include "modules/power/power_helpers.h" 117 117 118 + static_assert(AMDGPU_DMUB_NOTIFICATION_MAX == DMUB_NOTIFICATION_MAX, "AMDGPU_DMUB_NOTIFICATION_MAX mismatch"); 119 + 118 120 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" 119 121 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); 120 122 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" ··· 751 749 complete(&adev->dm.dmub_aux_transfer_done); 752 750 } 753 751 752 + static void dmub_aux_fused_io_callback(struct amdgpu_device *adev, 753 + struct dmub_notification *notify) 754 + { 755 + if (!adev || !notify) { 756 + ASSERT(false); 757 + return; 758 + } 759 + 760 + const struct dmub_cmd_fused_request *req = &notify->fused_request; 761 + const uint8_t ddc_line = req->u.aux.ddc_line; 762 + 763 + if (ddc_line >= ARRAY_SIZE(adev->dm.fused_io)) { 764 + ASSERT(false); 765 + return; 766 + } 767 + 768 + struct fused_io_sync *sync = &adev->dm.fused_io[ddc_line]; 769 + 770 + static_assert(sizeof(*req) <= sizeof(sync->reply_data), "Size mismatch"); 771 + memcpy(sync->reply_data, req, sizeof(*req)); 772 + complete(&sync->replied); 773 + } 774 + 754 775 /** 755 776 * dmub_hpd_callback - DMUB HPD interrupt processing callback. 
756 777 * @adev: amdgpu_device pointer ··· 910 885 911 886 } 912 887 888 + static const char *dmub_notification_type_str(enum dmub_notification_type e) 889 + { 890 + switch (e) { 891 + case DMUB_NOTIFICATION_NO_DATA: 892 + return "NO_DATA"; 893 + case DMUB_NOTIFICATION_AUX_REPLY: 894 + return "AUX_REPLY"; 895 + case DMUB_NOTIFICATION_HPD: 896 + return "HPD"; 897 + case DMUB_NOTIFICATION_HPD_IRQ: 898 + return "HPD_IRQ"; 899 + case DMUB_NOTIFICATION_SET_CONFIG_REPLY: 900 + return "SET_CONFIG_REPLY"; 901 + case DMUB_NOTIFICATION_DPIA_NOTIFICATION: 902 + return "DPIA_NOTIFICATION"; 903 + case DMUB_NOTIFICATION_HPD_SENSE_NOTIFY: 904 + return "HPD_SENSE_NOTIFY"; 905 + case DMUB_NOTIFICATION_FUSED_IO: 906 + return "FUSED_IO"; 907 + default: 908 + return "<unknown>"; 909 + } 910 + } 911 + 913 912 #define DMUB_TRACE_MAX_READ 64 914 913 /** 915 914 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt ··· 951 902 struct dmcub_trace_buf_entry entry = { 0 }; 952 903 u32 count = 0; 953 904 struct dmub_hpd_work *dmub_hpd_wrk; 954 - static const char *const event_type[] = { 955 - "NO_DATA", 956 - "AUX_REPLY", 957 - "HPD", 958 - "HPD_IRQ", 959 - "SET_CONFIGC_REPLY", 960 - "DPIA_NOTIFICATION", 961 - "HPD_SENSE_NOTIFY", 962 - }; 963 905 964 906 do { 965 907 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { ··· 980 940 } 981 941 if (!dm->dmub_callback[notify.type]) { 982 942 drm_warn(adev_to_drm(adev), "DMUB notification skipped due to no handler: type=%s\n", 983 - event_type[notify.type]); 943 + dmub_notification_type_str(notify.type)); 984 944 continue; 985 945 } 986 946 if (dm->dmub_thread_offload[notify.type] == true) { ··· 2171 2131 adev->dm.dc->debug.using_dml21 = true; 2172 2132 } 2173 2133 2134 + if (amdgpu_dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE) 2135 + adev->dm.dc->debug.hdcp_lc_force_fw_enable = true; 2136 + 2137 + if (amdgpu_dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK) 2138 + adev->dm.dc->debug.hdcp_lc_enable_sw_fallback = true; 2139 + 2174 2140 
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; 2175 2141 2176 2142 /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ ··· 2257 2211 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, 2258 2212 dmub_aux_setconfig_callback, false)) { 2259 2213 drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub aux callback"); 2214 + goto error; 2215 + } 2216 + 2217 + for (size_t i = 0; i < ARRAY_SIZE(adev->dm.fused_io); i++) 2218 + init_completion(&adev->dm.fused_io[i].replied); 2219 + 2220 + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO, 2221 + dmub_aux_fused_io_callback, false)) { 2222 + drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub fused io callback"); 2260 2223 goto error; 2261 2224 } 2262 2225 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. ··· 12865 12810 reinit_completion(&adev->dm.dmub_aux_transfer_done); 12866 12811 mutex_unlock(&adev->dm.dpia_aux_lock); 12867 12812 return ret; 12813 + } 12814 + 12815 + static void abort_fused_io( 12816 + struct dc_context *ctx, 12817 + const struct dmub_cmd_fused_request *request 12818 + ) 12819 + { 12820 + union dmub_rb_cmd command = { 0 }; 12821 + struct dmub_rb_cmd_fused_io *io = &command.fused_io; 12822 + 12823 + io->header.type = DMUB_CMD__FUSED_IO; 12824 + io->header.sub_type = DMUB_CMD__FUSED_IO_ABORT; 12825 + io->header.payload_bytes = sizeof(*io) - sizeof(io->header); 12826 + io->request = *request; 12827 + dm_execute_dmub_cmd(ctx, &command, DM_DMUB_WAIT_TYPE_NO_WAIT); 12828 + } 12829 + 12830 + static bool execute_fused_io( 12831 + struct amdgpu_device *dev, 12832 + struct dc_context *ctx, 12833 + union dmub_rb_cmd *commands, 12834 + uint8_t count, 12835 + uint32_t timeout_us 12836 + ) 12837 + { 12838 + const uint8_t ddc_line = commands[0].fused_io.request.u.aux.ddc_line; 12839 + 12840 + if (ddc_line >= ARRAY_SIZE(dev->dm.fused_io)) 12841 + return false; 12842 + 12843 + struct fused_io_sync 
*sync = &dev->dm.fused_io[ddc_line]; 12844 + struct dmub_rb_cmd_fused_io *first = &commands[0].fused_io; 12845 + const bool result = dm_execute_dmub_cmd_list(ctx, count, commands, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) 12846 + && first->header.ret_status 12847 + && first->request.status == FUSED_REQUEST_STATUS_SUCCESS; 12848 + 12849 + if (!result) 12850 + return false; 12851 + 12852 + while (wait_for_completion_timeout(&sync->replied, usecs_to_jiffies(timeout_us))) { 12853 + reinit_completion(&sync->replied); 12854 + 12855 + struct dmub_cmd_fused_request *reply = (struct dmub_cmd_fused_request *) sync->reply_data; 12856 + 12857 + static_assert(sizeof(*reply) <= sizeof(sync->reply_data), "Size mismatch"); 12858 + 12859 + if (reply->identifier == first->request.identifier) { 12860 + first->request = *reply; 12861 + return true; 12862 + } 12863 + } 12864 + 12865 + reinit_completion(&sync->replied); 12866 + first->request.status = FUSED_REQUEST_STATUS_TIMEOUT; 12867 + abort_fused_io(ctx, &first->request); 12868 + return false; 12869 + } 12870 + 12871 + bool amdgpu_dm_execute_fused_io( 12872 + struct amdgpu_device *dev, 12873 + struct dc_link *link, 12874 + union dmub_rb_cmd *commands, 12875 + uint8_t count, 12876 + uint32_t timeout_us) 12877 + { 12878 + struct amdgpu_display_manager *dm = &dev->dm; 12879 + 12880 + mutex_lock(&dm->dpia_aux_lock); 12881 + 12882 + const bool result = execute_fused_io(dev, link->ctx, commands, count, timeout_us); 12883 + 12884 + mutex_unlock(&dm->dpia_aux_lock); 12885 + return result; 12868 12886 } 12869 12887 12870 12888 int amdgpu_dm_process_dmub_set_config_sync(
+15 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 50 50 51 51 #define AMDGPU_DM_MAX_NUM_EDP 2 52 52 53 - #define AMDGPU_DMUB_NOTIFICATION_MAX 7 53 + #define AMDGPU_DMUB_NOTIFICATION_MAX 8 54 54 55 55 #define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A 56 56 #define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 ··· 81 81 struct dmub_srv; 82 82 struct dc_plane_state; 83 83 struct dmub_notification; 84 + struct dmub_cmd_fused_request; 84 85 85 86 struct amd_vsdb_block { 86 87 unsigned char ieee_id[3]; ··· 638 637 * OEM i2c bus 639 638 */ 640 639 struct amdgpu_i2c_adapter *oem_i2c; 640 + 641 + struct fused_io_sync { 642 + struct completion replied; 643 + char reply_data[0x40]; // Cannot include dmub_cmd here 644 + } fused_io[8]; 641 645 }; 642 646 643 647 enum dsc_clock_force_state { ··· 1021 1015 1022 1016 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index, 1023 1017 struct aux_payload *payload, enum aux_return_code_type *operation_result); 1018 + 1019 + bool amdgpu_dm_execute_fused_io( 1020 + struct amdgpu_device *dev, 1021 + struct dc_link *link, 1022 + union dmub_rb_cmd *commands, 1023 + uint8_t count, 1024 + uint32_t timeout_us 1025 + ); 1024 1026 1025 1027 int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index, 1026 1028 struct set_config_cmd_payload *payload, enum set_config_status *operation_result);
+49 -7
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
··· 26 26 #include "amdgpu_dm_hdcp.h" 27 27 #include "amdgpu.h" 28 28 #include "amdgpu_dm.h" 29 + #include "dc_fused_io.h" 29 30 #include "dm_helpers.h" 30 31 #include <drm/display/drm_hdcp_helper.h> 31 32 #include "hdcp_psp.h" ··· 75 74 struct dc_link *link = handle; 76 75 77 76 return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); 77 + } 78 + 79 + static bool lp_atomic_write_poll_read_i2c( 80 + void *handle, 81 + const struct mod_hdcp_atomic_op_i2c *write, 82 + const struct mod_hdcp_atomic_op_i2c *poll, 83 + struct mod_hdcp_atomic_op_i2c *read, 84 + uint32_t poll_timeout_us, 85 + uint8_t poll_mask_msb 86 + ) 87 + { 88 + struct dc_link *link = handle; 89 + 90 + return dm_atomic_write_poll_read_i2c(link, write, poll, read, poll_timeout_us, poll_mask_msb); 91 + } 92 + 93 + static bool lp_atomic_write_poll_read_aux( 94 + void *handle, 95 + const struct mod_hdcp_atomic_op_aux *write, 96 + const struct mod_hdcp_atomic_op_aux *poll, 97 + struct mod_hdcp_atomic_op_aux *read, 98 + uint32_t poll_timeout_us, 99 + uint8_t poll_mask_msb 100 + ) 101 + { 102 + struct dc_link *link = handle; 103 + 104 + return dm_atomic_write_poll_read_aux(link, write, poll, read, poll_timeout_us, poll_mask_msb); 78 105 } 79 106 80 107 static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size) ··· 748 719 INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); 749 720 INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); 750 721 751 - hdcp_work[i].hdcp.config.psp.handle = &adev->psp; 722 + struct mod_hdcp_config *config = &hdcp_work[i].hdcp.config; 723 + struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs; 724 + 725 + config->psp.handle = &adev->psp; 752 726 if (dc->ctx->dce_version == DCN_VERSION_3_1 || 753 727 dc->ctx->dce_version == DCN_VERSION_3_14 || 754 728 dc->ctx->dce_version == DCN_VERSION_3_15 || ··· 759 727 dc->ctx->dce_version == DCN_VERSION_3_51 || 760 728 
dc->ctx->dce_version == DCN_VERSION_3_6 || 761 729 dc->ctx->dce_version == DCN_VERSION_3_16) 762 - hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1; 763 - hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); 764 - hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; 765 - hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; 766 - hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; 767 - hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; 730 + config->psp.caps.dtm_v3_supported = 1; 731 + config->ddc.handle = dc_get_link_at_index(dc, i); 732 + 733 + ddc_funcs->write_i2c = lp_write_i2c; 734 + ddc_funcs->read_i2c = lp_read_i2c; 735 + ddc_funcs->write_dpcd = lp_write_dpcd; 736 + ddc_funcs->read_dpcd = lp_read_dpcd; 737 + 738 + config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback; 739 + if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) { 740 + ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c; 741 + ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux; 742 + } else { 743 + ddc_funcs->atomic_write_poll_read_i2c = NULL; 744 + ddc_funcs->atomic_write_poll_read_aux = NULL; 745 + } 768 746 769 747 memset(hdcp_work[i].aconnector, 0, 770 748 sizeof(struct amdgpu_dm_connector *) *
+13
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 630 630 return result; 631 631 } 632 632 633 + bool dm_helpers_execute_fused_io( 634 + struct dc_context *ctx, 635 + struct dc_link *link, 636 + union dmub_rb_cmd *commands, 637 + uint8_t count, 638 + uint32_t timeout_us 639 + ) 640 + { 641 + struct amdgpu_device *dev = ctx->driver_context; 642 + 643 + return amdgpu_dm_execute_fused_io(dev, link, commands, count, timeout_us); 644 + } 645 + 633 646 static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, 634 647 bool is_write_cmd, 635 648 unsigned char cmd,
+19 -20
drivers/gpu/drm/amd/display/dc/Makefile
··· 53 53 54 54 ifdef CONFIG_DRM_AMD_DC_FP 55 55 DC_LIBS += sspl 56 - DC_SPL_TRANS += dc_spl_translate.o 56 + AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, dc_spl_translate.o) 57 57 endif 58 58 59 59 AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS))) 60 60 61 61 include $(AMD_DC) 62 62 63 - DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ 64 - dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o 63 + FILES = 64 + FILES += dc_dmub_srv.o 65 + FILES += dc_edid_parser.o 66 + FILES += dc_fused_io.o 67 + FILES += dc_helper.o 68 + FILES += core/dc.o 69 + FILES += core/dc_debug.o 70 + FILES += core/dc_hw_sequencer.o 71 + FILES += core/dc_link_enc_cfg.o 72 + FILES += core/dc_link_exports.o 73 + FILES += core/dc_resource.o 74 + FILES += core/dc_sink.o 75 + FILES += core/dc_stat.o 76 + FILES += core/dc_state.o 77 + FILES += core/dc_stream.o 78 + FILES += core/dc_surface.o 79 + FILES += core/dc_vm_helper.o 65 80 66 - DISPLAY_CORE += dc_vm_helper.o 81 + AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/, $(FILES)) 67 82 68 - AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE)) 69 - 70 - AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o) 71 - 72 - AMD_DC_SPL_TRANS = $(addprefix $(AMDDALPATH)/dc/,$(DC_SPL_TRANS)) 73 - 74 - AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE) 75 - AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE) 76 - 77 - DC_DMUB += dc_dmub_srv.o 78 - DC_EDID += dc_edid_parser.o 79 - AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB)) 80 - AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID)) 81 - AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID) 82 - 83 - AMD_DISPLAY_FILES += $(AMD_DC_SPL_TRANS)
+4
drivers/gpu/drm/amd/display/dc/dc.h
··· 282 282 bool edp_dsc_support; 283 283 bool vbios_lttpr_aware; 284 284 bool vbios_lttpr_enable; 285 + bool fused_io_supported; 285 286 uint32_t max_otg_num; 286 287 uint32_t max_cab_allocation_bytes; 287 288 uint32_t cache_line_size; ··· 903 902 bool force_single_disp_pipe_split; 904 903 bool voltage_align_fclk; 905 904 bool disable_min_fclk; 905 + 906 + bool hdcp_lc_force_fw_enable; 907 + bool hdcp_lc_enable_sw_fallback; 906 908 907 909 bool disable_dfs_bypass; 908 910 bool disable_dpp_power_gate;
+144
drivers/gpu/drm/amd/display/dc/dc_fused_io.c
··· 1 + // SPDX-License-Identifier: MIT 2 + // 3 + // Copyright 2025 Advanced Micro Devices, Inc. 4 + 5 + #include "dc_fused_io.h" 6 + 7 + #include "dm_helpers.h" 8 + #include "gpio.h" 9 + 10 + static bool op_i2c_convert( 11 + union dmub_rb_cmd *cmd, 12 + const struct mod_hdcp_atomic_op_i2c *op, 13 + enum dmub_cmd_fused_request_type type, 14 + uint32_t ddc_line 15 + ) 16 + { 17 + struct dmub_cmd_fused_request *req = &cmd->fused_io.request; 18 + struct dmub_cmd_fused_request_location_i2c *loc = &req->u.i2c; 19 + 20 + if (!op || op->size > sizeof(req->buffer)) 21 + return false; 22 + 23 + req->type = type; 24 + loc->is_aux = false; 25 + loc->ddc_line = ddc_line; 26 + loc->address = op->address; 27 + loc->offset = op->offset; 28 + loc->length = op->size; 29 + memcpy(req->buffer, op->data, op->size); 30 + 31 + return true; 32 + } 33 + 34 + static bool op_aux_convert( 35 + union dmub_rb_cmd *cmd, 36 + const struct mod_hdcp_atomic_op_aux *op, 37 + enum dmub_cmd_fused_request_type type, 38 + uint32_t ddc_line 39 + ) 40 + { 41 + struct dmub_cmd_fused_request *req = &cmd->fused_io.request; 42 + struct dmub_cmd_fused_request_location_aux *loc = &req->u.aux; 43 + 44 + if (!op || op->size > sizeof(req->buffer)) 45 + return false; 46 + 47 + req->type = type; 48 + loc->is_aux = true; 49 + loc->ddc_line = ddc_line; 50 + loc->address = op->address; 51 + loc->length = op->size; 52 + memcpy(req->buffer, op->data, op->size); 53 + 54 + return true; 55 + } 56 + 57 + static bool atomic_write_poll_read( 58 + struct dc_link *link, 59 + union dmub_rb_cmd commands[3], 60 + uint32_t poll_timeout_us, 61 + uint8_t poll_mask_msb 62 + ) 63 + { 64 + const uint8_t count = 3; 65 + const uint32_t timeout_per_request_us = 10000; 66 + const uint32_t timeout_per_aux_transaction_us = 10000; 67 + uint64_t timeout_us = 0; 68 + 69 + commands[1].fused_io.request.poll_mask_msb = poll_mask_msb; 70 + commands[1].fused_io.request.timeout_us = poll_timeout_us; 71 + 72 + for (uint8_t i = 0; i < count; i++) { 73 + 
struct dmub_rb_cmd_fused_io *io = &commands[i].fused_io; 74 + 75 + io->header.type = DMUB_CMD__FUSED_IO; 76 + io->header.sub_type = DMUB_CMD__FUSED_IO_EXECUTE; 77 + io->header.multi_cmd_pending = i != count - 1; 78 + io->header.payload_bytes = sizeof(commands[i].fused_io) - sizeof(io->header); 79 + 80 + timeout_us += timeout_per_request_us + io->request.timeout_us; 81 + if (!io->request.timeout_us && io->request.u.aux.is_aux) 82 + timeout_us += timeout_per_aux_transaction_us * (io->request.u.aux.length / 16); 83 + } 84 + 85 + if (!dm_helpers_execute_fused_io(link->ctx, link, commands, count, timeout_us)) 86 + return false; 87 + 88 + return commands[0].fused_io.request.status == FUSED_REQUEST_STATUS_SUCCESS; 89 + } 90 + 91 + bool dm_atomic_write_poll_read_i2c( 92 + struct dc_link *link, 93 + const struct mod_hdcp_atomic_op_i2c *write, 94 + const struct mod_hdcp_atomic_op_i2c *poll, 95 + struct mod_hdcp_atomic_op_i2c *read, 96 + uint32_t poll_timeout_us, 97 + uint8_t poll_mask_msb 98 + ) 99 + { 100 + if (!link) 101 + return false; 102 + 103 + const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en; 104 + union dmub_rb_cmd commands[3] = { 0 }; 105 + const bool converted = op_i2c_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line) 106 + && op_i2c_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line) 107 + && op_i2c_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line); 108 + 109 + if (!converted) 110 + return false; 111 + 112 + const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb); 113 + 114 + memcpy(read->data, commands[0].fused_io.request.buffer, read->size); 115 + return result; 116 + } 117 + 118 + bool dm_atomic_write_poll_read_aux( 119 + struct dc_link *link, 120 + const struct mod_hdcp_atomic_op_aux *write, 121 + const struct mod_hdcp_atomic_op_aux *poll, 122 + struct mod_hdcp_atomic_op_aux *read, 123 + uint32_t poll_timeout_us, 124 + uint8_t poll_mask_msb 125 + ) 126 + { 127 + if (!link) 128 + return false; 
129 + 130 + const uint32_t ddc_line = link->ddc->ddc_pin->pin_data->en; 131 + union dmub_rb_cmd commands[3] = { 0 }; 132 + const bool converted = op_aux_convert(&commands[0], write, FUSED_REQUEST_WRITE, ddc_line) 133 + && op_aux_convert(&commands[1], poll, FUSED_REQUEST_POLL, ddc_line) 134 + && op_aux_convert(&commands[2], read, FUSED_REQUEST_READ, ddc_line); 135 + 136 + if (!converted) 137 + return false; 138 + 139 + const bool result = atomic_write_poll_read(link, commands, poll_timeout_us, poll_mask_msb); 140 + 141 + memcpy(read->data, commands[0].fused_io.request.buffer, read->size); 142 + return result; 143 + } 144 +
+31
drivers/gpu/drm/amd/display/dc/dc_fused_io.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright 2025 Advanced Micro Devices, Inc. 4 + */ 5 + 6 + #ifndef __DC_FUSED_IO_H__ 7 + #define __DC_FUSED_IO_H__ 8 + 9 + #include "dc.h" 10 + #include "mod_hdcp.h" 11 + 12 + bool dm_atomic_write_poll_read_i2c( 13 + struct dc_link *link, 14 + const struct mod_hdcp_atomic_op_i2c *write, 15 + const struct mod_hdcp_atomic_op_i2c *poll, 16 + struct mod_hdcp_atomic_op_i2c *read, 17 + uint32_t poll_timeout_us, 18 + uint8_t poll_mask_msb 19 + ); 20 + 21 + bool dm_atomic_write_poll_read_aux( 22 + struct dc_link *link, 23 + const struct mod_hdcp_atomic_op_aux *write, 24 + const struct mod_hdcp_atomic_op_aux *poll, 25 + struct mod_hdcp_atomic_op_aux *read, 26 + uint32_t poll_timeout_us, 27 + uint8_t poll_mask_msb 28 + ); 29 + 30 + #endif // __DC_FUSED_IO_H__ 31 +
+8
drivers/gpu/drm/amd/display/dc/dm_helpers.h
··· 153 153 const struct dc_link *link, 154 154 struct i2c_command *cmd); 155 155 156 + bool dm_helpers_execute_fused_io( 157 + struct dc_context *ctx, 158 + struct dc_link *link, 159 + union dmub_rb_cmd *commands, 160 + uint8_t count, 161 + uint32_t timeout_us 162 + ); 163 + 156 164 bool dm_helpers_dp_write_dsc_enable( 157 165 struct dc_context *ctx, 158 166 const struct dc_stream_state *stream,
+2
drivers/gpu/drm/amd/display/dmub/dmub_srv.h
··· 142 142 DMUB_NOTIFICATION_SET_CONFIG_REPLY, 143 143 DMUB_NOTIFICATION_DPIA_NOTIFICATION, 144 144 DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, 145 + DMUB_NOTIFICATION_FUSED_IO, 145 146 DMUB_NOTIFICATION_MAX 146 147 }; 147 148 ··· 596 595 enum dp_hpd_status hpd_status; 597 596 enum set_config_status sc_status; 598 597 struct dmub_rb_cmd_hpd_sense_notify_data hpd_sense_notify; 598 + struct dmub_cmd_fused_request fused_request; 599 599 }; 600 600 }; 601 601
+4
drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
··· 102 102 &cmd.hpd_sense_notify.data, 103 103 sizeof(cmd.hpd_sense_notify.data)); 104 104 break; 105 + case DMUB_OUT_CMD__FUSED_IO: 106 + notify->type = DMUB_NOTIFICATION_FUSED_IO; 107 + dmub_memcpy(&notify->fused_request, &cmd.fused_io.request, sizeof(cmd.fused_io.request)); 108 + break; 105 109 default: 106 110 notify->type = DMUB_NOTIFICATION_NO_DATA; 107 111 break;
+1
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
··· 386 386 enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp); 387 387 enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp); 388 388 enum mod_hdcp_status mod_hdcp_clear_cp_irq_status(struct mod_hdcp *hdcp); 389 + enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp); 389 390 390 391 /* hdcp version helpers */ 391 392 static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
+43 -10
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
··· 452 452 return status; 453 453 } 454 454 455 - static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp, 455 + static enum mod_hdcp_status locality_check_sw(struct mod_hdcp *hdcp, 456 456 struct mod_hdcp_event_context *event_ctx, 457 457 struct mod_hdcp_transition_input_hdcp2 *input) 458 458 { 459 459 enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; 460 460 461 - if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { 462 - event_ctx->unexpected_event = 1; 463 - goto out; 464 - } 465 - 466 - if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init, 467 - &input->lc_init_prepare, &status, 468 - hdcp, "lc_init_prepare")) 469 - goto out; 470 461 if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init, 471 462 &input->lc_init_write, &status, 472 463 hdcp, "lc_init_write")) ··· 473 482 &input->l_prime_read, &status, 474 483 hdcp, "l_prime_read")) 475 484 goto out; 485 + out: 486 + return status; 487 + } 488 + 489 + static enum mod_hdcp_status locality_check_fw(struct mod_hdcp *hdcp, 490 + struct mod_hdcp_event_context *event_ctx, 491 + struct mod_hdcp_transition_input_hdcp2 *input) 492 + { 493 + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; 494 + 495 + if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw, 496 + &input->l_prime_read, &status, 497 + hdcp, "l_prime_read")) 498 + goto out; 499 + 500 + out: 501 + return status; 502 + } 503 + 504 + static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp, 505 + struct mod_hdcp_event_context *event_ctx, 506 + struct mod_hdcp_transition_input_hdcp2 *input) 507 + { 508 + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; 509 + const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c 510 + && hdcp->config.ddc.funcs.atomic_write_poll_read_aux 511 + && !hdcp->connection.link.adjust.hdcp2.force_sw_locality_check; 512 + 513 + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { 514 + event_ctx->unexpected_event = 1; 515 + goto out; 516 + } 517 + 518 + if 
(!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_prepare_lc_init, 519 + &input->lc_init_prepare, &status, 520 + hdcp, "lc_init_prepare")) 521 + goto out; 522 + 523 + status = (use_fw ? locality_check_fw : locality_check_sw)(hdcp, event_ctx, input); 524 + if (status != MOD_HDCP_STATUS_SUCCESS) 525 + goto out; 526 + 476 527 if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime, 477 528 &input->l_prime_validation, &status, 478 529 hdcp, "l_prime_validation"))
+36 -12
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
··· 184 184 callback_in_ms(0, output); 185 185 set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK); 186 186 break; 187 - case H2_A2_LOCALITY_CHECK: 187 + case H2_A2_LOCALITY_CHECK: { 188 + const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c 189 + && !adjust->hdcp2.force_sw_locality_check; 190 + 191 + /* 192 + * 1A-05: consider disconnection after LC init a failure 193 + * 1A-13-1: consider invalid l' a failure 194 + * 1A-13-2: consider l' timeout a failure 195 + */ 188 196 if (hdcp->state.stay_count > 10 || 189 197 input->lc_init_prepare != PASS || 190 - input->lc_init_write != PASS || 191 - input->l_prime_available_poll != PASS || 192 - input->l_prime_read != PASS) { 193 - /* 194 - * 1A-05: consider disconnection after LC init a failure 195 - * 1A-13-1: consider invalid l' a failure 196 - * 1A-13-2: consider l' timeout a failure 197 - */ 198 + (!use_fw && input->lc_init_write != PASS) || 199 + (!use_fw && input->l_prime_available_poll != PASS)) { 200 + fail_and_restart_in_ms(0, &status, output); 201 + break; 202 + } else if (input->l_prime_read != PASS) { 203 + if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) { 204 + adjust->hdcp2.force_sw_locality_check = true; 205 + callback_in_ms(0, output); 206 + break; 207 + } 208 + 198 209 fail_and_restart_in_ms(0, &status, output); 199 210 break; 200 211 } else if (input->l_prime_validation != PASS) { ··· 216 205 callback_in_ms(0, output); 217 206 set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER); 218 207 break; 208 + } 219 209 case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER: 220 210 if (input->eks_prepare != PASS || 221 211 input->eks_write != PASS) { ··· 510 498 callback_in_ms(0, output); 511 499 set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK); 512 500 break; 513 - case D2_A2_LOCALITY_CHECK: 501 + case D2_A2_LOCALITY_CHECK: { 502 + const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_aux 503 + && !adjust->hdcp2.force_sw_locality_check; 504 + 514 505 if 
(hdcp->state.stay_count > 10 || 515 506 input->lc_init_prepare != PASS || 516 - input->lc_init_write != PASS || 517 - input->l_prime_read != PASS) { 507 + (!use_fw && input->lc_init_write != PASS)) { 518 508 /* 1A-12: consider invalid l' a failure */ 509 + fail_and_restart_in_ms(0, &status, output); 510 + break; 511 + } else if (input->l_prime_read != PASS) { 512 + if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) { 513 + adjust->hdcp2.force_sw_locality_check = true; 514 + callback_in_ms(0, output); 515 + break; 516 + } 517 + 519 518 fail_and_restart_in_ms(0, &status, output); 520 519 break; 521 520 } else if (input->l_prime_validation != PASS) { ··· 537 514 callback_in_ms(0, output); 538 515 set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER); 539 516 break; 517 + } 540 518 case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER: 541 519 if (input->eks_prepare != PASS || 542 520 input->eks_write != PASS) {
+73
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
··· 688 688 689 689 return MOD_HDCP_STATUS_INVALID_OPERATION; 690 690 } 691 + 692 + static bool write_stall_read_lc_fw_aux(struct mod_hdcp *hdcp) 693 + { 694 + struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2; 695 + 696 + struct mod_hdcp_atomic_op_aux write = { 697 + hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT], 698 + hdcp2->lc_init + 1, 699 + sizeof(hdcp2->lc_init) - 1, 700 + }; 701 + struct mod_hdcp_atomic_op_aux stall = { 0, NULL, 0, }; 702 + struct mod_hdcp_atomic_op_aux read = { 703 + hdcp_dpcd_addrs[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME], 704 + hdcp2->lc_l_prime + 1, 705 + sizeof(hdcp2->lc_l_prime) - 1, 706 + }; 707 + 708 + hdcp2->lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME; 709 + 710 + return hdcp->config.ddc.funcs.atomic_write_poll_read_aux( 711 + hdcp->config.ddc.handle, 712 + &write, 713 + &stall, 714 + &read, 715 + 16 * 1000, 716 + 0 717 + ); 718 + } 719 + 720 + static bool write_poll_read_lc_fw_i2c(struct mod_hdcp *hdcp) 721 + { 722 + struct mod_hdcp_message_hdcp2 *hdcp2 = &hdcp->auth.msg.hdcp2; 723 + uint8_t expected_rxstatus[2] = { sizeof(hdcp2->lc_l_prime) }; 724 + 725 + hdcp->buf[0] = hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_WRITE_LC_INIT]; 726 + memmove(&hdcp->buf[1], hdcp2->lc_init, sizeof(hdcp2->lc_init)); 727 + 728 + struct mod_hdcp_atomic_op_i2c write = { 729 + HDCP_I2C_ADDR, 730 + 0, 731 + hdcp->buf, 732 + sizeof(hdcp2->lc_init) + 1, 733 + }; 734 + struct mod_hdcp_atomic_op_i2c poll = { 735 + HDCP_I2C_ADDR, 736 + hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_RXSTATUS], 737 + expected_rxstatus, 738 + sizeof(expected_rxstatus), 739 + }; 740 + struct mod_hdcp_atomic_op_i2c read = { 741 + HDCP_I2C_ADDR, 742 + hdcp_i2c_offsets[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME], 743 + hdcp2->lc_l_prime, 744 + sizeof(hdcp2->lc_l_prime), 745 + }; 746 + 747 + return hdcp->config.ddc.funcs.atomic_write_poll_read_i2c( 748 + hdcp->config.ddc.handle, 749 + &write, 750 + &poll, 751 + &read, 752 + 20 * 1000, 753 + 6 754 + ); 755 + } 756 + 757 + enum 
mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp) 758 + { 759 + const bool success = (is_dp_hdcp(hdcp) ? write_stall_read_lc_fw_aux : write_poll_read_lc_fw_i2c)(hdcp); 760 + 761 + return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; 762 + } 763 +
+36 -2
drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
··· 133 133 MOD_HDCP_DISPLAY_DISABLE_ENCRYPTION, 134 134 }; 135 135 136 + struct mod_hdcp_atomic_op_i2c { 137 + uint8_t address; 138 + uint8_t offset; 139 + uint8_t *data; 140 + uint32_t size; 141 + }; 142 + 143 + struct mod_hdcp_atomic_op_aux { 144 + uint32_t address; 145 + uint8_t *data; 146 + uint32_t size; 147 + }; 148 + 136 149 struct mod_hdcp_ddc { 137 150 void *handle; 138 - struct { 151 + struct mod_hdcp_ddc_funcs { 139 152 bool (*read_i2c)(void *handle, 140 153 uint32_t address, 141 154 uint8_t offset, ··· 166 153 uint32_t address, 167 154 const uint8_t *data, 168 155 uint32_t size); 156 + bool (*atomic_write_poll_read_i2c)( 157 + void *handle, 158 + const struct mod_hdcp_atomic_op_i2c *write, 159 + const struct mod_hdcp_atomic_op_i2c *poll, 160 + struct mod_hdcp_atomic_op_i2c *read, 161 + uint32_t poll_timeout_us, 162 + uint8_t poll_mask_msb 163 + ); 164 + bool (*atomic_write_poll_read_aux)( 165 + void *handle, 166 + const struct mod_hdcp_atomic_op_aux *write, 167 + const struct mod_hdcp_atomic_op_aux *poll, 168 + struct mod_hdcp_atomic_op_aux *read, 169 + uint32_t poll_timeout_us, 170 + uint8_t poll_mask_msb 171 + ); 169 172 } funcs; 170 173 }; 171 174 ··· 214 185 uint8_t force_type : 2; 215 186 uint8_t force_no_stored_km : 1; 216 187 uint8_t increase_h_prime_timeout: 1; 217 - uint8_t reserved : 3; 188 + uint8_t force_sw_locality_check : 1; 189 + uint8_t reserved : 2; 218 190 }; 219 191 220 192 struct mod_hdcp_link_adjustment { ··· 302 272 struct mod_hdcp_config { 303 273 struct mod_hdcp_psp psp; 304 274 struct mod_hdcp_ddc ddc; 275 + struct { 276 + uint8_t lc_enable_sw_fallback : 1; 277 + uint8_t reserved : 7; 278 + } debug; 305 279 uint8_t index; 306 280 }; 307 281