Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/display: Add misc DC changes for DCN401

Add miscellaneous changes to enable DCN401 init

Signed-off-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Acked-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Aurabindo Pillai and committed by
Alex Deucher
00c39110 da87132f

+1837 -181
+29 -4
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 151 151 #define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin" 152 152 MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB); 153 153 154 + #define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin" 155 + MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB); 156 + 154 157 /* Number of bytes in PSP header for firmware. */ 155 158 #define PSP_HEADER_BYTES 0x100 156 159 ··· 1226 1223 case IP_VERSION(3, 1, 4): 1227 1224 case IP_VERSION(3, 5, 0): 1228 1225 case IP_VERSION(3, 5, 1): 1226 + case IP_VERSION(4, 0, 1): 1229 1227 hw_params.dpia_supported = true; 1230 1228 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; 1231 1229 break; ··· 1785 1781 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) 1786 1782 adev->dm.dc->debug.force_subvp_mclk_switch = true; 1787 1783 1788 - if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) 1784 + if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) { 1789 1785 adev->dm.dc->debug.using_dml2 = true; 1786 + adev->dm.dc->debug.using_dml21 = true; 1787 + } 1790 1788 1791 1789 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; 1792 1790 ··· 2065 2059 case IP_VERSION(3, 2, 1): 2066 2060 case IP_VERSION(3, 5, 0): 2067 2061 case IP_VERSION(3, 5, 1): 2062 + case IP_VERSION(4, 0, 1): 2068 2063 return 0; 2069 2064 default: 2070 2065 break; ··· 2189 2182 case IP_VERSION(3, 5, 1): 2190 2183 dmub_asic = DMUB_ASIC_DCN35; 2191 2184 break; 2185 + case IP_VERSION(4, 0, 1): 2186 + dmub_asic = DMUB_ASIC_DCN401; 2187 + break; 2188 + 2192 2189 default: 2193 2190 /* ASIC doesn't support DMUB. 
*/ 2194 2191 return 0; ··· 4534 4523 case IP_VERSION(2, 1, 0): 4535 4524 case IP_VERSION(3, 5, 0): 4536 4525 case IP_VERSION(3, 5, 1): 4526 + case IP_VERSION(4, 0, 1): 4537 4527 if (register_outbox_irq_handlers(dm->adev)) { 4538 4528 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4539 4529 goto fail; ··· 4557 4545 case IP_VERSION(3, 2, 1): 4558 4546 case IP_VERSION(3, 5, 0): 4559 4547 case IP_VERSION(3, 5, 1): 4548 + case IP_VERSION(4, 0, 1): 4560 4549 psr_feature_enabled = true; 4561 4550 break; 4562 4551 default: ··· 4729 4716 case IP_VERSION(3, 2, 1): 4730 4717 case IP_VERSION(3, 5, 0): 4731 4718 case IP_VERSION(3, 5, 1): 4719 + case IP_VERSION(4, 0, 1): 4732 4720 if (dcn10_register_irq_handlers(dm->adev)) { 4733 4721 DRM_ERROR("DM: Failed to initialize IRQ\n"); 4734 4722 goto fail; ··· 4866 4852 case IP_VERSION(3, 5, 1): 4867 4853 fw_name_dmub = FIRMWARE_DCN_351_DMUB; 4868 4854 break; 4855 + case IP_VERSION(4, 0, 1): 4856 + fw_name_dmub = FIRMWARE_DCN_401_DMUB; 4857 + break; 4869 4858 default: 4870 4859 /* ASIC doesn't support DMUB. 
*/ 4871 4860 return 0; ··· 4993 4976 case IP_VERSION(3, 2, 1): 4994 4977 case IP_VERSION(3, 5, 0): 4995 4978 case IP_VERSION(3, 5, 1): 4979 + case IP_VERSION(4, 0, 1): 4996 4980 adev->mode_info.num_crtc = 4; 4997 4981 adev->mode_info.num_hpd = 4; 4998 4982 adev->mode_info.num_dig = 4; ··· 6041 6023 return true; 6042 6024 } 6043 6025 6026 + #if defined(CONFIG_DRM_AMD_DC_FP) 6044 6027 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, 6045 6028 struct dc_sink *sink, struct dc_stream_state *stream, 6046 6029 struct dsc_dec_dpcd_caps *dsc_caps) ··· 6059 6040 dsc_caps); 6060 6041 } 6061 6042 } 6062 - 6063 6043 6064 6044 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, 6065 6045 struct dc_sink *sink, struct dc_stream_state *stream, ··· 6122 6104 stream->timing.flags.DSC = 1; 6123 6105 } 6124 6106 } 6125 - 6126 6107 6127 6108 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, 6128 6109 struct dc_sink *sink, struct dc_stream_state *stream, ··· 6200 6183 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) 6201 6184 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; 6202 6185 } 6186 + #endif 6203 6187 6204 6188 static struct dc_stream_state * 6205 6189 create_stream_for_sink(struct drm_connector *connector, ··· 6222 6204 int mode_refresh; 6223 6205 int preferred_refresh = 0; 6224 6206 enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; 6207 + #if defined(CONFIG_DRM_AMD_DC_FP) 6225 6208 struct dsc_dec_dpcd_caps dsc_caps; 6226 - 6209 + #endif 6227 6210 struct dc_link *link = NULL; 6228 6211 struct dc_sink *sink = NULL; 6229 6212 ··· 6340 6321 stream->timing = *aconnector->timing_requested; 6341 6322 } 6342 6323 6324 + #if defined(CONFIG_DRM_AMD_DC_FP) 6343 6325 /* SST DSC determination policy */ 6344 6326 update_dsc_caps(aconnector, sink, stream, &dsc_caps); 6345 6327 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && 
dsc_caps.is_dsc_supported) 6346 6328 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps); 6329 + #endif 6347 6330 6348 6331 update_stream_scaling_settings(&mode, dm_state, stream); 6349 6332 ··· 10820 10799 } 10821 10800 } 10822 10801 10802 + #if defined(CONFIG_DRM_AMD_DC_FP) 10823 10803 if (dc_resource_is_dsc_encoding_supported(dc)) { 10824 10804 ret = pre_validate_dsc(state, &dm_state, vars); 10825 10805 if (ret != 0) 10826 10806 goto fail; 10827 10807 } 10808 + #endif 10828 10809 10829 10810 /* Run this here since we want to validate the streams we created */ 10830 10811 ret = drm_atomic_helper_check_planes(dev, state); ··· 10938 10915 goto fail; 10939 10916 } 10940 10917 10918 + #if defined(CONFIG_DRM_AMD_DC_FP) 10941 10919 if (dc_resource_is_dsc_encoding_supported(dc)) { 10942 10920 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); 10943 10921 if (ret) { ··· 10947 10923 goto fail; 10948 10924 } 10949 10925 } 10926 + #endif 10950 10927 10951 10928 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); 10952 10929 if (ret) {
+10 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 210 210 return false; 211 211 } 212 212 213 + #if defined(CONFIG_DRM_AMD_DC_FP) 213 214 static bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port) 214 215 { 215 216 u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F ··· 270 269 271 270 return true; 272 271 } 272 + #endif 273 273 274 274 static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector) 275 275 { ··· 404 402 amdgpu_dm_update_freesync_caps( 405 403 connector, aconnector->edid); 406 404 405 + #if defined(CONFIG_DRM_AMD_DC_FP) 407 406 if (!validate_dsc_caps_on_connector(aconnector)) 408 407 memset(&aconnector->dc_sink->dsc_caps, 409 408 0, sizeof(aconnector->dc_sink->dsc_caps)); 409 + #endif 410 410 411 411 if (!retrieve_downstream_port_device(aconnector)) 412 412 memset(&aconnector->mst_downstream_port_present, ··· 795 791 struct amdgpu_dm_connector *aconnector; 796 792 }; 797 793 794 + #if defined(CONFIG_DRM_AMD_DC_FP) 798 795 static int kbps_to_peak_pbn(int kbps) 799 796 { 800 797 u64 peak_kbps = kbps; ··· 1586 1581 1587 1582 return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16; 1588 1583 } 1584 + #endif 1589 1585 1590 1586 enum dc_status dm_dp_mst_is_port_support_mode( 1591 1587 struct amdgpu_dm_connector *aconnector, 1592 1588 struct dc_stream_state *stream) 1593 1589 { 1594 - int pbn, branch_max_throughput_mps = 0; 1590 + int branch_max_throughput_mps = 0; 1591 + #if defined(CONFIG_DRM_AMD_DC_FP) 1595 1592 struct dc_link_settings cur_link_settings; 1593 + int pbn; 1596 1594 unsigned int end_to_end_bw_in_kbps = 0; 1597 1595 unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; 1598 1596 struct dc_dsc_bw_range bw_range = {0}; ··· 1673 1665 return DC_FAIL_BANDWIDTH_VALIDATE; 1674 1666 } 1675 1667 } 1676 - 1677 1668 /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */ 1678 1669 switch (stream->timing.pixel_encoding) { 1679 1670 case PIXEL_ENCODING_RGB: ··· 1688 1681 default: 1689 1682 
break; 1690 1683 } 1684 + #endif 1691 1685 1692 1686 if (branch_max_throughput_mps != 0 && 1693 1687 ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000))
+9
drivers/gpu/drm/amd/display/dc/Makefile
··· 39 39 DC_LIBS += dcn32 40 40 DC_LIBS += dcn321 41 41 DC_LIBS += dcn35 42 + DC_LIBS += dcn401 42 43 DC_LIBS += dml 43 44 DC_LIBS += dml2 44 45 endif ··· 56 55 57 56 DC_LIBS += hdcp 58 57 58 + ifdef CONFIG_DRM_AMD_DC_FP 59 + DC_LIBS += spl 60 + DC_SPL_TRANS += dc_spl_translate.o 61 + endif 62 + 59 63 AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS))) 60 64 61 65 include $(AMD_DC) ··· 74 68 75 69 AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o) 76 70 71 + AMD_DC_SPL_TRANS = $(addprefix $(AMDDALPATH)/dc/,$(DC_SPL_TRANS)) 72 + 77 73 AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE) 78 74 AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE) 79 75 ··· 85 77 AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID)) 86 78 AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID) 87 79 80 + AMD_DISPLAY_FILES += $(AMD_DC_SPL_TRANS)
+107 -2
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
··· 75 75 struct bios_parser *bp, 76 76 struct dc_firmware_info *info); 77 77 78 + static enum bp_result get_firmware_info_v3_5( 79 + struct bios_parser *bp, 80 + struct dc_firmware_info *info); 81 + 78 82 static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp, 79 83 struct atom_display_object_path_v2 *object); 80 84 ··· 1758 1754 case 4: 1759 1755 result = get_firmware_info_v3_4(bp, info); 1760 1756 break; 1757 + case 5: 1758 + result = get_firmware_info_v3_5(bp, info); 1759 + break; 1761 1760 default: 1762 1761 break; 1763 1762 } ··· 2047 2040 } else { 2048 2041 info->oem_i2c_present = false; 2049 2042 } 2043 + 2044 + return BP_RESULT_OK; 2045 + } 2046 + 2047 + static enum bp_result get_firmware_info_v3_5( 2048 + struct bios_parser *bp, 2049 + struct dc_firmware_info *info) 2050 + { 2051 + struct atom_firmware_info_v3_5 *firmware_info; 2052 + struct atom_common_table_header *header; 2053 + struct atom_data_revision revision; 2054 + struct atom_display_controller_info_v4_5 *dce_info_v4_5 = NULL; 2055 + 2056 + if (!info) 2057 + return BP_RESULT_BADINPUT; 2058 + 2059 + firmware_info = GET_IMAGE(struct atom_firmware_info_v3_5, 2060 + DATA_TABLES(firmwareinfo)); 2061 + 2062 + if (!firmware_info) 2063 + return BP_RESULT_BADBIOSTABLE; 2064 + 2065 + memset(info, 0, sizeof(*info)); 2066 + 2067 + if (firmware_info->board_i2c_feature_id == 0x2) { 2068 + info->oem_i2c_present = true; 2069 + info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id; 2070 + } else { 2071 + info->oem_i2c_present = false; 2072 + } 2073 + 2074 + header = GET_IMAGE(struct atom_common_table_header, 2075 + DATA_TABLES(dce_info)); 2076 + 2077 + get_atom_data_table_revision(header, &revision); 2078 + 2079 + switch (revision.major) { 2080 + case 4: 2081 + switch (revision.minor) { 2082 + case 5: 2083 + dce_info_v4_5 = GET_IMAGE(struct atom_display_controller_info_v4_5, 2084 + DATA_TABLES(dce_info)); 2085 + 2086 + if (!dce_info_v4_5) 2087 + return BP_RESULT_BADBIOSTABLE; 2088 + 
2089 + /* 100MHz expected */ 2090 + info->pll_info.crystal_frequency = dce_info_v4_5->dce_refclk_10khz * 10; 2091 + break; 2092 + default: 2093 + break; 2094 + } 2095 + break; 2096 + default: 2097 + break; 2098 + } 2099 + 2050 2100 2051 2101 return BP_RESULT_OK; 2052 2102 } ··· 2458 2394 2459 2395 info->num_chans = info_v30->channel_num; 2460 2396 info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8; 2397 + 2398 + return result; 2399 + } 2400 + 2401 + static enum bp_result get_vram_info_from_umc_info_v40( 2402 + struct bios_parser *bp, 2403 + struct dc_vram_info *info) 2404 + { 2405 + struct atom_umc_info_v4_0 *info_v40; 2406 + enum bp_result result = BP_RESULT_OK; 2407 + 2408 + info_v40 = GET_IMAGE(struct atom_umc_info_v4_0, 2409 + DATA_TABLES(umc_info)); 2410 + 2411 + if (info_v40 == NULL) 2412 + return BP_RESULT_BADBIOSTABLE; 2413 + 2414 + info->num_chans = info_v40->channel_num; 2415 + info->dram_channel_width_bytes = (1 << info_v40->channel_width) / 8; 2461 2416 2462 2417 return result; 2463 2418 } ··· 3122 3039 struct atom_common_table_header *header; 3123 3040 struct atom_data_revision revision; 3124 3041 3125 - if (info && DATA_TABLES(vram_info)) { 3042 + // vram info moved to umc_info for DCN4x 3043 + if (info && DATA_TABLES(umc_info)) { 3044 + header = GET_IMAGE(struct atom_common_table_header, 3045 + DATA_TABLES(umc_info)); 3046 + 3047 + get_atom_data_table_revision(header, &revision); 3048 + 3049 + switch (revision.major) { 3050 + case 4: 3051 + switch (revision.minor) { 3052 + case 0: 3053 + result = get_vram_info_from_umc_info_v40(bp, info); 3054 + break; 3055 + default: 3056 + break; 3057 + } 3058 + break; 3059 + default: 3060 + break; 3061 + } 3062 + } 3063 + 3064 + if (result != BP_RESULT_OK && info && DATA_TABLES(vram_info)) { 3126 3065 header = GET_IMAGE(struct atom_common_table_header, 3127 3066 DATA_TABLES(vram_info)); 3128 3067 ··· 3767 3662 bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base); 3768 3663 
bp->base.fw_info_valid = bios_parser_get_firmware_info(&bp->base, &bp->base.fw_info) == BP_RESULT_OK; 3769 3664 bios_parser_get_vram_info(&bp->base, &bp->base.vram_info); 3770 - 3665 + bios_parser_get_soc_bb_info(&bp->base, &bp->base.bb_info); 3771 3666 return true; 3772 3667 } 3773 3668
+1
drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
··· 82 82 case DCN_VERSION_3_21: 83 83 case DCN_VERSION_3_5: 84 84 case DCN_VERSION_3_51: 85 + case DCN_VERSION_4_01: 85 86 *h = dal_cmd_tbl_helper_dce112_get_table2(); 86 87 return true; 87 88
+9
drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
··· 180 180 AMD_DAL_CLK_MGR_DCN35 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn35/,$(CLK_MGR_DCN35)) 181 181 182 182 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN35) 183 + 184 + ############################################################################### 185 + # DCN401 186 + ############################################################################### 187 + CLK_MGR_DCN401 = dcn401_clk_mgr.o dcn401_clk_mgr_smu_msg.o 188 + 189 + AMD_DAL_CLK_MGR_DCN401 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn401/,$(CLK_MGR_DCN401)) 190 + 191 + AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN401) 183 192 endif
+16
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
··· 48 48 #include "dcn316/dcn316_clk_mgr.h" 49 49 #include "dcn32/dcn32_clk_mgr.h" 50 50 #include "dcn35/dcn35_clk_mgr.h" 51 + #include "dcn401/dcn401_clk_mgr.h" 51 52 52 53 int clk_mgr_helper_get_active_display_cnt( 53 54 struct dc *dc, ··· 366 365 } 367 366 break; 368 367 368 + case AMDGPU_FAMILY_GC_12_0_0: { 369 + struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); 370 + 371 + if (clk_mgr == NULL) { 372 + BREAK_TO_DEBUGGER(); 373 + return NULL; 374 + } 375 + 376 + dcn401_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); 377 + return &clk_mgr->base; 378 + } 379 + break; 369 380 #endif /* CONFIG_DRM_AMD_DC_FP */ 370 381 default: 371 382 ASSERT(0); /* Unknown Asic */ ··· 431 418 432 419 case AMDGPU_FAMILY_GC_11_5_0: 433 420 dcn35_clk_mgr_destroy(clk_mgr); 421 + break; 422 + case AMDGPU_FAMILY_GC_12_0_0: 423 + dcn401_clk_mgr_destroy(clk_mgr); 434 424 break; 435 425 436 426 default:
+254 -7
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 80 80 81 81 #include "hw_sequencer_private.h" 82 82 83 + #if defined(CONFIG_DRM_AMD_DC_FP) 83 84 #include "dml2/dml2_internal_types.h" 85 + #endif 84 86 85 87 #include "dce/dmub_outbox.h" 86 88 ··· 1164 1162 get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1165 1163 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) 1166 1164 get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1165 + else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2) 1166 + get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color)); 1167 1167 } 1168 1168 } 1169 1169 } ··· 1460 1456 dc->build_id = DC_BUILD_ID; 1461 1457 1462 1458 DC_LOG_DC("Display Core initialized\n"); 1463 - 1464 - 1465 1459 1466 1460 return dc; 1467 1461 ··· 1973 1971 */ 1974 1972 if (dc->hwss.subvp_pipe_control_lock) 1975 1973 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use); 1974 + if (dc->hwss.fams2_global_control_lock) 1975 + dc->hwss.fams2_global_control_lock(dc, context, true); 1976 1976 1977 1977 if (dc->hwss.update_dsc_pg) 1978 1978 dc->hwss.update_dsc_pg(dc, context, false); ··· 2033 2029 dc->hwss.commit_subvp_config(dc, context); 2034 2030 if (dc->hwss.subvp_pipe_control_lock) 2035 2031 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use); 2032 + if (dc->hwss.fams2_global_control_lock) 2033 + dc->hwss.fams2_global_control_lock(dc, context, false); 2036 2034 2037 2035 for (i = 0; i < context->stream_count; i++) { 2038 2036 const struct dc_link *link = context->streams[i]->link; ··· 2638 2632 elevate_update_type(&overall_type, UPDATE_TYPE_MED); 2639 2633 } 2640 2634 2635 + if (u->cm2_params) { 2636 + if ((u->cm2_params->component_settings.shaper_3dlut_setting 2637 + != u->surface->mcm_shaper_3dlut_setting) 2638 + || (u->cm2_params->component_settings.lut1d_enable 2639 + != u->surface->mcm_lut1d_enable)) 2640 + 
update_flags->bits.mcm_transfer_function_enable_change = 1; 2641 + if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src 2642 + != u->surface->mcm_luts.lut3d_data.lut3d_src) 2643 + update_flags->bits.mcm_transfer_function_enable_change = 1; 2644 + } 2641 2645 if (update_flags->bits.in_transfer_func_change) { 2642 2646 type = UPDATE_TYPE_MED; 2643 2647 elevate_update_type(&overall_type, type); 2644 2648 } 2645 2649 2646 2650 if (update_flags->bits.lut_3d) { 2651 + type = UPDATE_TYPE_FULL; 2652 + elevate_update_type(&overall_type, type); 2653 + } 2654 + if (update_flags->bits.mcm_transfer_function_enable_change) { 2647 2655 type = UPDATE_TYPE_FULL; 2648 2656 elevate_update_type(&overall_type, type); 2649 2657 } ··· 2926 2906 if (srf_update->gamut_remap_matrix) 2927 2907 surface->gamut_remap_matrix = 2928 2908 *srf_update->gamut_remap_matrix; 2909 + if (srf_update->cm2_params) { 2910 + surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting; 2911 + surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable; 2912 + surface->mcm_luts = srf_update->cm2_params->cm2_luts; 2913 + } 2914 + if (srf_update->cursor_csc_color_matrix) 2915 + surface->cursor_csc_color_matrix = 2916 + *srf_update->cursor_csc_color_matrix; 2929 2917 } 2930 2918 2931 2919 static void copy_stream_update_to_stream(struct dc *dc, ··· 3549 3521 } 3550 3522 } 3551 3523 3524 + static bool check_address_only_update(union surface_update_flags update_flags) 3525 + { 3526 + union surface_update_flags addr_only_update_flags; 3527 + addr_only_update_flags.raw = 0; 3528 + addr_only_update_flags.bits.addr_update = 1; 3529 + 3530 + return update_flags.bits.addr_update && 3531 + !(update_flags.raw & ~addr_only_update_flags.raw); 3532 + } 3552 3533 3553 3534 /** 3554 3535 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB ··· 3589 3552 build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, 
dmub_cmd_count); 3590 3553 } 3591 3554 3555 + static void commit_plane_for_stream_offload_fams2_flip(struct dc *dc, 3556 + struct dc_surface_update *srf_updates, 3557 + int surface_count, 3558 + struct dc_stream_state *stream, 3559 + struct dc_state *context) 3560 + { 3561 + int i, j; 3562 + 3563 + /* update dirty rect for PSR */ 3564 + dc_dmub_update_dirty_rect(dc, surface_count, stream, 3565 + srf_updates, context); 3566 + 3567 + /* Perform requested Updates */ 3568 + for (i = 0; i < surface_count; i++) { 3569 + struct dc_plane_state *plane_state = srf_updates[i].surface; 3570 + 3571 + /* set offload flag so driver does not program address */ 3572 + plane_state->address.offload_flip = true; 3573 + 3574 + for (j = 0; j < dc->res_pool->pipe_count; j++) { 3575 + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3576 + 3577 + if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3578 + continue; 3579 + 3580 + if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3581 + continue; 3582 + 3583 + /* update pipe context for plane */ 3584 + if (pipe_ctx->plane_state->update_flags.bits.addr_update) 3585 + dc->hwss.update_plane_addr(dc, pipe_ctx); 3586 + } 3587 + } 3588 + 3589 + /* Send commands to DMCUB */ 3590 + dc_dmub_srv_fams2_passthrough_flip(dc, 3591 + context, 3592 + stream, 3593 + srf_updates, 3594 + surface_count); 3595 + 3596 + /* reset offload flip flag */ 3597 + for (i = 0; i < surface_count; i++) { 3598 + struct dc_plane_state *plane_state = srf_updates[i].surface; 3599 + plane_state->address.offload_flip = false; 3600 + } 3601 + } 3602 + 3592 3603 static void commit_planes_for_stream_fast(struct dc *dc, 3593 3604 struct dc_surface_update *srf_updates, 3594 3605 int surface_count, ··· 3648 3563 int i, j; 3649 3564 struct pipe_ctx *top_pipe_to_program = NULL; 3650 3565 struct dc_stream_status *stream_status = NULL; 3566 + bool should_offload_fams2_flip = false; 3567 + 3568 + if (dc->debug.fams2_config.bits.enable && 3569 + 
dc->debug.fams2_config.bits.enable_offload_flip && 3570 + dc_state_is_fams2_in_use(dc, context)) { 3571 + /* if not offloading to HWFQ, offload to FAMS2 if needed */ 3572 + should_offload_fams2_flip = true; 3573 + for (i = 0; i < surface_count; i++) { 3574 + if (srf_updates[i].surface && 3575 + srf_updates[i].surface->update_flags.raw && 3576 + !check_address_only_update(srf_updates[i].surface->update_flags)) { 3577 + /* more than address update, need to acquire FAMS2 lock */ 3578 + should_offload_fams2_flip = false; 3579 + break; 3580 + } 3581 + } 3582 + } 3651 3583 3652 3584 dc_exit_ips_for_hw_access(dc); 3653 3585 ··· 3700 3598 continue; 3701 3599 pipe_ctx->plane_state->triplebuffer_flips = false; 3702 3600 if (update_type == UPDATE_TYPE_FAST && 3703 - dc->hwss.program_triplebuffer && 3601 + dc->hwss.program_triplebuffer != NULL && 3704 3602 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 3705 3603 /*triple buffer for VUpdate only*/ 3706 3604 pipe_ctx->plane_state->triplebuffer_flips = true; ··· 3709 3607 } 3710 3608 3711 3609 stream_status = dc_state_get_stream_status(context, stream); 3610 + 3611 + if (should_offload_fams2_flip) { 3612 + commit_plane_for_stream_offload_fams2_flip(dc, 3613 + srf_updates, 3614 + surface_count, 3615 + stream, 3616 + context); 3617 + } else { 3618 + build_dmub_cmd_list(dc, 3619 + srf_updates, 3620 + surface_count, 3621 + stream, 3622 + context, 3623 + context->dc_dmub_cmd, 3624 + &(context->dmub_cmd_count)); 3625 + hwss_build_fast_sequence(dc, 3626 + context->dc_dmub_cmd, 3627 + context->dmub_cmd_count, 3628 + context->block_sequence, 3629 + &(context->block_sequence_steps), 3630 + top_pipe_to_program, 3631 + stream_status, 3632 + context); 3633 + hwss_execute_sequence(dc, 3634 + context->block_sequence, 3635 + context->block_sequence_steps); 3636 + } 3712 3637 3713 3638 build_dmub_cmd_list(dc, 3714 3639 srf_updates, ··· 3905 3776 3906 3777 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 
3907 3778 if (dc->hwss.subvp_pipe_control_lock) 3908 - dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 3909 - dc->hwss.interdependent_update_lock(dc, context, true); 3779 + dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use); 3910 3780 3781 + if (dc->hwss.fams2_global_control_lock) 3782 + dc->hwss.fams2_global_control_lock(dc, context, true); 3783 + 3784 + dc->hwss.interdependent_update_lock(dc, context, true); 3911 3785 } else { 3912 3786 if (dc->hwss.subvp_pipe_control_lock) 3913 3787 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 3788 + 3789 + if (dc->hwss.fams2_global_control_lock) 3790 + dc->hwss.fams2_global_control_lock(dc, context, true); 3791 + 3914 3792 /* Lock the top pipe while updating plane addrs, since freesync requires 3915 3793 * plane addr update event triggers to be synchronized. 3916 3794 * top_pipe_to_program is expected to never be NULL ··· 3958 3822 if (dc->hwss.subvp_pipe_control_lock) 3959 3823 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, 3960 3824 NULL, subvp_prev_use); 3825 + 3826 + if (dc->hwss.fams2_global_control_lock) 3827 + dc->hwss.fams2_global_control_lock(dc, context, false); 3828 + 3961 3829 return; 3962 3830 } 3963 3831 ··· 4165 4025 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 4166 4026 if (dc->hwss.subvp_pipe_control_lock) 4167 4027 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use); 4028 + if (dc->hwss.fams2_global_control_lock) 4029 + dc->hwss.fams2_global_control_lock(dc, context, false); 4168 4030 } else { 4169 4031 if (dc->hwss.subvp_pipe_control_lock) 4170 4032 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use); 4033 + if (dc->hwss.fams2_global_control_lock) 4034 + dc->hwss.fams2_global_control_lock(dc, 
context, false); 4171 4035 } 4172 4036 4173 4037 // Fire manual trigger only when bottom plane is flipped ··· 4683 4539 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix; 4684 4540 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix; 4685 4541 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor; 4542 + fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix; 4686 4543 } 4687 4544 } 4688 4545 ··· 4700 4555 fast_update[i].gamma || 4701 4556 fast_update[i].gamut_remap_matrix || 4702 4557 fast_update[i].input_csc_color_matrix || 4558 + fast_update[i].cursor_csc_color_matrix || 4703 4559 fast_update[i].coeff_reduction_factor) 4704 4560 return true; 4705 4561 } ··· 4731 4585 srf_updates[i].surface->force_full_update || 4732 4586 (srf_updates[i].flip_addr && 4733 4587 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || 4588 + (srf_updates[i].cm2_params && 4589 + (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting || 4590 + srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) || 4734 4591 !is_surface_in_context(context, srf_updates[i].surface))) 4735 4592 return true; 4736 4593 } ··· 5118 4969 * specially handle compatibility problems with transitions among those 5119 4970 * features as they are now transparent to the new sequence. 5120 4971 */ 5121 - if (dc->ctx->dce_version > DCN_VERSION_3_51) 4972 + if (dc->ctx->dce_version > DCN_VERSION_4_01) 5122 4973 return update_planes_and_stream_v3(dc, srf_updates, 5123 4974 surface_count, stream, stream_update); 5124 4975 return update_planes_and_stream_v2(dc, srf_updates, ··· 5138 4989 * we get more confident about this change we'll need to enable 5139 4990 * the new sequence for all ASICs. 
5140 4991 */ 5141 - if (dc->ctx->dce_version > DCN_VERSION_3_51) { 4992 + if (dc->ctx->dce_version > DCN_VERSION_4_01) { 5142 4993 update_planes_and_stream_v3(dc, srf_updates, surface_count, 5143 4994 stream, stream_update); 5144 4995 return; ··· 5978 5829 return profile; 5979 5830 } 5980 5831 5832 + /* Need to account for padding due to pixel-to-symbol packing 5833 + * for uncompressed 128b/132b streams. 5834 + */ 5835 + static uint32_t apply_128b_132b_stream_overhead( 5836 + const struct dc_crtc_timing *timing, const uint32_t kbps) 5837 + { 5838 + uint32_t total_kbps = kbps; 5839 + #if defined(CONFIG_DRM_AMD_DC_FP) 5840 + if (dc_get_disable_128b_132b_stream_overhead()) 5841 + return kbps; 5842 + #endif 5843 + 5844 + if (!timing->flags.DSC) { 5845 + struct fixed31_32 bpp; 5846 + struct fixed31_32 overhead_factor; 5847 + 5848 + bpp = dc_fixpt_from_int(kbps); 5849 + bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10); 5850 + 5851 + /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size) 5852 + * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive 5853 + */ 5854 + overhead_factor = dc_fixpt_from_int(timing->h_addressable); 5855 + overhead_factor = dc_fixpt_mul(overhead_factor, bpp); 5856 + overhead_factor = dc_fixpt_div_int(overhead_factor, 128); 5857 + overhead_factor = dc_fixpt_div( 5858 + dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)), 5859 + overhead_factor); 5860 + 5861 + total_kbps = dc_fixpt_ceil( 5862 + dc_fixpt_mul_int(overhead_factor, total_kbps)); 5863 + } 5864 + 5865 + return total_kbps; 5866 + } 5867 + 5868 + uint32_t dc_bandwidth_in_kbps_from_timing( 5869 + const struct dc_crtc_timing *timing, 5870 + const enum dc_link_encoding_format link_encoding) 5871 + { 5872 + uint32_t bits_per_channel = 0; 5873 + uint32_t kbps; 5874 + 5875 + #if defined(CONFIG_DRM_AMD_DC_FP) 5876 + if (timing->flags.DSC) 5877 + return dc_dsc_stream_bandwidth_in_kbps(timing, 5878 + timing->dsc_cfg.bits_per_pixel, 5879 + 
timing->dsc_cfg.num_slices_h, 5880 + timing->dsc_cfg.is_dp); 5881 + #endif 5882 + 5883 + switch (timing->display_color_depth) { 5884 + case COLOR_DEPTH_666: 5885 + bits_per_channel = 6; 5886 + break; 5887 + case COLOR_DEPTH_888: 5888 + bits_per_channel = 8; 5889 + break; 5890 + case COLOR_DEPTH_101010: 5891 + bits_per_channel = 10; 5892 + break; 5893 + case COLOR_DEPTH_121212: 5894 + bits_per_channel = 12; 5895 + break; 5896 + case COLOR_DEPTH_141414: 5897 + bits_per_channel = 14; 5898 + break; 5899 + case COLOR_DEPTH_161616: 5900 + bits_per_channel = 16; 5901 + break; 5902 + default: 5903 + ASSERT(bits_per_channel != 0); 5904 + bits_per_channel = 8; 5905 + break; 5906 + } 5907 + 5908 + kbps = timing->pix_clk_100hz / 10; 5909 + kbps *= bits_per_channel; 5910 + 5911 + if (timing->flags.Y_ONLY != 1) { 5912 + /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ 5913 + kbps *= 3; 5914 + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) 5915 + kbps /= 2; 5916 + else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) 5917 + kbps = kbps * 2 / 3; 5918 + } 5919 + 5920 + if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) 5921 + kbps = apply_128b_132b_stream_overhead(timing, kbps); 5922 + 5923 + if (link_encoding == DC_LINK_ENCODING_HDMI_FRL && 5924 + timing->vic == 0 && timing->hdmi_vic == 0 && 5925 + timing->frl_uncompressed_video_bandwidth_in_kbps != 0) 5926 + kbps = timing->frl_uncompressed_video_bandwidth_in_kbps; 5927 + 5928 + return kbps; 5929 + }
+43
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
··· 554 554 } 555 555 } 556 556 557 + void get_fams2_visual_confirm_color( 558 + struct dc *dc, 559 + struct dc_state *context, 560 + struct pipe_ctx *pipe_ctx, 561 + struct tg_color *color) 562 + { 563 + uint32_t color_value = MAX_TG_COLOR_VALUE; 564 + 565 + if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context || !dc->debug.fams2_config.bits.enable) 566 + return; 567 + 568 + /* driver only handles visual confirm when FAMS2 is disabled */ 569 + if (!dc_state_is_fams2_in_use(dc, context)) { 570 + /* when FAMS2 is disabled, all pipes are grey */ 571 + color->color_g_y = color_value / 2; 572 + color->color_b_cb = color_value / 2; 573 + color->color_r_cr = color_value / 2; 574 + } 575 + } 576 + 557 577 void hwss_build_fast_sequence(struct dc *dc, 558 578 struct dc_dmub_cmd *dc_dmub_cmd, 559 579 unsigned int dmub_cmd_count, ··· 601 581 block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip = 602 582 plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN; 603 583 block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; 584 + (*num_steps)++; 585 + } 586 + if (dc->hwss.fams2_global_control_lock_fast) { 587 + block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc; 588 + block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = true; 589 + block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); 590 + block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST; 604 591 (*num_steps)++; 605 592 } 606 593 if (dc->hwss.pipe_control_lock) { ··· 733 706 block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST; 734 707 (*num_steps)++; 735 708 } 709 + if (dc->hwss.fams2_global_control_lock_fast) { 710 + block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc; 711 + block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = false; 
712 + block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context); 713 + block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST; 714 + (*num_steps)++; 715 + } 736 716 737 717 current_pipe = pipe_ctx; 738 718 while (current_pipe) { ··· 835 801 case DMUB_SUBVP_SAVE_SURF_ADDR: 836 802 hwss_subvp_save_surf_addr(params); 837 803 break; 804 + case DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST: 805 + dc->hwss.fams2_global_control_lock_fast(params); 806 + break; 838 807 default: 839 808 ASSERT(false); 840 809 break; ··· 876 839 plane_state->input_csc_color_matrix, 877 840 plane_state->color_space, 878 841 NULL); 842 + } 843 + 844 + if (dpp && dpp->funcs->set_cursor_matrix) { 845 + dpp->funcs->set_cursor_matrix(dpp, 846 + plane_state->color_space, 847 + plane_state->cursor_csc_color_matrix); 879 848 } 880 849 } 881 850
+42 -2
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 74 74 #include "dcn321/dcn321_resource.h" 75 75 #include "dcn35/dcn35_resource.h" 76 76 #include "dcn351/dcn351_resource.h" 77 + #include "dcn401/dcn401_resource.h" 78 + #if defined(CONFIG_DRM_AMD_DC_FP) 79 + #include "dc_spl_translate.h" 80 + #endif 77 81 78 82 #define VISUAL_CONFIRM_BASE_DEFAULT 3 79 83 #define VISUAL_CONFIRM_BASE_MIN 1 ··· 203 199 if (ASICREV_IS_GC_11_0_4(asic_id.hw_internal_rev)) 204 200 dc_version = DCN_VERSION_3_51; 205 201 break; 202 + case AMDGPU_FAMILY_GC_12_0_0: 203 + if (ASICREV_IS_DCN401(asic_id.hw_internal_rev)) 204 + dc_version = DCN_VERSION_4_01; 205 + break; 206 206 default: 207 207 dc_version = DCE_VERSION_UNKNOWN; 208 208 break; ··· 316 308 break; 317 309 case DCN_VERSION_3_51: 318 310 res_pool = dcn351_create_resource_pool(init_data, dc); 311 + break; 312 + case DCN_VERSION_4_01: 313 + res_pool = dcn401_create_resource_pool(init_data, dc); 319 314 break; 320 315 #endif /* CONFIG_DRM_AMD_DC_FP */ 321 316 default: ··· 1525 1514 pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( 1526 1515 pipe_ctx->plane_state->format); 1527 1516 1517 + if (pipe_ctx->stream->ctx->dc->config.use_spl) { 1518 + #if defined(CONFIG_DRM_AMD_DC_FP) 1519 + struct spl_in *spl_in = &pipe_ctx->plane_res.spl_in; 1520 + struct spl_out *spl_out = &pipe_ctx->plane_res.spl_out; 1521 + 1522 + if (plane_state->ctx->dce_version > DCE_VERSION_MAX) 1523 + pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP; 1524 + else 1525 + pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; 1526 + 1527 + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha; 1528 + spl_out->scl_data.h_active = pipe_ctx->plane_res.scl_data.h_active; 1529 + spl_out->scl_data.v_active = pipe_ctx->plane_res.scl_data.v_active; 1530 + 1531 + // Convert pipe_ctx to respective input params for SPL 1532 + translate_SPL_in_params_from_pipe_ctx(pipe_ctx, spl_in); 1533 + // Set SPL output parameters to dscl_prog_data to be used 
for hw registers 1534 + spl_out->dscl_prog_data = resource_get_dscl_prog_data(pipe_ctx); 1535 + // Calculate scaler parameters from SPL 1536 + res = spl_calculate_scaler_params(spl_in, spl_out); 1537 + // Convert respective out params from SPL to scaler data 1538 + translate_SPL_out_params_to_pipe_ctx(pipe_ctx, spl_out); 1539 + #endif 1540 + } else { 1541 + 1528 1542 /* depends on h_active */ 1529 1543 calculate_recout(pipe_ctx); 1530 1544 /* depends on pixel format */ ··· 1629 1593 pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE; 1630 1594 if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) 1631 1595 pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE; 1632 - 1633 - 1596 + } 1634 1597 DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n" 1635 1598 "src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n", 1636 1599 __func__, ··· 5126 5091 return true; 5127 5092 5128 5093 return false; 5094 + } 5095 + 5096 + struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx) 5097 + { 5098 + return &pipe_ctx->plane_res.scl_data.dscl_prog_data; 5129 5099 } 5130 5100 5131 5101 void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
+16
drivers/gpu/drm/amd/display/dc/core/dc_state.c
··· 33 33 #include "resource.h" 34 34 #include "link_enc_cfg.h" 35 35 36 + #if defined(CONFIG_DRM_AMD_DC_FP) 36 37 #include "dml2/dml2_wrapper.h" 37 38 #include "dml2/dml2_internal_types.h" 39 + #endif 38 40 39 41 #define DC_LOGGER \ 40 42 dc->ctx->logger ··· 918 916 return stream; 919 917 } 920 918 919 + bool dc_state_is_fams2_in_use( 920 + const struct dc *dc, 921 + const struct dc_state *state) 922 + { 923 + bool is_fams2_in_use = false; 924 + 925 + if (state) 926 + is_fams2_in_use |= state->bw_ctx.bw.dcn.fams2_stream_count > 0; 927 + 928 + if (dc->current_state) 929 + is_fams2_in_use |= dc->current_state->bw_ctx.bw.dcn.fams2_stream_count > 0; 930 + 931 + return is_fams2_in_use; 932 + }
+42 -3
drivers/gpu/drm/amd/display/dc/dc.h
··· 260 260 bool zstate_support; 261 261 bool ips_support; 262 262 uint32_t num_of_internal_disp; 263 + uint32_t max_dwb_htap; 264 + uint32_t max_dwb_vtap; 263 265 enum dp_protocol_version max_dp_protocol_version; 266 + bool spdif_aud; 264 267 unsigned int mall_size_per_mem_channel; 265 268 unsigned int mall_size_total; 266 269 unsigned int cursor_cache_size; ··· 289 286 uint32_t max_v_total; 290 287 uint32_t max_disp_clock_khz_at_vmin; 291 288 uint8_t subvp_drr_vblank_start_margin_us; 289 + bool cursor_not_scaled; 292 290 }; 293 291 294 292 struct dc_bug_wa { ··· 303 299 uint8_t dcfclk : 1; 304 300 uint8_t dcfclk_ds: 1; 305 301 } clock_update_disable_mask; 302 + //Customer Specific WAs 303 + uint32_t force_backlight_start_level; 306 304 }; 307 305 struct dc_dcc_surface_param { 308 306 struct dc_size surface_size; 309 307 enum surface_pixel_format format; 310 - enum swizzle_mode_values swizzle_mode; 308 + unsigned int plane0_pitch; 309 + struct dc_size plane1_size; 310 + unsigned int plane1_pitch; 311 + union { 312 + enum swizzle_mode_values swizzle_mode; 313 + enum swizzle_mode_addr3_values swizzle_mode_addr3; 314 + }; 311 315 enum dc_scan_direction scan; 312 316 }; 313 317 ··· 396 384 struct dc_plane_state; 397 385 struct dc_state; 398 386 399 - 400 387 struct dc_cap_funcs { 401 388 bool (*get_dcc_compression_cap)(const struct dc *dc, 402 389 const struct dc_dcc_surface_param *input, ··· 438 427 bool is_asymmetric_memory; 439 428 bool is_single_rank_dimm; 440 429 bool is_vmin_only_asic; 430 + bool use_spl; 431 + bool prefer_easf; 441 432 bool use_pipe_ctx_sync_logic; 442 433 bool ignore_dpref_ss; 443 434 bool enable_mipi_converter_optimization; ··· 470 457 VISUAL_CONFIRM_REPLAY = 12, 471 458 VISUAL_CONFIRM_SUBVP = 14, 472 459 VISUAL_CONFIRM_MCLK_SWITCH = 16, 460 + VISUAL_CONFIRM_FAMS2 = 19, 473 461 }; 474 462 475 463 enum dc_psr_power_opts { ··· 984 970 bool enable_single_display_2to1_odm_policy; 985 971 bool enable_double_buffered_dsc_pg_support; 986 972 bool 
enable_dp_dig_pixel_rate_div_policy; 973 + bool using_dml21; 987 974 enum lttpr_mode lttpr_mode_override; 988 975 unsigned int dsc_delay_factor_wa_x1000; 989 976 unsigned int min_prefetch_in_strobe_ns; ··· 1020 1005 unsigned int static_screen_wait_frames; 1021 1006 bool force_chroma_subsampling_1tap; 1022 1007 bool disable_422_left_edge_pixel; 1008 + bool dml21_force_pstate_method; 1009 + uint32_t dml21_force_pstate_method_value; 1010 + uint32_t dml21_disable_pstate_method_mask; 1011 + union dmub_fams2_global_feature_config fams2_config; 1023 1012 unsigned int force_cositing; 1024 1013 }; 1025 1014 ··· 1233 1214 uint32_t stereo_format_change:1; 1234 1215 uint32_t lut_3d:1; 1235 1216 uint32_t tmz_changed:1; 1217 + uint32_t mcm_transfer_function_enable_change:1; /* disable or enable MCM transfer func */ 1236 1218 uint32_t full_update:1; 1237 1219 } bits; 1238 1220 ··· 1308 1288 1309 1289 bool is_statically_allocated; 1310 1290 enum chroma_cositing cositing; 1291 + enum dc_cm2_shaper_3dlut_setting mcm_shaper_3dlut_setting; 1292 + bool mcm_lut1d_enable; 1293 + struct dc_cm2_func_luts mcm_luts; 1294 + bool lut_bank_a; 1295 + enum mpcc_movable_cm_location mcm_location; 1296 + struct dc_csc_transform cursor_csc_color_matrix; 1297 + bool adaptive_sharpness_en; 1298 + unsigned int sharpnessX1000; 1299 + enum linear_light_scaling linear_light_scaling; 1311 1300 }; 1312 1301 1313 1302 struct dc_plane_info { ··· 1335 1306 int global_alpha_value; 1336 1307 bool input_csc_enabled; 1337 1308 int layer_index; 1309 + bool front_buffer_rendering_active; 1338 1310 enum chroma_cositing cositing; 1339 1311 }; 1340 1312 ··· 1443 1413 const struct fixed31_32 *coeff_reduction_factor; 1444 1414 struct dc_transfer_func *out_transfer_func; 1445 1415 struct dc_csc_transform *output_csc_transform; 1416 + const struct dc_csc_transform *cursor_csc_color_matrix; 1446 1417 }; 1447 1418 1448 1419 struct dc_surface_update { ··· 1466 1435 const struct dc_3dlut *lut3d_func; 1467 1436 const struct 
dc_transfer_func *blend_tf; 1468 1437 const struct colorspace_transform *gamut_remap_matrix; 1438 + /* 1439 + * Color Transformations for pre-blend MCM (Shaper, 3DLUT, 1DLUT) 1440 + * 1441 + * change cm2_params.component_settings: Full update 1442 + * change cm2_params.cm2_luts: Fast update 1443 + */ 1444 + struct dc_cm2_parameters *cm2_params; 1445 + const struct dc_csc_transform *cursor_csc_color_matrix; 1469 1446 }; 1470 1447 1471 1448 /* ··· 1570 1531 uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane); 1571 1532 1572 1533 void dc_set_disable_128b_132b_stream_overhead(bool disable); 1534 + bool dc_get_disable_128b_132b_stream_overhead(void); 1573 1535 1574 1536 /* The function returns minimum bandwidth required to drive a given timing 1575 1537 * return - minimum required timing bandwidth in kbps. ··· 1700 1660 union dpcd_sink_ext_caps dpcd_sink_ext_caps; 1701 1661 1702 1662 struct psr_settings psr_settings; 1703 - 1704 1663 struct replay_settings replay_settings; 1705 1664 1706 1665 /* Drive settings read from integrated info table */
+1
drivers/gpu/drm/amd/display/dc/dc_bios_types.h
··· 183 183 struct dc_firmware_info fw_info; 184 184 bool fw_info_valid; 185 185 struct dc_vram_info vram_info; 186 + struct bp_soc_bb_info bb_info; 186 187 struct dc_golden_table golden_table; 187 188 }; 188 189
+176
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
··· 1595 1595 return result; 1596 1596 } 1597 1597 1598 + void dc_dmub_srv_fams2_update_config(struct dc *dc, 1599 + struct dc_state *context, 1600 + bool enable) 1601 + { 1602 + uint8_t num_cmds = 1; 1603 + uint32_t i; 1604 + union dmub_rb_cmd cmd[MAX_STREAMS + 1]; 1605 + struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config; 1606 + 1607 + memset(cmd, 0, sizeof(union dmub_rb_cmd) * (MAX_STREAMS + 1)); 1608 + /* fill in generic command header */ 1609 + global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 1610 + global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; 1611 + global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); 1612 + 1613 + /* send global configuration parameters */ 1614 + global_cmd->config.global.max_allow_delay_us = 100 * 1000; //100ms 1615 + global_cmd->config.global.lock_wait_time_us = 5000; //5ms 1616 + 1617 + /* copy static feature configuration */ 1618 + global_cmd->config.global.features.all = dc->debug.fams2_config.all; 1619 + 1620 + /* apply feature configuration based on current driver state */ 1621 + global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2; 1622 + global_cmd->config.global.features.bits.enable = enable; 1623 + 1624 + /* construct per-stream configs */ 1625 + if (enable) { 1626 + for (i = 0; i < context->bw_ctx.bw.dcn.fams2_stream_count; i++) { 1627 + struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config; 1628 + 1629 + /* configure command header */ 1630 + stream_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 1631 + stream_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG; 1632 + stream_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header); 1633 + stream_cmd->header.multi_cmd_pending = 1; 1634 + /* copy stream static state */ 1635 + memcpy(&stream_cmd->config.stream, 1636 + &context->bw_ctx.bw.dcn.fams2_stream_params[i], 1637 + sizeof(struct 
dmub_fams2_stream_static_state)); 1638 + } 1639 + } 1640 + 1641 + if (enable && context->bw_ctx.bw.dcn.fams2_stream_count) { 1642 + /* set multi pending for global, and unset for last stream cmd */ 1643 + global_cmd->config.global.num_streams = context->bw_ctx.bw.dcn.fams2_stream_count; 1644 + global_cmd->header.multi_cmd_pending = 1; 1645 + cmd[context->bw_ctx.bw.dcn.fams2_stream_count].fams2_config.header.multi_cmd_pending = 0; 1646 + num_cmds += context->bw_ctx.bw.dcn.fams2_stream_count; 1647 + } 1648 + 1649 + dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT); 1650 + } 1651 + 1652 + void dc_dmub_srv_fams2_drr_update(struct dc *dc, 1653 + uint32_t tg_inst, 1654 + uint32_t vtotal_min, 1655 + uint32_t vtotal_max, 1656 + uint32_t vtotal_mid, 1657 + uint32_t vtotal_mid_frame_num, 1658 + bool program_manual_trigger) 1659 + { 1660 + union dmub_rb_cmd cmd = { 0 }; 1661 + 1662 + cmd.fams2_drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 1663 + cmd.fams2_drr_update.header.sub_type = DMUB_CMD__FAMS2_DRR_UPDATE; 1664 + cmd.fams2_drr_update.dmub_optc_state_req.tg_inst = tg_inst; 1665 + cmd.fams2_drr_update.dmub_optc_state_req.v_total_max = vtotal_max; 1666 + cmd.fams2_drr_update.dmub_optc_state_req.v_total_min = vtotal_min; 1667 + cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid = vtotal_mid; 1668 + cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num; 1669 + cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger; 1670 + 1671 + cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header); 1672 + 1673 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 1674 + } 1675 + 1676 + void dc_dmub_srv_fams2_passthrough_flip( 1677 + struct dc *dc, 1678 + struct dc_state *state, 1679 + struct dc_stream_state *stream, 1680 + struct dc_surface_update *srf_updates, 1681 + int surface_count) 1682 + { 1683 + int plane_index; 1684 
+ union dmub_rb_cmd cmds[MAX_PLANES]; 1685 + struct dc_plane_address *address; 1686 + struct dc_plane_state *plane_state; 1687 + int num_cmds = 0; 1688 + struct dc_stream_status *stream_status = dc_stream_get_status(stream); 1689 + 1690 + if (surface_count <= 0 || stream_status == NULL) 1691 + return; 1692 + 1693 + memset(cmds, 0, sizeof(union dmub_rb_cmd) * MAX_PLANES); 1694 + 1695 + /* build command for each surface update */ 1696 + for (plane_index = 0; plane_index < surface_count; plane_index++) { 1697 + plane_state = srf_updates[plane_index].surface; 1698 + address = &plane_state->address; 1699 + 1700 + /* skip if there is no address update for plane */ 1701 + if (!srf_updates[plane_index].flip_addr) 1702 + continue; 1703 + 1704 + /* build command header */ 1705 + cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; 1706 + cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP; 1707 + cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip); 1708 + 1709 + /* for chaining multiple commands, all but last command should set to 1 */ 1710 + cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1; 1711 + 1712 + /* set topology info */ 1713 + cmds[num_cmds].fams2_flip.flip_info.pipe_mask = dc_plane_get_pipe_mask(state, plane_state); 1714 + if (stream_status) 1715 + cmds[num_cmds].fams2_flip.flip_info.otg_inst = stream_status->primary_otg_inst; 1716 + 1717 + cmds[num_cmds].fams2_flip.flip_info.config.bits.is_immediate = plane_state->flip_immediate; 1718 + 1719 + /* build address info for command */ 1720 + switch (address->type) { 1721 + case PLN_ADDR_TYPE_GRAPHICS: 1722 + if (address->grph.addr.quad_part == 0) { 1723 + BREAK_TO_DEBUGGER(); 1724 + break; 1725 + } 1726 + 1727 + cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo = 1728 + address->grph.meta_addr.low_part; 1729 + cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi = 1730 + (uint16_t)address->grph.meta_addr.high_part; 1731 + 
cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo = 1732 + address->grph.addr.low_part; 1733 + cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi = 1734 + (uint16_t)address->grph.addr.high_part; 1735 + break; 1736 + case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE: 1737 + if (address->video_progressive.luma_addr.quad_part == 0 || 1738 + address->video_progressive.chroma_addr.quad_part == 0) { 1739 + BREAK_TO_DEBUGGER(); 1740 + break; 1741 + } 1742 + 1743 + cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo = 1744 + address->video_progressive.luma_meta_addr.low_part; 1745 + cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi = 1746 + (uint16_t)address->video_progressive.luma_meta_addr.high_part; 1747 + cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_lo = 1748 + address->video_progressive.chroma_meta_addr.low_part; 1749 + cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_hi = 1750 + (uint16_t)address->video_progressive.chroma_meta_addr.high_part; 1751 + cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo = 1752 + address->video_progressive.luma_addr.low_part; 1753 + cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi = 1754 + (uint16_t)address->video_progressive.luma_addr.high_part; 1755 + cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_lo = 1756 + address->video_progressive.chroma_addr.low_part; 1757 + cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_hi = 1758 + (uint16_t)address->video_progressive.chroma_addr.high_part; 1759 + break; 1760 + default: 1761 + // Should never be hit 1762 + BREAK_TO_DEBUGGER(); 1763 + break; 1764 + } 1765 + 1766 + num_cmds++; 1767 + } 1768 + 1769 + if (num_cmds > 0) { 1770 + cmds[num_cmds - 1].fams2_flip.header.multi_cmd_pending = 0; 1771 + dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT); 1772 + } 1773 + }
+17 -1
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
··· 74 74 bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type); 75 75 76 76 bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, 77 - unsigned int stream_mask); 77 + unsigned int stream_mask); 78 78 79 79 bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv); 80 80 ··· 160 160 bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code, 161 161 uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type); 162 162 163 + void dc_dmub_srv_fams2_update_config(struct dc *dc, 164 + struct dc_state *context, 165 + bool enable); 166 + void dc_dmub_srv_fams2_drr_update(struct dc *dc, 167 + uint32_t tg_inst, 168 + uint32_t vtotal_min, 169 + uint32_t vtotal_max, 170 + uint32_t vtotal_mid, 171 + uint32_t vtotal_mid_frame_num, 172 + bool program_manual_trigger); 173 + void dc_dmub_srv_fams2_passthrough_flip( 174 + struct dc *dc, 175 + struct dc_state *state, 176 + struct dc_stream_state *stream, 177 + struct dc_surface_update *srf_updates, 178 + int surface_count); 163 179 #endif /* _DMUB_DC_SRV_H_ */
+2 -1
drivers/gpu/drm/amd/display/dc/dc_helper.c
··· 266 266 267 267 va_end(ap); 268 268 269 - 270 269 /* mmio write directly */ 271 270 reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; 272 271 ··· 746 747 return "DCN 3.5"; 747 748 case DCN_VERSION_3_51: 748 749 return "DCN 3.5.1"; 750 + case DCN_VERSION_4_01: 751 + return "DCN 4.0.1"; 749 752 default: 750 753 return "Unknown"; 751 754 }
+48 -2
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
··· 60 60 61 61 enum dc_plane_addr_type { 62 62 PLN_ADDR_TYPE_GRAPHICS = 0, 63 + PLN_ADDR_TYPE_3DLUT, 63 64 PLN_ADDR_TYPE_GRPH_STEREO, 64 65 PLN_ADDR_TYPE_VIDEO_PROGRESSIVE, 65 66 PLN_ADDR_TYPE_RGBEA ··· 76 75 PHYSICAL_ADDRESS_LOC meta_addr; 77 76 union large_integer dcc_const_color; 78 77 } grph; 78 + 79 + struct { 80 + PHYSICAL_ADDRESS_LOC addr; 81 + } lut3d; 79 82 80 83 /*stereo*/ 81 84 struct { ··· 98 93 PHYSICAL_ADDRESS_LOC right_alpha_addr; 99 94 PHYSICAL_ADDRESS_LOC right_alpha_meta_addr; 100 95 union large_integer right_alpha_dcc_const_color; 101 - 102 96 } grph_stereo; 103 97 104 98 /*video progressive*/ ··· 125 121 union large_integer page_table_base; 126 122 127 123 uint8_t vmid; 124 + /* dc should use hw flip queue rather than directly programming the surface address. 125 + * Value is determined on each flip. */ 126 + bool offload_flip; 128 127 }; 129 128 130 129 struct dc_size { ··· 270 263 DC_TRIPLEBUFFER_DISABLE = 0x0, 271 264 DC_TRIPLEBUFFER_ENABLE = 0x1, 272 265 }; 266 + enum tile_split_values_new { 267 + DC_SURF_TILE_SPLIT_1KB = 0x4, 268 + }; 273 269 274 270 /* TODO: These values come from hardware spec. We need to readdress this 275 271 * if they ever change. 
··· 328 318 DC_SW_VAR_R_X = 31, 329 319 DC_SW_MAX = 32, 330 320 DC_SW_UNKNOWN = DC_SW_MAX 321 + }; 322 + 323 + // Definition of swizzle modes with addr3 ASICs 324 + enum swizzle_mode_addr3_values { 325 + DC_ADDR3_SW_LINEAR = 0, 326 + DC_ADDR3_SW_256B_2D = 1, 327 + DC_ADDR3_SW_4KB_2D = 2, 328 + DC_ADDR3_SW_64KB_2D = 3, 329 + DC_ADDR3_SW_256KB_2D = 4, 330 + DC_ADDR3_SW_4KB_3D = 5, 331 + DC_ADDR3_SW_64KB_3D = 6, 332 + DC_ADDR3_SW_256KB_3D = 7, 333 + DC_ADDR3_SW_MAX = 8, 334 + DC_ADDR3_SW_UNKNOWN = DC_ADDR3_SW_MAX 331 335 }; 332 336 333 337 union dc_tiling_info { ··· 423 399 bool rb_aligned; 424 400 bool pipe_aligned; 425 401 unsigned int num_pkrs; 426 - } gfx9; 402 + } gfx9;/*gfx9, gfx10 and above*/ 403 + struct { 404 + enum swizzle_mode_addr3_values swizzle; 405 + } gfx_addr3;/*gfx with addr3 and above*/ 427 406 }; 428 407 429 408 /* Rotation angle */ ··· 488 461 unsigned int pixel_clk_khz; 489 462 unsigned int ref_clk_khz; 490 463 struct rect viewport; 464 + struct rect recout; 491 465 struct fixed31_32 h_scale_ratio; 492 466 struct fixed31_32 v_scale_ratio; 493 467 enum dc_rotation_angle rotation; ··· 1084 1056 enum cm_gamut_coef_format { 1085 1057 CM_GAMUT_REMAP_COEF_FORMAT_S2_13 = 0, 1086 1058 CM_GAMUT_REMAP_COEF_FORMAT_S3_12 = 1 1059 + }; 1060 + 1061 + enum mpcc_gamut_remap_mode_select { 1062 + MPCC_GAMUT_REMAP_MODE_SELECT_0 = 0, 1063 + MPCC_GAMUT_REMAP_MODE_SELECT_1, 1064 + MPCC_GAMUT_REMAP_MODE_SELECT_2 1065 + }; 1066 + 1067 + enum mpcc_gamut_remap_id { 1068 + MPCC_OGAM_GAMUT_REMAP, 1069 + MPCC_MCM_FIRST_GAMUT_REMAP, 1070 + MPCC_MCM_SECOND_GAMUT_REMAP 1071 + }; 1072 + 1073 + enum cursor_matrix_mode { 1074 + CUR_MATRIX_BYPASS = 0, 1075 + CUR_MATRIX_SET_A, 1076 + CUR_MATRIX_SET_B 1087 1077 }; 1088 1078 1089 1079 struct mcif_warmup_params {
+4
drivers/gpu/drm/amd/display/dc/dc_state_priv.h
··· 101 101 const struct dc *dc, 102 102 struct dc_state *state); 103 103 104 + bool dc_state_is_fams2_in_use( 105 + const struct dc *dc, 106 + const struct dc_state *state); 107 + 104 108 #endif /* _DC_STATE_PRIV_H_ */
+77
drivers/gpu/drm/amd/display/dc/dc_types.h
··· 1172 1172 HPD_EN_FOR_SECONDARY_EDP_ONLY, 1173 1173 }; 1174 1174 1175 + enum dc_cm2_shaper_3dlut_setting { 1176 + DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL, 1177 + DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER, 1178 + /* Bypassing Shaper will always bypass 3DLUT */ 1179 + DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT 1180 + }; 1181 + 1182 + enum dc_cm2_gpu_mem_layout { 1183 + DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB, 1184 + DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR, 1185 + DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR 1186 + }; 1187 + 1188 + enum dc_cm2_gpu_mem_pixel_component_order { 1189 + DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA, 1190 + }; 1191 + 1192 + enum dc_cm2_gpu_mem_format { 1193 + DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB, 1194 + DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB, 1195 + DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10 1196 + }; 1197 + 1198 + struct dc_cm2_gpu_mem_format_parameters { 1199 + enum dc_cm2_gpu_mem_format format; 1200 + union { 1201 + struct { 1202 + /* bias & scale for float only */ 1203 + uint16_t bias; 1204 + uint16_t scale; 1205 + } float_params; 1206 + }; 1207 + }; 1208 + 1209 + enum dc_cm2_gpu_mem_size { 1210 + DC_CM2_GPU_MEM_SIZE_171717, 1211 + DC_CM2_GPU_MEM_SIZE_TRANSFORMED 1212 + }; 1213 + 1214 + struct dc_cm2_gpu_mem_parameters { 1215 + struct dc_plane_address addr; 1216 + enum dc_cm2_gpu_mem_layout layout; 1217 + struct dc_cm2_gpu_mem_format_parameters format_params; 1218 + enum dc_cm2_gpu_mem_pixel_component_order component_order; 1219 + enum dc_cm2_gpu_mem_size size; 1220 + }; 1221 + 1222 + enum dc_cm2_transfer_func_source { 1223 + DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM, 1224 + DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM 1225 + }; 1226 + 1227 + struct dc_cm2_component_settings { 1228 + enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting; 1229 + bool lut1d_enable; 1230 + }; 1231 + 1232 + /* 1233 + * All pointers in this struct must remain valid for as long as the 3DLUTs are used 1234 + */ 1235 + struct dc_cm2_func_luts { 1236 + const 
struct dc_transfer_func *shaper; 1237 + struct { 1238 + enum dc_cm2_transfer_func_source lut3d_src; 1239 + union { 1240 + const struct dc_3dlut *lut3d_func; 1241 + struct dc_cm2_gpu_mem_parameters gpu_mem_params; 1242 + }; 1243 + } lut3d_data; 1244 + const struct dc_transfer_func *lut1d_func; 1245 + }; 1246 + 1247 + struct dc_cm2_parameters { 1248 + struct dc_cm2_component_settings component_settings; 1249 + struct dc_cm2_func_luts cm2_luts; 1250 + }; 1251 + 1175 1252 enum mall_stream_type { 1176 1253 SUBVP_NONE, // subvp not in use 1177 1254 SUBVP_MAIN, // subvp in use, this stream is main stream
+117
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
··· 1063 1063 return true; 1064 1064 } 1065 1065 1066 + static bool dcn401_program_pix_clk( 1067 + struct clock_source *clock_source, 1068 + struct pixel_clk_params *pix_clk_params, 1069 + enum dp_link_encoding encoding, 1070 + struct pll_settings *pll_settings) 1071 + { 1072 + struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); 1073 + unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; 1074 + const struct pixel_rate_range_table_entry *e = 1075 + look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10); 1076 + struct bp_pixel_clock_parameters bp_pc_params = {0}; 1077 + enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; 1078 + struct dp_dto_params dto_params = { 0 }; 1079 + 1080 + dto_params.otg_inst = inst; 1081 + dto_params.signal = pix_clk_params->signal_type; 1082 + 1083 + // all but TMDS gets Driver to program DP_DTO without calling VBIOS Command table 1084 + if (!dc_is_hdmi_tmds_signal(pix_clk_params->signal_type)) { 1085 + long long ref_dtbclk_khz = clock_source->ctx->dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(clock_source->ctx->dc->clk_mgr); 1086 + long long dprefclk_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz; 1087 + long long dtbclk_p_src_clk_khz; 1088 + /* if signal is DP132B128B dtbclk_p_src is DTBCLK else DPREFCLK */ 1089 + dtbclk_p_src_clk_khz = encoding == DP_128b_132b_ENCODING ? 
ref_dtbclk_khz : dprefclk_khz; 1090 + if (e) { 1091 + dto_params.pixclk_hz = e->target_pixel_rate_khz * e->mult_factor; 1092 + dto_params.refclk_hz = dtbclk_p_src_clk_khz * e->div_factor; 1093 + } else { 1094 + dto_params.pixclk_hz = pix_clk_params->requested_pix_clk_100hz * 100; 1095 + dto_params.refclk_hz = dtbclk_p_src_clk_khz * 1000; 1096 + } 1097 + 1098 + /* enable DP DTO */ 1099 + clock_source->ctx->dc->res_pool->dccg->funcs->set_dp_dto( 1100 + clock_source->ctx->dc->res_pool->dccg, 1101 + &dto_params); 1102 + 1103 + } else { 1104 + /* disables DP DTO when provided with TMDS signal type */ 1105 + clock_source->ctx->dc->res_pool->dccg->funcs->set_dp_dto( 1106 + clock_source->ctx->dc->res_pool->dccg, 1107 + &dto_params); 1108 + 1109 + /*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/ 1110 + bp_pc_params.controller_id = pix_clk_params->controller_id; 1111 + bp_pc_params.pll_id = clock_source->id; 1112 + bp_pc_params.target_pixel_clock_100hz = pll_settings->actual_pix_clk_100hz; 1113 + bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id; 1114 + bp_pc_params.signal_type = pix_clk_params->signal_type; 1115 + 1116 + // Make sure we send the correct color depth to DMUB for HDMI 1117 + if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) { 1118 + switch (pix_clk_params->color_depth) { 1119 + case COLOR_DEPTH_888: 1120 + bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; 1121 + break; 1122 + case COLOR_DEPTH_101010: 1123 + bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_30; 1124 + break; 1125 + case COLOR_DEPTH_121212: 1126 + bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_36; 1127 + break; 1128 + case COLOR_DEPTH_161616: 1129 + bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_48; 1130 + break; 1131 + default: 1132 + bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; 1133 + break; 1134 + } 1135 + bp_pc_params.color_depth = bp_pc_colour_depth; 1136 + } 1137 + 1138 + if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { 1139 + 
bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC = 1140 + pll_settings->use_external_clk; 1141 + bp_pc_params.flags.SET_XTALIN_REF_SRC = 1142 + !pll_settings->use_external_clk; 1143 + if (pix_clk_params->flags.SUPPORT_YCBCR420) { 1144 + bp_pc_params.flags.SUPPORT_YUV_420 = 1; 1145 + } 1146 + } 1147 + if (clk_src->bios->funcs->set_pixel_clock( 1148 + clk_src->bios, &bp_pc_params) != BP_RESULT_OK) 1149 + return false; 1150 + /* Resync deep color DTO */ 1151 + if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) 1152 + dce112_program_pixel_clk_resync(clk_src, 1153 + pix_clk_params->signal_type, 1154 + pix_clk_params->color_depth, 1155 + pix_clk_params->flags.SUPPORT_YCBCR420); 1156 + } 1157 + 1158 + return true; 1159 + } 1160 + 1066 1161 static bool dce110_clock_source_power_down( 1067 1162 struct clock_source *clk_src) 1068 1163 { ··· 1405 1310 static const struct clock_source_funcs dcn31_clk_src_funcs = { 1406 1311 .cs_power_down = dce110_clock_source_power_down, 1407 1312 .program_pix_clk = dcn31_program_pix_clk, 1313 + .get_pix_clk_dividers = dcn3_get_pix_clk_dividers, 1314 + .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz 1315 + }; 1316 + 1317 + static const struct clock_source_funcs dcn401_clk_src_funcs = { 1318 + .cs_power_down = dce110_clock_source_power_down, 1319 + .program_pix_clk = dcn401_program_pix_clk, 1408 1320 .get_pix_clk_dividers = dcn3_get_pix_clk_dividers, 1409 1321 .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz 1410 1322 }; ··· 1833 1731 return ret; 1834 1732 } 1835 1733 1734 + bool dcn401_clk_src_construct( 1735 + struct dce110_clk_src *clk_src, 1736 + struct dc_context *ctx, 1737 + struct dc_bios *bios, 1738 + enum clock_source_id id, 1739 + const struct dce110_clk_src_regs *regs, 1740 + const struct dce110_clk_src_shift *cs_shift, 1741 + const struct dce110_clk_src_mask *cs_mask) 1742 + { 1743 + bool ret = dce112_clk_src_construct(clk_src, ctx, bios, id, regs, cs_shift, cs_mask); 1744 + 1745 + clk_src->base.funcs = 
&dcn401_clk_src_funcs; 1746 + 1747 + return ret; 1748 + } 1836 1749 bool dcn301_clk_src_construct( 1837 1750 struct dce110_clk_src *clk_src, 1838 1751 struct dc_context *ctx,
+8
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
··· 307 307 const struct dce110_clk_src_shift *cs_shift, 308 308 const struct dce110_clk_src_mask *cs_mask); 309 309 310 + bool dcn401_clk_src_construct( 311 + struct dce110_clk_src *clk_src, 312 + struct dc_context *ctx, 313 + struct dc_bios *bios, 314 + enum clock_source_id id, 315 + const struct dce110_clk_src_regs *regs, 316 + const struct dce110_clk_src_shift *cs_shift, 317 + const struct dce110_clk_src_mask *cs_mask); 310 318 /* this table is use to find *1.001 and /1.001 pixel rates from non-precise pixel rate */ 311 319 struct pixel_rate_range_table_entry { 312 320 unsigned int range_min_khz;
+4
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
··· 249 249 I2C_COMMON_MASK_SH_LIST_DCN30(mask_sh),\ 250 250 I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN, mask_sh) 251 251 252 + #define I2C_COMMON_MASK_SH_LIST_DCN401(mask_sh)\ 253 + I2C_COMMON_MASK_SH_LIST_DCN30(mask_sh),\ 254 + I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN, mask_sh) 255 + 252 256 struct dce_i2c_registers { 253 257 uint32_t SETUP; 254 258 uint32_t SPEED;
+2 -2
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
··· 596 596 #define SE_REG_FIELD_LIST_DCN4_01_COMMON(type) \ 597 597 type COMPRESSED_PIXEL_FORMAT;\ 598 598 type DP_VID_N_INTERVAL;\ 599 - type DIG_FIFO_OUTPUT_PIXEL_PER_CYCLE 600 - 599 + type DIG_FIFO_OUTPUT_PIXEL_PER_CYCLE;\ 600 + type DP_STEER_FIFO_ENABLE 601 601 struct dcn10_stream_encoder_shift { 602 602 SE_REG_FIELD_LIST_DCN1_0(uint8_t); 603 603 uint8_t HDMI_ACP_SEND;
+1 -1
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
··· 1492 1492 MPC_RMU_3DLUT_SIZE, &s->lut3d_size); 1493 1493 } 1494 1494 1495 - REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst], 1495 + REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst], 1496 1496 MPCC_OGAM_MODE_CURRENT, &s->rgam_mode, 1497 1497 MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut); 1498 1498 }
+4
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
··· 1091 1091 1092 1092 void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst); 1093 1093 1094 + void mpc3_mpc_init_single_inst( 1095 + struct mpc *mpc, 1096 + unsigned int mpcc_id); 1097 + 1094 1098 enum dc_lut_mode mpc3_get_ogam_current( 1095 1099 struct mpc *mpc, 1096 1100 int mpcc_id);
+1 -1
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
··· 128 128 } 129 129 } 130 130 131 - static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) 131 + void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) 132 132 { 133 133 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 134 134 unsigned int compbuf_size_segments = (compbuf_size_kb + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB;
+2
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
··· 161 161 162 162 void hubbub32_get_mall_en(struct hubbub *hubbub, unsigned int *mall_in_use); 163 163 164 + void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase); 165 + 164 166 #endif
+3
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
··· 30 30 #include "basics/conversion.h" 31 31 #include "dcn10/dcn10_cm_common.h" 32 32 #include "dc.h" 33 + #include "dcn401/dcn401_mpc.h" 33 34 34 35 #define REG(reg)\ 35 36 mpc30->mpc_regs->reg ··· 1018 1017 .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, 1019 1018 .get_mpc_out_mux = mpc1_get_mpc_out_mux, 1020 1019 .set_bg_color = mpc1_set_bg_color, 1020 + .set_movable_cm_location = mpc401_set_movable_cm_location, 1021 + .populate_lut = mpc401_populate_lut, 1021 1022 }; 1022 1023 1023 1024
+4
drivers/gpu/drm/amd/display/dc/dml/Makefile
··· 140 140 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags) 141 141 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags) 142 142 143 + CFLAGS_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_ccflags) 144 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_rcflags) 145 + 143 146 ifdef CONFIG_DRM_AMD_DC_FP 144 147 DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o 145 148 DML += dcn10/dcn10_fpu.o ··· 163 160 DML += dcn314/dcn314_fpu.o 164 161 DML += dcn35/dcn35_fpu.o 165 162 DML += dcn351/dcn351_fpu.o 163 + DML += dcn401/dcn401_fpu.o 166 164 DML += dsc/rc_calc_fpu.o 167 165 DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o 168 166 endif
-3
drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
··· 24 24 * 25 25 */ 26 26 27 - #define UNIT_TEST 0 28 - #if !UNIT_TEST 29 27 #include "dc.h" 30 - #endif 31 28 #include "../display_mode_lib.h" 32 29 #include "display_mode_vba_314.h" 33 30 #include "../dml_inline_defs.h"
+1
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
··· 632 632 unsigned int ref_freq_to_pix_freq; 633 633 unsigned int vratio_prefetch; 634 634 unsigned int vratio_prefetch_c; 635 + unsigned int refcyc_per_tdlut_group; 635 636 unsigned int refcyc_per_pte_group_vblank_l; 636 637 unsigned int refcyc_per_pte_group_vblank_c; 637 638 unsigned int refcyc_per_meta_chunk_vblank_l;
+80
drivers/gpu/drm/amd/display/dc/dml2/Makefile
··· 67 67 endif 68 68 endif 69 69 70 + # DRIVER_BUILD is mostly used in DML2.1 source 71 + subdir-ccflags-y += -DDRIVER_BUILD=1 70 72 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2 73 + subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core 74 + subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/ 75 + subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_dpmm/ 76 + subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_pmo/ 77 + subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_standalone_libraries/ 78 + subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/inc 79 + subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/inc 80 + subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/ 81 + 71 82 CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag) 72 83 CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags) 73 84 CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags) ··· 106 95 AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2)) 107 96 108 97 AMD_DISPLAY_FILES += $(AMD_DAL_DML2) 98 + 99 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top.o := $(dml2_ccflags) 100 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top_mcache.o := $(dml2_ccflags) 101 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.o := $(dml2_ccflags) 102 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) 103 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) -Wframe-larger-than=2048 104 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) 105 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_shared.o := $(dml2_ccflags) 106 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) 107 + 
CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) 108 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags) 109 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags) 110 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags) 111 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.o := $(dml2_ccflags) 112 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) 113 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) 114 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) 115 + 116 + 117 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags) 118 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags) 119 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags) 120 + CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_ccflags) 121 + 122 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top.o := $(dml2_rcflags) 123 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top_mcache.o := $(dml2_rcflags) 124 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.o := $(dml2_rcflags) 125 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) 126 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) -Wframe-larger-than=2048 127 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) 128 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_shared.o := $(dml2_rcflags) 129 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) 130 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) 131 
+ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) 132 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags) 133 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags) 134 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.o := $(dml2_rcflags) 135 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags) 136 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags) 137 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags) 138 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags) 139 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags) 140 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags) 141 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/inc/dml2_debug.o := $(dml2_rcflags) 142 + 143 + DML21 := src/dml2_top/dml_top.o 144 + DML21 += src/dml2_top/dml_top_mcache.o 145 + DML21 += src/dml2_top/dml2_top_optimization.o 146 + DML21 += src/inc/dml2_debug.o 147 + DML21 += src/dml2_core/dml2_core_dcn4.o 148 + DML21 += src/dml2_core/dml2_core_factory.o 149 + DML21 += src/dml2_core/dml2_core_dcn4_calcs.o 150 + DML21 += src/dml2_core/dml2_core_shared.o 151 + DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o 152 + DML21 += src/dml2_dpmm/dml2_dpmm_factory.o 153 + DML21 += src/dml2_mcg/dml2_mcg_dcn4.o 154 + DML21 += src/dml2_mcg/dml2_mcg_factory.o 155 + DML21 += src/dml2_pmo/dml2_pmo_dcn3.o 156 + DML21 += src/dml2_pmo/dml2_pmo_dcn4.o 157 + DML21 += src/dml2_pmo/dml2_pmo_factory.o 158 + DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o 159 + DML21 += src/dml2_standalone_libraries/lib_float_math.o 160 + DML21 += dml21_translation_helper.o 161 + DML21 += dml21_wrapper.o 162 + DML21 += dml21_utils.o 163 + 164 + AMD_DAL_DML21 = $(addprefix 
$(AMDDALPATH)/dc/dml2/dml21/,$(DML21)) 165 + 166 + AMD_DISPLAY_FILES += $(AMD_DAL_DML21) 109 167
+1
drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
··· 36 36 dml_project_dcn321 = 2, 37 37 dml_project_dcn35 = 3, 38 38 dml_project_dcn351 = 4, 39 + dml_project_dcn401 = 5, 39 40 }; 40 41 enum dml_prefetch_modes { 41 42 dml_prefetch_support_uclk_fclk_and_stutter_if_possible = 0,
+72 -2
drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
··· 812 812 stream->stream_id, plane_idx, &plane_id); 813 813 cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); 814 814 mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; 815 + } else if (ctx->architecture == dml2_architecture_21) { 816 + if (ctx->config.svp_pstate.callbacks.get_stream_subvp_type(state, stream) == SUBVP_PHANTOM) { 817 + struct dc_stream_state *main_stream; 818 + struct dc_stream_status *main_stream_status; 819 + 820 + /* get stream id of main stream */ 821 + main_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(state, stream); 822 + main_stream_status = ctx->config.callbacks.get_stream_status(state, main_stream); 823 + 824 + /* get plane id for associated main plane */ 825 + get_plane_id(ctx, state, main_stream_status->plane_states[plane_idx], 826 + main_stream->stream_id, plane_idx, &plane_id); 827 + } else { 828 + get_plane_id(ctx, state, status->plane_states[plane_idx], 829 + stream->stream_id, plane_idx, &plane_id); 830 + } 831 + 832 + cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); 833 + mpc_factor = ctx->v21.mode_programming.programming->plane_programming[cfg_idx].num_dpps_required; 815 834 } else { 816 835 mpc_factor = 1; 817 836 ASSERT(false); ··· 866 847 break; 867 848 } 868 849 } 850 + else if (ctx->architecture == dml2_architecture_21) { 851 + if (ctx->config.svp_pstate.callbacks.get_stream_subvp_type(state, stream) == SUBVP_PHANTOM) { 852 + struct dc_stream_state *main_stream; 853 + 854 + /* get stream id of main stream */ 855 + main_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(state, stream); 856 + 857 + /* get cfg idx for associated main stream */ 858 + cfg_idx = find_disp_cfg_idx_by_stream_id( 859 + mapping, main_stream->stream_id); 860 + } else { 861 + cfg_idx = find_disp_cfg_idx_by_stream_id( 862 + mapping, stream->stream_id); 863 + } 864 + 865 + return ctx->v21.mode_programming.programming->stream_programming[cfg_idx].num_odms_required; 866 + } 867 + 869 868 
ASSERT(false); 870 869 return 1; 871 870 } ··· 1036 999 unsigned int stream_id; 1037 1000 1038 1001 const unsigned int *ODMMode, *DPPPerSurface; 1002 + unsigned int odm_mode_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}, dpp_per_surface_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}; 1039 1003 struct dc_pipe_mapping_scratch scratch; 1040 1004 1041 1005 if (ctx->config.map_dc_pipes_with_callbacks) 1042 1006 return map_dc_pipes_with_callbacks( 1043 1007 ctx, state, disp_cfg, mapping, existing_state); 1044 1008 1045 - ODMMode = (unsigned int *)disp_cfg->hw.ODMMode; 1046 - DPPPerSurface = disp_cfg->hw.DPPPerSurface; 1009 + if (ctx->architecture == dml2_architecture_21) { 1010 + /* 1011 + * Extract ODM and DPP outputs from DML2.1 and map them in an array as required for pipe mapping in dml2_map_dc_pipes. 1012 + * As data cannot be directly extracted in const pointers, assign these arrays to const pointers before proceeding to 1013 + * maximize the reuse of existing code. Const pointers are required because dml2.0 dml_display_cfg_st is const. 1014 + * 1015 + */ 1016 + for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) { 1017 + odm_mode_array[i] = ctx->v21.mode_programming.programming->stream_programming[i].num_odms_required; 1018 + dpp_per_surface_array[i] = ctx->v21.mode_programming.programming->plane_programming[i].num_dpps_required; 1019 + } 1020 + 1021 + ODMMode = (const unsigned int *)odm_mode_array; 1022 + DPPPerSurface = (const unsigned int *)dpp_per_surface_array; 1023 + } else { 1024 + ODMMode = (unsigned int *)disp_cfg->hw.ODMMode; 1025 + DPPPerSurface = disp_cfg->hw.DPPPerSurface; 1026 + } 1047 1027 1048 1028 for (stream_index = 0; stream_index < state->stream_count; stream_index++) { 1049 1029 memset(&scratch, 0, sizeof(struct dc_pipe_mapping_scratch)); ··· 1079 1025 scratch.odm_info.odm_factor = 1; 1080 1026 } 1081 1027 1028 + /* After DML2.1 update, ODM interpretation needs to change and is no longer same as for DML2.0. 
1029 + * This is not an issue with new resource management logic. This block ensures backward compat 1030 + * with legacy pipe management with updated DML. 1031 + */ 1032 + if (ctx->architecture == dml2_architecture_21) { 1033 + if (ODMMode[stream_disp_cfg_index] == 1) { 1034 + scratch.odm_info.odm_factor = 1; 1035 + } else if (ODMMode[stream_disp_cfg_index] == 2) { 1036 + scratch.odm_info.odm_factor = 2; 1037 + } else if (ODMMode[stream_disp_cfg_index] == 4) { 1038 + scratch.odm_info.odm_factor = 4; 1039 + } else { 1040 + ASSERT(false); 1041 + scratch.odm_info.odm_factor = 1; 1042 + } 1043 + } 1082 1044 calculate_odm_slices(state->streams[stream_index], scratch.odm_info.odm_factor, scratch.odm_info.odm_slice_end_x); 1083 1045 1084 1046 // If there are no planes, you still want to setup ODM...
+20
drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
··· 32 32 #include "dml2_wrapper.h" 33 33 #include "dml2_policy.h" 34 34 35 + #include "dml_top.h" 36 + #include "dml21_wrapper.h" 35 37 36 38 struct dml2_wrapper_optimize_configuration_params { 37 39 struct display_mode_lib_st *dml_core_ctx; ··· 109 107 110 108 enum dml2_architecture { 111 109 dml2_architecture_20, 110 + dml2_architecture_21 111 + }; 112 + 113 + struct prepare_mcache_programming_locals { 114 + struct dml2_build_mcache_programming_in_out build_mcache_programming_params; 115 + }; 116 + 117 + struct dml21_wrapper_scratch { 118 + struct prepare_mcache_programming_locals prepare_mcache_locals; 119 + struct pipe_ctx temp_pipe; 112 120 }; 113 121 114 122 struct dml2_pipe_combine_factor { ··· 142 130 struct dml2_wrapper_scratch scratch; 143 131 struct dcn_watermarks g6_temp_read_watermark_set; 144 132 } v20; 133 + struct { 134 + struct dml21_wrapper_scratch scratch; 135 + struct dml2_initialize_instance_in_out dml_init; 136 + struct dml2_display_cfg display_config; 137 + struct dml2_check_mode_supported_in_out mode_support; 138 + struct dml2_build_mode_programming_in_out mode_programming; 139 + struct dml2_dml_to_dc_pipe_mapping dml_to_dc_pipe_mapping; 140 + } v21; 145 141 }; 146 142 }; 147 143
+125 -1
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
··· 34 34 void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out) 35 35 { 36 36 switch (dml2->v20.dml_core_ctx.project) { 37 - 38 37 case dml_project_dcn32: 39 38 case dml_project_dcn321: 40 39 default: ··· 175 176 out->config_return_buffer_segment_size_in_kbytes = 64; /*required, but not exist,, hard coded in dml2_translate_ip_params*/ 176 177 break; 177 178 179 + case dml_project_dcn401: 180 + // Hardcoded values for DCN4m 181 + out->vblank_nom_default_us = 668; //600; 182 + out->rob_buffer_size_kbytes = 192; //128; 183 + out->config_return_buffer_size_in_kbytes = 1344; //1280; 184 + out->config_return_buffer_segment_size_in_kbytes = 64; 185 + out->compressed_buffer_segment_size_in_kbytes = 64; 186 + out->meta_fifo_size_in_kentries = 22; 187 + out->dpte_buffer_size_in_pte_reqs_luma = 68; 188 + out->dpte_buffer_size_in_pte_reqs_chroma = 36; 189 + out->gpuvm_max_page_table_levels = 4; 190 + out->pixel_chunk_size_kbytes = 8; 191 + out->alpha_pixel_chunk_size_kbytes = 4; 192 + out->min_pixel_chunk_size_bytes = 1024; 193 + out->writeback_chunk_size_kbytes = 8; 194 + out->line_buffer_size_bits = 1171920; 195 + out->max_line_buffer_lines = 32; 196 + out->writeback_interface_buffer_size_kbytes = 90; 197 + //Number of pipes after DCN Pipe harvesting 198 + out->max_num_dpp = dml2->config.dcn_pipe_count; 199 + out->max_num_otg = dml2->config.dcn_pipe_count; 200 + out->max_num_wb = 1; 201 + out->max_dchub_pscl_bw_pix_per_clk = 4; 202 + out->max_pscl_lb_bw_pix_per_clk = 2; 203 + out->max_lb_vscl_bw_pix_per_clk = 4; 204 + out->max_vscl_hscl_bw_pix_per_clk = 4; 205 + out->max_hscl_ratio = 6; 206 + out->max_vscl_ratio = 6; 207 + out->max_hscl_taps = 8; 208 + out->max_vscl_taps = 8; 209 + out->dispclk_ramp_margin_percent = 1; 210 + out->dppclk_delay_subtotal = 47; 211 + out->dppclk_delay_scl = 50; 212 + out->dppclk_delay_scl_lb_only = 16; 213 + out->dppclk_delay_cnvc_formatter = 28; 214 + out->dppclk_delay_cnvc_cursor = 6; 215 + 
out->dispclk_delay_subtotal = 125; 216 + out->cursor_buffer_size = 24; //16 217 + out->cursor_chunk_size = 2; 218 + out->max_inter_dcn_tile_repeaters = 8; 219 + out->writeback_max_hscl_ratio = 1; 220 + out->writeback_max_vscl_ratio = 1; 221 + out->writeback_min_hscl_ratio = 1; 222 + out->writeback_min_vscl_ratio = 1; 223 + out->writeback_max_hscl_taps = 1; 224 + out->writeback_max_vscl_taps = 1; 225 + out->writeback_line_buffer_buffer_size = 0; 226 + out->num_dsc = 4; 227 + out->maximum_dsc_bits_per_component = 12; 228 + out->maximum_pixels_per_line_per_dsc_unit = 5760; 229 + out->dsc422_native_support = true; 230 + out->dcc_supported = true; 231 + out->ptoi_supported = false; 232 + 233 + out->gpuvm_enable = false; 234 + out->hostvm_enable = false; 235 + out->cursor_64bpp_support = true; //false; 236 + out->dynamic_metadata_vm_enabled = false; 237 + 238 + out->max_num_hdmi_frl_outputs = 1; 239 + out->max_num_dp2p0_outputs = 4; //2; 240 + out->max_num_dp2p0_streams = 4; 241 + break; 178 242 } 179 243 } 180 244 ··· 298 236 out->dispclk_dppclk_vco_speed_mhz = 3600; 299 237 break; 300 238 239 + case dml_project_dcn401: 240 + out->pct_ideal_fabric_bw_after_urgent = 76; //67; 241 + out->max_avg_sdp_bw_use_normal_percent = 75; //80; 242 + out->max_avg_fabric_bw_use_normal_percent = 57; //60; 243 + 244 + out->urgent_out_of_order_return_per_channel_pixel_only_bytes = 0; //4096; 245 + out->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 0; //4096; 246 + out->urgent_out_of_order_return_per_channel_vm_only_bytes = 0; //4096; 247 + 248 + out->num_chans = 16; 249 + out->round_trip_ping_latency_dcfclk_cycles = 1000; //263; 250 + out->smn_latency_us = 0; //2 us 251 + out->mall_allocated_for_dcn_mbytes = dml2->config.mall_cfg.max_cab_allocation_bytes / 1048576; // 64; 252 + break; 301 253 } 302 254 /* ---Overrides if available--- */ 303 255 if (dml2->config.bbox_overrides.dram_num_chan) ··· 420 344 p->in_states->state_array[1].fabricclk_mhz = 2250.0; 421 345 
p->in_states->state_array[1].dcfclk_mhz = 1434.0; 422 346 p->in_states->state_array[1].dram_speed_mts = 1000 * transactions_per_mem_clock; 347 + break; 348 + case dml_project_dcn401: 349 + p->in_states->num_states = 2; 350 + transactions_per_mem_clock = 16; 351 + p->in_states->state_array[0].socclk_mhz = 300; //620.0; 352 + p->in_states->state_array[0].dscclk_mhz = 666.667; //716.667; 353 + p->in_states->state_array[0].phyclk_mhz = 810; 354 + p->in_states->state_array[0].phyclk_d18_mhz = 667; 355 + p->in_states->state_array[0].phyclk_d32_mhz = 625; 356 + p->in_states->state_array[0].dtbclk_mhz = 2000; //1564.0; 357 + p->in_states->state_array[0].fabricclk_mhz = 300; //450.0; 358 + p->in_states->state_array[0].dcfclk_mhz = 200; //300.0; 359 + p->in_states->state_array[0].dispclk_mhz = 2000; //2150.0; 360 + p->in_states->state_array[0].dppclk_mhz = 2000; //2150.0; 361 + p->in_states->state_array[0].dram_speed_mts = 97 * transactions_per_mem_clock; //100 * 362 + 363 + p->in_states->state_array[0].urgent_latency_pixel_data_only_us = 4; 364 + p->in_states->state_array[0].urgent_latency_pixel_mixed_with_vm_data_us = 0; 365 + p->in_states->state_array[0].urgent_latency_vm_data_only_us = 0; 366 + p->in_states->state_array[0].writeback_latency_us = 12; 367 + p->in_states->state_array[0].urgent_latency_adjustment_fabric_clock_component_us = 1; 368 + p->in_states->state_array[0].urgent_latency_adjustment_fabric_clock_reference_mhz = 1000; //3000; 369 + p->in_states->state_array[0].sr_exit_z8_time_us = 0; 370 + p->in_states->state_array[0].sr_enter_plus_exit_z8_time_us = 0; 371 + p->in_states->state_array[0].dram_clock_change_latency_us = 400; 372 + p->in_states->state_array[0].use_ideal_dram_bw_strobe = true; 373 + p->in_states->state_array[0].sr_exit_time_us = 15.70; //42.97; 374 + p->in_states->state_array[0].sr_enter_plus_exit_time_us = 20.20; //49.94; 375 + p->in_states->state_array[0].fclk_change_latency_us = 0; //20; 376 + 
p->in_states->state_array[0].usr_retraining_latency_us = 0; //2; 377 + 378 + p->in_states->state_array[1].socclk_mhz = 1600; //1200.0; 379 + p->in_states->state_array[1].fabricclk_mhz = 2500; //2500.0; 380 + p->in_states->state_array[1].dcfclk_mhz = 1800; //1564.0; 381 + p->in_states->state_array[1].dram_speed_mts = 1125 * transactions_per_mem_clock; 423 382 break; 424 383 } 425 384 ··· 882 771 default: 883 772 out->SurfaceTiling[location] = (enum dml_swizzle_mode)in->tiling_info.gfx9.swizzle; 884 773 break; 774 + case dml_project_dcn401: 775 + // Temporary use gfx11 swizzle in dml, until proper dml for DCN4x is integrated/implemented 776 + switch (in->tiling_info.gfx_addr3.swizzle) { 777 + case DC_ADDR3_SW_4KB_2D: 778 + case DC_ADDR3_SW_64KB_2D: 779 + case DC_ADDR3_SW_256KB_2D: 780 + default: 781 + out->SurfaceTiling[location] = dml_sw_64kb_r_x; 782 + break; 783 + case DC_ADDR3_SW_LINEAR: 784 + out->SurfaceTiling[location] = dml_sw_linear; 785 + break; 786 + } 885 787 } 886 788 887 789 switch (in->format) {
+30
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
··· 31 31 #include "dml2_translation_helper.h" 32 32 #include "dml2_mall_phantom.h" 33 33 #include "dml2_dc_resource_mgmt.h" 34 + #include "dml21_wrapper.h" 34 35 35 36 36 37 static void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out) ··· 700 699 return false; 701 700 dml2_apply_debug_options(in_dc, dml2); 702 701 702 + /* DML2.1 validation path */ 703 + if (dml2->architecture == dml2_architecture_21) { 704 + out = dml21_validate(in_dc, context, dml2, fast_validate); 705 + return out; 706 + } 703 707 704 708 /* Use dml_validate_only for fast_validate path */ 705 709 if (fast_validate) ··· 721 715 722 716 static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) 723 717 { 718 + // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete. 719 + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) { 720 + dml21_reinit(in_dc, dml2, config); 721 + } 724 722 725 723 // Store config options 726 724 (*dml2)->config = *config; ··· 742 732 case DCN_VERSION_3_21: 743 733 (*dml2)->v20.dml_core_ctx.project = dml_project_dcn321; 744 734 break; 735 + case DCN_VERSION_4_01: 736 + (*dml2)->v20.dml_core_ctx.project = dml_project_dcn401; 737 + break; 745 738 default: 746 739 (*dml2)->v20.dml_core_ctx.project = dml_project_default; 747 740 break; ··· 759 746 760 747 bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) 761 748 { 749 + DC_FP_START(); 750 + // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete. 
751 + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) { 752 + return dml21_create(in_dc, dml2, config); 753 + } 754 + 762 755 // Allocate Mode Lib Ctx 763 756 *dml2 = dml2_allocate_memory(); 764 757 ··· 773 754 774 755 dml2_init(in_dc, config, dml2); 775 756 757 + DC_FP_END(); 776 758 return true; 777 759 } 778 760 ··· 795 775 void dml2_copy(struct dml2_context *dst_dml2, 796 776 struct dml2_context *src_dml2) 797 777 { 778 + if (src_dml2->architecture == dml2_architecture_21) { 779 + dml21_copy(dst_dml2, src_dml2); 780 + return; 781 + } 798 782 /* copy Mode Lib Ctx */ 799 783 memcpy(dst_dml2, src_dml2, sizeof(struct dml2_context)); 800 784 } ··· 806 782 bool dml2_create_copy(struct dml2_context **dst_dml2, 807 783 struct dml2_context *src_dml2) 808 784 { 785 + if (src_dml2->architecture == dml2_architecture_21) 786 + return dml21_create_copy(dst_dml2, src_dml2); 809 787 /* Allocate Mode Lib Ctx */ 810 788 *dst_dml2 = dml2_allocate_memory(); 811 789 ··· 824 798 const struct dml2_configuration_options *config, 825 799 struct dml2_context **dml2) 826 800 { 801 + // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete. 802 + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) { 803 + dml21_reinit(in_dc, dml2, config); 804 + } 827 805 828 806 dml2_init(in_dc, config, dml2); 829 807 }
+14
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
··· 192 192 struct dml2_clks_limit_table clks_table; 193 193 }; 194 194 195 + enum dml2_force_pstate_methods { 196 + dml2_force_pstate_method_auto = 0, 197 + dml2_force_pstate_method_vactive, 198 + dml2_force_pstate_method_vblank, 199 + dml2_force_pstate_method_drr, 200 + dml2_force_pstate_method_subvp, 201 + }; 202 + 195 203 struct dml2_configuration_options { 196 204 int dcn_pipe_count; 197 205 bool use_native_pstate_optimization; ··· 223 215 struct dml2_soc_bbox_overrides bbox_overrides; 224 216 unsigned int max_segments_per_hubp; 225 217 unsigned int det_segment_size; 218 + /* Only for debugging purposes when initializing SOCBB params via tool for DML21. */ 219 + struct socbb_ip_params_external *external_socbb_ip_params; 220 + struct { 221 + bool force_pstate_method_enable; 222 + enum dml2_force_pstate_methods force_pstate_method_value; 223 + } pmo; 226 224 bool map_dc_pipes_with_callbacks; 227 225 228 226 bool use_clock_dc_limits;
+6
drivers/gpu/drm/amd/display/dc/dpp/Makefile
··· 74 74 75 75 ############################################################################### 76 76 77 + DPP_DCN401 = dcn401_dpp.o dcn401_dpp_cm.o dcn401_dpp_dscl.o 78 + 79 + AMD_DAL_DPP_DCN401 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn401/,$(DPP_DCN401)) 80 + 81 + AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN401) 82 + 77 83 endif
+56
drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c
··· 432 432 433 433 return true; 434 434 } 435 + /*compute the maximum number of lines that we can fit in the line buffer*/ 436 + void dscl2_spl_calc_lb_num_partitions( 437 + bool alpha_en, 438 + const struct spl_scaler_data *scl_data, 439 + enum lb_memory_config lb_config, 440 + int *num_part_y, 441 + int *num_part_c) 442 + { 443 + int memory_line_size_y, memory_line_size_c, memory_line_size_a, 444 + lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; 435 445 446 + int line_size = scl_data->viewport.width < scl_data->recout.width ? 447 + scl_data->viewport.width : scl_data->recout.width; 448 + int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? 449 + scl_data->viewport_c.width : scl_data->recout.width; 450 + 451 + if (line_size == 0) 452 + line_size = 1; 453 + 454 + if (line_size_c == 0) 455 + line_size_c = 1; 456 + 457 + memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */ 458 + memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */ 459 + memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ 460 + 461 + if (lb_config == LB_MEMORY_CONFIG_1) { 462 + lb_memory_size = 970; 463 + lb_memory_size_c = 970; 464 + lb_memory_size_a = 970; 465 + } else if (lb_config == LB_MEMORY_CONFIG_2) { 466 + lb_memory_size = 1290; 467 + lb_memory_size_c = 1290; 468 + lb_memory_size_a = 1290; 469 + } else if (lb_config == LB_MEMORY_CONFIG_3) { 470 + /* 420 mode: using 3rd mem from Y, Cr and Cb */ 471 + lb_memory_size = 970 + 1290 + 484 + 484 + 484; 472 + lb_memory_size_c = 970 + 1290; 473 + lb_memory_size_a = 970 + 1290 + 484; 474 + } else { 475 + lb_memory_size = 970 + 1290 + 484; 476 + lb_memory_size_c = 970 + 1290 + 484; 477 + lb_memory_size_a = 970 + 1290 + 484; 478 + } 479 + *num_part_y = lb_memory_size / memory_line_size_y; 480 + *num_part_c = lb_memory_size_c / memory_line_size_c; 481 + num_partitions_a = lb_memory_size_a / memory_line_size_a; 482 + 483 + if (alpha_en 484 + && (num_partitions_a < *num_part_y)) 485 + 
*num_part_y = num_partitions_a; 486 + 487 + if (*num_part_y > 64) 488 + *num_part_y = 64; 489 + if (*num_part_c > 64) 490 + *num_part_c = 64; 491 + }
+8 -1
drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h
··· 26 26 #define __DCN20_DPP_H__ 27 27 28 28 #include "dcn10/dcn10_dpp.h" 29 - 29 + #include "spl/dc_spl_types.h" 30 30 #define TO_DCN20_DPP(dpp)\ 31 31 container_of(dpp, struct dcn20_dpp, base) 32 32 ··· 747 747 enum lb_memory_config lb_config, 748 748 int *num_part_y, 749 749 int *num_part_c); 750 + 751 + void dscl2_spl_calc_lb_num_partitions( 752 + bool alpha_en, 753 + const struct spl_scaler_data *scl_data, 754 + enum lb_memory_config lb_config, 755 + int *num_part_y, 756 + int *num_part_c); 750 757 751 758 void dpp2_set_cursor_attributes( 752 759 struct dpp *dpp_base,
+1 -3
drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
··· 175 175 SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM, id),\ 176 176 SRI(CM_BLNDGAM_LUT_CONTROL, CM, id) 177 177 178 - 179 - 180 178 #define DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh)\ 181 179 TF_SF(CM0_CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, mask_sh),\ 182 180 TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_EN, mask_sh),\ ··· 598 600 struct scaler_data *scl_data, 599 601 const struct scaling_taps *in_taps); 600 602 601 - void dpp3_cnv_setup ( 603 + void dpp3_cnv_setup( 602 604 struct dpp *dpp_base, 603 605 enum surface_pixel_format format, 604 606 enum expansion_mode mode,
+73
drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
··· 163 163 164 164 return true; 165 165 } 166 + void dscl32_spl_calc_lb_num_partitions( 167 + bool alpha_en, 168 + const struct spl_scaler_data *scl_data, 169 + enum lb_memory_config lb_config, 170 + int *num_part_y, 171 + int *num_part_c) 172 + { 173 + int memory_line_size_y, memory_line_size_c, memory_line_size_a, 174 + lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; 175 + 176 + int line_size = scl_data->viewport.width < scl_data->recout.width ? 177 + scl_data->viewport.width : scl_data->recout.width; 178 + int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? 179 + scl_data->viewport_c.width : scl_data->recout.width; 180 + 181 + if (line_size == 0) 182 + line_size = 1; 183 + 184 + if (line_size_c == 0) 185 + line_size_c = 1; 186 + 187 + memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */ 188 + memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */ 189 + memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ 190 + 191 + if (lb_config == LB_MEMORY_CONFIG_1) { 192 + lb_memory_size = 970; 193 + lb_memory_size_c = 970; 194 + lb_memory_size_a = 970; 195 + } else if (lb_config == LB_MEMORY_CONFIG_2) { 196 + lb_memory_size = 1290; 197 + lb_memory_size_c = 1290; 198 + lb_memory_size_a = 1290; 199 + } else if (lb_config == LB_MEMORY_CONFIG_3) { 200 + if (scl_data->viewport.width == scl_data->h_active && 201 + scl_data->viewport.height == scl_data->v_active) { 202 + /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */ 203 + /* use increased LB size for calculation only if Scaler not enabled */ 204 + lb_memory_size = 970 + 1290 + 1170 + 1170 + 1170; 205 + lb_memory_size_c = 970 + 1290; 206 + lb_memory_size_a = 970 + 1290 + 1170; 207 + } else { 208 + /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */ 209 + lb_memory_size = 970 + 1290 + 484 + 484 + 484; 210 + lb_memory_size_c = 970 + 1290; 211 + lb_memory_size_a = 970 + 1290 + 484; 212 + } 213 + } else { 214 + if 
(scl_data->viewport.width == scl_data->h_active && 215 + scl_data->viewport.height == scl_data->v_active) { 216 + /* use increased LB size for calculation only if Scaler not enabled */ 217 + lb_memory_size = 970 + 1290 + 1170; 218 + lb_memory_size_c = 970 + 1290 + 1170; 219 + lb_memory_size_a = 970 + 1290 + 1170; 220 + } else { 221 + lb_memory_size = 970 + 1290 + 484; 222 + lb_memory_size_c = 970 + 1290 + 484; 223 + lb_memory_size_a = 970 + 1290 + 484; 224 + } 225 + } 226 + *num_part_y = lb_memory_size / memory_line_size_y; 227 + *num_part_c = lb_memory_size_c / memory_line_size_c; 228 + num_partitions_a = lb_memory_size_a / memory_line_size_a; 229 + 230 + if (alpha_en 231 + && (num_partitions_a < *num_part_y)) 232 + *num_part_y = num_partitions_a; 233 + 234 + if (*num_part_y > 32) 235 + *num_part_y = 32; 236 + if (*num_part_c > 32) 237 + *num_part_c = 32; 238 + }
+8
drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h
··· 27 27 28 28 #include "dcn20/dcn20_dpp.h" 29 29 #include "dcn30/dcn30_dpp.h" 30 + #include "spl/dc_spl_types.h" 30 31 31 32 bool dpp32_construct(struct dcn3_dpp *dpp3, 32 33 struct dc_context *ctx, ··· 35 34 const struct dcn3_dpp_registers *tf_regs, 36 35 const struct dcn3_dpp_shift *tf_shift, 37 36 const struct dcn3_dpp_mask *tf_mask); 37 + 38 + void dscl32_spl_calc_lb_num_partitions( 39 + bool alpha_en, 40 + const struct spl_scaler_data *scl_data, 41 + enum lb_memory_config lb_config, 42 + int *num_part_y, 43 + int *num_part_c); 38 44 39 45 #endif /* __DCN32_DPP_H__ */
+7 -1
drivers/gpu/drm/amd/display/dc/dsc/Makefile
··· 22 22 23 23 AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn35/,$(DSC_DCN35)) 24 24 25 + ############################################################################### 26 + # DCN401 27 + ############################################################################### 25 28 29 + DSC_DCN401 += dcn401_dsc.o 26 30 27 - endif 31 + AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn401/,$(DSC_DCN401)) 32 + 28 33 29 34 DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o 30 35 ··· 37 32 38 33 AMD_DISPLAY_FILES += $(AMD_DAL_DSC) 39 34 35 + endif
+5 -97
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
··· 49 49 #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) 50 50 #endif 51 51 52 - /* Need to account for padding due to pixel-to-symbol packing 53 - * for uncompressed 128b/132b streams. 54 - */ 55 - static uint32_t apply_128b_132b_stream_overhead( 56 - const struct dc_crtc_timing *timing, const uint32_t kbps) 57 - { 58 - uint32_t total_kbps = kbps; 59 - 60 - if (disable_128b_132b_stream_overhead) 61 - return kbps; 62 - 63 - if (!timing->flags.DSC) { 64 - struct fixed31_32 bpp; 65 - struct fixed31_32 overhead_factor; 66 - 67 - bpp = dc_fixpt_from_int(kbps); 68 - bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10); 69 - 70 - /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size) 71 - * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive 72 - */ 73 - overhead_factor = dc_fixpt_from_int(timing->h_addressable); 74 - overhead_factor = dc_fixpt_mul(overhead_factor, bpp); 75 - overhead_factor = dc_fixpt_div_int(overhead_factor, 128); 76 - overhead_factor = dc_fixpt_div( 77 - dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)), 78 - overhead_factor); 79 - 80 - total_kbps = dc_fixpt_ceil( 81 - dc_fixpt_mul_int(overhead_factor, total_kbps)); 82 - } 83 - 84 - return total_kbps; 85 - } 86 - 87 - uint32_t dc_bandwidth_in_kbps_from_timing( 88 - const struct dc_crtc_timing *timing, 89 - const enum dc_link_encoding_format link_encoding) 90 - { 91 - uint32_t bits_per_channel = 0; 92 - uint32_t kbps; 93 - 94 - if (timing->flags.DSC) 95 - return dc_dsc_stream_bandwidth_in_kbps(timing, 96 - timing->dsc_cfg.bits_per_pixel, 97 - timing->dsc_cfg.num_slices_h, 98 - timing->dsc_cfg.is_dp); 99 - 100 - switch (timing->display_color_depth) { 101 - case COLOR_DEPTH_666: 102 - bits_per_channel = 6; 103 - break; 104 - case COLOR_DEPTH_888: 105 - bits_per_channel = 8; 106 - break; 107 - case COLOR_DEPTH_101010: 108 - bits_per_channel = 10; 109 - break; 110 - case COLOR_DEPTH_121212: 111 - bits_per_channel = 12; 112 - break; 113 - case COLOR_DEPTH_141414: 114 - 
bits_per_channel = 14; 115 - break; 116 - case COLOR_DEPTH_161616: 117 - bits_per_channel = 16; 118 - break; 119 - default: 120 - ASSERT(bits_per_channel != 0); 121 - bits_per_channel = 8; 122 - break; 123 - } 124 - 125 - kbps = timing->pix_clk_100hz / 10; 126 - kbps *= bits_per_channel; 127 - 128 - if (timing->flags.Y_ONLY != 1) { 129 - /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ 130 - kbps *= 3; 131 - if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) 132 - kbps /= 2; 133 - else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) 134 - kbps = kbps * 2 / 3; 135 - } 136 - 137 - if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) 138 - kbps = apply_128b_132b_stream_overhead(timing, kbps); 139 - 140 - if (link_encoding == DC_LINK_ENCODING_HDMI_FRL && 141 - timing->vic == 0 && timing->hdmi_vic == 0 && 142 - timing->frl_uncompressed_video_bandwidth_in_kbps != 0) 143 - kbps = timing->frl_uncompressed_video_bandwidth_in_kbps; 144 - 145 - return kbps; 146 - } 147 - 148 - 149 52 /* Forward Declerations */ 150 53 static bool decide_dsc_bandwidth_range( 151 54 const uint32_t min_bpp_x16, ··· 1147 1244 void dc_set_disable_128b_132b_stream_overhead(bool disable) 1148 1245 { 1149 1246 disable_128b_132b_stream_overhead = disable; 1247 + } 1248 + 1249 + bool dc_get_disable_128b_132b_stream_overhead(void) 1250 + { 1251 + return disable_128b_132b_stream_overhead; 1150 1252 } 1151 1253 1152 1254 void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options)
+10
drivers/gpu/drm/amd/display/dc/gpio/Makefile
··· 122 122 AMD_DAL_GPIO_DCN32 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn32/,$(GPIO_DCN32)) 123 123 124 124 AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN32) 125 + 126 + ############################################################################### 127 + # DCN 4.01 128 + ############################################################################### 129 + GPIO_DCN401 = hw_translate_dcn401.o hw_factory_dcn401.o 130 + 131 + AMD_DAL_GPIO_DCN401 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn401/,$(GPIO_DCN401)) 132 + 133 + AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN401) 134 +
+4
drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
··· 52 52 #include "dcn30/hw_factory_dcn30.h" 53 53 #include "dcn315/hw_factory_dcn315.h" 54 54 #include "dcn32/hw_factory_dcn32.h" 55 + #include "dcn401/hw_factory_dcn401.h" 55 56 56 57 bool dal_hw_factory_init( 57 58 struct hw_factory *factory, ··· 113 112 case DCN_VERSION_3_5: 114 113 case DCN_VERSION_3_51: 115 114 dal_hw_factory_dcn32_init(factory); 115 + return true; 116 + case DCN_VERSION_4_01: 117 + dal_hw_factory_dcn401_init(factory); 116 118 return true; 117 119 default: 118 120 ASSERT_CRITICAL(false);
+4
drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
··· 52 52 #include "dcn30/hw_translate_dcn30.h" 53 53 #include "dcn315/hw_translate_dcn315.h" 54 54 #include "dcn32/hw_translate_dcn32.h" 55 + #include "dcn401/hw_translate_dcn401.h" 55 56 56 57 /* 57 58 * This unit ··· 114 113 case DCN_VERSION_3_5: 115 114 case DCN_VERSION_3_51: 116 115 dal_hw_translate_dcn32_init(translate); 116 + return true; 117 + case DCN_VERSION_4_01: 118 + dal_hw_translate_dcn401_init(translate); 117 119 return true; 118 120 default: 119 121 BREAK_TO_DEBUGGER();
+5
drivers/gpu/drm/amd/display/dc/hwss/Makefile
··· 190 190 191 191 ############################################################################### 192 192 193 + HWSS_DCN401 = dcn401_hwseq.o dcn401_init.o 194 + 195 + AMD_DAL_HWSS_DCN401 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn401/,$(HWSS_DCN401)) 196 + 197 + AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN401) 193 198 endif
+4 -3
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
··· 233 233 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc " 234 234 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l " 235 235 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay" 236 - " x_rp_dlay x_rr_sfl\n"); 236 + " x_rp_dlay x_rr_sfl rc_td_grp\n"); 237 + 237 238 for (i = 0; i < pool->pipe_count; i++) { 238 239 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); 239 240 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr; ··· 242 241 if (!s->blank_en) 243 242 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" 244 243 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" 245 - " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", 244 + " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %xh\n", 246 245 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start, 247 246 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler, 248 247 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank, ··· 260 259 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1, 261 260 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit, 262 261 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay, 263 - dlg_regs->xfc_reg_remote_surface_flip_latency); 262 + dlg_regs->xfc_reg_remote_surface_flip_latency, dlg_regs->refcyc_per_tdlut_group); 264 263 } 265 264 266 265 DTN_INFO("========TTU========\n");
+17 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
··· 204 204 * gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1 205 205 * Using a magic value like -1 would require tracking all inits/resets 206 206 */ 207 - void dcn20_setup_gsl_group_as_lock( 207 + void dcn20_setup_gsl_group_as_lock( 208 208 const struct dc *dc, 209 209 struct pipe_ctx *pipe_ctx, 210 210 bool enable) ··· 1709 1709 plane_state->color_space, 1710 1710 NULL); 1711 1711 1712 + if (dpp->funcs->set_cursor_matrix) { 1713 + dpp->funcs->set_cursor_matrix(dpp, 1714 + plane_state->color_space, 1715 + plane_state->cursor_csc_color_matrix); 1716 + } 1712 1717 if (dpp->funcs->dpp_program_bias_and_scale) { 1713 1718 //TODO :for CNVC set scale and bias registers if necessary 1714 1719 build_prescale_params(&bns_params, plane_state); ··· 1913 1908 if (dc->res_pool->hubbub->funcs->program_det_size) 1914 1909 dc->res_pool->hubbub->funcs->program_det_size( 1915 1910 dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb); 1911 + 1912 + if (dc->res_pool->hubbub->funcs->program_det_segments) 1913 + dc->res_pool->hubbub->funcs->program_det_segments( 1914 + dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size); 1916 1915 } 1917 1916 1918 1917 if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) ··· 1926 1917 || pipe_ctx->plane_state->update_flags.bits.hdr_mult) 1927 1918 hws->funcs.set_hdr_multiplier(pipe_ctx); 1928 1919 1920 + if (hws->funcs.populate_mcm_luts) { 1921 + hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, 1922 + pipe_ctx->plane_state->lut_bank_a); 1923 + pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; 1924 + } 1929 1925 if (pipe_ctx->update_flags.bits.enable || 1930 1926 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || 1931 1927 pipe_ctx->plane_state->update_flags.bits.gamma_change || ··· 2087 2073 (context->res_ctx.pipe_ctx[i].plane_state && 
dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) { 2088 2074 if (hubbub->funcs->program_det_size) 2089 2075 hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); 2076 + if (dc->res_pool->hubbub->funcs->program_det_segments) 2077 + dc->res_pool->hubbub->funcs->program_det_segments(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); 2090 2078 } 2091 2079 hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); 2092 2080 DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
+2
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
··· 30 30 #include "dcn30/dcn30_hwseq.h" 31 31 #include "dcn31/dcn31_hwseq.h" 32 32 #include "dcn32/dcn32_hwseq.h" 33 + #include "dcn401/dcn401_hwseq.h" 33 34 #include "dcn32_init.h" 34 35 35 36 static const struct hw_sequencer_funcs dcn32_funcs = { ··· 163 162 .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy, 164 163 .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw, 165 164 .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe, 165 + .populate_mcm_luts = dcn401_populate_mcm_luts, 166 166 }; 167 167 168 168 void dcn32_hw_sequencer_init_functions(struct dc *dc)
+22
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
··· 141 141 uint8_t subvp_index; 142 142 }; 143 143 144 + struct fams2_global_control_lock_fast_params { 145 + struct dc *dc; 146 + bool is_required; 147 + bool lock; 148 + }; 149 + 144 150 union block_sequence_params { 145 151 struct update_plane_addr_params update_plane_addr_params; 146 152 struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params; ··· 165 159 struct set_output_csc_params set_output_csc_params; 166 160 struct set_ocsc_default_params set_ocsc_default_params; 167 161 struct subvp_save_surf_addr subvp_save_surf_addr; 162 + struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params; 168 163 }; 169 164 170 165 enum block_sequence_func { ··· 186 179 MPC_SET_OUTPUT_CSC, 187 180 MPC_SET_OCSC_DEFAULT, 188 181 DMUB_SUBVP_SAVE_SURF_ADDR, 182 + DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST, 183 + 189 184 }; 190 185 191 186 struct block_sequence { ··· 439 430 bool (*is_pipe_topology_transition_seamless)(struct dc *dc, 440 431 const struct dc_state *cur_ctx, 441 432 const struct dc_state *new_ctx); 433 + void (*fams2_global_control_lock)(struct dc *dc, 434 + struct dc_state *context, 435 + bool lock); 436 + void (*fams2_update_config)(struct dc *dc, 437 + struct dc_state *context, 438 + bool enable); 439 + void (*fams2_global_control_lock_fast)(union block_sequence_params *params); 442 440 void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max); 443 441 }; 444 442 ··· 476 460 struct tg_color *color); 477 461 478 462 void get_subvp_visual_confirm_color( 463 + struct pipe_ctx *pipe_ctx, 464 + struct tg_color *color); 465 + 466 + void get_fams2_visual_confirm_color( 467 + struct dc *dc, 468 + struct dc_state *context, 479 469 struct pipe_ctx *pipe_ctx, 480 470 struct tg_color *color); 481 471
+4
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
··· 176 176 void (*reset_back_end_for_pipe)(struct dc *dc, 177 177 struct pipe_ctx *pipe_ctx, 178 178 struct dc_state *context); 179 + void (*populate_mcm_luts)(struct dc *dc, 180 + struct pipe_ctx *pipe_ctx, 181 + struct dc_cm2_func_luts mcm_luts, 182 + bool lut_bank_a); 179 183 }; 180 184 181 185 struct dce_hwseq {
+20 -1
drivers/gpu/drm/amd/display/dc/inc/core_types.h
··· 39 39 #include "panel_cntl.h" 40 40 #include "dmub/inc/dmub_cmd.h" 41 41 #include "pg_cntl.h" 42 + #include "spl/dc_spl.h" 42 43 43 44 #define MAX_CLOCK_SOURCES 7 44 45 #define MAX_SVP_PHANTOM_STREAMS 2 ··· 60 59 /********** DAL Core*********************/ 61 60 #include "transform.h" 62 61 #include "dpp.h" 62 + 63 + #include "dml2/dml21/inc/dml_top_dchub_registers.h" 64 + #include "dml2/dml21/inc/dml_top_types.h" 63 65 64 66 struct resource_pool; 65 67 struct dc_state; ··· 163 159 struct dc *dc, 164 160 struct dc_state *new_ctx, 165 161 struct dc_stream_state *stream); 162 + 166 163 enum dc_status (*patch_unknown_plane_state)( 167 164 struct dc_plane_state *plane_state); 168 165 ··· 171 166 struct resource_context *res_ctx, 172 167 const struct resource_pool *pool, 173 168 struct dc_stream_state *stream); 169 + 174 170 void (*populate_dml_writeback_from_context)( 175 171 struct dc *dc, 176 172 struct resource_context *res_ctx, ··· 182 176 struct dc_state *context, 183 177 display_e2e_pipe_params_st *pipes, 184 178 int pipe_cnt); 179 + 185 180 void (*update_bw_bounding_box)( 186 181 struct dc *dc, 187 182 struct clk_bw_params *bw_params); ··· 299 292 struct abm *abm; 300 293 struct dmcu *dmcu; 301 294 struct dmub_psr *psr; 302 - 303 295 struct dmub_replay *replay; 304 296 305 297 struct abm *multiple_abms[MAX_PIPES]; ··· 347 341 struct plane_resource { 348 342 /* scl_data is scratch space required to program a plane */ 349 343 struct scaler_data scl_data; 344 + /* Below pointers to hw objects are required to enable the plane */ 345 + /* spl_in and spl_out are the input and output structures for SPL 346 + * which are required when using Scaler Programming Library 347 + * these are scratch spaces needed when programming a plane 348 + */ 349 + struct spl_in spl_in; 350 + struct spl_out spl_out; 350 351 /* Below pointers to hw objects are required to enable the plane */ 351 352 struct hubp *hubp; 352 353 struct mem_input *mi; ··· 453 440 int det_buffer_size_kb; 454 
441 bool unbounded_req; 455 442 unsigned int surface_size_in_mall_bytes; 443 + struct dml2_dchub_per_pipe_register_set hubp_regs; 444 + struct dml2_hubp_pipe_mcache_regs mcache_regs; 456 445 457 446 struct dwbc *dwbc; 458 447 struct mcif_wb *mcif_wb; ··· 524 509 unsigned int mall_subvp_size_bytes; 525 510 unsigned int legacy_svp_drr_stream_index; 526 511 bool legacy_svp_drr_stream_index_valid; 512 + struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES]; 513 + struct dmub_fams2_stream_static_state fams2_stream_params[DML2_MAX_PLANES]; 514 + unsigned fams2_stream_count; 515 + struct dml2_display_arb_regs arb_regs; 527 516 }; 528 517 529 518 union bw_output {
+4 -2
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
··· 39 39 #define WM_C 2 40 40 #define WM_D 3 41 41 #define WM_SET_COUNT 4 42 + #define WM_1A 2 43 + #define WM_1B 3 42 44 43 45 #define DCN_MINIMUM_DISPCLK_Khz 100000 44 46 #define DCN_MINIMUM_DPPCLK_Khz 100000 ··· 244 242 245 243 struct dummy_pstate_entry { 246 244 unsigned int dram_speed_mts; 247 - double dummy_pstate_latency_us; 245 + unsigned int dummy_pstate_latency_us; 248 246 }; 249 247 250 248 struct clk_bw_params { 251 249 unsigned int vram_type; 252 250 unsigned int num_channels; 253 251 unsigned int dram_channel_width_bytes; 254 - unsigned int dispclk_vco_khz; 252 + unsigned int dispclk_vco_khz; 255 253 unsigned int dc_mode_softmax_memclk; 256 254 unsigned int max_memclk_mhz; 257 255 struct clk_limit_table clk_table;
+16 -28
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
··· 97 97 #define CLK_COMMON_REG_LIST_DCN_BASE() \ 98 98 SR(DENTIST_DISPCLK_CNTL) 99 99 100 - #define VBIOS_SMU_MSG_BOX_REG_LIST_RV() \ 101 - .MP1_SMN_C2PMSG_91 = mmMP1_SMN_C2PMSG_91, \ 102 - .MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \ 103 - .MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67 104 - 105 100 #define CLK_COMMON_REG_LIST_DCN_201() \ 106 101 SR(DENTIST_DISPCLK_CNTL), \ 107 102 CLK_SRI(CLK4_CLK_PLL_REQ, CLK4, 0), \ ··· 108 113 CLK_SRI(CLK3_CLK2_DFS_CNTL, CLK3, 0) 109 114 110 115 #define CLK_REG_LIST_DCN3() \ 111 - CLK_COMMON_REG_LIST_DCN_BASE(), \ 116 + SR(DENTIST_DISPCLK_CNTL), \ 112 117 CLK_SRI(CLK0_CLK_PLL_REQ, CLK02, 0), \ 113 118 CLK_SRI(CLK0_CLK2_DFS_CNTL, CLK02, 0) 114 119 ··· 202 207 type DENTIST_DISPCLK_WDIVIDER; \ 203 208 type DENTIST_DISPCLK_CHG_DONE; 204 209 205 - /* 206 - *************************************************************************************** 207 - ****************** Clock Manager Private Structures *********************************** 208 - *************************************************************************************** 209 - */ 210 210 #define CLK20_REG_FIELD_LIST(type) \ 211 211 type DENTIST_DPPCLK_WDIVIDER; \ 212 212 type DENTIST_DPPCLK_CHG_DONE; \ 213 213 type FbMult_int; \ 214 214 type FbMult_frac; 215 215 216 - #define VBIOS_SMU_REG_FIELD_LIST(type) \ 217 - type CONTENT; 218 - 219 - struct clk_mgr_shift { 220 - CLK_REG_FIELD_LIST(uint8_t) 221 - CLK20_REG_FIELD_LIST(uint8_t) 222 - VBIOS_SMU_REG_FIELD_LIST(uint32_t) 223 - }; 224 - 225 - struct clk_mgr_mask { 226 - CLK_REG_FIELD_LIST(uint32_t) 227 - CLK20_REG_FIELD_LIST(uint32_t) 228 - VBIOS_SMU_REG_FIELD_LIST(uint32_t) 229 - }; 216 + /* 217 + *************************************************************************************** 218 + ****************** Clock Manager Private Structures *********************************** 219 + *************************************************************************************** 220 + */ 230 221 231 222 struct clk_mgr_registers { 232 223 
uint32_t DPREFCLK_CNTL; 233 224 uint32_t DENTIST_DISPCLK_CNTL; 225 + 234 226 uint32_t CLK4_CLK2_CURRENT_CNT; 235 227 uint32_t CLK4_CLK_PLL_REQ; 236 228 ··· 246 264 uint32_t CLK0_CLK1_DFS_CNTL; 247 265 uint32_t CLK0_CLK3_DFS_CNTL; 248 266 uint32_t CLK0_CLK4_DFS_CNTL; 267 + }; 249 268 250 - uint32_t MP1_SMN_C2PMSG_67; 251 - uint32_t MP1_SMN_C2PMSG_83; 252 - uint32_t MP1_SMN_C2PMSG_91; 269 + struct clk_mgr_shift { 270 + CLK_REG_FIELD_LIST(uint8_t) 271 + CLK20_REG_FIELD_LIST(uint8_t) 272 + }; 273 + 274 + struct clk_mgr_mask { 275 + CLK_REG_FIELD_LIST(uint32_t) 276 + CLK20_REG_FIELD_LIST(uint32_t) 253 277 }; 254 278 255 279 enum clock_type {
+15
drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
··· 33 33 * blocks for the Data Fabric Interface that are not clock/power gated. 34 34 */ 35 35 36 + #include "dc/dc_hw_types.h" 37 + 36 38 enum dcc_control { 37 39 dcc_control__256_256_xxx, 38 40 dcc_control__128_128_xxx, ··· 149 147 enum segment_order *segment_order_horz, 150 148 enum segment_order *segment_order_vert); 151 149 150 + bool (*dcc_support_swizzle_addr3)( 151 + enum swizzle_mode_addr3_values swizzle, 152 + unsigned int plane_pitch, 153 + unsigned int bytes_per_element, 154 + enum segment_order *segment_order_horz, 155 + enum segment_order *segment_order_vert); 156 + 157 + bool (*dcc_support_pixel_format_plane0_plane1)( 158 + enum surface_pixel_format format, 159 + unsigned int *plane0_bpe, 160 + unsigned int *plane1_bpe); 152 161 bool (*dcc_support_pixel_format)( 153 162 enum surface_pixel_format format, 154 163 unsigned int *bytes_per_element); ··· 214 201 void (*set_request_limit)(struct hubbub *hubbub, int memory_channel_count, int words_per_channel); 215 202 void (*dchubbub_init)(struct hubbub *hubbub); 216 203 void (*get_mall_en)(struct hubbub *hubbub, unsigned int *mall_in_use); 204 + void (*program_det_segments)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg); 205 + void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase); 217 206 }; 218 207 219 208 struct hubbub {
+4
drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
··· 329 329 330 330 void (*dpp_get_gamut_remap)(struct dpp *dpp_base, 331 331 struct dpp_grph_csc_adjustment *adjust); 332 + void (*set_cursor_matrix)( 333 + struct dpp *dpp_base, 334 + enum dc_color_space color_space, 335 + struct dc_csc_transform cursor_csc_color_matrix); 332 336 }; 333 337 334 338
+54 -2
drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
··· 41 41 #include "mem_input.h" 42 42 #include "cursor_reg_cache.h" 43 43 44 + #include "dml2/dml21/inc/dml_top_dchub_registers.h" 45 + 44 46 #define OPP_ID_INVALID 0xf 45 47 #define MAX_TTU 0xffffff 46 48 ··· 65 63 hubp_ind_block_64b, 66 64 hubp_ind_block_128b, 67 65 hubp_ind_block_64b_no_128bcl, 66 + }; 67 + 68 + enum hubp_3dlut_fl_mode { 69 + hubp_3dlut_fl_mode_disable = 0, 70 + hubp_3dlut_fl_mode_native_1 = 1, 71 + hubp_3dlut_fl_mode_native_2 = 2, 72 + hubp_3dlut_fl_mode_transform = 3 73 + }; 74 + 75 + enum hubp_3dlut_fl_format { 76 + hubp_3dlut_fl_format_unorm_12msb_bitslice = 0, 77 + hubp_3dlut_fl_format_unorm_12lsb_bitslice = 1, 78 + hubp_3dlut_fl_format_float_fp1_5_10 = 2 79 + }; 80 + 81 + enum hubp_3dlut_fl_addressing_mode { 82 + hubp_3dlut_fl_addressing_mode_sw_linear = 0, 83 + hubp_3dlut_fl_addressing_mode_simple_linear = 1 84 + }; 85 + 86 + enum hubp_3dlut_fl_width { 87 + hubp_3dlut_fl_width_17 = 17, 88 + hubp_3dlut_fl_width_33 = 33, 89 + hubp_3dlut_fl_width_transformed = 4916 90 + }; 91 + 92 + enum hubp_3dlut_fl_crossbar_bit_slice { 93 + hubp_3dlut_fl_crossbar_bit_slice_0_15 = 0, 94 + hubp_3dlut_fl_crossbar_bit_slice_16_31 = 1, 95 + hubp_3dlut_fl_crossbar_bit_slice_32_47 = 2, 96 + hubp_3dlut_fl_crossbar_bit_slice_48_63 = 3 68 97 }; 69 98 70 99 struct hubp { ··· 244 211 bool (*hubp_in_blank)(struct hubp *hubp); 245 212 void (*hubp_soft_reset)(struct hubp *hubp, bool reset); 246 213 214 + void (*hubp_set_flip_int)(struct hubp *hubp); 215 + 247 216 void (*hubp_update_force_pstate_disallow)(struct hubp *hubp, bool allow); 248 217 void (*hubp_update_force_cursor_pstate_disallow)(struct hubp *hubp, bool allow); 249 218 void (*hubp_update_mall_sel)(struct hubp *hubp, uint32_t mall_sel, bool c_cursor); 250 219 void (*hubp_prepare_subvp_buffering)(struct hubp *hubp, bool enable); 251 - 252 - void (*hubp_set_flip_int)(struct hubp *hubp); 220 + void (*hubp_surface_update_lock)(struct hubp *hubp, 221 + bool lock); 253 222 254 223 void 
(*program_extended_blank)(struct hubp *hubp, 255 224 unsigned int min_dst_y_next_start_optimized); 256 225 257 226 void (*hubp_wait_pipe_read_start)(struct hubp *hubp); 227 + void (*hubp_update_3dlut_fl_bias_scale)(struct hubp *hubp, uint16_t bias, uint16_t scale); 228 + void (*hubp_program_3dlut_fl_mode)(struct hubp *hubp, 229 + enum hubp_3dlut_fl_mode mode); 230 + void (*hubp_program_3dlut_fl_format)(struct hubp *hubp, 231 + enum hubp_3dlut_fl_format format); 232 + void (*hubp_program_3dlut_fl_addr)(struct hubp *hubp, 233 + const struct dc_plane_address address); 234 + void (*hubp_program_3dlut_fl_dlg_param)(struct hubp *hubp, int refcyc_per_3dlut_group); 235 + void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable); 236 + void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode); 237 + void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width); 238 + void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled); 239 + void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp, 240 + enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g, 241 + enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b, 242 + enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r); 243 + int (*hubp_get_3dlut_fl_done)(struct hubp *hubp); 258 244 }; 259 245 260 246 #endif
+8 -1
drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
··· 29 29 #include "include/grph_object_id.h" 30 30 31 31 #include "dml/display_mode_structs.h" 32 + #include "dml2/dml21/inc/dml_top_dchub_registers.h" 32 33 33 34 struct dchub_init_data; 34 35 struct cstate_pstate_watermarks_st { ··· 46 45 uint32_t urgent_ns; 47 46 uint32_t frac_urg_bw_nom; 48 47 uint32_t frac_urg_bw_flip; 49 - int32_t urgent_latency_ns; 48 + uint32_t urgent_latency_ns; 50 49 struct cstate_pstate_watermarks_st cstate_pstate; 51 50 uint32_t usr_retraining_ns; 52 51 }; ··· 58 57 struct dcn_watermarks c; 59 58 struct dcn_watermarks d; 60 59 }; // legacy 60 + struct { 61 + struct dml2_dchub_watermark_regs a; 62 + struct dml2_dchub_watermark_regs b; 63 + struct dml2_dchub_watermark_regs c; 64 + struct dml2_dchub_watermark_regs d; 65 + } dcn4; //dcn4+ 61 66 }; 62 67 63 68 struct dce_watermarks {
+34 -2
drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
··· 96 96 MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA 97 97 }; 98 98 99 + enum mpcc_movable_cm_location { 100 + MPCC_MOVABLE_CM_LOCATION_BEFORE, 101 + MPCC_MOVABLE_CM_LOCATION_AFTER, 102 + }; 103 + 104 + enum MCM_LUT_XABLE { 105 + MCM_LUT_DISABLE, 106 + MCM_LUT_DISABLED = MCM_LUT_DISABLE, 107 + MCM_LUT_ENABLE, 108 + MCM_LUT_ENABLED = MCM_LUT_ENABLE, 109 + }; 110 + 111 + enum MCM_LUT_ID { 112 + MCM_LUT_3DLUT, 113 + MCM_LUT_1DLUT, 114 + MCM_LUT_SHAPER 115 + }; 116 + 117 + union mcm_lut_params { 118 + const struct pwl_params *pwl; 119 + const struct tetrahedral_params *lut3d; 120 + }; 121 + 99 122 /** 100 123 * struct mpcc_blnd_cfg - MPCC blending configuration 101 124 */ ··· 186 163 struct mpc_grph_gamut_adjustment { 187 164 struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE]; 188 165 enum graphics_gamut_adjust_type gamut_adjust_type; 166 + enum mpcc_gamut_remap_id mpcc_gamut_remap_block_id; 189 167 }; 190 168 191 169 struct mpcc_sm_cfg { ··· 561 537 int (*release_rmu)(struct mpc *mpc, int mpcc_id); 562 538 563 539 unsigned int (*get_mpc_out_mux)( 564 - struct mpc *mpc, 565 - int opp_id); 540 + struct mpc *mpc, 541 + int opp_id); 566 542 567 543 void (*set_bg_color)(struct mpc *mpc, 568 544 struct tg_color *bg_color, 569 545 int mpcc_id); 570 546 void (*set_mpc_mem_lp_mode)(struct mpc *mpc); 547 + void (*set_movable_cm_location)(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id); 548 + void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx); 549 + void (*get_3dlut_fast_load_status)(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow); 550 + void (*populate_lut)(struct mpc *mpc, const enum MCM_LUT_ID id, const union mcm_lut_params params, 551 + bool lut_bank_a, int mpcc_id); 552 + void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id, bool lut_bank_a, int mpcc_id); 553 + void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum 
MCM_LUT_XABLE xable, 554 + bool lut_bank_a, int mpcc_id); 571 555 }; 572 556 573 557 #endif
+12
drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
··· 29 29 #include "hw_shared.h" 30 30 #include "dc_hw_types.h" 31 31 #include "fixed31_32.h" 32 + #include "spl/dc_spl_types.h" 32 33 33 34 #include "spl/dc_spl_types.h" 34 35 ··· 164 163 struct sharpness_adj sharpness; 165 164 enum pixel_format format; 166 165 struct line_buffer_params lb_params; 166 + // Below struct holds the scaler values to program hw registers 167 + struct dscl_prog_data dscl_prog_data; 167 168 }; 168 169 169 170 struct transform_funcs { ··· 248 245 struct transform *xfm_base, 249 246 const struct dc_cursor_attributes *attr); 250 247 248 + bool (*transform_program_blnd_lut)( 249 + struct transform *xfm, 250 + const struct pwl_params *params); 251 + bool (*transform_program_shaper_lut)( 252 + struct transform *xfm, 253 + const struct pwl_params *params); 254 + bool (*transform_program_3dlut)( 255 + struct transform *xfm, 256 + struct tetrahedral_params *params); 251 257 }; 252 258 253 259 const uint16_t *get_filter_2tap_16p(void);
+6 -2
drivers/gpu/drm/amd/display/dc/inc/resource.h
··· 29 29 #include "core_status.h" 30 30 #include "dal_asic_id.h" 31 31 #include "dm_pp_smu.h" 32 + #include "spl/dc_spl.h" 32 33 33 34 #define MEMORY_TYPE_MULTIPLIER_CZ 4 34 35 #define MEMORY_TYPE_HBM 2 ··· 78 77 79 78 struct hpo_dp_stream_encoder *(*create_hpo_dp_stream_encoder)( 80 79 enum engine_id eng_id, struct dc_context *ctx); 81 - 82 80 struct hpo_dp_link_encoder *(*create_hpo_dp_link_encoder)( 83 81 uint8_t inst, 84 82 struct dc_context *ctx); 85 - 86 83 struct dce_hwseq *(*create_hwseq)( 87 84 struct dc_context *ctx); 88 85 }; ··· 619 620 620 621 bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream); 621 622 623 + /* Get hw programming parameters container from pipe context 624 + * @pipe_ctx: pipe context 625 + * @dscl_prog_data: struct to hold programmable hw reg values 626 + */ 627 + struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx); 622 628 /* Setup dc callbacks for dml2 623 629 * @dc: the display core structure 624 630 * @dml2_options: struct to hold callbacks
+9
drivers/gpu/drm/amd/display/dc/irq/Makefile
··· 180 180 AMD_DAL_IRQ_DCN351= $(addprefix $(AMDDALPATH)/dc/irq/dcn351/,$(IRQ_DCN351)) 181 181 182 182 AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN351) 183 + 184 + ############################################################################### 185 + # DCN 401 186 + ############################################################################### 187 + IRQ_DCN401 = irq_service_dcn401.o 188 + 189 + AMD_DAL_IRQ_DCN401= $(addprefix $(AMDDALPATH)/dc/irq/dcn401/,$(IRQ_DCN401)) 190 + 191 + AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN401)
+6
drivers/gpu/drm/amd/display/dc/optc/Makefile
··· 105 105 ############################################################################### 106 106 107 107 ############################################################################### 108 + OPTC_DCN401 = dcn401_optc.o 109 + 110 + AMD_DAL_OPTC_DCN401 = $(addprefix $(AMDDALPATH)/dc/optc/dcn401/,$(OPTC_DCN401)) 111 + 112 + AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN401) 108 113 endif 114 +
-3
drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
··· 330 330 SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\ 331 331 SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh) 332 332 333 - 334 - 335 333 #define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\ 336 334 TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\ 337 335 SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC0, mask_sh),\ ··· 566 568 567 569 #define TG_REG_FIELD_LIST_DCN3_2(type) \ 568 570 type OTG_H_TIMING_DIV_MODE_MANUAL; 569 - 570 571 571 572 #define TG_REG_FIELD_LIST_DCN3_5(type) \ 572 573 type OTG_CRC0_WINDOWA_X_START_READBACK;\
+8
drivers/gpu/drm/amd/display/dc/resource/Makefile
··· 198 198 199 199 ############################################################################### 200 200 201 + ############################################################################### 202 + 203 + RESOURCE_DCN401 = dcn401_resource.o 204 + 205 + AMD_DAL_RESOURCE_DCN401 = $(addprefix $(AMDDALPATH)/dc/resource/dcn401/,$(RESOURCE_DCN401)) 206 + 207 + AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN401) 208 + 201 209 endif
+1
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 719 719 .force_disable_subvp = false, 720 720 .exit_idle_opt_for_cursor_updates = true, 721 721 .using_dml2 = false, 722 + .using_dml21 = false, // TODO : Temporary for N-1 validation. Remove after N-1 is done. 722 723 .enable_single_display_2to1_odm_policy = true, 723 724 724 725 /* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/
+11
drivers/gpu/drm/amd/display/include/dal_asic_id.h
··· 258 258 #define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN) 259 259 #define ASICREV_IS_GC_11_0_4(eChipRev) (eChipRev >= GC_11_0_4_A0 && eChipRev < GC_11_UNKNOWN) 260 260 261 + #define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */ 262 + 263 + enum { 264 + GC_12_0_0_A0 = 0x50, 265 + GC_12_0_1_A0 = 0x40, 266 + GC_12_UNKNOWN = 0xFF, 267 + }; 268 + 269 + #define ASICREV_IS_DCN4(eChipRev) (eChipRev >= GC_12_0_1_A0 && eChipRev < GC_12_0_0_A0) 270 + #define ASICREV_IS_DCN401(eChipRev) (eChipRev >= GC_12_0_0_A0 && eChipRev < GC_12_UNKNOWN) 271 + 261 272 /* 262 273 * ASIC chip ID 263 274 */
+1
drivers/gpu/drm/amd/display/include/dal_types.h
··· 63 63 DCN_VERSION_3_21, 64 64 DCN_VERSION_3_5, 65 65 DCN_VERSION_3_51, 66 + DCN_VERSION_4_01, 66 67 DCN_VERSION_MAX 67 68 }; 68 69