Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/display: remove redundant CONFIG_DRM_AMD_DC_DCN in dc

[Why & How]
CONFIG_DRM_AMD_DC_DCN is used to bypass compilation failures, but DC
code should be OS-agnostic.

This patch fixes it by removing unnecessary CONFIG_DRM_AMD_DC_DCN
in dc and dc/core directories.

Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Acked-by: Stylon Wang <stylon.wang@amd.com>
Signed-off-by: Alex Hung <alex.hung@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Alex Hung and committed by
Alex Deucher
59b8ca24 5d3e1442

+10 -121
-2
drivers/gpu/drm/amd/display/dc/Makefile
··· 63 63 dc_surface.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \ 64 64 dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o 65 65 66 - ifdef CONFIG_DRM_AMD_DC_DCN 67 66 DISPLAY_CORE += dc_vm_helper.o 68 - endif 69 67 70 68 AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE)) 71 69
+1 -2
drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
··· 65 65 case DCE_VERSION_12_1: 66 66 *h = dal_cmd_tbl_helper_dce112_get_table2(); 67 67 return true; 68 - #if defined(CONFIG_DRM_AMD_DC_DCN) 69 68 case DCN_VERSION_1_0: 70 69 case DCN_VERSION_1_01: 71 70 case DCN_VERSION_2_0: ··· 79 80 case DCN_VERSION_3_16: 80 81 *h = dal_cmd_tbl_helper_dce112_get_table2(); 81 82 return true; 82 - #endif 83 + 83 84 default: 84 85 /* Unsupported DCE */ 85 86 BREAK_TO_DEBUGGER();
+3 -28
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 829 829 kfree(dc->bw_dceip); 830 830 dc->bw_dceip = NULL; 831 831 832 - #ifdef CONFIG_DRM_AMD_DC_DCN 833 832 kfree(dc->dcn_soc); 834 833 dc->dcn_soc = NULL; 835 834 836 835 kfree(dc->dcn_ip); 837 836 dc->dcn_ip = NULL; 838 837 839 - #endif 840 838 kfree(dc->vm_helper); 841 839 dc->vm_helper = NULL; 842 840 ··· 880 882 struct dc_context *dc_ctx; 881 883 struct bw_calcs_dceip *dc_dceip; 882 884 struct bw_calcs_vbios *dc_vbios; 883 - #ifdef CONFIG_DRM_AMD_DC_DCN 884 885 struct dcn_soc_bounding_box *dcn_soc; 885 886 struct dcn_ip_params *dcn_ip; 886 - #endif 887 887 888 888 dc->config = init_params->flags; 889 889 ··· 909 913 } 910 914 911 915 dc->bw_vbios = dc_vbios; 912 - #ifdef CONFIG_DRM_AMD_DC_DCN 913 916 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); 914 917 if (!dcn_soc) { 915 918 dm_error("%s: failed to create dcn_soc\n", __func__); ··· 924 929 } 925 930 926 931 dc->dcn_ip = dcn_ip; 927 - #endif 928 932 929 933 if (!dc_construct_ctx(dc, init_params)) { 930 934 dm_error("%s: failed to create ctx\n", __func__); ··· 1862 1868 return (result == DC_OK); 1863 1869 } 1864 1870 1865 - #if defined(CONFIG_DRM_AMD_DC_DCN) 1866 1871 bool dc_acquire_release_mpc_3dlut( 1867 1872 struct dc *dc, bool acquire, 1868 1873 struct dc_stream_state *stream, ··· 1897 1904 } 1898 1905 return ret; 1899 1906 } 1900 - #endif 1907 + 1901 1908 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 1902 1909 { 1903 1910 int i; ··· 1918 1925 return false; 1919 1926 } 1920 1927 1921 - #ifdef CONFIG_DRM_AMD_DC_DCN 1922 1928 /* Perform updates here which need to be deferred until next vupdate 1923 1929 * 1924 1930 * i.e. 
blnd lut, 3dlut, and shaper lut bypass regs are double buffered ··· 1936 1944 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 1937 1945 } 1938 1946 } 1939 - #endif /* CONFIG_DRM_AMD_DC_DCN */ 1940 1947 1941 1948 void dc_post_update_surfaces_to_stream(struct dc *dc) 1942 1949 { ··· 1962 1971 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); 1963 1972 } 1964 1973 1965 - #ifdef CONFIG_DRM_AMD_DC_DCN 1966 1974 process_deferred_updates(dc); 1967 - #endif 1968 1975 1969 1976 dc->hwss.optimize_bandwidth(dc, context); 1970 1977 ··· 1976 1987 * initialize and obtain IP and SOC the base DML instance from DC is 1977 1988 * initially copied into every context 1978 1989 */ 1979 - #ifdef CONFIG_DRM_AMD_DC_DCN 1980 1990 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); 1981 - #endif 1982 1991 } 1983 1992 1984 1993 struct dc_state *dc_create_state(struct dc *dc) ··· 2348 2361 int i; 2349 2362 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2350 2363 2351 - #if defined(CONFIG_DRM_AMD_DC_DCN) 2352 2364 if (dc->idle_optimizations_allowed) 2353 2365 overall_type = UPDATE_TYPE_FULL; 2354 2366 2355 - #endif 2356 2367 if (stream_status == NULL || stream_status->plane_count != surface_count) 2357 2368 overall_type = UPDATE_TYPE_FULL; 2358 2369 ··· 2859 2874 } 2860 2875 2861 2876 if (update_type == UPDATE_TYPE_FULL) { 2862 - #if defined(CONFIG_DRM_AMD_DC_DCN) 2863 2877 dc_allow_idle_optimizations(dc, false); 2864 2878 2865 - #endif 2866 2879 if (get_seamless_boot_stream_count(context) == 0) 2867 2880 dc->hwss.prepare_bandwidth(dc, context); 2868 2881 ··· 2878 2895 } 2879 2896 } 2880 2897 2881 - #ifdef CONFIG_DRM_AMD_DC_DCN 2882 2898 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 2883 2899 struct pipe_ctx *mpcc_pipe; 2884 2900 struct pipe_ctx *odm_pipe; ··· 2886 2904 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 2887 2905 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 2888 2906 } 
2889 - #endif 2890 2907 2891 2908 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 2892 2909 if (top_pipe_to_program && ··· 2995 3014 } 2996 3015 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 2997 3016 dc->hwss.program_front_end_for_ctx(dc, context); 2998 - #ifdef CONFIG_DRM_AMD_DC_DCN 2999 3017 if (dc->debug.validate_dml_output) { 3000 3018 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3001 3019 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; ··· 3008 3028 &context->res_ctx.pipe_ctx[i].ttu_regs); 3009 3029 } 3010 3030 } 3011 - #endif 3012 3031 } 3013 3032 3014 3033 // Update Type FAST, Surface updates ··· 3562 3583 return true; 3563 3584 } 3564 3585 3565 - #if defined(CONFIG_DRM_AMD_DC_DCN) 3566 - 3567 3586 void dc_allow_idle_optimizations(struct dc *dc, bool allow) 3568 3587 { 3569 3588 if (dc->debug.disable_idle_power_optimizations) ··· 3717 3740 if (dc->hwss.hardware_release) 3718 3741 dc->hwss.hardware_release(dc); 3719 3742 } 3720 - #endif 3721 3743 3722 3744 /* 3723 3745 ***************************************************************************** ··· 3736 3760 */ 3737 3761 bool dc_is_dmub_outbox_supported(struct dc *dc) 3738 3762 { 3739 - #if defined(CONFIG_DRM_AMD_DC_DCN) 3740 - /* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */ 3763 + /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ 3741 3764 if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && 3742 3765 dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && 3743 3766 !dc->debug.dpia_debug.bits.disable_dpia) 3744 3767 return true; 3745 - #endif 3768 + 3746 3769 /* dmub aux needs dmub notifications to be enabled */ 3747 3770 return dc->debug.enable_dmub_aux_for_legacy_ddc; 3748 3771 }
-2
drivers/gpu/drm/amd/display/dc/core/dc_debug.c
··· 345 345 struct dc *dc, 346 346 struct dc_state *context) 347 347 { 348 - #if defined(CONFIG_DRM_AMD_DC_DCN) 349 348 DC_LOGGER_INIT(dc->ctx->logger); 350 349 CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" 351 350 "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", ··· 362 363 context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz, 363 364 context->bw_ctx.bw.dcn.clk.fclk_khz, 364 365 context->bw_ctx.bw.dcn.clk.socclk_khz); 365 - #endif 366 366 } 367 367 368 368 /**
-2
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 804 804 805 805 static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) 806 806 { 807 - #if defined(CONFIG_DRM_AMD_DC_DCN) 808 807 /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock 809 808 * reports DSC support. 810 809 */ ··· 814 815 link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && 815 816 !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) 816 817 link->wa_flags.dpia_mst_dsc_always_on = true; 817 - #endif 818 818 } 819 819 820 820 static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link)
+3 -18
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 56 56 #include "dce110/dce110_resource.h" 57 57 #include "dce112/dce112_resource.h" 58 58 #include "dce120/dce120_resource.h" 59 - #if defined(CONFIG_DRM_AMD_DC_DCN) 60 59 #include "dcn10/dcn10_resource.h" 61 60 #include "dcn20/dcn20_resource.h" 62 61 #include "dcn21/dcn21_resource.h" ··· 67 68 #include "dcn31/dcn31_resource.h" 68 69 #include "dcn315/dcn315_resource.h" 69 70 #include "dcn316/dcn316_resource.h" 70 - #endif 71 71 72 72 #define DC_LOGGER_INIT(logger) 73 73 ··· 122 124 else 123 125 dc_version = DCE_VERSION_12_0; 124 126 break; 125 - #if defined(CONFIG_DRM_AMD_DC_DCN) 126 127 case FAMILY_RV: 127 128 dc_version = DCN_VERSION_1_0; 128 129 if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) ··· 162 165 if (ASICREV_IS_GC_10_3_7(asic_id.hw_internal_rev)) 163 166 dc_version = DCN_VERSION_3_16; 164 167 break; 165 - #endif 166 168 167 169 default: 168 170 dc_version = DCE_VERSION_UNKNOWN; ··· 393 397 } 394 398 } 395 399 396 - #if defined(CONFIG_DRM_AMD_DC_DCN) 397 400 for (i = 0; i < caps->num_mpc_3dlut; i++) { 398 401 pool->mpc_lut[i] = dc_create_3dlut_func(); 399 402 if (pool->mpc_lut[i] == NULL) ··· 401 406 if (pool->mpc_shaper[i] == NULL) 402 407 DC_ERR("DC: failed to create MPC shaper!\n"); 403 408 } 404 - #endif 409 + 405 410 dc->caps.dynamic_audio = false; 406 411 if (pool->audio_count < pool->stream_enc_count) { 407 412 dc->caps.dynamic_audio = true; ··· 1364 1369 return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream); 1365 1370 } 1366 1371 1367 - #if defined(CONFIG_DRM_AMD_DC_DCN) 1368 1372 static int acquire_first_split_pipe( 1369 1373 struct resource_context *res_ctx, 1370 1374 const struct resource_pool *pool, ··· 1398 1404 } 1399 1405 return -1; 1400 1406 } 1401 - #endif 1402 1407 1403 1408 bool dc_add_plane_to_context( 1404 1409 const struct dc *dc, ··· 1440 1447 while (head_pipe) { 1441 1448 free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe); 1442 1449 1443 - #if defined(CONFIG_DRM_AMD_DC_DCN) 1444 1450 if 
(!free_pipe) { 1445 1451 int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); 1446 1452 if (pipe_idx >= 0) 1447 1453 free_pipe = &context->res_ctx.pipe_ctx[pipe_idx]; 1448 1454 } 1449 - #endif 1455 + 1450 1456 if (!free_pipe) { 1451 1457 dc_plane_state_release(plane_state); 1452 1458 return false; ··· 2251 2259 /* acquire new resources */ 2252 2260 pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); 2253 2261 2254 - #ifdef CONFIG_DRM_AMD_DC_DCN 2255 2262 if (pipe_idx < 0) 2256 2263 pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); 2257 - #endif 2258 2264 2259 2265 if (pipe_idx < 0 || context->res_ctx.pipe_ctx[pipe_idx].stream_res.tg == NULL) 2260 2266 return DC_NO_CONTROLLER_RESOURCE; ··· 2443 2453 if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) 2444 2454 result = DC_FAIL_BANDWIDTH_VALIDATE; 2445 2455 2446 - #if defined(CONFIG_DRM_AMD_DC_DCN) 2447 2456 /* 2448 2457 * Only update link encoder to stream assignment after bandwidth validation passed. 2449 2458 * TODO: Split out assignment and validation. 
··· 2450 2461 if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false) 2451 2462 dc->res_pool->funcs->link_encs_assign( 2452 2463 dc, new_ctx, new_ctx->streams, new_ctx->stream_count); 2453 - #endif 2454 2464 2455 2465 return result; 2456 2466 } ··· 3177 3189 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: 3178 3190 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: 3179 3191 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: 3180 - #if defined(CONFIG_DRM_AMD_DC_DCN) 3181 3192 case SURFACE_PIXEL_FORMAT_GRPH_RGBE: 3182 3193 case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: 3183 - #endif 3184 3194 return 32; 3185 3195 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: 3186 3196 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: ··· 3331 3345 /* TODO - get transmitter to phy idx mapping from DMUB */ 3332 3346 uint8_t phy_idx = transmitter - TRANSMITTER_UNIPHY_A; 3333 3347 3334 - #if defined(CONFIG_DRM_AMD_DC_DCN) 3335 3348 if (dc->ctx->dce_version == DCN_VERSION_3_1 && 3336 3349 dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { 3337 3350 switch (transmitter) { ··· 3354 3369 break; 3355 3370 } 3356 3371 } 3357 - #endif 3372 + 3358 3373 return phy_idx; 3359 3374 } 3360 3375
-8
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 314 314 const struct dc_cursor_attributes *attributes) 315 315 { 316 316 struct dc *dc; 317 - #if defined(CONFIG_DRM_AMD_DC_DCN) 318 317 bool reset_idle_optimizations = false; 319 - #endif 320 318 321 319 if (NULL == stream) { 322 320 dm_error("DC: dc_stream is NULL!\n"); ··· 344 346 #endif 345 347 program_cursor_attributes(dc, stream, attributes); 346 348 347 - #if defined(CONFIG_DRM_AMD_DC_DCN) 348 349 /* re-enable idle optimizations if necessary */ 349 350 if (reset_idle_optimizations) 350 351 dc_allow_idle_optimizations(dc, true); 351 352 352 - #endif 353 353 return true; 354 354 } 355 355 ··· 392 396 const struct dc_cursor_position *position) 393 397 { 394 398 struct dc *dc; 395 - #if defined(CONFIG_DRM_AMD_DC_DCN) 396 399 bool reset_idle_optimizations = false; 397 - #endif 398 400 399 401 if (NULL == stream) { 400 402 dm_error("DC: dc_stream is NULL!\n"); ··· 418 424 stream->cursor_position = *position; 419 425 420 426 program_cursor_position(dc, stream, position); 421 - #if defined(CONFIG_DRM_AMD_DC_DCN) 422 427 /* re-enable idle optimizations if necessary */ 423 428 if (reset_idle_optimizations) 424 429 dc_allow_idle_optimizations(dc, true); 425 430 426 - #endif 427 431 return true; 428 432 } 429 433
-37
drivers/gpu/drm/amd/display/dc/dc.h
··· 222 222 unsigned int max_compressed_blk_size; 223 223 unsigned int max_uncompressed_blk_size; 224 224 bool independent_64b_blks; 225 - #if defined(CONFIG_DRM_AMD_DC_DCN) 226 225 //These bitfields to be used starting with DCN 227 226 struct { 228 227 uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case) ··· 229 230 uint32_t dcc_256_128_128 : 1; //available starting with DCN 230 231 uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case) 231 232 } dcc_controls; 232 - #endif 233 233 }; 234 234 235 235 struct dc_surface_dcc_cap { ··· 330 332 bool enable_4to1MPC; 331 333 bool enable_windowed_mpo_odm; 332 334 bool allow_edp_hotplug_detection; 333 - #if defined(CONFIG_DRM_AMD_DC_DCN) 334 335 bool clamp_min_dcfclk; 335 - #endif 336 336 uint64_t vblank_alignment_dto_params; 337 337 uint8_t vblank_alignment_max_frame_time_diff; 338 338 bool is_asymmetric_memory; ··· 391 395 DCN_PWR_STATE_LOW_POWER = 3, 392 396 }; 393 397 394 - #if defined(CONFIG_DRM_AMD_DC_DCN) 395 398 enum dcn_zstate_support_state { 396 399 DCN_ZSTATE_SUPPORT_UNKNOWN, 397 400 DCN_ZSTATE_SUPPORT_ALLOW, 398 401 DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY, 399 402 DCN_ZSTATE_SUPPORT_DISALLOW, 400 403 }; 401 - #endif 402 404 /* 403 405 * For any clocks that may differ per pipe 404 406 * only the max is stored in this structure ··· 414 420 int phyclk_khz; 415 421 int dramclk_khz; 416 422 bool p_state_change_support; 417 - #if defined(CONFIG_DRM_AMD_DC_DCN) 418 423 enum dcn_zstate_support_state zstate_support; 419 424 bool dtbclk_en; 420 - #endif 421 425 enum dcn_pwr_state pwr_state; 422 426 /* 423 427 * Elements below are not compared for the purposes of ··· 645 653 bool disable_pplib_clock_request; 646 654 bool disable_clock_gate; 647 655 bool disable_mem_low_power; 648 - #if defined(CONFIG_DRM_AMD_DC_DCN) 649 656 bool pstate_enabled; 650 - #endif 651 657 bool disable_dmcu; 652 658 bool disable_psr; 653 659 bool force_abm_enable; ··· 663 
673 bool remove_disconnect_edp; 664 674 unsigned int force_odm_combine; //bit vector based on otg inst 665 675 unsigned int seamless_boot_odm_combine; 666 - #if defined(CONFIG_DRM_AMD_DC_DCN) 667 676 unsigned int force_odm_combine_4to1; //bit vector based on otg inst 668 677 bool disable_z9_mpc; 669 - #endif 670 678 unsigned int force_fclk_khz; 671 679 bool enable_tri_buf; 672 680 bool dmub_offload_enabled; 673 681 bool dmcub_emulation; 674 - #if defined(CONFIG_DRM_AMD_DC_DCN) 675 682 bool disable_idle_power_optimizations; 676 683 unsigned int mall_size_override; 677 684 unsigned int mall_additional_timer_percent; 678 685 bool mall_error_as_fatal; 679 - #endif 680 686 bool dmub_command_table; /* for testing only */ 681 687 struct dc_bw_validation_profile bw_val_profile; 682 688 bool disable_fec; ··· 681 695 * watermarks are not affected. 682 696 */ 683 697 unsigned int force_min_dcfclk_mhz; 684 - #if defined(CONFIG_DRM_AMD_DC_DCN) 685 698 int dwb_fi_phase; 686 - #endif 687 699 bool disable_timing_sync; 688 700 bool cm_in_bypass; 689 701 int force_clock_mode;/*every mode change.*/ ··· 713 729 enum det_size crb_alloc_policy; 714 730 int crb_alloc_policy_min_disp_count; 715 731 bool disable_z10; 716 - #if defined(CONFIG_DRM_AMD_DC_DCN) 717 732 bool enable_z9_disable_interface; 718 733 bool enable_sw_cntl_psr; 719 734 union dpia_debug_options dpia_debug; 720 - #endif 721 735 bool apply_vendor_specific_lttpr_wa; 722 736 bool extended_blank_optimization; 723 737 union aux_wake_wa_options aux_wake_wa; ··· 749 767 /* Inputs into BW and WM calculations. 
*/ 750 768 struct bw_calcs_dceip *bw_dceip; 751 769 struct bw_calcs_vbios *bw_vbios; 752 - #ifdef CONFIG_DRM_AMD_DC_DCN 753 770 struct dcn_soc_bounding_box *dcn_soc; 754 771 struct dcn_ip_params *dcn_ip; 755 772 struct display_mode_lib dml; 756 - #endif 757 773 758 774 /* HW functions */ 759 775 struct hw_sequencer_funcs hwss; ··· 760 780 /* Require to optimize clocks and bandwidth for added/removed planes */ 761 781 bool optimized_required; 762 782 bool wm_optimized_required; 763 - #if defined(CONFIG_DRM_AMD_DC_DCN) 764 783 bool idle_optimizations_allowed; 765 - #endif 766 - #if defined(CONFIG_DRM_AMD_DC_DCN) 767 784 bool enable_c20_dtm_b0; 768 - #endif 769 785 770 786 /* Require to maintain clocks and bandwidth for UEFI enabled HW */ 771 787 ··· 811 835 uint64_t log_mask; 812 836 813 837 struct dpcd_vendor_signature vendor_signature; 814 - #if defined(CONFIG_DRM_AMD_DC_DCN) 815 838 bool force_smu_not_present; 816 - #endif 817 839 }; 818 840 819 841 struct dc_callback_init { ··· 1004 1030 struct dc_transfer_func *in_shaper_func; 1005 1031 struct dc_transfer_func *blend_tf; 1006 1032 1007 - #if defined(CONFIG_DRM_AMD_DC_DCN) 1008 1033 struct dc_transfer_func *gamcor_tf; 1009 - #endif 1010 1034 enum surface_pixel_format format; 1011 1035 enum dc_rotation_angle rotation; 1012 1036 enum plane_stereo_format stereo_format; ··· 1141 1169 const struct dc *dc, 1142 1170 struct dc_state *dst_ctx); 1143 1171 1144 - #if defined(CONFIG_DRM_AMD_DC_DCN) 1145 1172 bool dc_acquire_release_mpc_3dlut( 1146 1173 struct dc *dc, bool acquire, 1147 1174 struct dc_stream_state *stream, 1148 1175 struct dc_3dlut **lut, 1149 1176 struct dc_transfer_func **shaper); 1150 - #endif 1151 1177 1152 1178 void dc_resource_state_copy_construct( 1153 1179 const struct dc_state *src_ctx, ··· 1276 1306 1277 1307 #include "dc_link.h" 1278 1308 1279 - #if defined(CONFIG_DRM_AMD_DC_DCN) 1280 1309 uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane); 1281 1310 1282 - #endif 1283 1311 
/******************************************************************************* 1284 1312 * Sink Interfaces - A sink corresponds to a display output device 1285 1313 ******************************************************************************/ ··· 1401 1433 1402 1434 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); 1403 1435 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); 1404 - #if defined(CONFIG_DRM_AMD_DC_DCN) 1405 1436 1406 1437 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, 1407 1438 struct dc_cursor_attributes *cursor_attr); ··· 1425 1458 /* cleanup on driver unload */ 1426 1459 void dc_hardware_release(struct dc *dc); 1427 1460 1428 - #endif 1429 - 1430 1461 bool dc_set_psr_allow_active(struct dc *dc, bool enable); 1431 - #if defined(CONFIG_DRM_AMD_DC_DCN) 1432 1462 void dc_z10_restore(const struct dc *dc); 1433 1463 void dc_z10_save_init(struct dc *dc); 1434 - #endif 1435 1464 1436 1465 bool dc_is_dmub_outbox_supported(struct dc *dc); 1437 1466 bool dc_enable_dmub_notifications(struct dc *dc);
-2
drivers/gpu/drm/amd/display/dc/dm_helpers.h
··· 160 160 struct dc_context *ctx, 161 161 struct dc_clocks *clks); 162 162 163 - #if defined(CONFIG_DRM_AMD_DC_DCN) 164 163 void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable); 165 - #endif 166 164 167 165 void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz); 168 166
+3 -14
drivers/gpu/drm/amd/display/dc/inc/core_types.h
··· 33 33 #include "dc_bios_types.h" 34 34 #include "mem_input.h" 35 35 #include "hubp.h" 36 - #if defined(CONFIG_DRM_AMD_DC_DCN) 37 36 #include "mpc.h" 38 - #endif 39 37 #include "dwb.h" 40 38 #include "mcif_wb.h" 41 39 #include "panel_cntl.h" ··· 179 181 void (*update_bw_bounding_box)( 180 182 struct dc *dc, 181 183 struct clk_bw_params *bw_params); 182 - #if defined(CONFIG_DRM_AMD_DC_DCN) 183 184 bool (*acquire_post_bldn_3dlut)( 184 185 struct resource_context *res_ctx, 185 186 const struct resource_pool *pool, ··· 191 194 const struct resource_pool *pool, 192 195 struct dc_3dlut **lut, 193 196 struct dc_transfer_func **shaper); 194 - #endif 197 + 195 198 enum dc_status (*add_dsc_to_stream_resource)( 196 199 struct dc *dc, struct dc_state *state, 197 200 struct dc_stream_state *stream); ··· 251 254 struct hpo_dp_stream_encoder *hpo_dp_stream_enc[MAX_HPO_DP2_ENCODERS]; 252 255 unsigned int hpo_dp_link_enc_count; 253 256 struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS]; 254 - #if defined(CONFIG_DRM_AMD_DC_DCN) 255 257 struct dc_3dlut *mpc_lut[MAX_PIPES]; 256 258 struct dc_transfer_func *mpc_shaper[MAX_PIPES]; 257 - #endif 259 + 258 260 struct { 259 261 unsigned int xtalin_clock_inKhz; 260 262 unsigned int dccg_ref_clock_inKhz; ··· 282 286 struct dmcu *dmcu; 283 287 struct dmub_psr *psr; 284 288 285 - #if defined(CONFIG_DRM_AMD_DC_DCN) 286 289 struct abm *multiple_abms[MAX_PIPES]; 287 - #endif 288 290 289 291 const struct resource_funcs *funcs; 290 292 const struct resource_caps *res_cap; ··· 374 380 struct pipe_ctx *next_odm_pipe; 375 381 struct pipe_ctx *prev_odm_pipe; 376 382 377 - #ifdef CONFIG_DRM_AMD_DC_DCN 378 383 struct _vcs_dpi_display_dlg_regs_st dlg_regs; 379 384 struct _vcs_dpi_display_ttu_regs_st ttu_regs; 380 385 struct _vcs_dpi_display_rq_regs_st rq_regs; ··· 383 390 struct _vcs_dpi_display_e2e_pipe_params_st dml_input; 384 391 int det_buffer_size_kb; 385 392 bool unbounded_req; 386 - #endif 393 + 387 394 union pipe_update_flags 
update_flags; 388 395 struct dwbc *dwbc; 389 396 struct mcif_wb *mcif_wb; ··· 412 419 bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS]; 413 420 unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS]; 414 421 int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS]; 415 - #if defined(CONFIG_DRM_AMD_DC_DCN) 416 422 bool is_mpc_3dlut_acquired[MAX_PIPES]; 417 - #endif 418 423 }; 419 424 420 425 struct dce_bw_output { ··· 475 484 476 485 /* Note: these are big structures, do *not* put on stack! */ 477 486 struct dm_pp_display_configuration pp_display_cfg; 478 - #ifdef CONFIG_DRM_AMD_DC_DCN 479 487 struct dcn_bw_internal_vars dcn_bw_vars; 480 - #endif 481 488 482 489 struct clk_mgr *clk_mgr; 483 490