Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/display: Refactor SubVP cursor limiting logic

[WHY]
There are several gaps that can result in SubVP being enabled with
incompatible HW cursor sizes, and unjust restrictions on cursor size due
to wrong predictions about future usage of SubVP.

[HOW]
- remove "prediction" logic in favor of tagging based on previous SubVP
usage
- block SubVP if current HW cursor settings are incompatible
- provide interface for DM to determine if HW cursor should be disabled
due to an attempt to enable SubVP

Reviewed-by: Alvin Lee <alvin.lee2@amd.com>
Signed-off-by: Dillon Varone <dillon.varone@amd.com>
Signed-off-by: Ray Wu <ray.wu@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Dillon Varone and committed by Alex Deucher
4465dd0e fe3250f1

+418 -135
+46 -11
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 37 37 #include "dc_state.h" 38 38 #include "dc_state_priv.h" 39 39 #include "dc_plane_priv.h" 40 + #include "dc_stream_priv.h" 40 41 41 42 #include "gpio_service_interface.h" 42 43 #include "clk_mgr.h" ··· 2887 2886 int i; 2888 2887 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2889 2888 2890 - if (dc->idle_optimizations_allowed) 2889 + if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc)) 2891 2890 overall_type = UPDATE_TYPE_FULL; 2892 2891 2893 2892 if (stream_status == NULL || stream_status->plane_count != surface_count) ··· 3291 3290 if (dsc_validate_context) { 3292 3291 stream->timing.dsc_cfg = *update->dsc_config; 3293 3292 stream->timing.flags.DSC = enable_dsc; 3294 - if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { 3293 + if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) { 3295 3294 stream->timing.dsc_cfg = old_dsc_cfg; 3296 3295 stream->timing.flags.DSC = old_dsc_enabled; 3297 3296 update->dsc_config = NULL; ··· 3516 3515 } 3517 3516 3518 3517 if (update_type == UPDATE_TYPE_FULL) { 3519 - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 3518 + if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) { 3520 3519 BREAK_TO_DEBUGGER(); 3521 3520 goto fail; 3522 3521 } ··· 4609 4608 4610 4609 backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); 4611 4610 /* commit minimal state */ 4612 - if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { 4611 + if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) { 4613 4612 /* prevent underflow and corruption when reconfiguring pipes */ 4614 4613 force_vsync_flip_in_minimal_transition_context(minimal_transition_context); 4615 4614 } else { ··· 5044 5043 if (dc->idle_optimizations_allowed) 5045 5044 return true; 5046 5045 5046 + if (dc_can_clear_cursor_limit(dc)) 5047 + return true; 5048 + 5047 5049 return 
false; 5048 5050 } 5049 5051 ··· 5132 5128 copy_stream_update_to_stream(dc, context, stream, stream_update); 5133 5129 5134 5130 if (update_type >= UPDATE_TYPE_FULL) { 5135 - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 5131 + if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) { 5136 5132 DC_ERROR("Mode validation failed for stream update!\n"); 5137 5133 dc_state_release(context); 5138 5134 return false; ··· 6276 6272 void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties) 6277 6273 { 6278 6274 unsigned int i; 6279 - bool subvp_sw_cursor_req = false; 6275 + unsigned int max_cursor_size = dc->caps.max_cursor_size; 6276 + unsigned int stream_cursor_size; 6280 6277 6281 - for (i = 0; i < dc->current_state->stream_count; i++) { 6282 - if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i]) && !dc->current_state->streams[i]->hw_cursor_req) { 6283 - subvp_sw_cursor_req = true; 6284 - break; 6278 + if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) { 6279 + for (i = 0; i < dc->current_state->stream_count; i++) { 6280 + stream_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, 6281 + dc->current_state, 6282 + dc->current_state->streams[i]); 6283 + 6284 + if (stream_cursor_size < max_cursor_size) { 6285 + max_cursor_size = stream_cursor_size; 6286 + } 6285 6287 } 6286 6288 } 6287 - properties->cursor_size_limit = subvp_sw_cursor_req ? 
64 : dc->caps.max_cursor_size; 6289 + 6290 + properties->cursor_size_limit = max_cursor_size; 6288 6291 } 6289 6292 6290 6293 /** ··· 6356 6345 return dc->res_pool->funcs->get_det_buffer_size(context); 6357 6346 else 6358 6347 return 0; 6348 + } 6349 + 6350 + bool dc_is_cursor_limit_pending(struct dc *dc) 6351 + { 6352 + uint32_t i; 6353 + 6354 + for (i = 0; i < dc->current_state->stream_count; i++) { 6355 + if (dc_stream_is_cursor_limit_pending(dc, dc->current_state->streams[i])) 6356 + return true; 6357 + } 6358 + 6359 + return false; 6360 + } 6361 + 6362 + bool dc_can_clear_cursor_limit(struct dc *dc) 6363 + { 6364 + uint32_t i; 6365 + 6366 + for (i = 0; i < dc->current_state->stream_count; i++) { 6367 + if (dc_state_can_clear_stream_cursor_subvp_limit(dc->current_state->streams[i], dc->current_state)) 6368 + return true; 6369 + } 6370 + 6371 + return false; 6359 6372 }
+2
drivers/gpu/drm/amd/display/dc/core/dc_debug.c
··· 266 266 return "Fail dp payload allocation"; 267 267 case DC_FAIL_DP_LINK_BANDWIDTH: 268 268 return "Insufficient DP link bandwidth"; 269 + case DC_FAIL_HW_CURSOR_SUPPORT: 270 + return "HW Cursor not supported"; 269 271 case DC_ERROR_UNEXPECTED: 270 272 return "Unexpected error"; 271 273 }
+6 -42
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 1342 1342 data->viewport_c.y += src.y / vpc_div; 1343 1343 } 1344 1344 1345 - static bool is_subvp_high_refresh_candidate(struct dc_stream_state *stream) 1346 - { 1347 - uint32_t refresh_rate; 1348 - struct dc *dc = stream->ctx->dc; 1349 - 1350 - refresh_rate = (stream->timing.pix_clk_100hz * (uint64_t)100 + 1351 - stream->timing.v_total * stream->timing.h_total - (uint64_t)1); 1352 - refresh_rate = div_u64(refresh_rate, stream->timing.v_total); 1353 - refresh_rate = div_u64(refresh_rate, stream->timing.h_total); 1354 - 1355 - /* If there's any stream that fits the SubVP high refresh criteria, 1356 - * we must return true. This is because cursor updates are asynchronous 1357 - * with full updates, so we could transition into a SubVP config and 1358 - * remain in HW cursor mode if there's no cursor update which will 1359 - * then cause corruption. 1360 - */ 1361 - if ((refresh_rate >= 120 && refresh_rate <= 175 && 1362 - stream->timing.v_addressable >= 1080 && 1363 - stream->timing.v_addressable <= 2160) && 1364 - (dc->current_state->stream_count > 1 || 1365 - (dc->current_state->stream_count == 1 && !stream->allow_freesync))) 1366 - return true; 1367 - 1368 - return false; 1369 - } 1370 - 1371 1345 static enum controller_dp_test_pattern convert_dp_to_controller_test_pattern( 1372 1346 enum dp_test_pattern test_pattern) 1373 1347 { ··· 4233 4259 } 4234 4260 } 4235 4261 4262 + /* clear subvp cursor limitations */ 4263 + for (i = 0; i < context->stream_count; i++) { 4264 + dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false); 4265 + } 4266 + 4236 4267 res = dc_validate_global_state(dc, context, fast_validate); 4237 4268 4238 4269 /* calculate pixel rate divider after deciding pxiel clock & odm combine */ ··· 4364 4385 result = resource_build_scaling_params_for_context(dc, new_ctx); 4365 4386 4366 4387 if (result == DC_OK) 4367 - if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) 4368 - result = 
DC_FAIL_BANDWIDTH_VALIDATE; 4388 + result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate); 4369 4389 4370 4390 return result; 4371 4391 } ··· 5514 5536 return DC_NO_LINK_ENC_RESOURCE; 5515 5537 5516 5538 return DC_OK; 5517 - } 5518 - 5519 - bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream) 5520 - { 5521 - if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream)) 5522 - return true; 5523 - if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 && 5524 - ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) 5525 - return true; 5526 - else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 && 5527 - ((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120) 5528 - return true; 5529 - 5530 - return false; 5531 5539 } 5532 5540 5533 5541 struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
+107 -4
drivers/gpu/drm/amd/display/dc/core/dc_state.c
··· 22 22 * Authors: AMD 23 23 * 24 24 */ 25 + #include "dc_types.h" 25 26 #include "core_types.h" 26 27 #include "core_status.h" 27 28 #include "dc_state.h" ··· 813 812 if (phantom_stream_status) { 814 813 phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM; 815 814 phantom_stream_status->mall_stream_config.paired_stream = main_stream; 815 + phantom_stream_status->mall_stream_config.subvp_limit_cursor_size = false; 816 + phantom_stream_status->mall_stream_config.cursor_size_limit_subvp = false; 816 817 } 818 + 819 + dc_state_set_stream_subvp_cursor_limit(main_stream, state, true); 817 820 818 821 return res; 819 822 } ··· 944 939 const struct dc *dc, 945 940 struct dc_state *state) 946 941 { 942 + unsigned int phantom_count; 943 + struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES]; 944 + struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES]; 947 945 int i; 948 946 949 - for (i = 0; i < state->phantom_stream_count; i++) 950 - dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]); 947 + phantom_count = state->phantom_stream_count; 948 + memcpy(phantom_streams, state->phantom_streams, sizeof(struct dc_stream_state *) * MAX_PHANTOM_PIPES); 949 + for (i = 0; i < phantom_count; i++) 950 + dc_state_release_phantom_stream(dc, state, phantom_streams[i]); 951 951 952 - for (i = 0; i < state->phantom_plane_count; i++) 953 - dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]); 952 + phantom_count = state->phantom_plane_count; 953 + memcpy(phantom_planes, state->phantom_planes, sizeof(struct dc_plane_state *) * MAX_PHANTOM_PIPES); 954 + for (i = 0; i < phantom_count; i++) 955 + dc_state_release_phantom_plane(dc, state, phantom_planes[i]); 954 956 } 955 957 956 958 struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id) ··· 988 976 is_fams2_in_use |= dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable; 989 977 990 978 return is_fams2_in_use; 979 + } 980 + 981 + 
void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream, 982 + struct dc_state *state, 983 + bool limit) 984 + { 985 + struct dc_stream_status *stream_status; 986 + 987 + stream_status = dc_state_get_stream_status(state, stream); 988 + 989 + if (stream_status) { 990 + stream_status->mall_stream_config.subvp_limit_cursor_size = limit; 991 + } 992 + } 993 + 994 + bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream, 995 + struct dc_state *state) 996 + { 997 + bool limit = false; 998 + 999 + struct dc_stream_status *stream_status; 1000 + 1001 + stream_status = dc_state_get_stream_status(state, stream); 1002 + 1003 + if (stream_status) { 1004 + limit = stream_status->mall_stream_config.subvp_limit_cursor_size; 1005 + } 1006 + 1007 + return limit; 1008 + } 1009 + 1010 + void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream, 1011 + struct dc_state *state, 1012 + bool limit) 1013 + { 1014 + struct dc_stream_status *stream_status; 1015 + 1016 + stream_status = dc_state_get_stream_status(state, stream); 1017 + 1018 + if (stream_status) { 1019 + stream_status->mall_stream_config.cursor_size_limit_subvp = limit; 1020 + } 1021 + } 1022 + 1023 + bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream, 1024 + struct dc_state *state) 1025 + { 1026 + bool limit = false; 1027 + 1028 + struct dc_stream_status *stream_status; 1029 + 1030 + stream_status = dc_state_get_stream_status(state, stream); 1031 + 1032 + if (stream_status) { 1033 + limit = stream_status->mall_stream_config.cursor_size_limit_subvp; 1034 + } 1035 + 1036 + return limit; 1037 + } 1038 + 1039 + bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream, 1040 + struct dc_state *state) 1041 + { 1042 + bool can_clear_limit = false; 1043 + 1044 + struct dc_stream_status *stream_status; 1045 + 1046 + stream_status = dc_state_get_stream_status(state, stream); 1047 + 1048 + if 
(stream_status) { 1049 + can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, state) && 1050 + (stream_status->mall_stream_config.type == SUBVP_PHANTOM || 1051 + stream->hw_cursor_req || 1052 + !stream_status->mall_stream_config.subvp_limit_cursor_size || 1053 + !stream->cursor_position.enable || 1054 + dc_stream_check_cursor_attributes(stream, state, &stream->cursor_attributes)); 1055 + } 1056 + 1057 + return can_clear_limit; 1058 + } 1059 + 1060 + bool dc_state_is_subvp_in_use(struct dc_state *state) 1061 + { 1062 + uint32_t i; 1063 + 1064 + for (i = 0; i < state->stream_count; i++) { 1065 + if (dc_state_get_stream_subvp_type(state, state->streams[i]) != SUBVP_NONE) 1066 + return true; 1067 + } 1068 + 1069 + return false; 991 1070 }
+55 -15
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 265 265 } 266 266 267 267 /* 268 - * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address 268 + * dc_stream_check_cursor_attributes() - Check validitity of cursor attributes and surface address 269 269 */ 270 - bool dc_stream_set_cursor_attributes( 271 - struct dc_stream_state *stream, 270 + bool dc_stream_check_cursor_attributes( 271 + const struct dc_stream_state *stream, 272 + struct dc_state *state, 272 273 const struct dc_cursor_attributes *attributes) 273 274 { 274 - struct dc *dc; 275 + const struct dc *dc; 276 + 277 + unsigned int max_cursor_size; 275 278 276 279 if (NULL == stream) { 277 280 dm_error("DC: dc_stream is NULL!\n"); ··· 292 289 293 290 dc = stream->ctx->dc; 294 291 295 - /* SubVP is not compatible with HW cursor larger than 64 x 64 x 4. 296 - * Therefore, if cursor is greater than 64 x 64 x 4, fallback to SW cursor in the following case: 297 - * 1. If the config is a candidate for SubVP high refresh (both single an dual display configs) 298 - * 2. If not subvp high refresh, for single display cases, if resolution is >= 5K and refresh rate < 120hz 299 - * 3. If not subvp high refresh, for multi display cases, if resolution is >= 4K and refresh rate < 120hz 292 + /* SubVP is not compatible with HW cursor larger than what can fit in cursor SRAM. 293 + * Therefore, if cursor is greater than this, fallback to SW cursor. 
300 294 */ 301 - if (dc->debug.allow_sw_cursor_fallback && 302 - attributes->height * attributes->width * 4 > 16384 && 303 - !stream->hw_cursor_req) { 304 - if (check_subvp_sw_cursor_fallback_req(dc, stream)) 295 + if (dc->debug.allow_sw_cursor_fallback && dc->res_pool->funcs->get_max_hw_cursor_size) { 296 + max_cursor_size = dc->res_pool->funcs->get_max_hw_cursor_size(dc, state, stream); 297 + max_cursor_size = max_cursor_size * max_cursor_size * 4; 298 + 299 + if (attributes->height * attributes->width * 4 > max_cursor_size) { 305 300 return false; 301 + } 306 302 } 307 303 308 - stream->cursor_attributes = *attributes; 309 - 310 304 return true; 305 + } 306 + 307 + /* 308 + * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address 309 + */ 310 + bool dc_stream_set_cursor_attributes( 311 + struct dc_stream_state *stream, 312 + const struct dc_cursor_attributes *attributes) 313 + { 314 + bool result = false; 315 + 316 + if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) { 317 + stream->cursor_attributes = *attributes; 318 + result = true; 319 + } 320 + 321 + return result; 311 322 } 312 323 313 324 bool dc_stream_program_cursor_attributes( ··· 1125 1108 return 0; 1126 1109 1127 1110 return dc_stream_get_max_flickerless_instant_vtotal_delta(stream, is_gaming, false); 1111 + } 1112 + 1113 + bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream) 1114 + { 1115 + bool is_limit_pending = false; 1116 + 1117 + if (dc->current_state) 1118 + is_limit_pending = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state); 1119 + 1120 + return is_limit_pending; 1121 + } 1122 + 1123 + bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream) 1124 + { 1125 + bool can_clear_limit = false; 1126 + 1127 + if (dc->current_state) 1128 + can_clear_limit = dc_state_get_stream_cursor_subvp_limit(stream, dc->current_state) && 1129 + 
(stream->hw_cursor_req || 1130 + !stream->cursor_position.enable || 1131 + dc_stream_check_cursor_attributes(stream, dc->current_state, &stream->cursor_attributes)); 1132 + 1133 + return can_clear_limit; 1128 1134 }
+4
drivers/gpu/drm/amd/display/dc/dc.h
··· 249 249 uint32_t i2c_speed_in_khz_hdcp; 250 250 uint32_t dmdata_alloc_size; 251 251 unsigned int max_cursor_size; 252 + unsigned int max_buffered_cursor_size; 252 253 unsigned int max_video_width; 253 254 /* 254 255 * max video plane width that can be safely assumed to be always ··· 2608 2607 2609 2608 bool dc_is_timing_changed(struct dc_stream_state *cur_stream, 2610 2609 struct dc_stream_state *new_stream); 2610 + 2611 + bool dc_is_cursor_limit_pending(struct dc *dc); 2612 + bool dc_can_clear_cursor_limit(struct dc *dc); 2611 2613 2612 2614 #endif /* DC_INTERFACE_H_ */
+20
drivers/gpu/drm/amd/display/dc/dc_state_priv.h
··· 105 105 const struct dc *dc, 106 106 const struct dc_state *state); 107 107 108 + 109 + void dc_state_set_stream_subvp_cursor_limit(const struct dc_stream_state *stream, 110 + struct dc_state *state, 111 + bool limit); 112 + 113 + bool dc_state_get_stream_subvp_cursor_limit(const struct dc_stream_state *stream, 114 + struct dc_state *state); 115 + 116 + void dc_state_set_stream_cursor_subvp_limit(const struct dc_stream_state *stream, 117 + struct dc_state *state, 118 + bool limit); 119 + 120 + bool dc_state_get_stream_cursor_subvp_limit(const struct dc_stream_state *stream, 121 + struct dc_state *state); 122 + 123 + bool dc_state_can_clear_stream_cursor_subvp_limit(const struct dc_stream_state *stream, 124 + struct dc_state *state); 125 + 126 + bool dc_state_is_subvp_in_use(struct dc_state *state); 127 + 108 128 #endif /* _DC_STATE_PRIV_H_ */
+10
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 44 44 */ 45 45 enum mall_stream_type type; 46 46 struct dc_stream_state *paired_stream; // master / slave stream 47 + bool subvp_limit_cursor_size; /* stream has/is using subvp limiting hw cursor support */ 48 + bool cursor_size_limit_subvp; /* stream is using hw cursor config preventing subvp */ 47 49 }; 48 50 49 51 struct dc_stream_status { ··· 505 503 struct dc *dc, 506 504 struct dc_stream_state *stream); 507 505 506 + bool dc_stream_check_cursor_attributes( 507 + const struct dc_stream_state *stream, 508 + struct dc_state *state, 509 + const struct dc_cursor_attributes *attributes); 510 + 508 511 bool dc_stream_set_cursor_attributes( 509 512 struct dc_stream_state *stream, 510 513 const struct dc_cursor_attributes *attributes); ··· 586 579 struct dc_stream_state *stream, 587 580 struct dc_surface_update *srf_updates, 588 581 struct dc_state *context); 582 + 583 + bool dc_stream_is_cursor_limit_pending(struct dc *dc, struct dc_stream_state *stream); 584 + bool dc_stream_can_clear_cursor_limit(struct dc *dc, struct dc_stream_state *stream); 589 585 590 586 #endif /* DC_STREAM_H_ */
+1
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
··· 627 627 */ 628 628 if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) && 629 629 !pipe->stream->hw_cursor_req && 630 + !dc_state_get_stream_cursor_subvp_limit(pipe->stream, context) && 630 631 !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && 631 632 (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && 632 633 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
+6 -3
drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
··· 526 526 527 527 static void populate_dml21_stream_overrides_from_stream_state( 528 528 struct dml2_stream_parameters *stream_desc, 529 - struct dc_stream_state *stream) 529 + struct dc_stream_state *stream, 530 + struct dc_stream_status *stream_status) 530 531 { 531 532 switch (stream->debug.force_odm_combine_segments) { 532 533 case 0: ··· 552 551 if (!stream->ctx->dc->debug.enable_single_display_2to1_odm_policy || 553 552 stream->debug.force_odm_combine_segments > 0) 554 553 stream_desc->overrides.disable_dynamic_odm = true; 555 - stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || stream->hw_cursor_req; 554 + stream_desc->overrides.disable_subvp = stream->ctx->dc->debug.force_disable_subvp || 555 + stream->hw_cursor_req || 556 + stream_status->mall_stream_config.cursor_size_limit_subvp; 556 557 } 557 558 558 559 static enum dml2_swizzle_mode gfx_addr3_to_dml2_swizzle_mode(enum swizzle_mode_addr3_values addr3_mode) ··· 1027 1024 populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx); 1028 1025 adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]); 1029 1026 populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]); 1030 - populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]); 1027 + populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]); 1031 1028 1032 1029 dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.fclk_pstate = 
dml2_twait_budgeting_setting_if_needed; 1033 1030 dml_dispcfg->stream_descriptors[disp_cfg_stream_location].overrides.hw.twait_budgeting.uclk_pstate = dml2_twait_budgeting_setting_if_needed;
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
··· 2482 2482 struct dce_hwseq *hws = dc->hwseq; 2483 2483 2484 2484 /* recalculate DML parameters */ 2485 - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) 2485 + if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) 2486 2486 return false; 2487 2487 2488 2488 /* apply updated bandwidth parameters */
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
··· 2651 2651 struct dce_hwseq *hws = dc->hwseq; 2652 2652 2653 2653 /* recalculate DML parameters */ 2654 - if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) 2654 + if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) 2655 2655 return false; 2656 2656 2657 2657 /* apply updated bandwidth parameters */
+3
drivers/gpu/drm/amd/display/dc/inc/core_status.h
··· 26 26 #ifndef _CORE_STATUS_H_ 27 27 #define _CORE_STATUS_H_ 28 28 29 + #include "dc_hw_types.h" 30 + 29 31 enum dc_status { 30 32 DC_OK = 1, 31 33 ··· 58 56 DC_NO_LINK_ENC_RESOURCE = 26, 59 57 DC_FAIL_DP_PAYLOAD_ALLOCATION = 27, 60 58 DC_FAIL_DP_LINK_BANDWIDTH = 28, 59 + DC_FAIL_HW_CURSOR_SUPPORT = 29, 61 60 DC_ERROR_UNEXPECTED = -1 62 61 }; 63 62
+4 -1
drivers/gpu/drm/amd/display/dc/inc/core_types.h
··· 78 78 /* Create a minimal link encoder object with no dc_link object 79 79 * associated with it. */ 80 80 struct link_encoder *(*link_enc_create_minimal)(struct dc_context *ctx, enum engine_id eng_id); 81 - bool (*validate_bandwidth)( 81 + enum dc_status (*validate_bandwidth)( 82 82 struct dc *dc, 83 83 struct dc_state *context, 84 84 bool fast_validate); ··· 217 217 int (*get_power_profile)(const struct dc_state *context); 218 218 unsigned int (*get_det_buffer_size)(const struct dc_state *context); 219 219 unsigned int (*get_vstartup_for_pipe)(struct pipe_ctx *pipe_ctx); 220 + unsigned int (*get_max_hw_cursor_size)(const struct dc *dc, 221 + struct dc_state *state, 222 + const struct dc_stream_state *stream); 220 223 }; 221 224 222 225 struct audio_support{
-2
drivers/gpu/drm/amd/display/dc/inc/resource.h
··· 628 628 struct dc_state *context, 629 629 struct pipe_ctx *pipe_ctx); 630 630 631 - bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream); 632 - 633 631 /* Get hw programming parameters container from pipe context 634 632 * @pipe_ctx: pipe context 635 633 * @dscl_prog_data: struct to hold programmable hw reg values
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
··· 836 836 return DC_OK; 837 837 } 838 838 839 - static bool dce100_validate_bandwidth( 839 + static enum dc_status dce100_validate_bandwidth( 840 840 struct dc *dc, 841 841 struct dc_state *context, 842 842 bool fast_validate) ··· 858 858 context->bw_ctx.bw.dce.yclk_khz = 0; 859 859 } 860 860 861 - return true; 861 + return DC_OK; 862 862 } 863 863 864 864 static bool dce100_validate_surface_sets(
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
··· 960 960 return DC_OK; 961 961 } 962 962 963 - static bool dce110_validate_bandwidth( 963 + static enum dc_status dce110_validate_bandwidth( 964 964 struct dc *dc, 965 965 struct dc_state *context, 966 966 bool fast_validate) ··· 1031 1031 context->bw_ctx.bw.dce.yclk_khz, 1032 1032 context->bw_ctx.bw.dce.blackout_recovery_time_us); 1033 1033 } 1034 - return result; 1034 + return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1035 1035 } 1036 1036 1037 1037 static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
··· 883 883 return DC_OK; 884 884 } 885 885 886 - bool dce112_validate_bandwidth( 886 + enum dc_status dce112_validate_bandwidth( 887 887 struct dc *dc, 888 888 struct dc_state *context, 889 889 bool fast_validate) ··· 952 952 context->bw_ctx.bw.dce.yclk_khz, 953 953 context->bw_ctx.bw.dce.blackout_recovery_time_us); 954 954 } 955 - return result; 955 + return result ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 956 956 } 957 957 958 958 enum dc_status resource_map_phy_clock_resources(
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
··· 42 42 struct dc_state *context, 43 43 struct dc_state *old_context); 44 44 45 - bool dce112_validate_bandwidth( 45 + enum dc_status dce112_validate_bandwidth( 46 46 struct dc *dc, 47 47 struct dc_state *context, 48 48 bool fast_validate);
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
··· 863 863 } 864 864 } 865 865 866 - static bool dce60_validate_bandwidth( 866 + static enum dc_status dce60_validate_bandwidth( 867 867 struct dc *dc, 868 868 struct dc_state *context, 869 869 bool fast_validate) ··· 885 885 context->bw_ctx.bw.dce.yclk_khz = 0; 886 886 } 887 887 888 - return true; 888 + return DC_OK; 889 889 } 890 890 891 891 static bool dce60_validate_surface_sets(
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
··· 869 869 } 870 870 } 871 871 872 - static bool dce80_validate_bandwidth( 872 + static enum dc_status dce80_validate_bandwidth( 873 873 struct dc *dc, 874 874 struct dc_state *context, 875 875 bool fast_validate) ··· 891 891 context->bw_ctx.bw.dce.yclk_khz = 0; 892 892 } 893 893 894 - return true; 894 + return DC_OK; 895 895 } 896 896 897 897 static bool dce80_validate_surface_sets(
+3 -2
drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
··· 23 23 * 24 24 */ 25 25 26 + #include "core_status.h" 26 27 #include "dm_services.h" 27 28 #include "dc.h" 28 29 ··· 1126 1125 *pool = NULL; 1127 1126 } 1128 1127 1129 - static bool dcn10_validate_bandwidth( 1128 + static enum dc_status dcn10_validate_bandwidth( 1130 1129 struct dc *dc, 1131 1130 struct dc_state *context, 1132 1131 bool fast_validate) ··· 1137 1136 voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate); 1138 1137 DC_FP_END(); 1139 1138 1140 - return voltage_supported; 1139 + return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1141 1140 } 1142 1141 1143 1142 static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
+3 -3
drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
··· 2124 2124 return out; 2125 2125 } 2126 2126 2127 - bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, 2127 + enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, 2128 2128 bool fast_validate) 2129 2129 { 2130 2130 bool voltage_supported; ··· 2132 2132 2133 2133 pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL); 2134 2134 if (!pipes) 2135 - return false; 2135 + return DC_FAIL_BANDWIDTH_VALIDATE; 2136 2136 2137 2137 DC_FP_START(); 2138 2138 voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes); 2139 2139 DC_FP_END(); 2140 2140 2141 2141 kfree(pipes); 2142 - return voltage_supported; 2142 + return voltage_supported ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 2143 2143 } 2144 2144 2145 2145 struct pipe_ctx *dcn20_acquire_free_pipe_for_layer(
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
··· 119 119 struct dc_state *context, 120 120 display_e2e_pipe_params_st *pipes, 121 121 int pipe_cnt); 122 - bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); 122 + enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); 123 123 void dcn20_merge_pipes_for_validate( 124 124 struct dc *dc, 125 125 struct dc_state *context);
+3 -3
drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
··· 923 923 * with DC_FP_START()/DC_FP_END(). Use the same approach as for 924 924 * dcn20_validate_bandwidth in dcn20_resource.c. 925 925 */ 926 - static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, 926 + static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, 927 927 bool fast_validate) 928 928 { 929 929 bool voltage_supported; ··· 931 931 932 932 pipes = kcalloc(dc->res_pool->pipe_count, sizeof(display_e2e_pipe_params_st), GFP_KERNEL); 933 933 if (!pipes) 934 - return false; 934 + return DC_FAIL_BANDWIDTH_VALIDATE; 935 935 936 936 DC_FP_START(); 937 937 voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes); 938 938 DC_FP_END(); 939 939 940 940 kfree(pipes); 941 - return voltage_supported; 941 + return voltage_supported ? DC_OK : DC_NOT_SUPPORTED; 942 942 } 943 943 944 944 static void dcn21_destroy_resource_pool(struct resource_pool **pool)
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
··· 2035 2035 DC_FP_END(); 2036 2036 } 2037 2037 2038 - bool dcn30_validate_bandwidth(struct dc *dc, 2038 + enum dc_status dcn30_validate_bandwidth(struct dc *dc, 2039 2039 struct dc_state *context, 2040 2040 bool fast_validate) 2041 2041 { ··· 2092 2092 2093 2093 BW_VAL_TRACE_FINISH(); 2094 2094 2095 - return out; 2095 + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 2096 2096 } 2097 2097 2098 2098 void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
··· 56 56 enum mmhubbub_wbif_mode mode, 57 57 unsigned int urgent_watermark); 58 58 59 - bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, 59 + enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, 60 60 bool fast_validate); 61 61 bool dcn30_internal_validate_bw( 62 62 struct dc *dc,
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
··· 1758 1758 DC_FP_END(); 1759 1759 } 1760 1760 1761 - bool dcn31_validate_bandwidth(struct dc *dc, 1761 + enum dc_status dcn31_validate_bandwidth(struct dc *dc, 1762 1762 struct dc_state *context, 1763 1763 bool fast_validate) 1764 1764 { ··· 1813 1813 1814 1814 BW_VAL_TRACE_FINISH(); 1815 1815 1816 - return out; 1816 + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1817 1817 } 1818 1818 1819 1819 static void dcn31_get_panel_config_defaults(struct dc_panel_config *panel_config)
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
··· 37 37 struct resource_pool base; 38 38 }; 39 39 40 - bool dcn31_validate_bandwidth(struct dc *dc, 40 + enum dc_status dcn31_validate_bandwidth(struct dc *dc, 41 41 struct dc_state *context, 42 42 bool fast_validate); 43 43 void dcn31_calculate_wm_and_dlg(
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
··· 1694 1694 *panel_config = panel_config_defaults; 1695 1695 } 1696 1696 1697 - bool dcn314_validate_bandwidth(struct dc *dc, 1697 + enum dc_status dcn314_validate_bandwidth(struct dc *dc, 1698 1698 struct dc_state *context, 1699 1699 bool fast_validate) 1700 1700 { ··· 1750 1750 1751 1751 BW_VAL_TRACE_FINISH(); 1752 1752 1753 - return out; 1753 + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1754 1754 } 1755 1755 1756 1756 static struct resource_funcs dcn314_res_pool_funcs = {
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
··· 39 39 struct resource_pool base; 40 40 }; 41 41 42 - bool dcn314_validate_bandwidth(struct dc *dc, 42 + enum dc_status dcn314_validate_bandwidth(struct dc *dc, 43 43 struct dc_state *context, 44 44 bool fast_validate); 45 45
+59 -7
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 24 24 * 25 25 */ 26 26 27 + #include "dc_types.h" 27 28 #include "dm_services.h" 28 29 #include "dc.h" 29 30 ··· 1807 1806 return out; 1808 1807 } 1809 1808 1810 - bool dcn32_validate_bandwidth(struct dc *dc, 1809 + enum dc_status dcn32_validate_bandwidth(struct dc *dc, 1811 1810 struct dc_state *context, 1812 1811 bool fast_validate) 1813 1812 { 1814 - bool out = false; 1813 + unsigned int i; 1814 + enum dc_status status; 1815 + const struct dc_stream_state *stream; 1816 + 1817 + /* reset cursor limitations on subvp */ 1818 + for (i = 0; i < context->stream_count; i++) { 1819 + stream = context->streams[i]; 1820 + 1821 + if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) { 1822 + dc_state_set_stream_cursor_subvp_limit(stream, context, false); 1823 + } 1824 + } 1815 1825 1816 1826 if (dc->debug.using_dml2) 1817 - out = dml2_validate(dc, context, 1827 + status = dml2_validate(dc, context, 1818 1828 context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, 1819 - fast_validate); 1829 + fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1820 1830 else 1821 - out = dml1_validate(dc, context, fast_validate); 1822 - return out; 1831 + status = dml1_validate(dc, context, fast_validate) ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1832 + 1833 + if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) { 1834 + /* check new stream configuration still supports cursor if subvp used */ 1835 + for (i = 0; i < context->stream_count; i++) { 1836 + stream = context->streams[i]; 1837 + 1838 + if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM && 1839 + stream->cursor_position.enable && 1840 + !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) { 1841 + /* hw cursor cannot be supported with subvp active, so disable subvp for now */ 1842 + dc_state_set_stream_cursor_subvp_limit(stream, context, true); 1843 + status = DC_FAIL_HW_CURSOR_SUPPORT; 1844 + } 1845 + }; 1846 + } 1847 + 1848 + if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) { 1849 + /* attempt to validate again with subvp disabled due to cursor */ 1850 + if (dc->debug.using_dml2) 1851 + status = dml2_validate(dc, context, 1852 + context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, 1853 + fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1854 + else 1855 + status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1856 + } 1857 + 1858 + return status; 1823 1859 } 1824 1860 1825 1861 int dcn32_populate_dml_pipes_from_context( ··· 2080 2042 DC_FP_END(); 2081 2043 } 2082 2044 2045 + unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc, 2046 + struct dc_state *state, 2047 + const struct dc_stream_state *stream) 2048 + { 2049 + bool limit_cur_to_buf; 2050 + 2051 + limit_cur_to_buf = dc_state_get_stream_subvp_cursor_limit(stream, state) && 2052 + !stream->hw_cursor_req; 2053 + 2054 + return limit_cur_to_buf ? 
dc->caps.max_buffered_cursor_size : dc->caps.max_cursor_size; 2055 + } 2056 + 2083 2057 static struct resource_funcs dcn32_res_pool_funcs = { 2084 2058 .destroy = dcn32_destroy_resource_pool, 2085 2059 .link_enc_create = dcn32_link_encoder_create, ··· 2117 2067 .add_phantom_pipes = dcn32_add_phantom_pipes, 2118 2068 .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, 2119 2069 .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, 2120 - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe 2070 + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, 2071 + .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size, 2121 2072 }; 2122 2073 2123 2074 static uint32_t read_pipe_fuses(struct dc_context *ctx) ··· 2202 2151 dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ 2203 2152 /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/ 2204 2153 dc->caps.max_cursor_size = 64; 2154 + dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4) 2205 2155 dc->caps.min_horizontal_blanking_period = 80; 2206 2156 dc->caps.dmdata_alloc_size = 2048; 2207 2157 dc->caps.mall_size_per_mem_channel = 4;
+5 -1
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
··· 98 98 unsigned int pipe_cnt, 99 99 unsigned int index); 100 100 101 - bool dcn32_validate_bandwidth(struct dc *dc, 101 + enum dc_status dcn32_validate_bandwidth(struct dc *dc, 102 102 struct dc_state *context, 103 103 bool fast_validate); 104 104 ··· 187 187 void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context); 188 188 189 189 unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes); 190 + 191 + unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc, 192 + struct dc_state *state, 193 + const struct dc_stream_state *stream); 190 194 191 195 /* definitions for run time init of reg offsets */ 192 196
+3 -1
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
··· 1624 1624 .add_phantom_pipes = dcn32_add_phantom_pipes, 1625 1625 .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, 1626 1626 .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, 1627 - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe 1627 + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, 1628 + .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size, 1628 1629 }; 1629 1630 1630 1631 static uint32_t read_pipe_fuses(struct dc_context *ctx) ··· 1710 1709 dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/ 1711 1710 /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/ 1712 1711 dc->caps.max_cursor_size = 64; 1712 + dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4) 1713 1713 dc->caps.min_horizontal_blanking_period = 80; 1714 1714 dc->caps.dmdata_alloc_size = 2048; 1715 1715 dc->caps.mall_size_per_mem_channel = 4;
+3 -3
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
··· 1732 1732 } 1733 1733 1734 1734 1735 - static bool dcn35_validate_bandwidth(struct dc *dc, 1735 + static enum dc_status dcn35_validate_bandwidth(struct dc *dc, 1736 1736 struct dc_state *context, 1737 1737 bool fast_validate) 1738 1738 { ··· 1743 1743 fast_validate); 1744 1744 1745 1745 if (fast_validate) 1746 - return out; 1746 + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1747 1747 1748 1748 DC_FP_START(); 1749 1749 dcn35_decide_zstate_support(dc, context); 1750 1750 DC_FP_END(); 1751 1751 1752 - return out; 1752 + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1753 1753 } 1754 1754 1755 1755 enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+3 -3
drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
··· 1712 1712 } 1713 1713 1714 1714 1715 - static bool dcn351_validate_bandwidth(struct dc *dc, 1715 + static enum dc_status dcn351_validate_bandwidth(struct dc *dc, 1716 1716 struct dc_state *context, 1717 1717 bool fast_validate) 1718 1718 { ··· 1723 1723 fast_validate); 1724 1724 1725 1725 if (fast_validate) 1726 - return out; 1726 + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1727 1727 1728 1728 DC_FP_START(); 1729 1729 dcn35_decide_zstate_support(dc, context); 1730 1730 DC_FP_END(); 1731 1731 1732 - return out; 1732 + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1733 1733 } 1734 1734 1735 1735 static struct resource_funcs dcn351_res_pool_funcs = {
+3 -3
drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
··· 1713 1713 } 1714 1714 1715 1715 1716 - static bool dcn35_validate_bandwidth(struct dc *dc, 1716 + static enum dc_status dcn35_validate_bandwidth(struct dc *dc, 1717 1717 struct dc_state *context, 1718 1718 bool fast_validate) 1719 1719 { ··· 1724 1724 fast_validate); 1725 1725 1726 1726 if (fast_validate) 1727 - return out; 1727 + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1728 1728 1729 1729 DC_FP_START(); 1730 1730 dcn35_decide_zstate_support(dc, context); 1731 1731 DC_FP_END(); 1732 1732 1733 - return out; 1733 + return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1734 1734 } 1735 1735 1736 1736
+45 -7
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
··· 1642 1642 return DC_OK; 1643 1643 } 1644 1644 1645 - bool dcn401_validate_bandwidth(struct dc *dc, 1645 + enum dc_status dcn401_validate_bandwidth(struct dc *dc, 1646 1646 struct dc_state *context, 1647 1647 bool fast_validate) 1648 1648 { 1649 - bool out = false; 1649 + unsigned int i; 1650 + enum dc_status status = DC_OK; 1651 + const struct dc_stream_state *stream; 1652 + 1653 + /* reset cursor limitations on subvp */ 1654 + for (i = 0; i < context->stream_count; i++) { 1655 + stream = context->streams[i]; 1656 + 1657 + if (dc_state_can_clear_stream_cursor_subvp_limit(stream, context)) { 1658 + dc_state_set_stream_cursor_subvp_limit(stream, context, false); 1659 + } 1660 + } 1661 + 1650 1662 if (dc->debug.using_dml2) 1651 - out = dml2_validate(dc, context, 1663 + status = dml2_validate(dc, context, 1652 1664 context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, 1653 - fast_validate); 1654 - return out; 1665 + fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1666 + 1667 + if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) { 1668 + /* check new stream configuration still supports cursor if subvp used */ 1669 + for (i = 0; i < context->stream_count; i++) { 1670 + stream = context->streams[i]; 1671 + 1672 + if (dc_state_get_stream_subvp_type(context, stream) != SUBVP_PHANTOM && 1673 + stream->cursor_position.enable && 1674 + !dc_stream_check_cursor_attributes(stream, context, &stream->cursor_attributes)) { 1675 + /* hw cursor cannot be supported with subvp active, so disable subvp for now */ 1676 + dc_state_set_stream_cursor_subvp_limit(stream, context, true); 1677 + status = DC_FAIL_HW_CURSOR_SUPPORT; 1678 + } 1679 + }; 1680 + } 1681 + 1682 + if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) { 1683 + /* attempt to validate again with subvp disabled due to cursor */ 1684 + if (dc->debug.using_dml2) 1685 + status = dml2_validate(dc, context, 1686 + context->power_source == 
DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, 1687 + fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1688 + } 1689 + 1690 + return status; 1655 1691 } 1656 1692 1657 1693 void dcn401_prepare_mcache_programming(struct dc *dc, ··· 1806 1770 .build_pipe_pix_clk_params = dcn401_build_pipe_pix_clk_params, 1807 1771 .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, 1808 1772 .get_power_profile = dcn401_get_power_profile, 1809 - .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe 1773 + .get_vstartup_for_pipe = dcn401_get_vstartup_for_pipe, 1774 + .get_max_hw_cursor_size = dcn32_get_max_hw_cursor_size 1810 1775 }; 1811 1776 1812 1777 static uint32_t read_pipe_fuses(struct dc_context *ctx) ··· 1883 1846 dc->caps.max_downscale_ratio = 600; 1884 1847 dc->caps.i2c_speed_in_khz = 95; 1885 1848 dc->caps.i2c_speed_in_khz_hdcp = 95; /*1.4 w/a applied by default*/ 1886 - /* TODO: Bring max cursor size back to 256 after subvp cursor corruption is fixed*/ 1849 + /* used to set cursor pitch, so must be aligned to power of 2 (HW actually supported 78x78) */ 1887 1850 dc->caps.max_cursor_size = 64; 1851 + dc->caps.max_buffered_cursor_size = 64; 1888 1852 dc->caps.cursor_not_scaled = true; 1889 1853 dc->caps.min_horizontal_blanking_period = 80; 1890 1854 dc->caps.dmdata_alloc_size = 2048;
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
··· 22 22 23 23 enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state); 24 24 25 - bool dcn401_validate_bandwidth(struct dc *dc, 25 + enum dc_status dcn401_validate_bandwidth(struct dc *dc, 26 26 struct dc_state *context, 27 27 bool fast_validate); 28 28