Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/display: Refactor dc_state interface

[WHY?]
Part of the dc_state interface that deals with adding streams and planes should
remain public, while the parts that deal with internal statuses and subvp should
be private to DC.

[HOW?]
Move and rename the public functions to dc_state.h and private functions to
dc_state_priv.h. Also add some additional functions for extracting subvp
metadata from the state.

Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Reviewed-by: Jun Lei <jun.lei@amd.com>
Acked-by: Wayne Lin <wayne.lin@amd.com>
Signed-off-by: Dillon Varone <dillon.varone@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Dillon Varone and committed by
Alex Deucher
09a4ec5d e5ffd126

+1171 -706
+23 -22
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 37 37 #include "dc/dc_dmub_srv.h" 38 38 #include "dc/dc_edid_parser.h" 39 39 #include "dc/dc_stat.h" 40 + #include "dc/dc_state.h" 40 41 #include "amdgpu_dm_trace.h" 41 42 #include "dpcd_defs.h" 42 43 #include "link/protocols/link_dpcd.h" ··· 2608 2607 2609 2608 memset(del_streams, 0, sizeof(del_streams)); 2610 2609 2611 - context = dc_create_state(dc); 2610 + context = dc_state_create(dc); 2612 2611 if (context == NULL) 2613 2612 goto context_alloc_fail; 2614 2613 ··· 2623 2622 2624 2623 /* Remove all planes for removed streams and then remove the streams */ 2625 2624 for (i = 0; i < del_streams_count; i++) { 2626 - if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { 2625 + if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { 2627 2626 res = DC_FAIL_DETACH_SURFACES; 2628 2627 goto fail; 2629 2628 } 2630 2629 2631 - res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); 2630 + res = dc_state_remove_stream(dc, context, del_streams[i]); 2632 2631 if (res != DC_OK) 2633 2632 goto fail; 2634 2633 } ··· 2636 2635 res = dc_commit_streams(dc, context->streams, context->stream_count); 2637 2636 2638 2637 fail: 2639 - dc_release_state(context); 2638 + dc_state_release(context); 2640 2639 2641 2640 context_alloc_fail: 2642 2641 return res; ··· 2663 2662 2664 2663 dc_allow_idle_optimizations(adev->dm.dc, false); 2665 2664 2666 - dm->cached_dc_state = dc_copy_state(dm->dc->current_state); 2665 + dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); 2667 2666 2668 2667 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); 2669 2668 ··· 2910 2909 2911 2910 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); 2912 2911 2913 - dc_release_state(dm->cached_dc_state); 2912 + dc_state_release(dm->cached_dc_state); 2914 2913 dm->cached_dc_state = NULL; 2915 2914 2916 2915 amdgpu_dm_irq_resume_late(adev); ··· 2920 2919 return 0; 2921 2920 } 2922 2921 /* Recreate dc_state - DC invalidates it when setting power 
state to S3. */ 2923 - dc_release_state(dm_state->context); 2924 - dm_state->context = dc_create_state(dm->dc); 2922 + dc_state_release(dm_state->context); 2923 + dm_state->context = dc_state_create(dm->dc); 2925 2924 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 2926 2925 dc_resource_state_construct(dm->dc, dm_state->context); 2927 2926 ··· 3999 3998 old_state = to_dm_atomic_state(obj->state); 4000 3999 4001 4000 if (old_state && old_state->context) 4002 - new_state->context = dc_copy_state(old_state->context); 4001 + new_state->context = dc_state_create_copy(old_state->context); 4003 4002 4004 4003 if (!new_state->context) { 4005 4004 kfree(new_state); ··· 4015 4014 struct dm_atomic_state *dm_state = to_dm_atomic_state(state); 4016 4015 4017 4016 if (dm_state && dm_state->context) 4018 - dc_release_state(dm_state->context); 4017 + dc_state_release(dm_state->context); 4019 4018 4020 4019 kfree(dm_state); 4021 4020 } ··· 4051 4050 if (!state) 4052 4051 return -ENOMEM; 4053 4052 4054 - state->context = dc_create_state(adev->dm.dc); 4053 + state->context = dc_state_create(adev->dm.dc); 4055 4054 if (!state->context) { 4056 4055 kfree(state); 4057 4056 return -ENOMEM; ··· 4066 4065 4067 4066 r = amdgpu_display_modeset_create_props(adev); 4068 4067 if (r) { 4069 - dc_release_state(state->context); 4068 + dc_state_release(state->context); 4070 4069 kfree(state); 4071 4070 return r; 4072 4071 } ··· 4078 4077 4079 4078 r = amdgpu_dm_audio_init(adev); 4080 4079 if (r) { 4081 - dc_release_state(state->context); 4080 + dc_state_release(state->context); 4082 4081 kfree(state); 4083 4082 return r; 4084 4083 } ··· 6659 6658 if (!dc_plane_state) 6660 6659 goto cleanup; 6661 6660 6662 - dc_state = dc_create_state(dc); 6661 + dc_state = dc_state_create(dc); 6663 6662 if (!dc_state) 6664 6663 goto cleanup; 6665 6664 ··· 6686 6685 dc_result = dc_validate_plane(dc, dc_plane_state); 6687 6686 6688 6687 if (dc_result == DC_OK) 6689 - dc_result = dc_add_stream_to_ctx(dc, 
dc_state, stream); 6688 + dc_result = dc_state_add_stream(dc, dc_state, stream); 6690 6689 6691 - if (dc_result == DC_OK && !dc_add_plane_to_context( 6690 + if (dc_result == DC_OK && !dc_state_add_plane( 6692 6691 dc, 6693 6692 stream, 6694 6693 dc_plane_state, ··· 6700 6699 6701 6700 cleanup: 6702 6701 if (dc_state) 6703 - dc_release_state(dc_state); 6702 + dc_state_release(dc_state); 6704 6703 6705 6704 if (dc_plane_state) 6706 6705 dc_plane_state_release(dc_plane_state); ··· 8859 8858 dc_stream_get_status(dm_new_crtc_state->stream); 8860 8859 8861 8860 if (!status) 8862 - status = dc_stream_get_status_from_state(dc_state, 8861 + status = dc_state_get_stream_status(dc_state, 8863 8862 dm_new_crtc_state->stream); 8864 8863 if (!status) 8865 8864 drm_err(dev, ··· 9784 9783 crtc->base.id); 9785 9784 9786 9785 /* i.e. reset mode */ 9787 - if (dc_remove_stream_from_ctx( 9786 + if (dc_state_remove_stream( 9788 9787 dm->dc, 9789 9788 dm_state->context, 9790 9789 dm_old_crtc_state->stream) != DC_OK) { ··· 9827 9826 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n", 9828 9827 crtc->base.id); 9829 9828 9830 - if (dc_add_stream_to_ctx( 9829 + if (dc_state_add_stream( 9831 9830 dm->dc, 9832 9831 dm_state->context, 9833 9832 dm_new_crtc_state->stream) != DC_OK) { ··· 10149 10148 if (ret) 10150 10149 return ret; 10151 10150 10152 - if (!dc_remove_plane_from_context( 10151 + if (!dc_state_remove_plane( 10153 10152 dc, 10154 10153 dm_old_crtc_state->stream, 10155 10154 dm_old_plane_state->dc_state, ··· 10227 10226 * state. It'll be released when the atomic state is 10228 10227 * cleaned. 10229 10228 */ 10230 - if (!dc_add_plane_to_context( 10229 + if (!dc_state_add_plane( 10231 10230 dc, 10232 10231 dm_new_crtc_state->stream, 10233 10232 dc_new_plane_state,
+1 -1
drivers/gpu/drm/amd/display/dc/Makefile
··· 62 62 include $(AMD_DC) 63 63 64 64 DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ 65 - dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o 65 + dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o 66 66 67 67 DISPLAY_CORE += dc_vm_helper.o 68 68
+2 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
··· 29 29 #include "dc_types.h" 30 30 #include "dccg.h" 31 31 #include "clk_mgr_internal.h" 32 + #include "dc_state_priv.h" 32 33 #include "link.h" 33 34 34 35 #include "dce100/dce_clk_mgr.h" ··· 64 63 /* Don't count SubVP phantom pipes as part of active 65 64 * display count 66 65 */ 67 - if (stream->mall_stream_config.type == SUBVP_PHANTOM) 66 + if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) 68 67 continue; 69 68 70 69 /*
+39 -132
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 34 34 #include "dce/dce_hwseq.h" 35 35 36 36 #include "resource.h" 37 + #include "dc_state.h" 38 + #include "dc_state_priv.h" 37 39 38 40 #include "gpio_service_interface.h" 39 41 #include "clk_mgr.h" ··· 813 811 link_enc_cfg_init(dc, dc->current_state); 814 812 815 813 if (dc->current_state) { 816 - dc_release_state(dc->current_state); 814 + dc_state_release(dc->current_state); 817 815 dc->current_state = NULL; 818 816 } 819 817 ··· 1030 1028 * on creation it copies the contents of dc->dml 1031 1029 */ 1032 1030 1033 - dc->current_state = dc_create_state(dc); 1031 + dc->current_state = dc_state_create(dc); 1034 1032 1035 1033 if (!dc->current_state) { 1036 1034 dm_error("%s: failed to create validate ctx\n", __func__); ··· 1120 1118 static void disable_dangling_plane(struct dc *dc, struct dc_state *context) 1121 1119 { 1122 1120 int i, j; 1123 - struct dc_state *dangling_context = dc_create_state(dc); 1121 + struct dc_state *dangling_context = dc_state_create(dc); 1124 1122 struct dc_state *current_ctx; 1125 1123 struct pipe_ctx *pipe; 1126 1124 struct timing_generator *tg; ··· 1166 1164 } 1167 1165 1168 1166 if (should_disable && old_stream) { 1167 + bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM; 1169 1168 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1170 1169 tg = pipe->stream_res.tg; 1171 1170 /* When disabling plane for a phantom pipe, we must turn on the ··· 1175 1172 * state that can result in underflow or hang when enabling it 1176 1173 * again for different use. 
1177 1174 */ 1178 - if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { 1175 + if (is_phantom) { 1179 1176 if (tg->funcs->enable_crtc) { 1180 1177 int main_pipe_width, main_pipe_height; 1178 + struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream); 1181 1179 1182 - main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width; 1183 - main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height; 1180 + main_pipe_width = old_paired_stream->dst.width; 1181 + main_pipe_height = old_paired_stream->dst.height; 1184 1182 if (dc->hwss.blank_phantom) 1185 1183 dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); 1186 1184 tg->funcs->enable_crtc(tg); 1187 1185 } 1188 1186 } 1189 - dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); 1187 + dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context); 1190 1188 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); 1191 1189 1192 1190 if (pipe->stream && pipe->plane_state) ··· 1210 1206 * The OTG is set to disable on falling edge of VUPDATE so the plane disable 1211 1207 * will still get it's double buffer update. 
1212 1208 */ 1213 - if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) { 1209 + if (is_phantom) { 1214 1210 if (tg->funcs->disable_phantom_crtc) 1215 1211 tg->funcs->disable_phantom_crtc(tg); 1216 1212 } ··· 1219 1215 1220 1216 current_ctx = dc->current_state; 1221 1217 dc->current_state = dangling_context; 1222 - dc_release_state(current_ctx); 1218 + dc_state_release(current_ctx); 1223 1219 } 1224 1220 1225 1221 static void disable_vbios_mode_if_required( ··· 1291 1287 int count = 0; 1292 1288 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1293 1289 1294 - if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) 1290 + if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) 1295 1291 continue; 1296 1292 1297 1293 /* Timeout 100 ms */ ··· 1517 1513 } 1518 1514 1519 1515 for (k = 0; k < group_size; k++) { 1520 - struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); 1516 + struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream); 1521 1517 1522 1518 status->timing_sync_info.group_id = num_group; 1523 1519 status->timing_sync_info.group_size = group_size; ··· 1844 1840 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1845 1841 1846 1842 /* Check old context for SubVP */ 1847 - subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 1843 + subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); 1848 1844 if (subvp_prev_use) 1849 1845 break; 1850 1846 } ··· 2002 1998 old_state = dc->current_state; 2003 1999 dc->current_state = context; 2004 2000 2005 - dc_release_state(old_state); 2001 + dc_state_release(old_state); 2006 2002 2007 - dc_retain_state(dc->current_state); 2003 + dc_state_retain(dc->current_state); 2008 2004 2009 2005 return result; 2010 2006 } ··· 2075 2071 if (handle_exit_odm2to1) 2076 2072 res = 
commit_minimal_transition_state(dc, dc->current_state); 2077 2073 2078 - context = dc_create_state(dc); 2074 + context = dc_state_create(dc); 2079 2075 if (!context) 2080 2076 goto context_alloc_fail; 2081 2077 ··· 2095 2091 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; 2096 2092 2097 2093 if (dc_is_embedded_signal(streams[i]->signal)) { 2098 - struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]); 2094 + struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]); 2099 2095 2100 2096 if (dc->hwss.is_abm_supported) 2101 2097 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); ··· 2106 2102 } 2107 2103 2108 2104 fail: 2109 - dc_release_state(context); 2105 + dc_state_release(context); 2110 2106 2111 2107 context_alloc_fail: 2112 2108 ··· 2160 2156 pipe = &context->res_ctx.pipe_ctx[i]; 2161 2157 2162 2158 // Don't check flip pending on phantom pipes 2163 - if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)) 2159 + if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)) 2164 2160 continue; 2165 2161 2166 2162 /* Must set to false to start with, due to OR in update function */ ··· 2230 2226 } 2231 2227 2232 2228 dc->optimized_required = false; 2233 - } 2234 - 2235 - static void init_state(struct dc *dc, struct dc_state *context) 2236 - { 2237 - /* Each context must have their own instance of VBA and in order to 2238 - * initialize and obtain IP and SOC the base DML instance from DC is 2239 - * initially copied into every context 2240 - */ 2241 - memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); 2242 - } 2243 - 2244 - struct dc_state *dc_create_state(struct dc *dc) 2245 - { 2246 - struct dc_state *context = kvzalloc(sizeof(struct dc_state), 2247 - GFP_KERNEL); 2248 - 2249 - if (!context) 2250 - return NULL; 2251 - 2252 - init_state(dc, context); 2253 - 2254 - 
#ifdef CONFIG_DRM_AMD_DC_FP 2255 - if (dc->debug.using_dml2) { 2256 - dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2); 2257 - } 2258 - #endif 2259 - kref_init(&context->refcount); 2260 - 2261 - return context; 2262 - } 2263 - 2264 - struct dc_state *dc_copy_state(struct dc_state *src_ctx) 2265 - { 2266 - int i, j; 2267 - struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); 2268 - 2269 - if (!new_ctx) 2270 - return NULL; 2271 - memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); 2272 - 2273 - #ifdef CONFIG_DRM_AMD_DC_FP 2274 - if (new_ctx->bw_ctx.dml2 && !dml2_create_copy(&new_ctx->bw_ctx.dml2, src_ctx->bw_ctx.dml2)) { 2275 - dc_release_state(new_ctx); 2276 - return NULL; 2277 - } 2278 - #endif 2279 - 2280 - for (i = 0; i < MAX_PIPES; i++) { 2281 - struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; 2282 - 2283 - if (cur_pipe->top_pipe) 2284 - cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; 2285 - 2286 - if (cur_pipe->bottom_pipe) 2287 - cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; 2288 - 2289 - if (cur_pipe->prev_odm_pipe) 2290 - cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; 2291 - 2292 - if (cur_pipe->next_odm_pipe) 2293 - cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; 2294 - 2295 - } 2296 - 2297 - for (i = 0; i < new_ctx->stream_count; i++) { 2298 - dc_stream_retain(new_ctx->streams[i]); 2299 - for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) 2300 - dc_plane_state_retain( 2301 - new_ctx->stream_status[i].plane_states[j]); 2302 - } 2303 - 2304 - kref_init(&new_ctx->refcount); 2305 - 2306 - return new_ctx; 2307 - } 2308 - 2309 - void dc_retain_state(struct dc_state *context) 2310 - { 2311 - kref_get(&context->refcount); 2312 - } 2313 - 2314 - static void dc_state_free(struct kref *kref) 2315 - { 2316 - struct dc_state *context = container_of(kref, struct dc_state, 
refcount); 2317 - dc_resource_state_destruct(context); 2318 - 2319 - #ifdef CONFIG_DRM_AMD_DC_FP 2320 - dml2_destroy(context->bw_ctx.dml2); 2321 - context->bw_ctx.dml2 = 0; 2322 - #endif 2323 - 2324 - kvfree(context); 2325 - } 2326 - 2327 - void dc_release_state(struct dc_state *context) 2328 - { 2329 - kref_put(&context->refcount, dc_state_free); 2330 2229 } 2331 2230 2332 2231 bool dc_set_generic_gpio_for_stereo(bool enable, ··· 2899 2992 update->dsc_config->num_slices_v != 0); 2900 2993 2901 2994 /* Use temporarry context for validating new DSC config */ 2902 - struct dc_state *dsc_validate_context = dc_create_state(dc); 2995 + struct dc_state *dsc_validate_context = dc_state_create(dc); 2903 2996 2904 2997 if (dsc_validate_context) { 2905 2998 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); ··· 2912 3005 update->dsc_config = NULL; 2913 3006 } 2914 3007 2915 - dc_release_state(dsc_validate_context); 3008 + dc_state_release(dsc_validate_context); 2916 3009 } else { 2917 3010 DC_ERROR("Failed to allocate new validate context for DSC change\n"); 2918 3011 update->dsc_config = NULL; ··· 3011 3104 new_planes[i] = srf_updates[i].surface; 3012 3105 3013 3106 /* initialize scratch memory for building context */ 3014 - context = dc_create_state(dc); 3107 + context = dc_state_create(dc); 3015 3108 if (context == NULL) { 3016 3109 DC_ERROR("Failed to allocate new validate context!\n"); 3017 3110 return false; ··· 3027 3120 dc->res_pool->funcs->remove_phantom_pipes(dc, context, false); 3028 3121 3029 3122 /*remove old surfaces from context */ 3030 - if (!dc_rem_all_planes_for_stream(dc, stream, context)) { 3123 + if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) { 3031 3124 3032 3125 BREAK_TO_DEBUGGER(); 3033 3126 goto fail; 3034 3127 } 3035 3128 3036 3129 /* add surface to context */ 3037 - if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { 3130 + if (!dc_state_add_all_planes_for_stream(dc, stream, 
new_planes, surface_count, context)) { 3038 3131 3039 3132 BREAK_TO_DEBUGGER(); 3040 3133 goto fail; ··· 3092 3185 return true; 3093 3186 3094 3187 fail: 3095 - dc_release_state(context); 3188 + dc_state_release(context); 3096 3189 3097 3190 return false; 3098 3191 ··· 3533 3626 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3534 3627 3535 3628 // Check old context for SubVP 3536 - subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM); 3629 + subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM); 3537 3630 if (subvp_prev_use) 3538 3631 break; 3539 3632 } ··· 3541 3634 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3542 3635 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 3543 3636 3544 - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 3637 + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 3545 3638 subvp_curr_use = true; 3546 3639 break; 3547 3640 } ··· 3926 4019 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3927 4020 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3928 4021 3929 - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { 4022 + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) { 3930 4023 subvp_active = true; 3931 4024 break; 3932 4025 } ··· 3963 4056 static void release_minimal_transition_state(struct dc *dc, 3964 4057 struct dc_state *context, struct pipe_split_policy_backup *policy) 3965 4058 { 3966 - dc_release_state(context); 4059 + dc_state_release(context); 3967 4060 /* restore previous pipe split and odm policy */ 3968 4061 if (!dc->config.is_vmin_only_asic) 3969 4062 dc->debug.pipe_split_policy = policy->mpc_policy; ··· 3974 4067 static struct dc_state *create_minimal_transition_state(struct dc *dc, 3975 4068 struct dc_state *base_context, struct pipe_split_policy_backup *policy) 3976 4069 { 3977 - struct dc_state 
*minimal_transition_context = dc_create_state(dc); 4070 + struct dc_state *minimal_transition_context = dc_state_create(dc); 3978 4071 unsigned int i, j; 3979 4072 3980 4073 if (!dc->config.is_vmin_only_asic) { ··· 4118 4211 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4119 4212 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4120 4213 4121 - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 4214 + if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) { 4122 4215 subvp_in_use = true; 4123 4216 break; 4124 4217 } ··· 4430 4523 if (dc->res_pool->funcs->save_mall_state) 4431 4524 dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config); 4432 4525 if (!commit_minimal_transition_state(dc, context)) { 4433 - dc_release_state(context); 4526 + dc_state_release(context); 4434 4527 return false; 4435 4528 } 4436 4529 if (dc->res_pool->funcs->restore_mall_state) ··· 4500 4593 struct dc_state *old = dc->current_state; 4501 4594 4502 4595 dc->current_state = context; 4503 - dc_release_state(old); 4596 + dc_state_release(old); 4504 4597 4505 4598 // clear any forced full updates 4506 4599 for (i = 0; i < dc->res_pool->pipe_count; i++) { ··· 4559 4652 if (update_type >= UPDATE_TYPE_FULL) { 4560 4653 4561 4654 /* initialize scratch memory for building context */ 4562 - context = dc_create_state(dc); 4655 + context = dc_state_create(dc); 4563 4656 if (context == NULL) { 4564 4657 DC_ERROR("Failed to allocate new validate context!\n"); 4565 4658 return; ··· 4605 4698 if (update_type >= UPDATE_TYPE_FULL) { 4606 4699 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 4607 4700 DC_ERROR("Mode validation failed for stream update!\n"); 4608 - dc_release_state(context); 4701 + dc_state_release(context); 4609 4702 return; 4610 4703 } 4611 4704 } ··· 4638 4731 struct dc_state *old = dc->current_state; 4639 4732 4640 4733 dc->current_state = context; 4641 - 
dc_release_state(old); 4734 + dc_state_release(old); 4642 4735 4643 4736 for (i = 0; i < dc->res_pool->pipe_count; i++) { 4644 4737 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+4 -4
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
··· 31 31 #include "basics/dc_common.h" 32 32 #include "resource.h" 33 33 #include "dc_dmub_srv.h" 34 + #include "dc_state_priv.h" 34 35 35 36 #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) 36 37 ··· 441 440 for (i = 0; i < dc->res_pool->pipe_count; i++) { 442 441 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 443 442 444 - if (pipe->stream && pipe->stream->mall_stream_config.paired_stream && 445 - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { 443 + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { 446 444 /* SubVP enable - red */ 447 445 color->color_g_y = 0; 448 446 color->color_b_cb = 0; ··· 454 454 } 455 455 } 456 456 457 - if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) { 457 + if (enable_subvp && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_NONE) { 458 458 color->color_r_cr = 0; 459 459 if (pipe_ctx->stream->allow_freesync == 1) { 460 460 /* SubVP enable and DRR on - green */ ··· 529 529 } 530 530 if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) { 531 531 if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) && 532 - current_mpc_pipe->stream->mall_stream_config.type == SUBVP_MAIN) { 532 + dc_state_get_pipe_subvp_type(NULL, pipe_ctx) == SUBVP_MAIN) { 533 533 block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv; 534 534 block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address; 535 535 block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
+29 -265
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 2993 2993 return result; 2994 2994 } 2995 2995 2996 - bool dc_add_plane_to_context( 2997 - const struct dc *dc, 2998 - struct dc_stream_state *stream, 2999 - struct dc_plane_state *plane_state, 3000 - struct dc_state *context) 3001 - { 3002 - struct resource_pool *pool = dc->res_pool; 3003 - struct pipe_ctx *otg_master_pipe; 3004 - struct dc_stream_status *stream_status = NULL; 3005 - bool added = false; 3006 - 3007 - stream_status = dc_stream_get_status_from_state(context, stream); 3008 - if (stream_status == NULL) { 3009 - dm_error("Existing stream not found; failed to attach surface!\n"); 3010 - goto out; 3011 - } else if (stream_status->plane_count == MAX_SURFACE_NUM) { 3012 - dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n", 3013 - plane_state, MAX_SURFACE_NUM); 3014 - goto out; 3015 - } 3016 - 3017 - otg_master_pipe = resource_get_otg_master_for_stream( 3018 - &context->res_ctx, stream); 3019 - added = resource_append_dpp_pipes_for_plane_composition(context, 3020 - dc->current_state, pool, otg_master_pipe, plane_state); 3021 - 3022 - if (added) { 3023 - stream_status->plane_states[stream_status->plane_count] = 3024 - plane_state; 3025 - stream_status->plane_count++; 3026 - dc_plane_state_retain(plane_state); 3027 - } 3028 - 3029 - out: 3030 - return added; 3031 - } 3032 - 3033 - bool dc_remove_plane_from_context( 3034 - const struct dc *dc, 3035 - struct dc_stream_state *stream, 3036 - struct dc_plane_state *plane_state, 3037 - struct dc_state *context) 3038 - { 3039 - int i; 3040 - struct dc_stream_status *stream_status = NULL; 3041 - struct resource_pool *pool = dc->res_pool; 3042 - 3043 - if (!plane_state) 3044 - return true; 3045 - 3046 - for (i = 0; i < context->stream_count; i++) 3047 - if (context->streams[i] == stream) { 3048 - stream_status = &context->stream_status[i]; 3049 - break; 3050 - } 3051 - 3052 - if (stream_status == NULL) { 3053 - dm_error("Existing stream not found; failed to remove plane.\n"); 3054 - return false; 
3055 - } 3056 - 3057 - resource_remove_dpp_pipes_for_plane_composition( 3058 - context, pool, plane_state); 3059 - 3060 - for (i = 0; i < stream_status->plane_count; i++) { 3061 - if (stream_status->plane_states[i] == plane_state) { 3062 - dc_plane_state_release(stream_status->plane_states[i]); 3063 - break; 3064 - } 3065 - } 3066 - 3067 - if (i == stream_status->plane_count) { 3068 - dm_error("Existing plane_state not found; failed to detach it!\n"); 3069 - return false; 3070 - } 3071 - 3072 - stream_status->plane_count--; 3073 - 3074 - /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */ 3075 - for (; i < stream_status->plane_count; i++) 3076 - stream_status->plane_states[i] = stream_status->plane_states[i + 1]; 3077 - 3078 - stream_status->plane_states[stream_status->plane_count] = NULL; 3079 - 3080 - if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) 3081 - /* ODM combine could prevent us from supporting more planes 3082 - * we will reset ODM slice count back to 1 when all planes have 3083 - * been removed to maximize the amount of planes supported when 3084 - * new planes are added. 3085 - */ 3086 - resource_update_pipes_for_stream_with_slice_count( 3087 - context, dc->current_state, dc->res_pool, stream, 1); 3088 - 3089 - return true; 3090 - } 3091 - 3092 - /** 3093 - * dc_rem_all_planes_for_stream - Remove planes attached to the target stream. 3094 - * 3095 - * @dc: Current dc state. 3096 - * @stream: Target stream, which we want to remove the attached plans. 3097 - * @context: New context. 3098 - * 3099 - * Return: 3100 - * Return true if DC was able to remove all planes from the target 3101 - * stream, otherwise, return false. 
3102 - */ 3103 - bool dc_rem_all_planes_for_stream( 3104 - const struct dc *dc, 3105 - struct dc_stream_state *stream, 3106 - struct dc_state *context) 3107 - { 3108 - int i, old_plane_count; 3109 - struct dc_stream_status *stream_status = NULL; 3110 - struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; 3111 - 3112 - for (i = 0; i < context->stream_count; i++) 3113 - if (context->streams[i] == stream) { 3114 - stream_status = &context->stream_status[i]; 3115 - break; 3116 - } 3117 - 3118 - if (stream_status == NULL) { 3119 - dm_error("Existing stream %p not found!\n", stream); 3120 - return false; 3121 - } 3122 - 3123 - old_plane_count = stream_status->plane_count; 3124 - 3125 - for (i = 0; i < old_plane_count; i++) 3126 - del_planes[i] = stream_status->plane_states[i]; 3127 - 3128 - for (i = 0; i < old_plane_count; i++) 3129 - if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context)) 3130 - return false; 3131 - 3132 - return true; 3133 - } 3134 - 3135 - static bool add_all_planes_for_stream( 3136 - const struct dc *dc, 3137 - struct dc_stream_state *stream, 3138 - const struct dc_validation_set set[], 3139 - int set_count, 3140 - struct dc_state *context) 3141 - { 3142 - int i, j; 3143 - 3144 - for (i = 0; i < set_count; i++) 3145 - if (set[i].stream == stream) 3146 - break; 3147 - 3148 - if (i == set_count) { 3149 - dm_error("Stream %p not found in set!\n", stream); 3150 - return false; 3151 - } 3152 - 3153 - for (j = 0; j < set[i].plane_count; j++) 3154 - if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context)) 3155 - return false; 3156 - 3157 - return true; 3158 - } 3159 - 3160 - bool dc_add_all_planes_for_stream( 3161 - const struct dc *dc, 3162 - struct dc_stream_state *stream, 3163 - struct dc_plane_state * const *plane_states, 3164 - int plane_count, 3165 - struct dc_state *context) 3166 - { 3167 - struct dc_validation_set set; 3168 - int i; 3169 - 3170 - set.stream = stream; 3171 - set.plane_count = plane_count; 3172 
- 3173 - for (i = 0; i < plane_count; i++) 3174 - set.plane_states[i] = plane_states[i]; 3175 - 3176 - return add_all_planes_for_stream(dc, stream, &set, 1, context); 3177 - } 3178 - 3179 2996 bool dc_is_timing_changed(struct dc_stream_state *cur_stream, 3180 2997 struct dc_stream_state *new_stream) 3181 2998 { ··· 3142 3325 } 3143 3326 } 3144 3327 return NULL; 3145 - } 3146 - 3147 - /* 3148 - * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state. 3149 - */ 3150 - enum dc_status dc_add_stream_to_ctx( 3151 - struct dc *dc, 3152 - struct dc_state *new_ctx, 3153 - struct dc_stream_state *stream) 3154 - { 3155 - enum dc_status res; 3156 - DC_LOGGER_INIT(dc->ctx->logger); 3157 - 3158 - if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) { 3159 - DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream); 3160 - return DC_ERROR_UNEXPECTED; 3161 - } 3162 - 3163 - new_ctx->streams[new_ctx->stream_count] = stream; 3164 - dc_stream_retain(stream); 3165 - new_ctx->stream_count++; 3166 - 3167 - res = resource_add_otg_master_for_stream_output( 3168 - new_ctx, dc->res_pool, stream); 3169 - if (res != DC_OK) 3170 - DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res); 3171 - 3172 - return res; 3173 - } 3174 - 3175 - /* 3176 - * dc_remove_stream_from_ctx() - Remove a stream from a dc_state. 
3177 - */ 3178 - enum dc_status dc_remove_stream_from_ctx( 3179 - struct dc *dc, 3180 - struct dc_state *new_ctx, 3181 - struct dc_stream_state *stream) 3182 - { 3183 - int i; 3184 - struct dc_context *dc_ctx = dc->ctx; 3185 - struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream( 3186 - &new_ctx->res_ctx, stream); 3187 - 3188 - if (!del_pipe) { 3189 - DC_ERROR("Pipe not found for stream %p !\n", stream); 3190 - return DC_ERROR_UNEXPECTED; 3191 - } 3192 - 3193 - resource_update_pipes_for_stream_with_slice_count(new_ctx, 3194 - dc->current_state, dc->res_pool, stream, 1); 3195 - resource_remove_otg_master_for_stream_output( 3196 - new_ctx, dc->res_pool, stream); 3197 - 3198 - for (i = 0; i < new_ctx->stream_count; i++) 3199 - if (new_ctx->streams[i] == stream) 3200 - break; 3201 - 3202 - if (new_ctx->streams[i] != stream) { 3203 - DC_ERROR("Context doesn't have stream %p !\n", stream); 3204 - return DC_ERROR_UNEXPECTED; 3205 - } 3206 - 3207 - dc_stream_release(new_ctx->streams[i]); 3208 - new_ctx->stream_count--; 3209 - 3210 - /* Trim back arrays */ 3211 - for (; i < new_ctx->stream_count; i++) { 3212 - new_ctx->streams[i] = new_ctx->streams[i + 1]; 3213 - new_ctx->stream_status[i] = new_ctx->stream_status[i + 1]; 3214 - } 3215 - 3216 - new_ctx->streams[new_ctx->stream_count] = NULL; 3217 - memset( 3218 - &new_ctx->stream_status[new_ctx->stream_count], 3219 - 0, 3220 - sizeof(new_ctx->stream_status[0])); 3221 - 3222 - return DC_OK; 3223 3328 } 3224 3329 3225 3330 static struct dc_stream_state *find_pll_sharable_stream( ··· 3594 3855 return false; 3595 3856 } 3596 3857 3858 + static bool add_all_planes_for_stream( 3859 + const struct dc *dc, 3860 + struct dc_stream_state *stream, 3861 + const struct dc_validation_set set[], 3862 + int set_count, 3863 + struct dc_state *state) 3864 + { 3865 + int i, j; 3866 + 3867 + for (i = 0; i < set_count; i++) 3868 + if (set[i].stream == stream) 3869 + break; 3870 + 3871 + if (i == set_count) { 3872 + dm_error("Stream %p 
not found in set!\n", stream); 3873 + return false; 3874 + } 3875 + 3876 + for (j = 0; j < set[i].plane_count; j++) 3877 + if (!dc_state_add_plane(dc, stream, set[i].plane_states[j], state)) 3878 + return false; 3879 + 3880 + return true; 3881 + } 3882 + 3597 3883 /** 3598 3884 * dc_validate_with_context - Validate and update the potential new stream in the context object 3599 3885 * ··· 3724 3960 unchanged_streams[i], 3725 3961 set, 3726 3962 set_count)) { 3727 - if (!dc_rem_all_planes_for_stream(dc, 3963 + if (!dc_state_rem_all_planes_for_stream(dc, 3728 3964 unchanged_streams[i], 3729 3965 context)) { 3730 3966 res = DC_FAIL_DETACH_SURFACES; ··· 3746 3982 } 3747 3983 } 3748 3984 3749 - if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { 3985 + if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) { 3750 3986 res = DC_FAIL_DETACH_SURFACES; 3751 3987 goto fail; 3752 3988 } 3753 3989 3754 - res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); 3990 + res = dc_state_remove_stream(dc, context, del_streams[i]); 3755 3991 if (res != DC_OK) 3756 3992 goto fail; 3757 3993 } ··· 3774 4010 /* Add new streams and then add all planes for the new stream */ 3775 4011 for (i = 0; i < add_streams_count; i++) { 3776 4012 calculate_phy_pix_clks(add_streams[i]); 3777 - res = dc_add_stream_to_ctx(dc, context, add_streams[i]); 4013 + res = dc_state_add_stream(dc, context, add_streams[i]); 3778 4014 if (res != DC_OK) 3779 4015 goto fail; 3780 4016
+527
drivers/gpu/drm/amd/display/dc/core/dc_state.c
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + * Authors: AMD 23 + * 24 + */ 25 + #include "core_types.h" 26 + #include "core_status.h" 27 + #include "dc_state.h" 28 + #include "dc_state_priv.h" 29 + #include "dc_stream_priv.h" 30 + #include "dc_plane_priv.h" 31 + 32 + #include "dm_services.h" 33 + #include "resource.h" 34 + 35 + #include "dml2/dml2_wrapper.h" 36 + #include "dml2/dml2_internal_types.h" 37 + 38 + #define DC_LOGGER \ 39 + dc->ctx->logger 40 + #define DC_LOGGER_INIT(logger) 41 + 42 + /* Public dc_state functions */ 43 + static void init_state(struct dc *dc, struct dc_state *state) 44 + { 45 + /* Each context must have their own instance of VBA and in order to 46 + * initialize and obtain IP and SOC the base DML instance from DC is 47 + * initially copied into every context 48 + */ 49 + memcpy(&state->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); 50 + } 51 + 52 + struct dc_state *dc_state_create(struct dc *dc) 53 + { 54 + struct dc_state *state = kvzalloc(sizeof(struct dc_state), 55 + GFP_KERNEL); 56 + 57 + if (!state) 58 + return NULL; 59 + 60 + init_state(dc, state); 61 + 62 + #ifdef CONFIG_DRM_AMD_DC_FP 63 + if (dc->debug.using_dml2) 64 + dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2); 65 + #endif 66 + 67 + kref_init(&state->refcount); 68 + 69 + return state; 70 + } 71 + 72 + struct dc_state *dc_state_create_copy(struct dc_state *src_state) 73 + { 74 + int i, j; 75 + struct dc_state *new_state = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); 76 + 77 + if (!new_state) 78 + return NULL; 79 + 80 + memcpy(new_state, src_state, sizeof(struct dc_state)); 81 + 82 + #ifdef CONFIG_DRM_AMD_DC_FP 83 + if (new_state->bw_ctx.dml2 && !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) { 84 + dc_state_release(new_state); 85 + return NULL; 86 + } 87 + #endif 88 + 89 + for (i = 0; i < MAX_PIPES; i++) { 90 + struct pipe_ctx *cur_pipe = &new_state->res_ctx.pipe_ctx[i]; 91 + 92 + if (cur_pipe->top_pipe) 93 + cur_pipe->top_pipe = 
&new_state->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; 94 + 95 + if (cur_pipe->bottom_pipe) 96 + cur_pipe->bottom_pipe = &new_state->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; 97 + 98 + if (cur_pipe->prev_odm_pipe) 99 + cur_pipe->prev_odm_pipe = &new_state->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; 100 + 101 + if (cur_pipe->next_odm_pipe) 102 + cur_pipe->next_odm_pipe = &new_state->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; 103 + } 104 + 105 + for (i = 0; i < new_state->stream_count; i++) { 106 + dc_stream_retain(new_state->streams[i]); 107 + for (j = 0; j < new_state->stream_status[i].plane_count; j++) 108 + dc_plane_state_retain( 109 + new_state->stream_status[i].plane_states[j]); 110 + } 111 + 112 + kref_init(&new_state->refcount); 113 + 114 + return new_state; 115 + } 116 + 117 + void dc_state_retain(struct dc_state *context) 118 + { 119 + kref_get(&context->refcount); 120 + } 121 + 122 + static void dc_state_free(struct kref *kref) 123 + { 124 + struct dc_state *state = container_of(kref, struct dc_state, refcount); 125 + 126 + dc_resource_state_destruct(state); 127 + 128 + #ifdef CONFIG_DRM_AMD_DC_FP 129 + dml2_destroy(state->bw_ctx.dml2); 130 + state->bw_ctx.dml2 = 0; 131 + #endif 132 + 133 + kvfree(state); 134 + } 135 + 136 + void dc_state_release(struct dc_state *state) 137 + { 138 + kref_put(&state->refcount, dc_state_free); 139 + } 140 + /* 141 + * dc_state_add_stream() - Add a new dc_stream_state to a dc_state. 
142 + */ 143 + enum dc_status dc_state_add_stream( 144 + struct dc *dc, 145 + struct dc_state *state, 146 + struct dc_stream_state *stream) 147 + { 148 + enum dc_status res; 149 + 150 + DC_LOGGER_INIT(dc->ctx->logger); 151 + 152 + if (state->stream_count >= dc->res_pool->timing_generator_count) { 153 + DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream); 154 + return DC_ERROR_UNEXPECTED; 155 + } 156 + 157 + state->streams[state->stream_count] = stream; 158 + dc_stream_retain(stream); 159 + state->stream_count++; 160 + 161 + res = resource_add_otg_master_for_stream_output( 162 + state, dc->res_pool, stream); 163 + if (res != DC_OK) 164 + DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res); 165 + 166 + return res; 167 + } 168 + 169 + /* 170 + * dc_state_remove_stream() - Remove a stream from a dc_state. 171 + */ 172 + enum dc_status dc_state_remove_stream( 173 + struct dc *dc, 174 + struct dc_state *state, 175 + struct dc_stream_state *stream) 176 + { 177 + int i; 178 + struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream( 179 + &state->res_ctx, stream); 180 + 181 + if (!del_pipe) { 182 + dm_error("Pipe not found for stream %p !\n", stream); 183 + return DC_ERROR_UNEXPECTED; 184 + } 185 + 186 + resource_update_pipes_for_stream_with_slice_count(state, 187 + dc->current_state, dc->res_pool, stream, 1); 188 + resource_remove_otg_master_for_stream_output( 189 + state, dc->res_pool, stream); 190 + 191 + for (i = 0; i < state->stream_count; i++) 192 + if (state->streams[i] == stream) 193 + break; 194 + 195 + if (state->streams[i] != stream) { 196 + dm_error("Context doesn't have stream %p !\n", stream); 197 + return DC_ERROR_UNEXPECTED; 198 + } 199 + 200 + dc_stream_release(state->streams[i]); 201 + state->stream_count--; 202 + 203 + /* Trim back arrays */ 204 + for (; i < state->stream_count; i++) { 205 + state->streams[i] = state->streams[i + 1]; 206 + state->stream_status[i] = state->stream_status[i + 1]; 207 + } 
208 + 209 + state->streams[state->stream_count] = NULL; 210 + memset( 211 + &state->stream_status[state->stream_count], 212 + 0, 213 + sizeof(state->stream_status[0])); 214 + 215 + return DC_OK; 216 + } 217 + 218 + bool dc_state_add_plane( 219 + const struct dc *dc, 220 + struct dc_stream_state *stream, 221 + struct dc_plane_state *plane_state, 222 + struct dc_state *state) 223 + { 224 + struct resource_pool *pool = dc->res_pool; 225 + struct pipe_ctx *otg_master_pipe; 226 + struct dc_stream_status *stream_status = NULL; 227 + bool added = false; 228 + 229 + stream_status = dc_state_get_stream_status(state, stream); 230 + if (stream_status == NULL) { 231 + dm_error("Existing stream not found; failed to attach surface!\n"); 232 + goto out; 233 + } else if (stream_status->plane_count == MAX_SURFACE_NUM) { 234 + dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n", 235 + plane_state, MAX_SURFACE_NUM); 236 + goto out; 237 + } 238 + 239 + otg_master_pipe = resource_get_otg_master_for_stream( 240 + &state->res_ctx, stream); 241 + added = resource_append_dpp_pipes_for_plane_composition(state, 242 + dc->current_state, pool, otg_master_pipe, plane_state); 243 + 244 + if (added) { 245 + stream_status->plane_states[stream_status->plane_count] = 246 + plane_state; 247 + stream_status->plane_count++; 248 + dc_plane_state_retain(plane_state); 249 + } 250 + 251 + out: 252 + return added; 253 + } 254 + 255 + bool dc_state_remove_plane( 256 + const struct dc *dc, 257 + struct dc_stream_state *stream, 258 + struct dc_plane_state *plane_state, 259 + struct dc_state *state) 260 + { 261 + int i; 262 + struct dc_stream_status *stream_status = NULL; 263 + struct resource_pool *pool = dc->res_pool; 264 + 265 + if (!plane_state) 266 + return true; 267 + 268 + for (i = 0; i < state->stream_count; i++) 269 + if (state->streams[i] == stream) { 270 + stream_status = &state->stream_status[i]; 271 + break; 272 + } 273 + 274 + if (stream_status == NULL) { 275 + dm_error("Existing 
stream not found; failed to remove plane.\n"); 276 + return false; 277 + } 278 + 279 + resource_remove_dpp_pipes_for_plane_composition( 280 + state, pool, plane_state); 281 + 282 + for (i = 0; i < stream_status->plane_count; i++) { 283 + if (stream_status->plane_states[i] == plane_state) { 284 + dc_plane_state_release(stream_status->plane_states[i]); 285 + break; 286 + } 287 + } 288 + 289 + if (i == stream_status->plane_count) { 290 + dm_error("Existing plane_state not found; failed to detach it!\n"); 291 + return false; 292 + } 293 + 294 + stream_status->plane_count--; 295 + 296 + /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */ 297 + for (; i < stream_status->plane_count; i++) 298 + stream_status->plane_states[i] = stream_status->plane_states[i + 1]; 299 + 300 + stream_status->plane_states[stream_status->plane_count] = NULL; 301 + 302 + if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) 303 + /* ODM combine could prevent us from supporting more planes 304 + * we will reset ODM slice count back to 1 when all planes have 305 + * been removed to maximize the amount of planes supported when 306 + * new planes are added. 307 + */ 308 + resource_update_pipes_for_stream_with_slice_count( 309 + state, dc->current_state, dc->res_pool, stream, 1); 310 + 311 + return true; 312 + } 313 + 314 + /** 315 + * dc_state_rem_all_planes_for_stream - Remove planes attached to the target stream. 316 + * 317 + * @dc: Current dc state. 318 + * @stream: Target stream, which we want to remove the attached plans. 319 + * @context: New context. 320 + * 321 + * Return: 322 + * Return true if DC was able to remove all planes from the target 323 + * stream, otherwise, return false. 
324 + */ 325 + bool dc_state_rem_all_planes_for_stream( 326 + const struct dc *dc, 327 + struct dc_stream_state *stream, 328 + struct dc_state *state) 329 + { 330 + int i, old_plane_count; 331 + struct dc_stream_status *stream_status = NULL; 332 + struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 }; 333 + 334 + for (i = 0; i < state->stream_count; i++) 335 + if (state->streams[i] == stream) { 336 + stream_status = &state->stream_status[i]; 337 + break; 338 + } 339 + 340 + if (stream_status == NULL) { 341 + dm_error("Existing stream %p not found!\n", stream); 342 + return false; 343 + } 344 + 345 + old_plane_count = stream_status->plane_count; 346 + 347 + for (i = 0; i < old_plane_count; i++) 348 + del_planes[i] = stream_status->plane_states[i]; 349 + 350 + for (i = 0; i < old_plane_count; i++) 351 + if (!dc_state_remove_plane(dc, stream, del_planes[i], state)) 352 + return false; 353 + 354 + return true; 355 + } 356 + 357 + bool dc_state_add_all_planes_for_stream( 358 + const struct dc *dc, 359 + struct dc_stream_state *stream, 360 + struct dc_plane_state * const *plane_states, 361 + int plane_count, 362 + struct dc_state *state) 363 + { 364 + int i; 365 + bool result = true; 366 + 367 + for (i = 0; i < plane_count; i++) 368 + if (!dc_state_add_plane(dc, stream, plane_states[i], state)) { 369 + result = false; 370 + break; 371 + } 372 + 373 + return result; 374 + } 375 + 376 + /* Private dc_state functions */ 377 + 378 + /** 379 + * dc_state_get_stream_status - Get stream status from given dc state 380 + * @state: DC state to find the stream status in 381 + * @stream: The stream to get the stream status for 382 + * 383 + * The given stream is expected to exist in the given dc state. Otherwise, NULL 384 + * will be returned. 
385 + */ 386 + struct dc_stream_status *dc_state_get_stream_status( 387 + struct dc_state *state, 388 + struct dc_stream_state *stream) 389 + { 390 + uint8_t i; 391 + 392 + if (state == NULL) 393 + return NULL; 394 + 395 + for (i = 0; i < state->stream_count; i++) { 396 + if (stream == state->streams[i]) 397 + return &state->stream_status[i]; 398 + } 399 + 400 + return NULL; 401 + } 402 + 403 + enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state, 404 + const struct pipe_ctx *pipe_ctx) 405 + { 406 + if (pipe_ctx->stream == NULL) 407 + return SUBVP_NONE; 408 + 409 + return pipe_ctx->stream->mall_stream_config.type; 410 + } 411 + 412 + enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state, 413 + const struct dc_stream_state *stream) 414 + { 415 + return stream->mall_stream_config.type; 416 + } 417 + 418 + struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state, 419 + const struct dc_stream_state *stream) 420 + { 421 + return stream->mall_stream_config.paired_stream; 422 + } 423 + 424 + struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc, 425 + struct dc_state *state, 426 + struct dc_stream_state *main_stream) 427 + { 428 + struct dc_stream_state *phantom_stream = dc_create_stream_for_sink(main_stream->sink); 429 + 430 + if (phantom_stream != NULL) { 431 + phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; 432 + phantom_stream->dpms_off = true; 433 + } 434 + 435 + return phantom_stream; 436 + } 437 + 438 + void dc_state_release_phantom_stream(const struct dc *dc, 439 + struct dc_state *state, 440 + struct dc_stream_state *phantom_stream) 441 + { 442 + dc_stream_release(phantom_stream); 443 + } 444 + 445 + struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc, 446 + struct dc_state *state, 447 + struct dc_plane_state *main_plane) 448 + { 449 + struct dc_plane_state *phantom_plane = dc_create_plane_state(dc); 450 + 451 + if (phantom_plane != NULL) 452 + 
phantom_plane->is_phantom = true; 453 + 454 + return phantom_plane; 455 + } 456 + 457 + void dc_state_release_phantom_plane(const struct dc *dc, 458 + struct dc_state *state, 459 + struct dc_plane_state *phantom_plane) 460 + { 461 + dc_plane_state_release(phantom_plane); 462 + } 463 + 464 + /* add phantom streams to context and generate correct meta inside dc_state */ 465 + enum dc_status dc_state_add_phantom_stream(struct dc *dc, 466 + struct dc_state *state, 467 + struct dc_stream_state *phantom_stream, 468 + struct dc_stream_state *main_stream) 469 + { 470 + enum dc_status res = dc_state_add_stream(dc, state, phantom_stream); 471 + 472 + /* setup subvp meta */ 473 + phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; 474 + phantom_stream->mall_stream_config.paired_stream = main_stream; 475 + main_stream->mall_stream_config.type = SUBVP_MAIN; 476 + main_stream->mall_stream_config.paired_stream = phantom_stream; 477 + 478 + return res; 479 + } 480 + 481 + enum dc_status dc_state_remove_phantom_stream(struct dc *dc, 482 + struct dc_state *state, 483 + struct dc_stream_state *phantom_stream) 484 + { 485 + /* reset subvp meta */ 486 + phantom_stream->mall_stream_config.paired_stream->mall_stream_config.type = SUBVP_NONE; 487 + phantom_stream->mall_stream_config.paired_stream->mall_stream_config.paired_stream = NULL; 488 + 489 + /* remove stream from state */ 490 + return dc_state_remove_stream(dc, state, phantom_stream); 491 + } 492 + 493 + bool dc_state_add_phantom_plane( 494 + const struct dc *dc, 495 + struct dc_stream_state *phantom_stream, 496 + struct dc_plane_state *phantom_plane, 497 + struct dc_state *state) 498 + { 499 + return dc_state_add_plane(dc, phantom_stream, phantom_plane, state); 500 + } 501 + 502 + bool dc_state_remove_phantom_plane( 503 + const struct dc *dc, 504 + struct dc_stream_state *phantom_stream, 505 + struct dc_plane_state *phantom_plane, 506 + struct dc_state *state) 507 + { 508 + return dc_state_remove_plane(dc, phantom_stream, 
phantom_plane, state); 509 + } 510 + 511 + bool dc_state_rem_all_phantom_planes_for_stream( 512 + const struct dc *dc, 513 + struct dc_stream_state *phantom_stream, 514 + struct dc_state *state) 515 + { 516 + return dc_state_rem_all_planes_for_stream(dc, phantom_stream, state); 517 + } 518 + 519 + bool dc_state_add_all_phantom_planes_for_stream( 520 + const struct dc *dc, 521 + struct dc_stream_state *phantom_stream, 522 + struct dc_plane_state * const *phantom_planes, 523 + int plane_count, 524 + struct dc_state *state) 525 + { 526 + return dc_state_add_all_planes_for_stream(dc, phantom_stream, phantom_planes, plane_count, state); 527 + }
+5 -28
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 31 31 #include "ipp.h" 32 32 #include "timing_generator.h" 33 33 #include "dc_dmub_srv.h" 34 + #include "dc_state_priv.h" 35 + #include "dc_stream_priv.h" 34 36 35 37 #define DC_LOGGER dc->ctx->logger 36 38 ··· 56 54 } 57 55 } 58 56 59 - static bool dc_stream_construct(struct dc_stream_state *stream, 57 + bool dc_stream_construct(struct dc_stream_state *stream, 60 58 struct dc_sink *dc_sink_data) 61 59 { 62 60 uint32_t i = 0; ··· 129 127 return true; 130 128 } 131 129 132 - static void dc_stream_destruct(struct dc_stream_state *stream) 130 + void dc_stream_destruct(struct dc_stream_state *stream) 133 131 { 134 132 dc_sink_release(stream->sink); 135 133 if (stream->out_transfer_func != NULL) { ··· 211 209 } 212 210 213 211 /** 214 - * dc_stream_get_status_from_state - Get stream status from given dc state 215 - * @state: DC state to find the stream status in 216 - * @stream: The stream to get the stream status for 217 - * 218 - * The given stream is expected to exist in the given dc state. Otherwise, NULL 219 - * will be returned. 220 - */ 221 - struct dc_stream_status *dc_stream_get_status_from_state( 222 - struct dc_state *state, 223 - struct dc_stream_state *stream) 224 - { 225 - uint8_t i; 226 - 227 - if (state == NULL) 228 - return NULL; 229 - 230 - for (i = 0; i < state->stream_count; i++) { 231 - if (stream == state->streams[i]) 232 - return &state->stream_status[i]; 233 - } 234 - 235 - return NULL; 236 - } 237 - 238 - /** 239 212 * dc_stream_get_status() - Get current stream status of the given stream state 240 213 * @stream: The stream to get the stream status for. 241 214 * ··· 221 244 struct dc_stream_state *stream) 222 245 { 223 246 struct dc *dc = stream->ctx->dc; 224 - return dc_stream_get_status_from_state(dc->current_state, stream); 247 + return dc_state_get_stream_status(dc->current_state, stream); 225 248 } 226 249 227 250 static void program_cursor_attributes(
+4 -2
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
··· 32 32 #include "transform.h" 33 33 #include "dpp.h" 34 34 35 + #include "dc_plane_priv.h" 36 + 35 37 /******************************************************************************* 36 38 * Private functions 37 39 ******************************************************************************/ 38 - static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) 40 + void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state) 39 41 { 40 42 plane_state->ctx = ctx; 41 43 ··· 65 63 66 64 } 67 65 68 - static void dc_plane_destruct(struct dc_plane_state *plane_state) 66 + void dc_plane_destruct(struct dc_plane_state *plane_state) 69 67 { 70 68 if (plane_state->gamma_correction != NULL) { 71 69 dc_gamma_release(&plane_state->gamma_correction);
+2 -14
drivers/gpu/drm/amd/display/dc/dc.h
··· 27 27 #define DC_INTERFACE_H_ 28 28 29 29 #include "dc_types.h" 30 + #include "dc_state.h" 31 + #include "dc_plane.h" 30 32 #include "grph_object_defs.h" 31 33 #include "logger_types.h" 32 34 #include "hdcp_msg_types.h" ··· 1388 1386 const struct colorspace_transform *gamut_remap_matrix; 1389 1387 }; 1390 1388 1391 - /* 1392 - * Create a new surface with default parameters; 1393 - */ 1394 - struct dc_plane_state *dc_create_plane_state(struct dc *dc); 1395 - const struct dc_plane_status *dc_plane_get_status( 1396 - const struct dc_plane_state *plane_state); 1397 - 1398 - void dc_plane_state_retain(struct dc_plane_state *plane_state); 1399 - void dc_plane_state_release(struct dc_plane_state *plane_state); 1400 - 1401 1389 void dc_gamma_retain(struct dc_gamma *dc_gamma); 1402 1390 void dc_gamma_release(struct dc_gamma **dc_gamma); 1403 1391 struct dc_gamma *dc_create_gamma(void); ··· 1478 1486 struct dc_stream_state *streams[], 1479 1487 uint8_t stream_count); 1480 1488 1481 - struct dc_state *dc_create_state(struct dc *dc); 1482 - struct dc_state *dc_copy_state(struct dc_state *src_ctx); 1483 - void dc_retain_state(struct dc_state *context); 1484 - void dc_release_state(struct dc_state *context); 1485 1489 1486 1490 struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc, 1487 1491 struct dc_stream_state *stream,
+21 -10
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
··· 33 33 #include "cursor_reg_cache.h" 34 34 #include "resource.h" 35 35 #include "clk_mgr.h" 36 + #include "dc_state_priv.h" 36 37 37 38 #define CTX dc_dmub_srv->ctx 38 39 #define DC_LOGGER CTX->logger ··· 533 532 * 3. Populate the drr_info with the min and max supported vtotal values 534 533 */ 535 534 static void populate_subvp_cmd_drr_info(struct dc *dc, 535 + struct dc_state *context, 536 536 struct pipe_ctx *subvp_pipe, 537 537 struct pipe_ctx *vblank_pipe, 538 538 struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data) 539 539 { 540 + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); 540 541 struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; 541 - struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; 542 + struct dc_crtc_timing *phantom_timing = &phantom_stream->timing; 542 543 struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing; 543 544 uint16_t drr_frame_us = 0; 544 545 uint16_t min_drr_supported_us = 0; ··· 628 625 continue; 629 626 630 627 // Find the SubVP pipe 631 - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) 628 + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) 632 629 break; 633 630 } 634 631 ··· 645 642 646 643 if (vblank_pipe->stream->ignore_msa_timing_param && 647 644 (vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed)) 648 - populate_subvp_cmd_drr_info(dc, pipe, vblank_pipe, pipe_data); 645 + populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data); 649 646 } 650 647 651 648 /** ··· 670 667 uint32_t subvp0_prefetch_us = 0; 671 668 uint32_t subvp1_prefetch_us = 0; 672 669 uint32_t prefetch_delta_us = 0; 673 - struct dc_crtc_timing *phantom_timing0 = &subvp_pipes[0]->stream->mall_stream_config.paired_stream->timing; 674 - struct dc_crtc_timing *phantom_timing1 = 
&subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing; 670 + struct dc_stream_state *phantom_stream0 = NULL; 671 + struct dc_stream_state *phantom_stream1 = NULL; 672 + struct dc_crtc_timing *phantom_timing0 = &phantom_stream0->timing; 673 + struct dc_crtc_timing *phantom_timing1 = &phantom_stream1->timing; 675 674 struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL; 675 + 676 + phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream); 677 + phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream); 676 678 677 679 subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) * 678 680 (uint64_t)phantom_timing0->h_total * 1000000), ··· 728 720 uint32_t j; 729 721 struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = 730 722 &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index]; 723 + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); 731 724 struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing; 732 - struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; 725 + struct dc_crtc_timing *phantom_timing = &phantom_stream->timing; 733 726 uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den; 734 727 735 728 pipe_data->mode = SUBVP; ··· 784 775 for (j = 0; j < dc->res_pool->pipe_count; j++) { 785 776 struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j]; 786 777 787 - if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) { 778 + if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) { 788 779 pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst; 789 780 if (phantom_pipe->bottom_pipe) { 790 781 pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 
phantom_pipe->bottom_pipe->plane_res.hubp->inst; ··· 818 809 union dmub_rb_cmd cmd; 819 810 struct pipe_ctx *subvp_pipes[2]; 820 811 uint32_t wm_val_refclk = 0; 812 + enum mall_stream_type pipe_mall_type; 821 813 822 814 memset(&cmd, 0, sizeof(cmd)); 823 815 // FW command for SUBVP ··· 834 824 */ 835 825 if (resource_is_pipe_type(pipe, OTG_MASTER) && 836 826 resource_is_pipe_type(pipe, DPP_PIPE) && 837 - pipe->stream->mall_stream_config.type == SUBVP_MAIN) 827 + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) 838 828 subvp_pipes[subvp_count++] = pipe; 839 829 } 840 830 ··· 842 832 // For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd 843 833 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { 844 834 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 835 + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); 845 836 846 837 if (!pipe->stream) 847 838 continue; ··· 854 843 if (resource_is_pipe_type(pipe, OTG_MASTER) && 855 844 resource_is_pipe_type(pipe, DPP_PIPE) && 856 845 pipe->stream->mall_stream_config.paired_stream && 857 - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { 846 + pipe_mall_type == SUBVP_MAIN) { 858 847 populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++); 859 848 } else if (resource_is_pipe_type(pipe, OTG_MASTER) && 860 849 resource_is_pipe_type(pipe, DPP_PIPE) && 861 - pipe->stream->mall_stream_config.type == SUBVP_NONE) { 850 + pipe_mall_type == SUBVP_NONE) { 862 851 // Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where 863 852 // we run through DML without calculating "natural" P-state support 864 853 populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
+38
drivers/gpu/drm/amd/display/dc/dc_plane.h
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: AMD 23 + * 24 + */ 25 + 26 + #ifndef _DC_PLANE_H_ 27 + #define _DC_PLANE_H_ 28 + 29 + #include "dc.h" 30 + #include "dc_hw_types.h" 31 + 32 + struct dc_plane_state *dc_create_plane_state(struct dc *dc); 33 + const struct dc_plane_status *dc_plane_get_status( 34 + const struct dc_plane_state *plane_state); 35 + void dc_plane_state_retain(struct dc_plane_state *plane_state); 36 + void dc_plane_state_release(struct dc_plane_state *plane_state); 37 + 38 + #endif /* _DC_PLANE_H_ */
+34
drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: AMD 23 + * 24 + */ 25 + 26 + #ifndef _DC_PLANE_PRIV_H_ 27 + #define _DC_PLANE_PRIV_H_ 28 + 29 + #include "dc_plane.h" 30 + 31 + void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state); 32 + void dc_plane_destruct(struct dc_plane_state *plane_state); 33 + 34 + #endif /* _DC_PLANE_PRIV_H_ */
+74
drivers/gpu/drm/amd/display/dc/dc_state.h
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + * Authors: AMD 23 + * 24 + */ 25 + 26 + #ifndef _DC_STATE_H_ 27 + #define _DC_STATE_H_ 28 + 29 + #include "dc.h" 30 + #include "inc/core_status.h" 31 + 32 + 33 + struct dc_state *dc_state_create(struct dc *dc); 34 + struct dc_state *dc_state_create_copy(struct dc_state *src_state); 35 + void dc_state_retain(struct dc_state *state); 36 + void dc_state_release(struct dc_state *state); 37 + 38 + enum dc_status dc_state_add_stream(struct dc *dc, 39 + struct dc_state *state, 40 + struct dc_stream_state *stream); 41 + 42 + enum dc_status dc_state_remove_stream( 43 + struct dc *dc, 44 + struct dc_state *state, 45 + struct dc_stream_state *stream); 46 + 47 + bool dc_state_add_plane( 48 + const struct dc *dc, 49 + struct dc_stream_state *stream, 50 + struct dc_plane_state *plane_state, 51 + struct dc_state *state); 52 + 53 + bool dc_state_remove_plane( 54 + const struct dc *dc, 55 + struct dc_stream_state *stream, 56 + struct dc_plane_state *plane_state, 57 + struct dc_state *state); 58 + 59 + bool dc_state_rem_all_planes_for_stream( 60 + const struct dc *dc, 61 + struct dc_stream_state *stream, 62 + struct dc_state *state); 63 + 64 + bool dc_state_add_all_planes_for_stream( 65 + const struct dc *dc, 66 + struct dc_stream_state *stream, 67 + struct dc_plane_state * const *plane_states, 68 + int plane_count, 69 + struct dc_state *state); 70 + 71 + struct dc_stream_status *dc_state_get_stream_status( 72 + struct dc_state *state, 73 + struct dc_stream_state *stream); 74 + #endif /* _DC_STATE_H_ */
+97
drivers/gpu/drm/amd/display/dc/dc_state_priv.h
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: AMD 23 + * 24 + */ 25 + 26 + #ifndef _DC_STATE_PRIV_H_ 27 + #define _DC_STATE_PRIV_H_ 28 + 29 + #include "dc_state.h" 30 + #include "dc_stream.h" 31 + 32 + struct dc_stream_status *dc_state_get_stream_status( 33 + struct dc_state *state, 34 + struct dc_stream_state *stream); 35 + 36 + /* Get the type of the provided resource (none, phantom, main) based on the provided 37 + * context. If the context is unavailable, determine only if phantom or not. 
38 + */ 39 + enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state, 40 + const struct pipe_ctx *pipe_ctx); 41 + enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state, 42 + const struct dc_stream_state *stream); 43 + 44 + /* Gets the phantom stream if main is provided, gets the main if phantom is provided.*/ 45 + struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state, 46 + const struct dc_stream_state *stream); 47 + 48 + /* allocate's phantom stream or plane and returns pointer to the object */ 49 + struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc, 50 + struct dc_state *state, 51 + struct dc_stream_state *main_stream); 52 + struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc, 53 + struct dc_state *state, 54 + struct dc_plane_state *main_plane); 55 + 56 + /* deallocate's phantom stream or plane */ 57 + void dc_state_release_phantom_stream(const struct dc *dc, 58 + struct dc_state *state, 59 + struct dc_stream_state *phantom_stream); 60 + void dc_state_release_phantom_plane(const struct dc *dc, 61 + struct dc_state *state, 62 + struct dc_plane_state *phantom_plane); 63 + 64 + /* add/remove phantom stream to context and generate subvp meta data */ 65 + enum dc_status dc_state_add_phantom_stream(struct dc *dc, 66 + struct dc_state *state, 67 + struct dc_stream_state *phantom_stream, 68 + struct dc_stream_state *main_stream); 69 + enum dc_status dc_state_remove_phantom_stream(struct dc *dc, 70 + struct dc_state *state, 71 + struct dc_stream_state *phantom_stream); 72 + 73 + bool dc_state_add_phantom_plane( 74 + const struct dc *dc, 75 + struct dc_stream_state *phantom_stream, 76 + struct dc_plane_state *phantom_plane, 77 + struct dc_state *state); 78 + 79 + bool dc_state_remove_phantom_plane( 80 + const struct dc *dc, 81 + struct dc_stream_state *phantom_stream, 82 + struct dc_plane_state *phantom_plane, 83 + struct dc_state *state); 84 + 85 + 
bool dc_state_rem_all_phantom_planes_for_stream( 86 + const struct dc *dc, 87 + struct dc_stream_state *phantom_stream, 88 + struct dc_state *state); 89 + 90 + bool dc_state_add_all_phantom_planes_for_stream( 91 + const struct dc *dc, 92 + struct dc_stream_state *phantom_stream, 93 + struct dc_plane_state * const *phantom_planes, 94 + int plane_count, 95 + struct dc_state *state); 96 + 97 + #endif /* _DC_STATE_PRIV_H_ */
-44
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 146 146 147 147 #define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR) 148 148 149 - enum mall_stream_type { 150 - SUBVP_NONE, // subvp not in use 151 - SUBVP_MAIN, // subvp in use, this stream is main stream 152 - SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream 153 - }; 154 - 155 149 struct mall_stream_config { 156 150 /* MALL stream config to indicate if the stream is phantom or not. 157 151 * We will use a phantom stream to indicate that the pipe is phantom. ··· 407 413 uint32_t *h_position, 408 414 uint32_t *v_position); 409 415 410 - enum dc_status dc_add_stream_to_ctx( 411 - struct dc *dc, 412 - struct dc_state *new_ctx, 413 - struct dc_stream_state *stream); 414 - 415 - enum dc_status dc_remove_stream_from_ctx( 416 - struct dc *dc, 417 - struct dc_state *new_ctx, 418 - struct dc_stream_state *stream); 419 - 420 - 421 - bool dc_add_plane_to_context( 422 - const struct dc *dc, 423 - struct dc_stream_state *stream, 424 - struct dc_plane_state *plane_state, 425 - struct dc_state *context); 426 - 427 - bool dc_remove_plane_from_context( 428 - const struct dc *dc, 429 - struct dc_stream_state *stream, 430 - struct dc_plane_state *plane_state, 431 - struct dc_state *context); 432 - 433 - bool dc_rem_all_planes_for_stream( 434 - const struct dc *dc, 435 - struct dc_stream_state *stream, 436 - struct dc_state *context); 437 - 438 - bool dc_add_all_planes_for_stream( 439 - const struct dc *dc, 440 - struct dc_stream_state *stream, 441 - struct dc_plane_state * const *plane_states, 442 - int plane_count, 443 - struct dc_state *context); 444 - 445 416 bool dc_stream_add_writeback(struct dc *dc, 446 417 struct dc_stream_state *stream, 447 418 struct dc_writeback_info *wb_info); ··· 475 516 void dc_stream_retain(struct dc_stream_state *dc_stream); 476 517 void dc_stream_release(struct dc_stream_state *dc_stream); 477 518 478 - struct dc_stream_status *dc_stream_get_status_from_state( 479 - struct dc_state *state, 480 - struct 
dc_stream_state *stream); 481 519 struct dc_stream_status *dc_stream_get_status( 482 520 struct dc_stream_state *dc_stream); 483 521
+35
drivers/gpu/drm/amd/display/dc/dc_stream_priv.h
··· 1 + /* 2 + * Copyright 2023 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + * Authors: AMD 23 + * 24 + */ 25 + 26 + #ifndef _DC_STREAM_PRIV_H_ 27 + #define _DC_STREAM_PRIV_H_ 28 + 29 + #include "dc_stream.h" 30 + 31 + bool dc_stream_construct(struct dc_stream_state *stream, 32 + struct dc_sink *dc_sink_data); 33 + void dc_stream_destruct(struct dc_stream_state *stream); 34 + 35 + #endif // _DC_STREAM_PRIV_H_
+5
drivers/gpu/drm/amd/display/dc/dc_types.h
··· 1161 1161 HPD_EN_FOR_SECONDARY_EDP_ONLY, 1162 1162 }; 1163 1163 1164 + enum mall_stream_type { 1165 + SUBVP_NONE, // subvp not in use 1166 + SUBVP_MAIN, // subvp in use, this stream is main stream 1167 + SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream 1168 + }; 1164 1169 #endif /* DC_TYPES_H_ */
+15 -13
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
··· 28 28 #include "dcn20/dcn20_resource.h" 29 29 #include "dml/dcn32/display_mode_vba_util_32.h" 30 30 #include "dml/dcn32/dcn32_fpu.h" 31 + #include "dc_state_priv.h" 31 32 32 33 static bool is_dual_plane(enum surface_pixel_format format) 33 34 { ··· 191 190 for (i = 0; i < dc->res_pool->pipe_count; i++) { 192 191 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 193 192 194 - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) 193 + if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) 195 194 return true; 196 195 } 197 196 return false; ··· 265 264 266 265 // Do not override if a stream has multiple planes 267 266 for (i = 0; i < context->stream_count; i++) { 268 - if (context->stream_status[i].plane_count > 1) { 267 + if (context->stream_status[i].plane_count > 1) 269 268 return; 270 - } 271 - if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) { 269 + 270 + if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM) 272 271 stream_count++; 273 - } 274 272 } 275 273 276 274 for (i = 0; i < dc->res_pool->pipe_count; i++) { 277 275 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 278 276 279 - if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { 277 + if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { 280 278 if (dcn32_allow_subvp_high_refresh_rate(dc, context, pipe_ctx)) { 281 279 282 280 if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) { ··· 290 290 for (i = 0; i < dc->res_pool->pipe_count; i++) { 291 291 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 292 292 293 - if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { 293 + if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx)) { 294 294 if 
(pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) { 295 295 if (pipe_segments[i] > 4) 296 296 pipe_segments[i] = 4; ··· 337 337 338 338 for (i = 0; i < context->stream_count; i++) { 339 339 /* Don't count SubVP streams for DET allocation */ 340 - if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) 340 + if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM) 341 341 stream_count++; 342 342 } 343 343 344 344 if (stream_count > 0) { 345 345 stream_segments = 18 / stream_count; 346 346 for (i = 0; i < context->stream_count; i++) { 347 - if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) 347 + if (dc_state_get_stream_subvp_type(context, context->streams[i]) == SUBVP_PHANTOM) 348 348 continue; 349 349 350 350 if (context->stream_status[i].plane_count > 0) ··· 716 716 717 717 for (i = 0; i < dc->res_pool->pipe_count; i++) { 718 718 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 719 + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); 719 720 720 721 if (resource_is_pipe_type(pipe, OPP_HEAD) && 721 722 resource_is_pipe_type(pipe, DPP_PIPE)) { 722 - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { 723 + if (pipe_mall_type == SUBVP_MAIN) { 723 724 subvp_count++; 724 725 725 726 subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe); ··· 729 728 refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); 730 729 refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); 731 730 } 732 - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { 731 + if (pipe_mall_type == SUBVP_NONE) { 733 732 non_subvp_pipes++; 734 733 drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe)); 735 734 if (pipe->stream->ignore_msa_timing_param && ··· 777 776 778 777 for (i = 0; i < dc->res_pool->pipe_count; i++) { 779 778 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 779 + enum mall_stream_type 
pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); 780 780 781 781 if (resource_is_pipe_type(pipe, OPP_HEAD) && 782 782 resource_is_pipe_type(pipe, DPP_PIPE)) { 783 - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { 783 + if (pipe_mall_type == SUBVP_MAIN) { 784 784 subvp_count++; 785 785 786 786 subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe); ··· 790 788 refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); 791 789 refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total); 792 790 } 793 - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { 791 + if (pipe_mall_type == SUBVP_NONE) { 794 792 non_subvp_pipes++; 795 793 vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe)); 796 794 if (pipe->stream->ignore_msa_timing_param &&
+3 -2
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
··· 33 33 34 34 #include "link.h" 35 35 #include "dcn20_fpu.h" 36 + #include "dc_state_priv.h" 36 37 37 38 #define DC_LOGGER \ 38 39 dc->ctx->logger ··· 1075 1074 pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); 1076 1075 pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); 1077 1076 1078 - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { 1077 + if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { 1079 1078 // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests 1080 1079 context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; 1081 1080 context->res_ctx.pipe_ctx[i].unbounded_req = false; ··· 1425 1424 */ 1426 1425 if (res_ctx->pipe_ctx[i].plane_state && 1427 1426 (res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || 1428 - res_ctx->pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM)) 1427 + dc_state_get_pipe_subvp_type(context, &res_ctx->pipe_ctx[i]) == SUBVP_PHANTOM)) 1429 1428 pipes[pipe_cnt].pipe.src.num_cursors = 0; 1430 1429 else 1431 1430 pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
+26 -19
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
··· 32 32 #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h" 33 33 #include "dcn30/dcn30_resource.h" 34 34 #include "link.h" 35 + #include "dc_state_priv.h" 35 36 36 37 #define DC_LOGGER_INIT(logger) 37 38 ··· 342 341 if (!pipe->stream) 343 342 continue; 344 343 345 - if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 344 + if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 346 345 pipes[pipe_idx].pipe.dest.vstartup_start = 347 346 get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); 348 347 pipes[pipe_idx].pipe.dest.vupdate_offset = ··· 625 624 if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && 626 625 !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && 627 626 (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && 628 - pipe->stream->mall_stream_config.type == SUBVP_NONE && 627 + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && 629 628 (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) && 630 629 !pipe->plane_state->address.tmz_surface && 631 630 (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || ··· 683 682 684 683 // Find the minimum pipe split count for non SubVP pipes 685 684 if (resource_is_pipe_type(pipe, OPP_HEAD) && 686 - pipe->stream->mall_stream_config.type == SUBVP_NONE) { 685 + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) { 687 686 split_cnt = 0; 688 687 while (pipe) { 689 688 split_cnt++; ··· 736 735 * and also to store the two main SubVP pipe pointers in subvp_pipes[2]. 
737 736 */ 738 737 if (pipe->stream && pipe->plane_state && !pipe->top_pipe && 739 - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { 740 - phantom = pipe->stream->mall_stream_config.paired_stream; 738 + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { 739 + phantom = dc_state_get_paired_subvp_stream(context, pipe->stream); 741 740 microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) + 742 741 phantom->timing.v_addressable; 743 742 ··· 805 804 int16_t stretched_drr_us = 0; 806 805 int16_t drr_stretched_vblank_us = 0; 807 806 int16_t max_vblank_mallregion = 0; 807 + struct dc_stream_state *phantom_stream; 808 808 809 809 // Find SubVP pipe 810 810 for (i = 0; i < dc->res_pool->pipe_count; i++) { ··· 818 816 continue; 819 817 820 818 // Find the SubVP pipe 821 - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) 819 + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) 822 820 break; 823 821 } 824 822 ··· 831 829 !resource_is_pipe_type(pipe, DPP_PIPE)) 832 830 continue; 833 831 834 - if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param && 832 + if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param && 835 833 (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) 836 834 break; 837 835 } 838 836 837 + phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream); 839 838 main_timing = &pipe->stream->timing; 840 - phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing; 839 + phantom_timing = &phantom_stream->timing; 841 840 drr_timing = &drr_pipe->stream->timing; 842 841 prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / 843 842 (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + ··· 898 895 struct dc_crtc_timing *main_timing = NULL; 899 896 struct 
dc_crtc_timing *phantom_timing = NULL; 900 897 struct dc_crtc_timing *vblank_timing = NULL; 898 + struct dc_stream_state *phantom_stream; 899 + enum mall_stream_type pipe_mall_type; 901 900 902 901 /* For SubVP + VBLANK/DRR cases, we assume there can only be 903 902 * a single VBLANK/DRR display. If DML outputs SubVP + VBLANK ··· 909 904 */ 910 905 for (i = 0; i < dc->res_pool->pipe_count; i++) { 911 906 pipe = &context->res_ctx.pipe_ctx[i]; 907 + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); 912 908 913 909 // We check for master pipe, but it shouldn't matter since we only need 914 910 // the pipe for timing info (stream should be same for any pipe splits) ··· 917 911 !resource_is_pipe_type(pipe, DPP_PIPE)) 918 912 continue; 919 913 920 - if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) { 914 + if (!found && pipe_mall_type == SUBVP_NONE) { 921 915 // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe). 922 916 vblank_index = i; 923 917 found = true; 924 918 } 925 919 926 - if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) 920 + if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN) 927 921 subvp_pipe = pipe; 928 922 } 929 923 if (found) { 924 + phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream); 930 925 main_timing = &subvp_pipe->stream->timing; 931 - phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; 926 + phantom_timing = &phantom_stream->timing; 932 927 vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing; 933 928 // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe 934 929 // Also include the prefetch end to mallstart delay time ··· 984 977 continue; 985 978 986 979 if (pipe->plane_state && !pipe->top_pipe && 987 - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { 980 + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { 988 981 refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 
+ 989 982 pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1); 990 983 refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total); ··· 1033 1026 1034 1027 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { 1035 1028 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1029 + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); 1036 1030 1037 1031 if (!pipe->stream) 1038 1032 continue; 1039 1033 1040 1034 if (pipe->plane_state && !pipe->top_pipe) { 1041 - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) 1035 + if (pipe_mall_type == SUBVP_MAIN) 1042 1036 subvp_count++; 1043 - if (pipe->stream->mall_stream_config.type == SUBVP_NONE) { 1037 + if (pipe_mall_type == SUBVP_NONE) 1044 1038 non_subvp_pipes++; 1045 - } 1046 1039 } 1047 1040 1048 1041 // Count how many planes that aren't SubVP/phantom are capable of VACTIVE 1049 1042 // switching (SubVP + VACTIVE unsupported). In situations where we force 1050 1043 // SubVP for a VACTIVE plane, we don't want to increment the vactive_count. 
1051 1044 if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && 1052 - pipe->stream->mall_stream_config.type == SUBVP_NONE) { 1045 + pipe_mall_type == SUBVP_NONE) { 1053 1046 vactive_count++; 1054 1047 } 1055 1048 pipe_idx++; ··· 1085 1078 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1086 1079 1087 1080 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && 1088 - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { 1081 + dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) { 1089 1082 pipe_ctx->subvp_index = index++; 1090 1083 } else { 1091 1084 pipe_ctx->subvp_index = 0; ··· 1691 1684 pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, 1692 1685 pipe_idx); 1693 1686 1694 - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { 1687 + if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { 1695 1688 // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests 1696 1689 context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; 1697 1690 context->res_ctx.pipe_ctx[i].unbounded_req = false; ··· 1723 1716 context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) && 1724 1717 context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { 1725 1718 /* SS: all active surfaces stored in MALL */ 1726 - if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) { 1719 + if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) { 1727 1720 context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes; 1728 1721 1729 1722 if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
+1
drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
··· 38 38 #include "core_types.h" 39 39 #include "dsc.h" 40 40 #include "clk_mgr.h" 41 + #include "dc_state_priv.h" 41 42 42 43 #endif //__DML2_DC_TYPES_H__
+42 -31
drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
··· 51 51 52 52 // Find the phantom pipes 53 53 if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && 54 - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 54 + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 55 55 bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4; 56 56 mblk_width = ctx->config.mall_cfg.mblk_width_pixels; 57 57 mblk_height = bytes_per_pixel == 4 ? mblk_width = ctx->config.mall_cfg.mblk_height_4bpe_pixels : ctx->config.mall_cfg.mblk_height_8bpe_pixels; ··· 253 253 * to combine this with SubVP can cause issues with the scheduling). 254 254 */ 255 255 if (pipe->plane_state && !pipe->top_pipe && 256 - pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && 256 + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_NONE && refresh_rate < 120 && 257 257 vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { 258 258 while (pipe) { 259 259 num_pipes++; ··· 317 317 318 318 // Find the minimum pipe split count for non SubVP pipes 319 319 if (pipe->stream && !pipe->top_pipe && 320 - pipe->stream->mall_stream_config.type == SUBVP_NONE) { 320 + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_NONE) { 321 321 split_cnt = 0; 322 322 while (pipe) { 323 323 split_cnt++; ··· 372 372 * and also to store the two main SubVP pipe pointers in subvp_pipes[2]. 
373 373 */ 374 374 if (pipe->stream && pipe->plane_state && !pipe->top_pipe && 375 - pipe->stream->mall_stream_config.type == SUBVP_MAIN) { 376 - phantom = pipe->stream->mall_stream_config.paired_stream; 375 + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) { 376 + phantom = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream); 377 377 microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) + 378 378 phantom->timing.v_addressable; 379 379 ··· 435 435 struct pipe_ctx *pipe = NULL; 436 436 struct dc_crtc_timing *main_timing = NULL; 437 437 struct dc_crtc_timing *phantom_timing = NULL; 438 + struct dc_stream_state *phantom_stream; 438 439 int16_t prefetch_us = 0; 439 440 int16_t mall_region_us = 0; 440 441 int16_t drr_frame_us = 0; // nominal frame time ··· 454 453 continue; 455 454 456 455 // Find the SubVP pipe 457 - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) 456 + if (ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) 458 457 break; 459 458 } 460 459 460 + phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream); 461 461 main_timing = &pipe->stream->timing; 462 - phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing; 462 + phantom_timing = &phantom_stream->timing; 463 463 prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total / 464 464 (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 + 465 465 ctx->config.svp_pstate.subvp_prefetch_end_to_mall_start_us; ··· 521 519 struct dc_crtc_timing *main_timing = NULL; 522 520 struct dc_crtc_timing *phantom_timing = NULL; 523 521 struct dc_crtc_timing *vblank_timing = NULL; 522 + struct dc_stream_state *phantom_stream; 523 + enum mall_stream_type pipe_mall_type; 524 524 525 525 /* For SubVP + VBLANK/DRR cases, we assume there can only be 526 526 * a single VBLANK/DRR display. 
If DML outputs SubVP + VBLANK ··· 532 528 */ 533 529 for (i = 0; i < ctx->config.dcn_pipe_count; i++) { 534 530 pipe = &context->res_ctx.pipe_ctx[i]; 531 + pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe); 535 532 536 533 // We check for master pipe, but it shouldn't matter since we only need 537 534 // the pipe for timing info (stream should be same for any pipe splits) 538 535 if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe) 539 536 continue; 540 537 541 - if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) { 538 + if (!found && pipe_mall_type == SUBVP_NONE) { 542 539 // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe). 543 540 vblank_index = i; 544 541 found = true; 545 542 } 546 543 547 - if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN) 544 + if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN) 548 545 subvp_pipe = pipe; 549 546 } 550 547 // Use ignore_msa_timing_param flag to identify as DRR ··· 553 548 // SUBVP + DRR case 554 549 schedulable = dml2_svp_drr_schedulable(ctx, context, &context->res_ctx.pipe_ctx[vblank_index].stream->timing); 555 550 } else if (found) { 551 + phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, subvp_pipe->stream); 556 552 main_timing = &subvp_pipe->stream->timing; 557 - phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; 553 + phantom_timing = &phantom_stream->timing; 558 554 vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing; 559 555 // Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe 560 556 // Also include the prefetch end to mallstart delay time ··· 608 602 609 603 for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) { 610 604 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 605 + enum mall_stream_type pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe); 611 606 612 
607 if (!pipe->stream) 613 608 continue; 614 609 615 610 if (pipe->plane_state && !pipe->top_pipe && 616 - pipe->stream->mall_stream_config.type == SUBVP_MAIN) 611 + pipe_mall_type == SUBVP_MAIN) 617 612 subvp_count++; 618 613 619 614 // Count how many planes that aren't SubVP/phantom are capable of VACTIVE 620 615 // switching (SubVP + VACTIVE unsupported). In situations where we force 621 616 // SubVP for a VACTIVE plane, we don't want to increment the vactive_count. 622 617 if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 && 623 - pipe->stream->mall_stream_config.type == SUBVP_NONE) { 618 + pipe_mall_type == SUBVP_NONE) { 624 619 vactive_count++; 625 620 } 626 621 pipe_idx++; ··· 715 708 static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, struct dc_state *state, unsigned int dc_pipe_idx, unsigned int svp_height, unsigned int vstartup) 716 709 { 717 710 struct pipe_ctx *ref_pipe = &state->res_ctx.pipe_ctx[dc_pipe_idx]; 718 - struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_stream_for_sink(ref_pipe->stream->sink); 719 - 720 - phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; 721 - phantom_stream->dpms_off = true; 722 - phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; 723 - phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; 724 - ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; 725 - ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; 711 + struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_phantom_stream( 712 + ctx->config.svp_pstate.callbacks.dc, 713 + state, 714 + ref_pipe->stream); 726 715 727 716 /* stream has limited viewport and small timing */ 728 717 memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing)); ··· 726 723 memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst)); 727 724 set_phantom_stream_timing(ctx, state, ref_pipe, 
phantom_stream, dc_pipe_idx, svp_height, vstartup); 728 725 729 - ctx->config.svp_pstate.callbacks.add_stream_to_ctx(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream); 726 + ctx->config.svp_pstate.callbacks.add_phantom_stream(ctx->config.svp_pstate.callbacks.dc, 727 + state, 728 + phantom_stream, 729 + ref_pipe->stream); 730 730 return phantom_stream; 731 731 } 732 732 ··· 746 740 if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) { 747 741 phantom_plane = prev_phantom_plane; 748 742 } else { 749 - phantom_plane = ctx->config.svp_pstate.callbacks.create_plane(ctx->config.svp_pstate.callbacks.dc); 743 + phantom_plane = ctx->config.svp_pstate.callbacks.create_phantom_plane( 744 + ctx->config.svp_pstate.callbacks.dc, 745 + state, 746 + curr_pipe->plane_state); 750 747 } 751 748 752 749 memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); ··· 772 763 phantom_plane->clip_rect.y = 0; 773 764 phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable; 774 765 775 - phantom_plane->is_phantom = true; 776 - 777 - ctx->config.svp_pstate.callbacks.add_plane_to_context(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state); 766 + ctx->config.svp_pstate.callbacks.add_phantom_plane(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state); 778 767 779 768 curr_pipe = curr_pipe->bottom_pipe; 780 769 prev_phantom_plane = phantom_plane; ··· 797 790 // We determine which phantom pipes were added by comparing with 798 791 // the phantom stream. 
799 792 if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream && 800 - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 793 + ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) { 801 794 pipe->stream->use_dynamic_meta = false; 802 795 pipe->plane_state->flip_immediate = false; 803 796 if (!ctx->config.svp_pstate.callbacks.build_scaling_params(pipe)) { ··· 829 822 del_planes[i] = stream_status->plane_states[i]; 830 823 831 824 for (i = 0; i < old_plane_count; i++) 832 - if (!ctx->config.svp_pstate.callbacks.remove_plane_from_context(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context)) 825 + if (!ctx->config.svp_pstate.callbacks.remove_phantom_plane(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context)) 833 826 return false; 834 827 835 828 return true; ··· 845 838 for (i = 0; i < ctx->config.dcn_pipe_count; i++) { 846 839 struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i]; 847 840 // build scaling params for phantom pipes 848 - if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 841 + if (pipe->plane_state && pipe->stream && ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) { 849 842 phantom_plane = pipe->plane_state; 850 843 phantom_stream = pipe->stream; 851 844 852 845 remove_all_planes_for_stream(ctx, pipe->stream, state); 853 - ctx->config.svp_pstate.callbacks.remove_stream_from_ctx(ctx->config.svp_pstate.callbacks.dc, state, pipe->stream); 846 + ctx->config.svp_pstate.callbacks.remove_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, pipe->stream); 854 847 855 848 /* Ref count is incremented on allocation and also when added to the context. 856 849 * Therefore we must call release for the the phantom plane and stream once 857 850 * they are removed from the ctx to finally decrement the refcount to 0 to free. 
858 851 */ 859 - ctx->config.svp_pstate.callbacks.plane_state_release(phantom_plane); 860 - ctx->config.svp_pstate.callbacks.stream_release(phantom_stream); 852 + ctx->config.svp_pstate.callbacks.release_phantom_plane(ctx->config.svp_pstate.callbacks.dc, 853 + state, 854 + phantom_plane); 855 + ctx->config.svp_pstate.callbacks.release_phantom_stream(ctx->config.svp_pstate.callbacks.dc, 856 + state, 857 + phantom_stream); 861 858 862 859 removed_pipe = true; 863 860 }
+6 -4
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
··· 1051 1051 { 1052 1052 int i = 0, j = 0; 1053 1053 int disp_cfg_stream_location, disp_cfg_plane_location; 1054 + enum mall_stream_type stream_mall_type; 1054 1055 1055 1056 for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) { 1056 1057 dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id_valid[i] = false; ··· 1072 1071 1073 1072 for (i = 0; i < context->stream_count; i++) { 1074 1073 disp_cfg_stream_location = map_stream_to_dml_display_cfg(dml2, context->streams[i], dml_dispcfg); 1074 + stream_mall_type = dc_state_get_stream_subvp_type(context, context->streams[i]); 1075 1075 1076 1076 if (disp_cfg_stream_location < 0) 1077 1077 disp_cfg_stream_location = dml_dispcfg->num_timings++; ··· 1117 1115 populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]); 1118 1116 populate_dml_plane_cfg_from_plane_state(&dml_dispcfg->plane, disp_cfg_plane_location, context->stream_status[i].plane_states[j], context); 1119 1117 1120 - if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN) { 1118 + if (stream_mall_type == SUBVP_MAIN) { 1121 1119 dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport; 1122 1120 dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_optimize; 1123 - } else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) { 1121 + } else if (stream_mall_type == SUBVP_PHANTOM) { 1124 1122 dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe; 1125 1123 dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_disable; 1126 1124 dml2->v20.dml_core_ctx.policy.ImmediateFlipRequirement[disp_cfg_plane_location] = dml_immediate_flip_not_required; ··· 1149 1147 break; 1150 1148 } 1151 1149 1152 - if 
(context->streams[i]->mall_stream_config.type == SUBVP_MAIN) 1150 + if (stream_mall_type == SUBVP_MAIN) 1153 1151 dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport; 1154 - else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) 1152 + else if (stream_mall_type == SUBVP_PHANTOM) 1155 1153 dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe; 1156 1154 1157 1155 dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[disp_cfg_plane_location] = context->streams[i]->stream_id;
+4 -2
drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
··· 279 279 void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, struct dml2_context *in_ctx, unsigned int pipe_cnt) 280 280 { 281 281 unsigned int dc_pipe_ctx_index, dml_pipe_idx, plane_id; 282 + enum mall_stream_type pipe_mall_type; 282 283 bool unbounded_req_enabled = false; 283 284 struct dml2_calculate_rq_and_dlg_params_scratch *s = &in_ctx->v20.scratch.calculate_rq_and_dlg_params_scratch; 284 285 ··· 327 326 */ 328 327 populate_pipe_ctx_dlg_params_from_dml(&context->res_ctx.pipe_ctx[dc_pipe_ctx_index], &context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx); 329 328 330 - if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type == SUBVP_PHANTOM) { 329 + pipe_mall_type = dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[dc_pipe_ctx_index]); 330 + if (pipe_mall_type == SUBVP_PHANTOM) { 331 331 // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests 332 332 context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = 0; 333 333 context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = false; ··· 355 353 context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state != context->res_ctx.pipe_ctx[dc_pipe_ctx_index].top_pipe->plane_state) && 356 354 context->res_ctx.pipe_ctx[dc_pipe_ctx_index].prev_odm_pipe == NULL) { 357 355 /* SS: all active surfaces stored in MALL */ 358 - if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type != SUBVP_PHANTOM) { 356 + if (pipe_mall_type != SUBVP_PHANTOM) { 359 357 context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[dc_pipe_ctx_index].surface_size_in_mall_bytes; 360 358 } else { 361 359 /* SUBVP: phantom surfaces only stored in MALL */
+1 -1
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
··· 418 418 int i; 419 419 420 420 for (i = 0; i < display_state->stream_count; i++) { 421 - if (display_state->streams[i]->mall_stream_config.type == SUBVP_NONE 421 + if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE 422 422 && display_state->streams[i]->ignore_msa_timing_param) { 423 423 // Use ignore_msa_timing_param flag to identify as DRR 424 424 return i;
+24 -8
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
··· 93 93 struct dml2_dc_svp_callbacks { 94 94 struct dc *dc; 95 95 bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx); 96 - struct dc_stream_state* (*create_stream_for_sink)(struct dc_sink *dc_sink_data); 97 - struct dc_plane_state* (*create_plane)(struct dc *dc); 98 - enum dc_status (*add_stream_to_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); 99 - bool (*add_plane_to_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); 100 - bool (*remove_plane_from_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); 101 - enum dc_status (*remove_stream_from_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream); 102 - void (*plane_state_release)(struct dc_plane_state *plane_state); 103 - void (*stream_release)(struct dc_stream_state *stream); 96 + struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc, 97 + struct dc_state *state, 98 + struct dc_stream_state *main_stream); 99 + struct dc_plane_state* (*create_phantom_plane)(struct dc *dc, 100 + struct dc_state *state, 101 + struct dc_plane_state *main_plane); 102 + enum dc_status (*add_phantom_stream)(struct dc *dc, 103 + struct dc_state *state, 104 + struct dc_stream_state *phantom_stream, 105 + struct dc_stream_state *main_stream); 106 + bool (*add_phantom_plane)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); 107 + bool (*remove_phantom_plane)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); 108 + enum dc_status (*remove_phantom_stream)(struct dc *dc, 109 + struct dc_state *state, 110 + struct dc_stream_state *stream); 111 + void (*release_phantom_plane)(const struct dc *dc, 112 + struct dc_state *state, 113 + struct dc_plane_state *plane); 114 + void 
(*release_phantom_stream)(const struct dc *dc, 115 + struct dc_state *state, 116 + struct dc_stream_state *stream); 104 117 void (*release_dsc)(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc); 118 + enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct pipe_ctx *pipe_ctx); 119 + enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream); 120 + struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream); 105 121 }; 106 122 107 123 struct dml2_clks_table_entry {
+2 -1
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
··· 55 55 #include "audio.h" 56 56 #include "reg_helper.h" 57 57 #include "panel_cntl.h" 58 + #include "dc_state_priv.h" 58 59 #include "dpcd_defs.h" 59 60 /* include DCE11 register header files */ 60 61 #include "dce/dce_11_0_d.h" ··· 1597 1596 * is constructed with the same sink). Make sure not to override 1598 1597 * and link programming on the main. 1599 1598 */ 1600 - if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) { 1599 + if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) { 1601 1600 pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false; 1602 1601 pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false; 1603 1602 }
+9 -8
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
··· 56 56 #include "dc_trace.h" 57 57 #include "dce/dmub_outbox.h" 58 58 #include "link.h" 59 + #include "dc_state_priv.h" 59 60 60 61 #define DC_LOGGER \ 61 62 dc_logger ··· 116 115 !pipe_ctx->stream || 117 116 (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) || 118 117 !tg->funcs->is_tg_enabled(tg) || 119 - pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) 118 + dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) 120 119 continue; 121 120 122 121 if (lock) ··· 1201 1200 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove); 1202 1201 // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle, 1203 1202 // so don't wait for MPCC_IDLE in the programming sequence 1204 - if (opp != NULL && !pipe_ctx->plane_state->is_phantom) 1203 + if (opp != NULL && dc_state_get_pipe_subvp_type(NULL, pipe_ctx) != SUBVP_PHANTOM) 1205 1204 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; 1206 1205 1207 1206 dc->optimized_required = true; ··· 2277 2276 DC_SYNC_INFO("Setting up OTG reset trigger\n"); 2278 2277 2279 2278 for (i = 1; i < group_size; i++) { 2280 - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) 2279 + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(NULL, grouped_pipes[i]) == SUBVP_PHANTOM) 2281 2280 continue; 2282 2281 2283 2282 opp = grouped_pipes[i]->stream_res.opp; ··· 2297 2296 if (grouped_pipes[i]->stream == NULL) 2298 2297 continue; 2299 2298 2300 - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) 2299 + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(NULL, grouped_pipes[i]) == SUBVP_PHANTOM) 2301 2300 continue; 2302 2301 2303 2302 grouped_pipes[i]->stream->vblank_synchronized = false; 2304 2303 } 2305 2304 2306 2305 for (i = 1; i < group_size; i++) { 2307 - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) 
2306 + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(NULL, grouped_pipes[i]) == SUBVP_PHANTOM) 2308 2307 continue; 2309 2308 2310 2309 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger( ··· 2318 2317 * synchronized. Look at last pipe programmed to reset. 2319 2318 */ 2320 2319 2321 - if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM) 2320 + if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(NULL, grouped_pipes[1]) != SUBVP_PHANTOM) 2322 2321 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg); 2323 2322 2324 2323 for (i = 1; i < group_size; i++) { 2325 - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) 2324 + if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(NULL, grouped_pipes[i]) == SUBVP_PHANTOM) 2326 2325 continue; 2327 2326 2328 2327 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger( ··· 2330 2329 } 2331 2330 2332 2331 for (i = 1; i < group_size; i++) { 2333 - if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM) 2332 + if (dc_state_get_pipe_subvp_type(NULL, grouped_pipes[i]) == SUBVP_PHANTOM) 2334 2333 continue; 2335 2334 2336 2335 opp = grouped_pipes[i]->stream_res.opp;
+24 -18
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
··· 55 55 #include "inc/link_enc_cfg.h" 56 56 #include "link_hwss.h" 57 57 #include "link.h" 58 + #include "dc_state_priv.h" 58 59 59 60 #define DC_LOGGER \ 60 61 dc_logger ··· 626 625 627 626 void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) 628 627 { 629 - bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom; 628 + bool is_phantom = dc_state_get_pipe_subvp_type(NULL, pipe_ctx) == SUBVP_PHANTOM; 630 629 struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL; 631 630 632 631 DC_LOGGER_INIT(dc->ctx->logger); ··· 848 847 /* TODO enable stream if timing changed */ 849 848 /* TODO unblank stream if DP */ 850 849 851 - if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) { 850 + if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) { 852 851 if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable) 853 852 pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg); 854 853 } ··· 1371 1370 1372 1371 static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe) 1373 1372 { 1373 + bool old_is_phantom = dc_state_get_pipe_subvp_type(NULL, old_pipe) == SUBVP_PHANTOM; 1374 + bool new_is_phantom = dc_state_get_pipe_subvp_type(NULL, new_pipe) == SUBVP_PHANTOM; 1375 + 1374 1376 new_pipe->update_flags.raw = 0; 1375 1377 1376 1378 /* If non-phantom pipe is being transitioned to a phantom pipe, ··· 1383 1379 * be different). The post_unlock sequence will set the correct 1384 1380 * update flags to enable the phantom pipe. 
1385 1381 */ 1386 - if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom && 1387 - new_pipe->plane_state && new_pipe->plane_state->is_phantom) { 1382 + if (old_pipe->plane_state && !old_is_phantom && 1383 + new_pipe->plane_state && new_is_phantom) { 1388 1384 new_pipe->update_flags.bits.disable = 1; 1389 1385 return; 1390 1386 } ··· 1420 1416 * The remove-add sequence of the phantom pipe always results in the pipe 1421 1417 * being blanked in enable_stream_timing (DPG). 1422 1418 */ 1423 - if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) 1419 + if (new_pipe->stream && dc_state_get_pipe_subvp_type(NULL, new_pipe) == SUBVP_PHANTOM) 1424 1420 new_pipe->update_flags.bits.enable = 1; 1425 1421 1426 1422 /* Phantom pipes are effectively disabled, if the pipe was previously phantom 1427 1423 * we have to enable 1428 1424 */ 1429 - if (old_pipe->plane_state && old_pipe->plane_state->is_phantom && 1430 - new_pipe->plane_state && !new_pipe->plane_state->is_phantom) 1425 + if (old_pipe->plane_state && old_is_phantom && 1426 + new_pipe->plane_state && !new_is_phantom) 1431 1427 new_pipe->update_flags.bits.enable = 1; 1432 1428 1433 1429 if (old_pipe->plane_state && !new_pipe->plane_state) { ··· 1564 1560 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 1565 1561 struct dccg *dccg = dc->res_pool->dccg; 1566 1562 bool viewport_changed = false; 1563 + enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx); 1567 1564 1568 1565 if (pipe_ctx->update_flags.bits.dppclk) 1569 1566 dpp->funcs->dpp_dppclk_control(dpp, false, true); ··· 1710 1705 pipe_ctx->update_flags.bits.plane_changed || 1711 1706 plane_state->update_flags.bits.addr_update) { 1712 1707 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && 1713 - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { 1708 + pipe_mall_type == SUBVP_MAIN) { 1714 1709 union block_sequence_params params; 1715 1710 1716 1711 
params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv; ··· 1724 1719 if (pipe_ctx->update_flags.bits.enable) 1725 1720 hubp->funcs->set_blank(hubp, false); 1726 1721 /* If the stream paired with this plane is phantom, the plane is also phantom */ 1727 - if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM 1722 + if (pipe_ctx->stream && pipe_mall_type == SUBVP_PHANTOM 1728 1723 && hubp->funcs->phantom_hubp_post_enable) 1729 1724 hubp->funcs->phantom_hubp_post_enable(hubp); 1730 1725 } ··· 1931 1926 struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream; 1932 1927 1933 1928 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream && 1934 - dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { 1929 + dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) { 1935 1930 struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg; 1936 1931 1937 1932 if (tg->funcs->enable_crtc) { 1938 1933 if (dc->hwss.blank_phantom) { 1939 1934 int main_pipe_width, main_pipe_height; 1935 + struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream); 1940 1936 1941 - main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width; 1942 - main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height; 1937 + main_pipe_width = phantom_stream->dst.width; 1938 + main_pipe_height = phantom_stream->dst.height; 1943 1939 dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height); 1944 1940 } 1945 1941 tg->funcs->enable_crtc(tg); ··· 1969 1963 * DET allocation. 
1970 1964 */ 1971 1965 if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable || 1972 - (context->res_ctx.pipe_ctx[i].plane_state && context->res_ctx.pipe_ctx[i].plane_state->is_phantom))) 1966 + (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) 1973 1967 hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); 1974 1968 hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); 1975 1969 DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); ··· 1994 1988 * but the MPO still exists until the double buffered update of the main pipe so we 1995 1989 * will get a frame of underflow if the phantom pipe is programmed here. 1996 1990 */ 1997 - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) 1991 + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) 1998 1992 dcn20_program_pipe(dc, pipe, context); 1999 1993 } 2000 1994 ··· 2056 2050 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2057 2051 // Don't check flip pending on phantom pipes 2058 2052 if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable && 2059 - pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { 2053 + dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) { 2060 2054 struct hubp *hubp = pipe->plane_res.hubp; 2061 2055 int j = 0; 2062 2056 for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us ··· 2079 2073 * programming sequence). 2080 2074 */ 2081 2075 while (pipe) { 2082 - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 2076 + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 2083 2077 /* When turning on the phantom pipe we want to run through the 2084 2078 * entire enable sequence, so apply all the "enable" flags. 
2085 2079 */ ··· 2149 2143 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2150 2144 2151 2145 // At optimize don't restore the original watermark value 2152 - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { 2146 + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) { 2153 2147 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U; 2154 2148 break; 2155 2149 } ··· 2193 2187 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2194 2188 2195 2189 // At optimize don't need to restore the original watermark value 2196 - if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) { 2190 + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) { 2197 2191 context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U; 2198 2192 break; 2199 2193 }
+2 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
··· 51 51 #include "dcn20/dcn20_hwseq.h" 52 52 #include "dcn30/dcn30_resource.h" 53 53 #include "link.h" 54 - 54 + #include "dc_state_priv.h" 55 55 56 56 57 57 ··· 966 966 if (!pipe->stream) 967 967 continue; 968 968 969 - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { 969 + if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_MAIN) { 970 970 subvp_in_use = true; 971 971 break; 972 972 }
+19 -17
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
··· 51 51 #include "dcn32/dcn32_resource.h" 52 52 #include "link.h" 53 53 #include "../dcn20/dcn20_hwseq.h" 54 + #include "dc_state_priv.h" 54 55 55 56 #define DC_LOGGER_INIT(logger) 56 57 ··· 350 349 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 351 350 352 351 if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.paired_stream && 353 - pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) { 352 + dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) { 354 353 // There is at least 1 SubVP pipe, so enable SubVP 355 354 enable_subvp = true; 356 355 break; ··· 376 375 bool subvp_immediate_flip = false; 377 376 bool subvp_in_use = false; 378 377 struct pipe_ctx *pipe; 378 + enum mall_stream_type pipe_mall_type = SUBVP_NONE; 379 379 380 380 for (i = 0; i < dc->res_pool->pipe_count; i++) { 381 381 pipe = &context->res_ctx.pipe_ctx[i]; 382 + pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe); 382 383 383 - if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { 384 + if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN) { 384 385 subvp_in_use = true; 385 386 break; 386 387 } 387 388 } 388 389 389 390 if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) { 390 - if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_MAIN && 391 + if (dc_state_get_pipe_subvp_type(context, top_pipe_to_program) == SUBVP_MAIN && 391 392 top_pipe_to_program->plane_state->flip_immediate) 392 393 subvp_immediate_flip = true; 393 394 } ··· 401 398 if (!lock) { 402 399 for (i = 0; i < dc->res_pool->pipe_count; i++) { 403 400 pipe = &context->res_ctx.pipe_ctx[i]; 404 - if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN && 401 + if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN && 405 402 should_lock_all_pipes) 406 403 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, 
CRTC_STATE_VBLANK); 407 404 } ··· 423 420 bool subvp_immediate_flip = false; 424 421 425 422 if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) { 426 - if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN && 423 + if (dc_state_get_pipe_subvp_type(NULL, pipe_ctx) == SUBVP_MAIN && 427 424 pipe_ctx->plane_state->flip_immediate) 428 425 subvp_immediate_flip = true; 429 426 } ··· 612 609 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 613 610 struct hubp *hubp = pipe->plane_res.hubp; 614 611 615 - if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN || 612 + if (!pipe->stream || !(dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN || 616 613 pipe->stream->fpo_in_use)) { 617 614 if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) 618 615 hubp->funcs->hubp_update_force_pstate_disallow(hubp, false); ··· 627 624 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 628 625 struct hubp *hubp = pipe->plane_res.hubp; 629 626 630 - if (pipe->stream && (pipe->stream->mall_stream_config.type == SUBVP_MAIN || 627 + if (pipe->stream && (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN || 631 628 pipe->stream->fpo_in_use)) { 632 629 if (hubp && hubp->funcs->hubp_update_force_pstate_disallow) 633 630 hubp->funcs->hubp_update_force_pstate_disallow(hubp, true); ··· 674 671 if (cursor_size > 16384) 675 672 cache_cursor = true; 676 673 677 - if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 678 - hubp->funcs->hubp_update_mall_sel(hubp, 1, false); 674 + if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 675 + hubp->funcs->hubp_update_mall_sel(hubp, 1, false); 679 676 } else { 680 677 // MALL not supported with Stereo3D 681 678 hubp->funcs->hubp_update_mall_sel(hubp, ··· 717 714 * see if CURSOR_REQ_MODE will be back to 1 for SubVP 718 715 * when it should be 0 for MPO 719 716 */ 720 - if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) { 717 + if (dc_state_get_pipe_subvp_type(context, 
pipe) == SUBVP_MAIN) 721 718 hubp->funcs->hubp_prepare_subvp_buffering(hubp, true); 722 - } 723 719 } 724 720 } 725 721 } ··· 1230 1228 continue; 1231 1229 1232 1230 if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal)) 1233 - && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { 1231 + && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) { 1234 1232 pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg); 1235 1233 reset_sync_context_for_pipe(dc, context, i); 1236 1234 otg_disabled[i] = true; ··· 1381 1379 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1382 1380 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1383 1381 1384 - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN && 1382 + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN && 1385 1383 pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) { 1386 1384 if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) { 1387 1385 ··· 1407 1405 void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe) 1408 1406 { 1409 1407 phantom_pipe->update_flags.raw = 0; 1410 - if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 1408 + if (dc_state_get_pipe_subvp_type(NULL, phantom_pipe) == SUBVP_PHANTOM) { 1411 1409 if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) { 1412 1410 phantom_pipe->update_flags.bits.enable = 1; 1413 1411 phantom_pipe->update_flags.bits.mpcc = 1; ··· 1493 1491 * pipe, wait for the double buffer update to complete first before we do 1494 1492 * ANY phantom pipe programming. 
1495 1493 */ 1496 - if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM && 1497 - old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) { 1494 + if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM && 1495 + old_pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) != SUBVP_PHANTOM) { 1498 1496 old_pipe->stream_res.tg->funcs->wait_for_state( 1499 1497 old_pipe->stream_res.tg, 1500 1498 CRTC_STATE_VBLANK); ··· 1506 1504 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1507 1505 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 1508 1506 1509 - if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 1507 + if (new_pipe->stream && dc_state_get_pipe_subvp_type(context, new_pipe) == SUBVP_PHANTOM) { 1510 1508 // If old context or new context has phantom pipes, apply 1511 1509 // the phantom timings now. We can't change the phantom 1512 1510 // pipe configuration safely without driver acquiring
+2 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
··· 56 56 #include "dcn30/dcn30_cm_common.h" 57 57 #include "dcn31/dcn31_hwseq.h" 58 58 #include "dcn20/dcn20_hwseq.h" 59 + #include "dc_state_priv.h" 59 60 60 61 #define DC_LOGGER_INIT(logger) \ 61 62 struct dal_logger *dc_logger = logger ··· 949 948 void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) 950 949 { 951 950 struct dce_hwseq *hws = dc->hwseq; 952 - bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom; 951 + bool is_phantom = dc_state_get_pipe_subvp_type(NULL, pipe_ctx) == SUBVP_PHANTOM; 953 952 struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL; 954 953 955 954 DC_LOGGER_INIT(dc->ctx->logger);
-1
drivers/gpu/drm/amd/display/dc/inc/resource.h
··· 622 622 struct pipe_ctx *pipe_ctx); 623 623 624 624 bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream); 625 - 626 625 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
+36 -47
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 89 89 #include "dcn20/dcn20_vmid.h" 90 90 #include "dml/dcn32/dcn32_fpu.h" 91 91 92 + #include "dc_state_priv.h" 93 + 92 94 #include "dml2/dml2_wrapper.h" 93 95 94 96 #define DC_LOGGER_INIT(logger) ··· 1646 1644 if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) 1647 1645 phantom_plane = prev_phantom_plane; 1648 1646 else 1649 - phantom_plane = dc_create_plane_state(dc); 1647 + phantom_plane = dc_state_create_phantom_plane(dc, context, curr_pipe->plane_state); 1650 1648 1651 1649 memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); 1652 1650 memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, ··· 1667 1665 phantom_plane->clip_rect.y = 0; 1668 1666 phantom_plane->clip_rect.height = phantom_stream->src.height; 1669 1667 1670 - phantom_plane->is_phantom = true; 1671 - 1672 - dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context); 1668 + dc_state_add_phantom_plane(dc, phantom_stream, phantom_plane, context); 1673 1669 1674 1670 curr_pipe = curr_pipe->bottom_pipe; 1675 1671 prev_phantom_plane = phantom_plane; ··· 1683 1683 struct dc_stream_state *phantom_stream = NULL; 1684 1684 struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx]; 1685 1685 1686 - phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink); 1687 - phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; 1688 - phantom_stream->dpms_off = true; 1689 - phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; 1690 - phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; 1691 - ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; 1692 - ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; 1686 + phantom_stream = dc_state_create_phantom_stream(dc, context, ref_pipe->stream); 1693 1687 1694 1688 /* stream has limited viewport and small timing */ 1695 1689 memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, 
sizeof(phantom_stream->timing)); ··· 1693 1699 dcn32_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream, pipes, pipe_cnt, dc_pipe_idx); 1694 1700 DC_FP_END(); 1695 1701 1696 - dc_add_stream_to_ctx(dc, context, phantom_stream); 1702 + dc_state_add_phantom_stream(dc, context, phantom_stream, ref_pipe->stream); 1697 1703 return phantom_stream; 1698 1704 } 1699 1705 ··· 1708 1714 1709 1715 if (resource_is_pipe_type(pipe, OTG_MASTER) && 1710 1716 resource_is_pipe_type(pipe, DPP_PIPE) && 1711 - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 1717 + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 1712 1718 phantom_plane = pipe->plane_state; 1713 1719 phantom_stream = pipe->stream; 1714 1720 ··· 1729 1735 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1730 1736 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1731 1737 // build scaling params for phantom pipes 1732 - if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 1738 + if (pipe->plane_state && pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 1733 1739 phantom_plane = pipe->plane_state; 1734 1740 phantom_stream = pipe->stream; 1735 1741 1736 - dc_rem_all_planes_for_stream(dc, pipe->stream, context); 1737 - dc_remove_stream_from_ctx(dc, context, pipe->stream); 1742 + dc_state_rem_all_planes_for_stream(dc, pipe->stream, context); 1743 + 1744 + /* For non-full updates, a shallow copy of the current state 1745 + * is created. In this case we don't want to erase the current 1746 + * state (there can be 2 HIRQL threads, one in flip, and one in 1747 + * checkMPO) that can cause a race condition. 1748 + * 1749 + * This is just a workaround, needs a proper fix. 
1750 + */ 1751 + if (!fast_update) 1752 + dc_state_remove_phantom_stream(dc, context, pipe->stream); 1753 + else 1754 + dc_state_remove_stream(dc, context, pipe->stream); 1738 1755 1739 1756 /* Ref count is incremented on allocation and also when added to the context. 1740 1757 * Therefore we must call release for the the phantom plane and stream once 1741 1758 * they are removed from the ctx to finally decrement the refcount to 0 to free. 1742 1759 */ 1743 - dc_plane_state_release(phantom_plane); 1744 - dc_stream_release(phantom_stream); 1760 + dc_state_release_phantom_plane(dc, context, phantom_plane); 1761 + dc_state_release_phantom_stream(dc, context, phantom_stream); 1745 1762 1746 1763 removed_pipe = true; 1747 - } 1748 - 1749 - /* For non-full updates, a shallow copy of the current state 1750 - * is created. In this case we don't want to erase the current 1751 - * state (there can be 2 HIRQL threads, one in flip, and one in 1752 - * checkMPO) that can cause a race condition. 1753 - * 1754 - * This is just a workaround, needs a proper fix. 1755 - */ 1756 - if (!fast_update) { 1757 - // Clear all phantom stream info 1758 - if (pipe->stream) { 1759 - pipe->stream->mall_stream_config.type = SUBVP_NONE; 1760 - pipe->stream->mall_stream_config.paired_stream = NULL; 1761 - } 1762 - 1763 - if (pipe->plane_state) { 1764 - pipe->plane_state->is_phantom = false; 1765 - } 1766 1764 } 1767 1765 } 1768 1766 return removed_pipe; ··· 1784 1798 // We determine which phantom pipes were added by comparing with 1785 1799 // the phantom stream. 1786 1800 if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream && 1787 - pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { 1801 + dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) { 1788 1802 pipe->stream->use_dynamic_meta = false; 1789 1803 pipe->plane_state->flip_immediate = false; 1790 1804 if (!resource_build_scaling_params(pipe)) { ··· 1919 1933 * This is just a workaround -- needs a proper fix. 
1920 1934 */ 1921 1935 if (!fast_validate) { 1922 - switch (pipe->stream->mall_stream_config.type) { 1936 + switch (dc_state_get_pipe_subvp_type(context, pipe)) { 1923 1937 case SUBVP_MAIN: 1924 1938 pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; 1925 1939 subvp_in_use = true; ··· 2440 2454 dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; 2441 2455 2442 2456 dc->dml2_options.svp_pstate.callbacks.dc = dc; 2443 - dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; 2444 - dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx; 2457 + dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; 2458 + dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; 2445 2459 dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; 2446 - dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state; 2447 - dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context; 2448 - dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx; 2449 - dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink; 2450 - dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release; 2451 - dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release; 2460 + dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; 2461 + dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; 2462 + dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; 2463 + dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; 2464 + dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = 
&dc_state_release_phantom_plane; 2465 + dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; 2452 2466 dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; 2467 + dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; 2468 + dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; 2469 + dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; 2453 2470 2454 2471 dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; 2455 2472 dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
+13 -8
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
··· 92 92 #include "vm_helper.h" 93 93 #include "dcn20/dcn20_vmid.h" 94 94 95 + #include "dc_state_priv.h" 96 + 95 97 #define DC_LOGGER_INIT(logger) 96 98 97 99 enum dcn321_clk_src_array_id { ··· 2010 2008 dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; 2011 2009 2012 2010 dc->dml2_options.svp_pstate.callbacks.dc = dc; 2013 - dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context; 2014 - dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx; 2011 + dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; 2012 + dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; 2015 2013 dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; 2016 - dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state; 2017 - dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context; 2018 - dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx; 2019 - dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink; 2020 - dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release; 2021 - dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release; 2014 + dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; 2015 + dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; 2016 + dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; 2017 + dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; 2018 + dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane; 2019 + dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; 2022 2020 
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; 2021 + dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; 2022 + dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; 2023 + dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; 2023 2024 2024 2025 dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; 2025 2026 dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
+2
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
··· 102 102 #include "vm_helper.h" 103 103 #include "dcn20/dcn20_vmid.h" 104 104 105 + #include "dc_state_priv.h" 106 + 105 107 #include "link_enc_cfg.h" 106 108 #define DC_LOGGER_INIT(logger) 107 109