Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/display: restructure odm to allow 4 to 1 support

Currently odm is handled using the top/bottom pipe list by special casing
the differing opps to differentiate from mpc combine.

Since top/bottom pipe list was made to track mpc muxing this creates
difficulties in adding a 4 pipe odm case support.

Rather than continue using the mpc combine list, this change reworks odm
to use its own linked list to keep track of odm combine pipes. This
also opens up options for using mpo with odm, if a practical use case
is ever found.

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
Acked-by: Leo Li <sunpeng.li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Dmytro Laktyushkin and committed by
Alex Deucher
b1f6d01c bad4c3e6

+316 -265
+5 -5
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 1860 1860 for (j = 0; j < dc->res_pool->pipe_count; j++) { 1861 1861 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 1862 1862 1863 - if (!pipe_ctx->top_pipe && 1864 - pipe_ctx->stream && 1865 - pipe_ctx->stream == stream) { 1863 + if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) { 1866 1864 1867 1865 if (stream_update->periodic_interrupt0 && 1868 1866 dc->hwss.setup_periodic_interrupt) ··· 1886 1888 1887 1889 if (stream_update->dither_option) { 1888 1890 #if defined(CONFIG_DRM_AMD_DC_DCN2_0) 1889 - struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx); 1891 + struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 1890 1892 #endif 1891 1893 resource_build_bit_depth_reduction_params(pipe_ctx->stream, 1892 1894 &pipe_ctx->stream->bit_depth_params); ··· 1894 1896 &stream->bit_depth_params, 1895 1897 &stream->clamping); 1896 1898 #if defined(CONFIG_DRM_AMD_DC_DCN2_0) 1897 - if (odm_pipe) 1899 + while (odm_pipe) { 1898 1900 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 1899 1901 &stream->bit_depth_params, 1900 1902 &stream->clamping); 1903 + odm_pipe = odm_pipe->next_odm_pipe; 1904 + } 1901 1905 #endif 1902 1906 } 1903 1907
+51 -101
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 1117 1117 { 1118 1118 int i; 1119 1119 for (i = 0; i < MAX_PIPES; i++) { 1120 - if (res_ctx->pipe_ctx[i].stream == stream && 1121 - !res_ctx->pipe_ctx[i].top_pipe) { 1120 + if (res_ctx->pipe_ctx[i].stream == stream 1121 + && !res_ctx->pipe_ctx[i].top_pipe 1122 + && !res_ctx->pipe_ctx[i].prev_odm_pipe) { 1122 1123 return &res_ctx->pipe_ctx[i]; 1123 1124 break; 1124 1125 } ··· 1127 1126 return NULL; 1128 1127 } 1129 1128 1130 - static struct pipe_ctx *resource_get_tail_pipe_for_stream( 1129 + static struct pipe_ctx *resource_get_tail_pipe( 1131 1130 struct resource_context *res_ctx, 1132 - struct dc_stream_state *stream) 1131 + struct pipe_ctx *head_pipe) 1133 1132 { 1134 - struct pipe_ctx *head_pipe, *tail_pipe; 1135 - head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); 1136 - 1137 - if (!head_pipe) 1138 - return NULL; 1133 + struct pipe_ctx *tail_pipe; 1139 1134 1140 1135 tail_pipe = head_pipe->bottom_pipe; 1141 1136 ··· 1147 1150 * A free_pipe for a stream is defined here as a pipe 1148 1151 * that has no surface attached yet 1149 1152 */ 1150 - static struct pipe_ctx *acquire_free_pipe_for_stream( 1153 + static struct pipe_ctx *acquire_free_pipe_for_head( 1151 1154 struct dc_state *context, 1152 1155 const struct resource_pool *pool, 1153 - struct dc_stream_state *stream) 1156 + struct pipe_ctx *head_pipe) 1154 1157 { 1155 1158 int i; 1156 1159 struct resource_context *res_ctx = &context->res_ctx; 1157 - 1158 - struct pipe_ctx *head_pipe = NULL; 1159 - 1160 - /* Find head pipe, which has the back end set up*/ 1161 - 1162 - head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); 1163 - 1164 - if (!head_pipe) { 1165 - ASSERT(0); 1166 - return NULL; 1167 - } 1168 1160 1169 1161 if (!head_pipe->plane_state) 1170 1162 return head_pipe; 1171 1163 1172 1164 /* Re-use pipe already acquired for this stream if available*/ 1173 1165 for (i = pool->pipe_count - 1; i >= 0; i--) { 1174 - if (res_ctx->pipe_ctx[i].stream == stream && 1166 + if 
(res_ctx->pipe_ctx[i].stream == head_pipe->stream && 1175 1167 !res_ctx->pipe_ctx[i].plane_state) { 1176 1168 return &res_ctx->pipe_ctx[i]; 1177 1169 } ··· 1174 1188 if (!pool->funcs->acquire_idle_pipe_for_layer) 1175 1189 return NULL; 1176 1190 1177 - return pool->funcs->acquire_idle_pipe_for_layer(context, pool, stream); 1178 - 1191 + return pool->funcs->acquire_idle_pipe_for_layer(context, pool, head_pipe->stream); 1179 1192 } 1180 1193 1181 1194 #if defined(CONFIG_DRM_AMD_DC_DCN1_0) ··· 1188 1203 for (i = 0; i < pool->pipe_count; i++) { 1189 1204 struct pipe_ctx *split_pipe = &res_ctx->pipe_ctx[i]; 1190 1205 1191 - if (split_pipe->top_pipe && !dc_res_is_odm_head_pipe(split_pipe) && 1206 + if (split_pipe->top_pipe && 1192 1207 split_pipe->top_pipe->plane_state == split_pipe->plane_state) { 1193 1208 split_pipe->top_pipe->bottom_pipe = split_pipe->bottom_pipe; 1194 1209 if (split_pipe->bottom_pipe) ··· 1249 1264 return false; 1250 1265 } 1251 1266 1252 - tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream); 1253 - ASSERT(tail_pipe); 1254 - 1255 - free_pipe = acquire_free_pipe_for_stream(context, pool, stream); 1256 - 1257 - #if defined(CONFIG_DRM_AMD_DC_DCN1_0) 1258 - if (!free_pipe) { 1259 - int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); 1260 - if (pipe_idx >= 0) 1261 - free_pipe = &context->res_ctx.pipe_ctx[pipe_idx]; 1262 - } 1263 - #endif 1264 - if (!free_pipe) 1265 - return false; 1266 - 1267 - /* retain new surfaces */ 1267 + /* retain new surface, but only once per stream */ 1268 1268 dc_plane_state_retain(plane_state); 1269 - free_pipe->plane_state = plane_state; 1270 1269 1271 - if (head_pipe != free_pipe) { 1272 - free_pipe->stream_res.tg = tail_pipe->stream_res.tg; 1273 - free_pipe->stream_res.abm = tail_pipe->stream_res.abm; 1274 - free_pipe->stream_res.opp = tail_pipe->stream_res.opp; 1275 - free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc; 1276 - free_pipe->stream_res.audio = 
tail_pipe->stream_res.audio; 1277 - free_pipe->clock_source = tail_pipe->clock_source; 1278 - free_pipe->top_pipe = tail_pipe; 1279 - tail_pipe->bottom_pipe = free_pipe; 1280 - } else if (free_pipe->bottom_pipe && free_pipe->bottom_pipe->plane_state == NULL) { 1281 - ASSERT(free_pipe->bottom_pipe->stream_res.opp != free_pipe->stream_res.opp); 1282 - free_pipe->bottom_pipe->plane_state = plane_state; 1270 + while (head_pipe) { 1271 + tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe); 1272 + ASSERT(tail_pipe); 1273 + 1274 + free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe); 1275 + 1276 + #if defined(CONFIG_DRM_AMD_DC_DCN1_0) 1277 + if (!free_pipe) { 1278 + int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); 1279 + if (pipe_idx >= 0) 1280 + free_pipe = &context->res_ctx.pipe_ctx[pipe_idx]; 1281 + } 1282 + #endif 1283 + if (!free_pipe) { 1284 + dc_plane_state_release(plane_state); 1285 + return false; 1286 + } 1287 + 1288 + free_pipe->plane_state = plane_state; 1289 + 1290 + if (head_pipe != free_pipe) { 1291 + free_pipe->stream_res.tg = tail_pipe->stream_res.tg; 1292 + free_pipe->stream_res.abm = tail_pipe->stream_res.abm; 1293 + free_pipe->stream_res.opp = tail_pipe->stream_res.opp; 1294 + free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc; 1295 + free_pipe->stream_res.audio = tail_pipe->stream_res.audio; 1296 + free_pipe->clock_source = tail_pipe->clock_source; 1297 + free_pipe->top_pipe = tail_pipe; 1298 + tail_pipe->bottom_pipe = free_pipe; 1299 + } 1300 + head_pipe = head_pipe->next_odm_pipe; 1283 1301 } 1284 - 1285 1302 /* assign new surfaces*/ 1286 1303 stream_status->plane_states[stream_status->plane_count] = plane_state; 1287 1304 1288 1305 stream_status->plane_count++; 1289 - 1290 - return true; 1291 - } 1292 - 1293 - struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx) 1294 - { 1295 - struct pipe_ctx *bottom_pipe = pipe_ctx->bottom_pipe; 1296 - 1297 - /* ODM should only 
be updated once per otg */ 1298 - if (pipe_ctx->top_pipe) 1299 - return NULL; 1300 - 1301 - while (bottom_pipe) { 1302 - if (bottom_pipe->stream_res.opp != pipe_ctx->stream_res.opp) 1303 - break; 1304 - bottom_pipe = bottom_pipe->bottom_pipe; 1305 - } 1306 - 1307 - return bottom_pipe; 1308 - } 1309 - 1310 - bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx) 1311 - { 1312 - struct pipe_ctx *top_pipe = pipe_ctx->top_pipe; 1313 - 1314 - if (!top_pipe) 1315 - return false; 1316 - if (top_pipe && top_pipe->stream_res.opp == pipe_ctx->stream_res.opp) 1317 - return false; 1318 1306 1319 1307 return true; 1320 1308 } ··· 1318 1360 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1319 1361 1320 1362 if (pipe_ctx->plane_state == plane_state) { 1321 - if (dc_res_is_odm_head_pipe(pipe_ctx)) { 1322 - pipe_ctx->plane_state = NULL; 1323 - pipe_ctx->bottom_pipe = NULL; 1324 - continue; 1325 - } 1326 - 1327 1363 if (pipe_ctx->top_pipe) 1328 1364 pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe; 1329 1365 ··· 1332 1380 * For head pipe detach surfaces from pipe for tail 1333 1381 * pipe just zero it out 1334 1382 */ 1335 - if (!pipe_ctx->top_pipe) { 1383 + if (!pipe_ctx->top_pipe) 1336 1384 pipe_ctx->plane_state = NULL; 1337 - if (!dc_res_get_odm_bottom_pipe(pipe_ctx)) 1338 - pipe_ctx->bottom_pipe = NULL; 1339 - } else { 1385 + else 1340 1386 memset(pipe_ctx, 0, sizeof(*pipe_ctx)); 1341 - } 1342 1387 } 1343 1388 } 1344 1389 ··· 1704 1755 for (i = 0; i < MAX_PIPES; i++) { 1705 1756 if (new_ctx->res_ctx.pipe_ctx[i].stream == stream && 1706 1757 !new_ctx->res_ctx.pipe_ctx[i].top_pipe) { 1707 - struct pipe_ctx *odm_pipe = 1708 - dc_res_get_odm_bottom_pipe(&new_ctx->res_ctx.pipe_ctx[i]); 1709 - 1710 1758 del_pipe = &new_ctx->res_ctx.pipe_ctx[i]; 1711 1759 1712 1760 ASSERT(del_pipe->stream_res.stream_enc); ··· 1728 1782 dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream); 1729 1783 1730 1784 memset(del_pipe, 0, sizeof(*del_pipe)); 1731 - if (odm_pipe) 
1732 - memset(odm_pipe, 0, sizeof(*odm_pipe)); 1733 1785 1734 1786 break; 1735 1787 } ··· 2441 2497 2442 2498 if (cur_pipe->bottom_pipe) 2443 2499 cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; 2500 + 2501 + if (cur_pipe->next_odm_pipe) 2502 + cur_pipe->next_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; 2503 + 2504 + if (cur_pipe->prev_odm_pipe) 2505 + cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; 2444 2506 } 2445 2507 2446 2508 for (i = 0; i < dst_ctx->stream_count; i++) {
+4 -3
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
··· 1341 1341 struct drr_params params = {0}; 1342 1342 unsigned int event_triggers = 0; 1343 1343 #if defined(CONFIG_DRM_AMD_DC_DCN2_0) 1344 - struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx); 1344 + struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 1345 1345 #endif 1346 1346 1347 1347 if (dc->hwss.disable_stream_gating) { ··· 1409 1409 &stream->bit_depth_params, 1410 1410 &stream->clamping); 1411 1411 #if defined(CONFIG_DRM_AMD_DC_DCN2_0) 1412 - if (odm_pipe) { 1412 + while (odm_pipe) { 1413 1413 odm_pipe->stream_res.opp->funcs->opp_set_dyn_expansion( 1414 1414 odm_pipe->stream_res.opp, 1415 1415 COLOR_SPACE_YCBCR601, ··· 1420 1420 odm_pipe->stream_res.opp, 1421 1421 &stream->bit_depth_params, 1422 1422 &stream->clamping); 1423 + odm_pipe = odm_pipe->next_odm_pipe; 1423 1424 } 1424 1425 #endif 1425 1426 ··· 2080 2079 if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) 2081 2080 continue; 2082 2081 2083 - if (pipe_ctx->top_pipe) 2082 + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe) 2084 2083 continue; 2085 2084 2086 2085 status = apply_single_controller_ctx_to_hw(
+1 -1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 2537 2537 if (pipe_ctx->stream != stream) 2538 2538 continue; 2539 2539 2540 - if (!pipe_ctx->top_pipe) 2540 + if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) 2541 2541 return pipe_ctx; 2542 2542 } 2543 2543 return NULL;
+54 -36
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
··· 529 529 struct dc_stream_state *stream = pipe_ctx->stream; 530 530 struct drr_params params = {0}; 531 531 unsigned int event_triggers = 0; 532 - 533 - 534 - #if defined(CONFIG_DRM_AMD_DC_DCN2_0) 535 - struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx); 536 - #endif 532 + struct pipe_ctx *odm_pipe; 533 + int opp_cnt = 1; 534 + int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst }; 537 535 538 536 /* by upper caller loop, pipe0 is parent pipe and be called first. 539 537 * back end is set up by for pipe0. Other children pipe share back end ··· 542 544 543 545 /* TODO check if timing_changed, disable stream if timing changed */ 544 546 545 - if (odm_pipe) { 546 - int opp_inst[2] = { pipe_ctx->stream_res.opp->inst, odm_pipe->stream_res.opp->inst }; 547 + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { 548 + opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst; 549 + opp_cnt++; 550 + } 547 551 552 + if (opp_cnt > 1) 548 553 pipe_ctx->stream_res.tg->funcs->set_odm_combine( 549 554 pipe_ctx->stream_res.tg, 550 - opp_inst, 2, 555 + opp_inst, opp_cnt, 551 556 &pipe_ctx->stream->timing); 552 - } 557 + 553 558 /* HW program guide assume display already disable 554 559 * by unplug sequence. OTG assume stop. 
555 560 */ ··· 576 575 pipe_ctx->stream->signal, 577 576 true); 578 577 579 - if (odm_pipe) 578 + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 580 579 odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control( 581 580 odm_pipe->stream_res.opp, 582 581 true); ··· 662 661 */ 663 662 if (mpc->funcs->power_on_mpc_mem_pwr) 664 663 mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true); 665 - if ((pipe_ctx->top_pipe == NULL || dc_res_is_odm_head_pipe(pipe_ctx)) 664 + if (pipe_ctx->top_pipe == NULL 666 665 && mpc->funcs->set_output_gamma && stream->out_transfer_func) { 667 666 if (stream->out_transfer_func->type == TF_TYPE_HWPWL) 668 667 params = &stream->out_transfer_func->pwl; ··· 824 823 825 824 static void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) 826 825 { 827 - struct pipe_ctx *combine_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx); 826 + struct pipe_ctx *odm_pipe; 827 + int opp_cnt = 1; 828 + int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst }; 828 829 829 - if (combine_pipe) { 830 - int opp_inst[2] = { pipe_ctx->stream_res.opp->inst, 831 - combine_pipe->stream_res.opp->inst }; 830 + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { 831 + opp_inst[opp_cnt] = odm_pipe->stream_res.opp->inst; 832 + opp_cnt++; 833 + } 832 834 835 + if (opp_cnt > 1) 833 836 pipe_ctx->stream_res.tg->funcs->set_odm_combine( 834 837 pipe_ctx->stream_res.tg, 835 - opp_inst, 2, 838 + opp_inst, opp_cnt, 836 839 &pipe_ctx->stream->timing); 837 - } else 840 + else 838 841 pipe_ctx->stream_res.tg->funcs->set_odm_bypass( 839 842 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); 840 843 } ··· 853 848 struct dc_stream_state *stream = pipe_ctx->stream; 854 849 enum dc_color_space color_space = stream->output_color_space; 855 850 enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR; 856 - struct pipe_ctx *bot_odm_pipe = 
dc_res_get_odm_bottom_pipe(pipe_ctx); 851 + struct pipe_ctx *odm_pipe; 852 + int odm_cnt = 1; 857 853 858 854 int width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; 859 855 int height = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; ··· 862 856 /* get opp dpg blank color */ 863 857 color_space_to_black_color(dc, color_space, &black_color); 864 858 865 - if (bot_odm_pipe) 866 - width = width / 2; 859 + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 860 + odm_cnt++; 861 + 862 + width = width / odm_cnt; 867 863 868 864 if (blank) { 869 865 if (stream_res->abm) ··· 885 877 width, 886 878 height); 887 879 888 - if (bot_odm_pipe) { 889 - bot_odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator( 890 - bot_odm_pipe->stream_res.opp, 880 + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { 881 + odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator( 882 + odm_pipe->stream_res.opp, 891 883 dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE ? 
892 884 CONTROLLER_DP_TEST_PATTERN_COLORRAMP : test_pattern, 893 885 stream->timing.display_color_depth, ··· 1029 1021 struct pipe_ctx *pipe_ctx, 1030 1022 struct dc_state *context) 1031 1023 { 1032 - if (pipe_ctx->top_pipe == NULL) { 1024 + if (pipe_ctx->top_pipe == NULL && !pipe_ctx->prev_odm_pipe) { 1033 1025 bool blank = !is_pipe_tree_visible(pipe_ctx); 1034 1026 1035 1027 pipe_ctx->stream_res.tg->funcs->program_global_sync( ··· 1320 1312 1321 1313 pipe_ctx->stream_res.tg->funcs->set_vtg_params( 1322 1314 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); 1323 - 1324 - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); 1315 + if (pipe_ctx->prev_odm_pipe == NULL) 1316 + dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); 1325 1317 } 1326 1318 1327 1319 pipe_ctx->plane_res.hubp->funcs->hubp_setup( ··· 1411 1403 { 1412 1404 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 1413 1405 struct dce_hwseq *hws = dc->hwseq; 1414 - struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx); 1415 1406 1416 1407 if (pipe_ctx->stream_res.dsc) { 1408 + struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 1409 + 1417 1410 dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, true); 1418 - if (bot_odm_pipe) 1419 - dcn20_dsc_pg_control(hws, bot_odm_pipe->stream_res.dsc->inst, true); 1411 + while (odm_pipe) { 1412 + dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, true); 1413 + odm_pipe = odm_pipe->next_odm_pipe; 1414 + } 1420 1415 } 1421 1416 #endif 1422 1417 } ··· 1428 1417 { 1429 1418 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 1430 1419 struct dce_hwseq *hws = dc->hwseq; 1431 - struct pipe_ctx *bot_odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx); 1432 1420 1433 1421 if (pipe_ctx->stream_res.dsc) { 1422 + struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 1423 + 1434 1424 dcn20_dsc_pg_control(hws, pipe_ctx->stream_res.dsc->inst, false); 1435 - if (bot_odm_pipe) 1436 - dcn20_dsc_pg_control(hws, bot_odm_pipe->stream_res.dsc->inst, false); 1425 + while (odm_pipe) { 1426 + 
dcn20_dsc_pg_control(hws, odm_pipe->stream_res.dsc->inst, false); 1427 + odm_pipe = odm_pipe->next_odm_pipe; 1428 + } 1437 1429 } 1438 1430 #endif 1439 1431 } ··· 1566 1552 struct encoder_unblank_param params = { { 0 } }; 1567 1553 struct dc_stream_state *stream = pipe_ctx->stream; 1568 1554 struct dc_link *link = stream->link; 1569 - params.odm = dc_res_get_odm_bottom_pipe(pipe_ctx); 1555 + struct pipe_ctx *odm_pipe; 1570 1556 1557 + params.opp_cnt = 1; 1558 + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { 1559 + params.opp_cnt++; 1560 + } 1571 1561 /* only 3 items below are used by unblank */ 1572 1562 params.timing = pipe_ctx->stream->timing; 1573 1563 1574 1564 params.link_settings.link_rate = link_settings->link_rate; 1575 1565 1576 1566 if (dc_is_dp_signal(pipe_ctx->stream->signal)) { 1577 - if (optc1_is_two_pixels_per_containter(&stream->timing) || params.odm) 1567 + if (optc1_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt) 1578 1568 params.timing.pix_clk_100hz /= 2; 1579 1569 pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine( 1580 - pipe_ctx->stream_res.stream_enc, params.odm); 1570 + pipe_ctx->stream_res.stream_enc, params.opp_cnt); 1581 1571 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params); 1582 1572 } 1583 1573 ··· 1672 1654 if (!pipe_ctx_old->stream) 1673 1655 continue; 1674 1656 1675 - if (pipe_ctx_old->top_pipe) 1657 + if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe) 1676 1658 continue; 1677 1659 1678 1660 if (!pipe_ctx->stream ||
+158 -86
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
··· 1319 1319 struct pixel_clk_params *pixel_clk_params) 1320 1320 { 1321 1321 const struct dc_stream_state *stream = pipe_ctx->stream; 1322 - bool odm_combine = dc_res_get_odm_bottom_pipe(pipe_ctx) != NULL; 1322 + struct pipe_ctx *odm_pipe; 1323 + int opp_cnt = 1; 1324 + 1325 + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 1326 + opp_cnt++; 1323 1327 1324 1328 pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; 1325 1329 pixel_clk_params->encoder_object_id = stream->link->link_enc->id; ··· 1341 1337 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) 1342 1338 pixel_clk_params->color_depth = COLOR_DEPTH_888; 1343 1339 1344 - if (optc1_is_two_pixels_per_containter(&stream->timing) || odm_combine) 1340 + if (opp_cnt == 4) 1341 + pixel_clk_params->requested_pix_clk_100hz /= 4; 1342 + else if (optc1_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2) 1345 1343 pixel_clk_params->requested_pix_clk_100hz /= 2; 1346 1344 1347 1345 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) ··· 1487 1481 for (i = 0; i < MAX_PIPES; i++) { 1488 1482 if (new_ctx->res_ctx.pipe_ctx[i].stream == dc_stream && !new_ctx->res_ctx.pipe_ctx[i].top_pipe) { 1489 1483 pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; 1490 - break; 1484 + 1485 + if (pipe_ctx->stream_res.dsc) 1486 + release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc); 1491 1487 } 1492 1488 } 1493 1489 1494 1490 if (!pipe_ctx) 1495 1491 return DC_ERROR_UNEXPECTED; 1496 - 1497 - if (pipe_ctx->stream_res.dsc) { 1498 - struct pipe_ctx *odm_pipe = dc_res_get_odm_bottom_pipe(pipe_ctx); 1499 - 1500 - release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc); 1501 - if (odm_pipe) 1502 - release_dsc(&new_ctx->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); 1503 - } 1504 - 1505 - return DC_OK; 1492 + else 1493 + return DC_OK; 1506 1494 } 1507 1495 #endif 1508 1496 ··· 1595 1595 } 1596 1596 } 1597 1597 1598 - 
static bool dcn20_split_stream_for_combine( 1598 + static bool dcn20_split_stream_for_odm( 1599 + struct resource_context *res_ctx, 1600 + const struct resource_pool *pool, 1601 + struct pipe_ctx *prev_odm_pipe, 1602 + struct pipe_ctx *next_odm_pipe) 1603 + { 1604 + int pipe_idx = next_odm_pipe->pipe_idx; 1605 + struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data; 1606 + struct pipe_ctx *sec_next_pipe = next_odm_pipe->next_odm_pipe; 1607 + int new_width; 1608 + 1609 + *next_odm_pipe = *prev_odm_pipe; 1610 + next_odm_pipe->next_odm_pipe = sec_next_pipe; 1611 + 1612 + next_odm_pipe->pipe_idx = pipe_idx; 1613 + next_odm_pipe->plane_res.mi = pool->mis[next_odm_pipe->pipe_idx]; 1614 + next_odm_pipe->plane_res.hubp = pool->hubps[next_odm_pipe->pipe_idx]; 1615 + next_odm_pipe->plane_res.ipp = pool->ipps[next_odm_pipe->pipe_idx]; 1616 + next_odm_pipe->plane_res.xfm = pool->transforms[next_odm_pipe->pipe_idx]; 1617 + next_odm_pipe->plane_res.dpp = pool->dpps[next_odm_pipe->pipe_idx]; 1618 + next_odm_pipe->plane_res.mpcc_inst = pool->dpps[next_odm_pipe->pipe_idx]->inst; 1619 + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 1620 + next_odm_pipe->stream_res.dsc = NULL; 1621 + #endif 1622 + if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) { 1623 + ASSERT(!next_odm_pipe->next_odm_pipe); 1624 + next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe; 1625 + next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; 1626 + } 1627 + prev_odm_pipe->next_odm_pipe = next_odm_pipe; 1628 + next_odm_pipe->prev_odm_pipe = prev_odm_pipe; 1629 + ASSERT(next_odm_pipe->top_pipe == NULL); 1630 + 1631 + if (prev_odm_pipe->plane_state) { 1632 + /* HACTIVE halved for odm combine */ 1633 + sd->h_active /= 2; 1634 + /* Copy scl_data to secondary pipe */ 1635 + next_odm_pipe->plane_res.scl_data = *sd; 1636 + 1637 + /* Calculate new vp and recout for left pipe */ 1638 + /* Need at least 16 pixels width per side */ 1639 + if (sd->recout.x + 16 >= sd->h_active) 
1640 + return false; 1641 + new_width = sd->h_active - sd->recout.x; 1642 + sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( 1643 + sd->ratios.horz, sd->recout.width - new_width)); 1644 + sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( 1645 + sd->ratios.horz_c, sd->recout.width - new_width)); 1646 + sd->recout.width = new_width; 1647 + 1648 + /* Calculate new vp and recout for right pipe */ 1649 + sd = &next_odm_pipe->plane_res.scl_data; 1650 + new_width = sd->recout.width + sd->recout.x - sd->h_active; 1651 + /* Need at least 16 pixels width per side */ 1652 + if (new_width <= 16) 1653 + return false; 1654 + sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( 1655 + sd->ratios.horz, sd->recout.width - new_width)); 1656 + sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( 1657 + sd->ratios.horz_c, sd->recout.width - new_width)); 1658 + sd->recout.width = new_width; 1659 + sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int( 1660 + sd->ratios.horz, sd->h_active - sd->recout.x)); 1661 + sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int( 1662 + sd->ratios.horz_c, sd->h_active - sd->recout.x)); 1663 + sd->recout.x = 0; 1664 + } 1665 + next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx]; 1666 + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 1667 + if (next_odm_pipe->stream->timing.flags.DSC == 1) { 1668 + acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc); 1669 + ASSERT(next_odm_pipe->stream_res.dsc); 1670 + if (next_odm_pipe->stream_res.dsc == NULL) 1671 + return false; 1672 + } 1673 + #endif 1674 + 1675 + return true; 1676 + } 1677 + 1678 + static void dcn20_split_stream_for_mpc( 1599 1679 struct resource_context *res_ctx, 1600 1680 const struct resource_pool *pool, 1601 1681 struct pipe_ctx *primary_pipe, 1602 - struct pipe_ctx *secondary_pipe, 1603 - bool is_odm_combine) 1682 + struct pipe_ctx *secondary_pipe) 1604 1683 { 1605 1684 int pipe_idx = secondary_pipe->pipe_idx; 1606 - struct scaler_data *sd = 
&primary_pipe->plane_res.scl_data; 1607 1685 struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe; 1608 - int new_width; 1609 1686 1610 1687 *secondary_pipe = *primary_pipe; 1611 1688 secondary_pipe->bottom_pipe = sec_bot_pipe; ··· 1705 1628 primary_pipe->bottom_pipe = secondary_pipe; 1706 1629 secondary_pipe->top_pipe = primary_pipe; 1707 1630 1708 - if (is_odm_combine) { 1709 - if (primary_pipe->plane_state) { 1710 - /* HACTIVE halved for odm combine */ 1711 - sd->h_active /= 2; 1712 - /* Copy scl_data to secondary pipe */ 1713 - secondary_pipe->plane_res.scl_data = *sd; 1714 - 1715 - /* Calculate new vp and recout for left pipe */ 1716 - /* Need at least 16 pixels width per side */ 1717 - if (sd->recout.x + 16 >= sd->h_active) 1718 - return false; 1719 - new_width = sd->h_active - sd->recout.x; 1720 - sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( 1721 - sd->ratios.horz, sd->recout.width - new_width)); 1722 - sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( 1723 - sd->ratios.horz_c, sd->recout.width - new_width)); 1724 - sd->recout.width = new_width; 1725 - 1726 - /* Calculate new vp and recout for right pipe */ 1727 - sd = &secondary_pipe->plane_res.scl_data; 1728 - new_width = sd->recout.width + sd->recout.x - sd->h_active; 1729 - /* Need at least 16 pixels width per side */ 1730 - if (new_width <= 16) 1731 - return false; 1732 - sd->viewport.width -= dc_fixpt_floor(dc_fixpt_mul_int( 1733 - sd->ratios.horz, sd->recout.width - new_width)); 1734 - sd->viewport_c.width -= dc_fixpt_floor(dc_fixpt_mul_int( 1735 - sd->ratios.horz_c, sd->recout.width - new_width)); 1736 - sd->recout.width = new_width; 1737 - sd->viewport.x += dc_fixpt_floor(dc_fixpt_mul_int( 1738 - sd->ratios.horz, sd->h_active - sd->recout.x)); 1739 - sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int( 1740 - sd->ratios.horz_c, sd->h_active - sd->recout.x)); 1741 - sd->recout.x = 0; 1742 - } 1743 - secondary_pipe->stream_res.opp = pool->opps[secondary_pipe->pipe_idx]; 1744 - 
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 1745 - if (secondary_pipe->stream->timing.flags.DSC == 1) { 1746 - acquire_dsc(res_ctx, pool, &secondary_pipe->stream_res.dsc); 1747 - ASSERT(secondary_pipe->stream_res.dsc); 1748 - if (secondary_pipe->stream_res.dsc == NULL) 1749 - return false; 1750 - } 1751 - #endif 1752 - } else { 1753 - ASSERT(primary_pipe->plane_state); 1754 - resource_build_scaling_params(primary_pipe); 1755 - resource_build_scaling_params(secondary_pipe); 1756 - } 1757 - 1758 - return true; 1631 + ASSERT(primary_pipe->plane_state); 1632 + resource_build_scaling_params(primary_pipe); 1633 + resource_build_scaling_params(secondary_pipe); 1759 1634 } 1760 1635 1761 1636 void dcn20_populate_dml_writeback_from_context( ··· 2137 2108 struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; 2138 2109 struct dc_stream_state *stream = pipe_ctx->stream; 2139 2110 struct dsc_config dsc_cfg; 2111 + struct pipe_ctx *odm_pipe; 2112 + int opp_cnt = 1; 2113 + 2114 + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 2115 + opp_cnt++; 2140 2116 2141 2117 /* Only need to validate top pipe */ 2142 - if (pipe_ctx->top_pipe || !stream || !stream->timing.flags.DSC) 2118 + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC) 2143 2119 continue; 2144 2120 2145 - dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left 2146 - + stream->timing.h_border_right; 2121 + dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left 2122 + + stream->timing.h_border_right) / opp_cnt; 2147 2123 dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top 2148 2124 + stream->timing.v_border_bottom; 2149 - if (dc_res_get_odm_bottom_pipe(pipe_ctx)) 2150 - dsc_cfg.pic_width /= 2; 2151 2125 dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; 2152 2126 dsc_cfg.color_depth = stream->timing.display_color_depth; 2153 2127 dsc_cfg.dc_dsc_cfg = 
stream->timing.dsc_cfg; 2128 + dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; 2154 2129 2155 2130 if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg)) 2156 2131 return false; ··· 2178 2145 * if this primary pipe has a bottom pipe in prev. state 2179 2146 * and if the bottom pipe is still available (which it should be), 2180 2147 * pick that pipe as secondary 2148 + * Same logic applies for ODM pipes. Since mpo is not allowed with odm 2149 + * check in else case. 2181 2150 */ 2182 2151 if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) { 2183 2152 preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx; 2153 + if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { 2154 + secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; 2155 + secondary_pipe->pipe_idx = preferred_pipe_idx; 2156 + } 2157 + } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) { 2158 + preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx; 2184 2159 if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { 2185 2160 secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; 2186 2161 secondary_pipe->pipe_idx = preferred_pipe_idx; ··· 2263 2222 if (!pipes) 2264 2223 return false; 2265 2224 2225 + /* merge previously split odm pipes since mode support needs to make the decision */ 2226 + for (i = 0; i < dc->res_pool->pipe_count; i++) { 2227 + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2228 + struct pipe_ctx *odm_pipe = pipe->next_odm_pipe; 2229 + 2230 + if (pipe->prev_odm_pipe) 2231 + continue; 2232 + 2233 + pipe->next_odm_pipe = NULL; 2234 + while (odm_pipe) { 2235 + struct pipe_ctx *next_odm_pipe = odm_pipe->next_odm_pipe; 2236 + 2237 + odm_pipe->plane_state = NULL; 2238 + odm_pipe->stream = NULL; 2239 + odm_pipe->top_pipe = NULL; 2240 + odm_pipe->bottom_pipe = NULL; 2241 + 
odm_pipe->prev_odm_pipe = NULL; 2242 + odm_pipe->next_odm_pipe = NULL; 2243 + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 2244 + if (odm_pipe->stream_res.dsc) 2245 + release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc); 2246 + #endif 2247 + /* Clear plane_res and stream_res */ 2248 + memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res)); 2249 + memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res)); 2250 + odm_pipe = next_odm_pipe; 2251 + } 2252 + if (pipe->plane_state) 2253 + resource_build_scaling_params(pipe); 2254 + } 2255 + 2256 + /* merge previously mpc split pipes since mode support needs to make the decision */ 2266 2257 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2267 2258 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 2268 2259 struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; ··· 2302 2229 if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) 2303 2230 continue; 2304 2231 2305 - /* merge previously split pipe since mode support needs to make the decision */ 2306 2232 pipe->bottom_pipe = hsplit_pipe->bottom_pipe; 2307 2233 if (hsplit_pipe->bottom_pipe) 2308 2234 hsplit_pipe->bottom_pipe->top_pipe = pipe; ··· 2309 2237 hsplit_pipe->stream = NULL; 2310 2238 hsplit_pipe->top_pipe = NULL; 2311 2239 hsplit_pipe->bottom_pipe = NULL; 2312 - #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT 2313 - if (hsplit_pipe->stream_res.dsc && hsplit_pipe->stream_res.dsc != pipe->stream_res.dsc) 2314 - release_dsc(&context->res_ctx, dc->res_pool, &hsplit_pipe->stream_res.dsc); 2315 - #endif 2240 + 2316 2241 /* Clear plane_res and stream_res */ 2317 2242 memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res)); 2318 2243 memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res)); ··· 2422 2353 if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { 2423 2354 hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); 2424 2355 ASSERT(hsplit_pipe); 
2425 - if (!dcn20_split_stream_for_combine( 2356 + if (!dcn20_split_stream_for_odm( 2426 2357 &context->res_ctx, dc->res_pool, 2427 - pipe, hsplit_pipe, 2428 - true)) 2358 + pipe, hsplit_pipe)) 2429 2359 goto validate_fail; 2430 2360 pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; 2431 2361 dcn20_build_mapped_resource(dc, context, pipe->stream); ··· 2465 2397 if (!hsplit_pipe) 2466 2398 continue; 2467 2399 2468 - if (!dcn20_split_stream_for_combine( 2400 + if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { 2401 + if (!dcn20_split_stream_for_odm( 2402 + &context->res_ctx, dc->res_pool, 2403 + pipe, hsplit_pipe)) 2404 + goto validate_fail; 2405 + } else 2406 + dcn20_split_stream_for_mpc( 2469 2407 &context->res_ctx, dc->res_pool, 2470 - pipe, hsplit_pipe, 2471 - context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])) 2472 - goto validate_fail; 2408 + pipe, hsplit_pipe); 2473 2409 pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx; 2474 2410 } 2475 2411 } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
+1 -1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
··· 460 460 uint64_t m_vid_l = n_vid; 461 461 462 462 /* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */ 463 - if (is_two_pixels_per_containter(&param->timing) || param->odm) { 463 + if (is_two_pixels_per_containter(&param->timing) || param->opp_cnt) { 464 464 /*this logic should be the same in get_pixel_clock_parameters() */ 465 465 n_multiply = 1; 466 466 }
+2
drivers/gpu/drm/amd/display/dc/inc/core_types.h
··· 297 297 298 298 struct pipe_ctx *top_pipe; 299 299 struct pipe_ctx *bottom_pipe; 300 + struct pipe_ctx *next_odm_pipe; 301 + struct pipe_ctx *prev_odm_pipe; 300 302 301 303 #ifdef CONFIG_DRM_AMD_DC_DCN1_0 302 304 struct _vcs_dpi_display_dlg_regs_st dlg_regs;
+1 -1
drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
··· 91 91 struct dc_link_settings link_settings; 92 92 struct dc_crtc_timing timing; 93 93 #ifdef CONFIG_DRM_AMD_DC_DCN2_0 94 - bool odm; 94 + int opp_cnt; 95 95 #endif 96 96 }; 97 97
-3
drivers/gpu/drm/amd/display/dc/inc/resource.h
··· 179 179 180 180 unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); 181 181 182 - struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx); 183 - bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx); 184 - 185 182 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */