Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'topic/amdgpu-dp2.0-mst-2021-10-27' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

UAPI Changes:
Nope!

Cross-subsystem Changes:
drm_dp_update_payload_part1() takes a new argument for specifying what the
VCPI slot start is

Core Changes:
Make the DP MST helpers aware of the current starting VCPI slot/VCPI total
slot count...

Driver Changes:
...and then add support for taking advantage of this for 128b/132b links on DP
2.0 for amdgpu

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Lyude Paul <lyude@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/bf8e724cc0c8803d58a8d730fd6883c991376a76.camel@redhat.com

+425 -16
+29
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 10669 10669 struct dm_crtc_state *dm_old_crtc_state; 10670 10670 #if defined(CONFIG_DRM_AMD_DC_DCN) 10671 10671 struct dsc_mst_fairness_vars vars[MAX_PIPES]; 10672 + struct drm_dp_mst_topology_state *mst_state; 10673 + struct drm_dp_mst_topology_mgr *mgr; 10672 10674 #endif 10673 10675 10674 10676 trace_amdgpu_dm_atomic_check_begin(state); ··· 10875 10873 lock_and_validation_needed = true; 10876 10874 } 10877 10875 10876 + #if defined(CONFIG_DRM_AMD_DC_DCN) 10877 + /* set the slot info for each mst_state based on the link encoding format */ 10878 + for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { 10879 + struct amdgpu_dm_connector *aconnector; 10880 + struct drm_connector *connector; 10881 + struct drm_connector_list_iter iter; 10882 + u8 link_coding_cap; 10883 + 10884 + if (!mgr->mst_state ) 10885 + continue; 10886 + 10887 + drm_connector_list_iter_begin(dev, &iter); 10888 + drm_for_each_connector_iter(connector, &iter) { 10889 + int id = connector->index; 10890 + 10891 + if (id == mst_state->mgr->conn_base_id) { 10892 + aconnector = to_amdgpu_dm_connector(connector); 10893 + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); 10894 + drm_dp_mst_update_slots(mst_state, link_coding_cap); 10895 + 10896 + break; 10897 + } 10898 + } 10899 + drm_connector_list_iter_end(&iter); 10900 + 10901 + } 10902 + #endif 10878 10903 /** 10879 10904 * Streams and planes are reset when there are changes that affect 10880 10905 * bandwidth. Anything that affects bandwidth needs to go through
+3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
··· 294 294 case LINK_RATE_RBR2: 295 295 case LINK_RATE_HIGH2: 296 296 case LINK_RATE_HIGH3: 297 + #if defined(CONFIG_DRM_AMD_DC_DCN) 298 + case LINK_RATE_UHBR10: 299 + #endif 297 300 break; 298 301 default: 299 302 valid_input = false;
+6 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 219 219 struct drm_dp_mst_topology_mgr *mst_mgr; 220 220 struct drm_dp_mst_port *mst_port; 221 221 bool ret; 222 + u8 link_coding_cap = DP_8b_10b_ENCODING; 222 223 223 224 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 224 225 /* Accessing the connector state is required for vcpi_slots allocation ··· 239 238 240 239 mst_port = aconnector->port; 241 240 241 + #if defined(CONFIG_DRM_AMD_DC_DCN) 242 + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); 243 + #endif 244 + 242 245 if (enable) { 243 246 244 247 ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, ··· 256 251 } 257 252 258 253 /* It's OK for this to fail */ 259 - drm_dp_update_payload_part1(mst_mgr); 254 + drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1); 260 255 261 256 /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or 262 257 * AUX message. The sequence is slot 1-63 allocated sequence for each
+14
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 2354 2354 if (stream_update->dsc_config) 2355 2355 su_flags->bits.dsc_changed = 1; 2356 2356 2357 + #if defined(CONFIG_DRM_AMD_DC_DCN) 2358 + if (stream_update->mst_bw_update) 2359 + su_flags->bits.mst_bw = 1; 2360 + #endif 2361 + 2357 2362 if (su_flags->raw != 0) 2358 2363 overall_type = UPDATE_TYPE_FULL; 2359 2364 ··· 2735 2730 2736 2731 if (stream_update->dsc_config) 2737 2732 dp_update_dsc_config(pipe_ctx); 2733 + 2734 + #if defined(CONFIG_DRM_AMD_DC_DCN) 2735 + if (stream_update->mst_bw_update) { 2736 + if (stream_update->mst_bw_update->is_increase) 2737 + dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); 2738 + else 2739 + dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); 2740 + } 2741 + #endif 2738 2742 2739 2743 if (stream_update->pending_test_pattern) { 2740 2744 dc_link_dp_set_test_pattern(stream->link,
+292
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 3232 3232 static void update_mst_stream_alloc_table( 3233 3233 struct dc_link *link, 3234 3234 struct stream_encoder *stream_enc, 3235 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3236 + struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc? 3237 + #endif 3235 3238 const struct dp_mst_stream_allocation_table *proposed_table) 3236 3239 { 3237 3240 struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 }; ··· 3270 3267 work_table[i].slot_count = 3271 3268 proposed_table->stream_allocations[i].slot_count; 3272 3269 work_table[i].stream_enc = stream_enc; 3270 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3271 + work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc; 3272 + #endif 3273 3273 } 3274 3274 } 3275 3275 ··· 3395 3389 struct dc_link *link = stream->link; 3396 3390 struct link_encoder *link_encoder = NULL; 3397 3391 struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; 3392 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3393 + struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; 3394 + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; 3395 + #endif 3398 3396 struct dp_mst_stream_allocation_table proposed_table = {0}; 3399 3397 struct fixed31_32 avg_time_slots_per_mtp; 3400 3398 struct fixed31_32 pbn; ··· 3426 3416 &proposed_table, 3427 3417 true)) { 3428 3418 update_mst_stream_alloc_table( 3419 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3420 + link, 3421 + pipe_ctx->stream_res.stream_enc, 3422 + pipe_ctx->stream_res.hpo_dp_stream_enc, 3423 + &proposed_table); 3424 + #else 3429 3425 link, pipe_ctx->stream_res.stream_enc, &proposed_table); 3426 + #endif 3430 3427 } 3431 3428 else 3432 3429 DC_LOG_WARNING("Failed to update" ··· 3447 3430 link->mst_stream_alloc_table.stream_count); 3448 3431 3449 3432 for (i = 0; i < MAX_CONTROLLER_NUM; i++) { 3433 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3434 + DC_LOG_MST("stream_enc[%d]: %p " 3435 + 
"stream[%d].hpo_dp_stream_enc: %p " 3436 + "stream[%d].vcp_id: %d " 3437 + "stream[%d].slot_count: %d\n", 3438 + i, 3439 + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, 3440 + i, 3441 + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, 3442 + i, 3443 + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, 3444 + i, 3445 + link->mst_stream_alloc_table.stream_allocations[i].slot_count); 3446 + #else 3450 3447 DC_LOG_MST("stream_enc[%d]: %p " 3451 3448 "stream[%d].vcp_id: %d " 3452 3449 "stream[%d].slot_count: %d\n", ··· 3470 3439 link->mst_stream_alloc_table.stream_allocations[i].vcp_id, 3471 3440 i, 3472 3441 link->mst_stream_alloc_table.stream_allocations[i].slot_count); 3442 + #endif 3473 3443 } 3474 3444 3475 3445 ASSERT(proposed_table.stream_count > 0); 3476 3446 3477 3447 /* program DP source TX for payload */ 3448 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3449 + switch (dp_get_link_encoding_format(&link->cur_link_settings)) { 3450 + case DP_8b_10b_ENCODING: 3451 + link_encoder->funcs->update_mst_stream_allocation_table( 3452 + link_encoder, 3453 + &link->mst_stream_alloc_table); 3454 + break; 3455 + case DP_128b_132b_ENCODING: 3456 + hpo_dp_link_encoder->funcs->update_stream_allocation_table( 3457 + hpo_dp_link_encoder, 3458 + &link->mst_stream_alloc_table); 3459 + break; 3460 + case DP_UNKNOWN_ENCODING: 3461 + DC_LOG_ERROR("Failure: unknown encoding format\n"); 3462 + return DC_ERROR_UNEXPECTED; 3463 + } 3464 + #else 3478 3465 link_encoder->funcs->update_mst_stream_allocation_table( 3479 3466 link_encoder, 3480 3467 &link->mst_stream_alloc_table); 3468 + #endif 3481 3469 3482 3470 /* send down message */ 3483 3471 ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( ··· 3519 3469 pbn = get_pbn_from_timing(pipe_ctx); 3520 3470 avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); 3521 3471 3472 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3473 + switch 
(dp_get_link_encoding_format(&link->cur_link_settings)) { 3474 + case DP_8b_10b_ENCODING: 3475 + stream_encoder->funcs->set_throttled_vcp_size( 3476 + stream_encoder, 3477 + avg_time_slots_per_mtp); 3478 + break; 3479 + case DP_128b_132b_ENCODING: 3480 + hpo_dp_link_encoder->funcs->set_throttled_vcp_size( 3481 + hpo_dp_link_encoder, 3482 + hpo_dp_stream_encoder->inst, 3483 + avg_time_slots_per_mtp); 3484 + break; 3485 + case DP_UNKNOWN_ENCODING: 3486 + DC_LOG_ERROR("Failure: unknown encoding format\n"); 3487 + return DC_ERROR_UNEXPECTED; 3488 + } 3489 + #else 3522 3490 stream_encoder->funcs->set_throttled_vcp_size( 3523 3491 stream_encoder, 3524 3492 avg_time_slots_per_mtp); 3493 + #endif 3525 3494 3526 3495 return DC_OK; 3527 3496 3528 3497 } 3498 + 3499 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3500 + enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) 3501 + { 3502 + struct dc_stream_state *stream = pipe_ctx->stream; 3503 + struct dc_link *link = stream->link; 3504 + struct fixed31_32 avg_time_slots_per_mtp; 3505 + struct fixed31_32 pbn; 3506 + struct fixed31_32 pbn_per_slot; 3507 + struct link_encoder *link_encoder = link->link_enc; 3508 + struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; 3509 + struct dp_mst_stream_allocation_table proposed_table = {0}; 3510 + uint8_t i; 3511 + enum act_return_status ret; 3512 + DC_LOGGER_INIT(link->ctx->logger); 3513 + 3514 + /* decrease throttled vcp size */ 3515 + pbn_per_slot = get_pbn_per_slot(stream); 3516 + pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); 3517 + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); 3518 + 3519 + stream_encoder->funcs->set_throttled_vcp_size( 3520 + stream_encoder, 3521 + avg_time_slots_per_mtp); 3522 + 3523 + /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ 3524 + dm_helpers_dp_mst_send_payload_allocation( 3525 + stream->ctx, 3526 + stream, 3527 + true); 3528 + 3529 + /* notify immediate branch device table update */ 
3530 + if (dm_helpers_dp_mst_write_payload_allocation_table( 3531 + stream->ctx, 3532 + stream, 3533 + &proposed_table, 3534 + true)) { 3535 + /* update mst stream allocation table software state */ 3536 + update_mst_stream_alloc_table( 3537 + link, 3538 + pipe_ctx->stream_res.stream_enc, 3539 + pipe_ctx->stream_res.hpo_dp_stream_enc, 3540 + &proposed_table); 3541 + } else { 3542 + DC_LOG_WARNING("Failed to update" 3543 + "MST allocation table for" 3544 + "pipe idx:%d\n", 3545 + pipe_ctx->pipe_idx); 3546 + } 3547 + 3548 + DC_LOG_MST("%s " 3549 + "stream_count: %d: \n ", 3550 + __func__, 3551 + link->mst_stream_alloc_table.stream_count); 3552 + 3553 + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { 3554 + DC_LOG_MST("stream_enc[%d]: %p " 3555 + "stream[%d].vcp_id: %d " 3556 + "stream[%d].slot_count: %d\n", 3557 + i, 3558 + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, 3559 + i, 3560 + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, 3561 + i, 3562 + link->mst_stream_alloc_table.stream_allocations[i].slot_count); 3563 + } 3564 + 3565 + ASSERT(proposed_table.stream_count > 0); 3566 + 3567 + /* update mst stream allocation table hardware state */ 3568 + link_encoder->funcs->update_mst_stream_allocation_table( 3569 + link_encoder, 3570 + &link->mst_stream_alloc_table); 3571 + 3572 + /* poll for immediate branch device ACT handled */ 3573 + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( 3574 + stream->ctx, 3575 + stream); 3576 + 3577 + return DC_OK; 3578 + } 3579 + 3580 + enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) 3581 + { 3582 + struct dc_stream_state *stream = pipe_ctx->stream; 3583 + struct dc_link *link = stream->link; 3584 + struct fixed31_32 avg_time_slots_per_mtp; 3585 + struct fixed31_32 pbn; 3586 + struct fixed31_32 pbn_per_slot; 3587 + struct link_encoder *link_encoder = link->link_enc; 3588 + struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; 3589 
+ struct dp_mst_stream_allocation_table proposed_table = {0}; 3590 + uint8_t i; 3591 + enum act_return_status ret; 3592 + DC_LOGGER_INIT(link->ctx->logger); 3593 + 3594 + /* notify immediate branch device table update */ 3595 + if (dm_helpers_dp_mst_write_payload_allocation_table( 3596 + stream->ctx, 3597 + stream, 3598 + &proposed_table, 3599 + true)) { 3600 + /* update mst stream allocation table software state */ 3601 + update_mst_stream_alloc_table( 3602 + link, 3603 + pipe_ctx->stream_res.stream_enc, 3604 + pipe_ctx->stream_res.hpo_dp_stream_enc, 3605 + &proposed_table); 3606 + } 3607 + 3608 + DC_LOG_MST("%s " 3609 + "stream_count: %d: \n ", 3610 + __func__, 3611 + link->mst_stream_alloc_table.stream_count); 3612 + 3613 + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { 3614 + DC_LOG_MST("stream_enc[%d]: %p " 3615 + "stream[%d].vcp_id: %d " 3616 + "stream[%d].slot_count: %d\n", 3617 + i, 3618 + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, 3619 + i, 3620 + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, 3621 + i, 3622 + link->mst_stream_alloc_table.stream_allocations[i].slot_count); 3623 + } 3624 + 3625 + ASSERT(proposed_table.stream_count > 0); 3626 + 3627 + /* update mst stream allocation table hardware state */ 3628 + link_encoder->funcs->update_mst_stream_allocation_table( 3629 + link_encoder, 3630 + &link->mst_stream_alloc_table); 3631 + 3632 + /* poll for immediate branch device ACT handled */ 3633 + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( 3634 + stream->ctx, 3635 + stream); 3636 + 3637 + if (ret != ACT_LINK_LOST) { 3638 + /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ 3639 + dm_helpers_dp_mst_send_payload_allocation( 3640 + stream->ctx, 3641 + stream, 3642 + true); 3643 + } 3644 + 3645 + /* increase throttled vcp size */ 3646 + pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); 3647 + pbn_per_slot = get_pbn_per_slot(stream); 3648 + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); 3649 + 
3650 + stream_encoder->funcs->set_throttled_vcp_size( 3651 + stream_encoder, 3652 + avg_time_slots_per_mtp); 3653 + 3654 + return DC_OK; 3655 + } 3656 + #endif 3529 3657 3530 3658 static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) 3531 3659 { ··· 3711 3483 struct dc_link *link = stream->link; 3712 3484 struct link_encoder *link_encoder = NULL; 3713 3485 struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; 3486 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3487 + struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; 3488 + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; 3489 + #endif 3714 3490 struct dp_mst_stream_allocation_table proposed_table = {0}; 3715 3491 struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); 3716 3492 int i; ··· 3736 3504 */ 3737 3505 3738 3506 /* slot X.Y */ 3507 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3508 + switch (dp_get_link_encoding_format(&link->cur_link_settings)) { 3509 + case DP_8b_10b_ENCODING: 3510 + stream_encoder->funcs->set_throttled_vcp_size( 3511 + stream_encoder, 3512 + avg_time_slots_per_mtp); 3513 + break; 3514 + case DP_128b_132b_ENCODING: 3515 + hpo_dp_link_encoder->funcs->set_throttled_vcp_size( 3516 + hpo_dp_link_encoder, 3517 + hpo_dp_stream_encoder->inst, 3518 + avg_time_slots_per_mtp); 3519 + break; 3520 + case DP_UNKNOWN_ENCODING: 3521 + DC_LOG_ERROR("Failure: unknown encoding format\n"); 3522 + return DC_ERROR_UNEXPECTED; 3523 + } 3524 + #else 3739 3525 stream_encoder->funcs->set_throttled_vcp_size( 3740 3526 stream_encoder, 3741 3527 avg_time_slots_per_mtp); 3528 + #endif 3742 3529 3743 3530 /* TODO: which component is responsible for remove payload table? 
*/ 3744 3531 if (mst_mode) { ··· 3767 3516 &proposed_table, 3768 3517 false)) { 3769 3518 3519 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3520 + update_mst_stream_alloc_table( 3521 + link, 3522 + pipe_ctx->stream_res.stream_enc, 3523 + pipe_ctx->stream_res.hpo_dp_stream_enc, 3524 + &proposed_table); 3525 + #else 3770 3526 update_mst_stream_alloc_table( 3771 3527 link, pipe_ctx->stream_res.stream_enc, &proposed_table); 3528 + #endif 3772 3529 } 3773 3530 else { 3774 3531 DC_LOG_WARNING("Failed to update" ··· 3792 3533 link->mst_stream_alloc_table.stream_count); 3793 3534 3794 3535 for (i = 0; i < MAX_CONTROLLER_NUM; i++) { 3536 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3537 + DC_LOG_MST("stream_enc[%d]: %p " 3538 + "stream[%d].hpo_dp_stream_enc: %p " 3539 + "stream[%d].vcp_id: %d " 3540 + "stream[%d].slot_count: %d\n", 3541 + i, 3542 + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, 3543 + i, 3544 + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, 3545 + i, 3546 + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, 3547 + i, 3548 + link->mst_stream_alloc_table.stream_allocations[i].slot_count); 3549 + #else 3795 3550 DC_LOG_MST("stream_enc[%d]: %p " 3796 3551 "stream[%d].vcp_id: %d " 3797 3552 "stream[%d].slot_count: %d\n", ··· 3815 3542 link->mst_stream_alloc_table.stream_allocations[i].vcp_id, 3816 3543 i, 3817 3544 link->mst_stream_alloc_table.stream_allocations[i].slot_count); 3545 + #endif 3818 3546 } 3819 3547 3548 + #if defined(CONFIG_DRM_AMD_DC_DCN) 3549 + switch (dp_get_link_encoding_format(&link->cur_link_settings)) { 3550 + case DP_8b_10b_ENCODING: 3551 + link_encoder->funcs->update_mst_stream_allocation_table( 3552 + link_encoder, 3553 + &link->mst_stream_alloc_table); 3554 + break; 3555 + case DP_128b_132b_ENCODING: 3556 + hpo_dp_link_encoder->funcs->update_stream_allocation_table( 3557 + hpo_dp_link_encoder, 3558 + &link->mst_stream_alloc_table); 3559 + break; 3560 + case DP_UNKNOWN_ENCODING: 3561 + 
DC_LOG_ERROR("Failure: unknown encoding format\n"); 3562 + return DC_ERROR_UNEXPECTED; 3563 + } 3564 + #else 3820 3565 link_encoder->funcs->update_mst_stream_allocation_table( 3821 3566 link_encoder, 3822 3567 &link->mst_stream_alloc_table); 3568 + #endif 3823 3569 3824 3570 if (mst_mode) { 3825 3571 dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+7
drivers/gpu/drm/amd/display/dc/dc_link.h
··· 295 295 bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); 296 296 bool dc_link_get_hpd_state(struct dc_link *dc_link); 297 297 enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); 298 + #if defined(CONFIG_DRM_AMD_DC_DCN) 299 + enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); 300 + enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); 301 + #endif 298 302 299 303 /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). 300 304 * Return: ··· 428 424 bool dc_link_is_fec_supported(const struct dc_link *link); 429 425 bool dc_link_should_enable_fec(const struct dc_link *link); 430 426 427 + #if defined(CONFIG_DRM_AMD_DC_DCN) 428 + enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link); 429 + #endif 431 430 #endif /* DC_LINK_H_ */
+13
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 115 115 int lines_offset; 116 116 }; 117 117 118 + #if defined(CONFIG_DRM_AMD_DC_DCN) 119 + struct dc_mst_stream_bw_update { 120 + bool is_increase; // is bandwidth reduced or increased 121 + uint32_t mst_stream_bw; // new mst bandwidth in kbps 122 + }; 123 + #endif 124 + 118 125 union stream_update_flags { 119 126 struct { 120 127 uint32_t scaling:1; ··· 132 125 uint32_t gamut_remap:1; 133 126 uint32_t wb_update:1; 134 127 uint32_t dsc_changed : 1; 128 + #if defined(CONFIG_DRM_AMD_DC_DCN) 129 + uint32_t mst_bw : 1; 130 + #endif 135 131 } bits; 136 132 137 133 uint32_t raw; ··· 288 278 289 279 struct dc_writeback_update *wb_update; 290 280 struct dc_dsc_config *dsc_config; 281 + #if defined(CONFIG_DRM_AMD_DC_DCN) 282 + struct dc_mst_stream_bw_update *mst_bw_update; 283 + #endif 291 284 struct dc_transfer_func *func_shaper; 292 285 struct dc_3dlut *lut3d_func; 293 286
+33 -9
drivers/gpu/drm/drm_dp_mst_topology.c
··· 3355 3355 /** 3356 3356 * drm_dp_update_payload_part1() - Execute payload update part 1 3357 3357 * @mgr: manager to use. 3358 + * @start_slot: this is the cur slot 3359 + * 3360 + * NOTE: start_slot is a temporary workaround for non-atomic drivers, 3361 + * this will be removed when non-atomic mst helpers are moved out of the helper 3358 3362 * 3359 3363 * This iterates over all proposed virtual channels, and tries to 3360 3364 * allocate space in the link for them. For 0->slots transitions, ··· 3369 3365 * after calling this the driver should generate ACT and payload 3370 3366 * packets. 3371 3367 */ 3372 - int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) 3368 + int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot) 3373 3369 { 3374 3370 struct drm_dp_payload req_payload; 3375 3371 struct drm_dp_mst_port *port; 3376 3372 int i, j; 3377 - int cur_slots = 1; 3373 + int cur_slots = start_slot; 3378 3374 bool skip; 3379 3375 3380 3376 mutex_lock(&mgr->payload_lock); ··· 4338 4334 { 4339 4335 int ret; 4340 4336 4341 - /* max. 
time slots - one slot for MTP header */ 4342 - if (slots > 63) 4343 - return -ENOSPC; 4344 - 4345 4337 vcpi->pbn = pbn; 4346 4338 vcpi->aligned_pbn = slots * mgr->pbn_div; 4347 4339 vcpi->num_slots = slots; ··· 4510 4510 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots); 4511 4511 4512 4512 /** 4513 + * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format 4514 + * @mst_state: mst_state to update 4515 + * @link_encoding_cap: the encoding format on the link 4516 + */ 4517 + void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap) 4518 + { 4519 + if (link_encoding_cap == DP_CAP_ANSI_128B132B) { 4520 + mst_state->total_avail_slots = 64; 4521 + mst_state->start_slot = 0; 4522 + } else { 4523 + mst_state->total_avail_slots = 63; 4524 + mst_state->start_slot = 1; 4525 + } 4526 + 4527 + DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n", 4528 + (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b", 4529 + mst_state); 4530 + } 4531 + EXPORT_SYMBOL(drm_dp_mst_update_slots); 4532 + 4533 + /** 4513 4534 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel 4514 4535 * @mgr: manager for this port 4515 4536 * @port: port to allocate a virtual channel for. 
··· 4561 4540 4562 4541 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots); 4563 4542 if (ret) { 4564 - drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d max=63 ret=%d\n", 4543 + drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n", 4565 4544 DIV_ROUND_UP(pbn, mgr->pbn_div), ret); 4566 4545 drm_dp_mst_topology_put_port(port); 4567 4546 goto out; ··· 5249 5228 struct drm_dp_mst_topology_state *mst_state) 5250 5229 { 5251 5230 struct drm_dp_vcpi_allocation *vcpi; 5252 - int avail_slots = 63, payload_count = 0; 5231 + int avail_slots = mst_state->total_avail_slots, payload_count = 0; 5253 5232 5254 5233 list_for_each_entry(vcpi, &mst_state->vcpis, next) { 5255 5234 /* Releasing VCPI is always OK-even if the port is gone */ ··· 5278 5257 } 5279 5258 } 5280 5259 drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", 5281 - mgr, mst_state, avail_slots, 63 - avail_slots); 5260 + mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots); 5282 5261 5283 5262 return 0; 5284 5263 } ··· 5554 5533 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL); 5555 5534 if (mst_state == NULL) 5556 5535 return -ENOMEM; 5536 + 5537 + mst_state->total_avail_slots = 63; 5538 + mst_state->start_slot = 1; 5557 5539 5558 5540 mst_state->mgr = mgr; 5559 5541 INIT_LIST_HEAD(&mst_state->vcpis);
+2 -2
drivers/gpu/drm/i915/display/intel_dp_mst.c
··· 378 378 379 379 drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port); 380 380 381 - ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); 381 + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1); 382 382 if (ret) { 383 383 drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret); 384 384 } ··· 518 518 519 519 intel_dp->active_mst_links++; 520 520 521 - ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); 521 + ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1); 522 522 523 523 /* 524 524 * Before Gen 12 this is not done as part of
+1 -1
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 1414 1414 int ret; 1415 1415 1416 1416 NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name); 1417 - ret = drm_dp_update_payload_part1(&mstm->mgr); 1417 + ret = drm_dp_update_payload_part1(&mstm->mgr, 1); 1418 1418 1419 1419 drm_for_each_encoder(encoder, mstm->outp->base.base.dev) { 1420 1420 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
+2 -2
drivers/gpu/drm/radeon/radeon_dp_mst.c
··· 423 423 drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr, 424 424 radeon_connector->port, 425 425 mst_enc->pbn, slots); 426 - drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr); 426 + drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1); 427 427 428 428 radeon_dp_mst_set_be_cntl(primary, mst_enc, 429 429 radeon_connector->mst_port->hpd.hpd, true); ··· 452 452 return; 453 453 454 454 drm_dp_mst_reset_vcpi_slots(&radeon_connector->mst_port->mst_mgr, mst_enc->port); 455 - drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr); 455 + drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1); 456 456 457 457 drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr); 458 458 /* and this can also fail */
+4 -1
include/drm/drm_dp_mst_helper.h
··· 554 554 struct drm_private_state base; 555 555 struct list_head vcpis; 556 556 struct drm_dp_mst_topology_mgr *mgr; 557 + u8 total_avail_slots; 558 + u8 start_slot; 557 559 }; 558 560 559 561 #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) ··· 808 806 809 807 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); 810 808 809 + void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap); 811 810 812 811 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, 813 812 struct drm_dp_mst_port *port); ··· 818 815 int pbn); 819 816 820 817 821 - int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr); 818 + int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot); 822 819 823 820 824 821 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);