Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/display: refactor dmub commands into single function

[Why & How]
Consolidate dmub access to a single interface. This makes it easier to
add code in the future that needs to run every time a dmub command is
requested (e.g. instrumentation, locking etc).

Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
Signed-off-by: Josip Pavic <Josip.Pavic@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Josip Pavic; committed by Alex Deucher.
e97cc04f c4edb013

+148 -283
+11 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 10311 10311 input->cea_total_length = total_length; 10312 10312 memcpy(input->payload, data, length); 10313 10313 10314 - res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd); 10314 + res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); 10315 10315 if (!res) { 10316 10316 DRM_ERROR("EDID CEA parser failed\n"); 10317 10317 return false; ··· 10760 10760 } 10761 10761 10762 10762 return false; 10763 + } 10764 + 10765 + bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 10766 + { 10767 + return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); 10768 + } 10769 + 10770 + bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 10771 + { 10772 + return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); 10763 10773 }
+6 -19
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
··· 123 123 sizeof(cmd.digx_encoder_control.header); 124 124 cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig; 125 125 126 - dc_dmub_srv_cmd_queue(dmcub, &cmd); 127 - dc_dmub_srv_cmd_execute(dmcub); 128 - dc_dmub_srv_wait_idle(dmcub); 126 + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 129 127 } 130 128 131 129 static enum bp_result encoder_control_digx_v1_5( ··· 259 261 sizeof(cmd.dig1_transmitter_control.header); 260 262 cmd.dig1_transmitter_control.transmitter_control.dig = *dig; 261 263 262 - dc_dmub_srv_cmd_queue(dmcub, &cmd); 263 - dc_dmub_srv_cmd_execute(dmcub); 264 - dc_dmub_srv_wait_idle(dmcub); 264 + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 265 265 } 266 266 267 267 static enum bp_result transmitter_control_v1_6( ··· 321 325 sizeof(cmd.dig1_transmitter_control.header); 322 326 cmd.dig1_transmitter_control.transmitter_control.dig_v1_7 = *dig; 323 327 324 - dc_dmub_srv_cmd_queue(dmcub, &cmd); 325 - dc_dmub_srv_cmd_execute(dmcub); 326 - dc_dmub_srv_wait_idle(dmcub); 328 + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 327 329 } 328 330 329 331 static enum bp_result transmitter_control_v1_7( ··· 429 435 sizeof(cmd.set_pixel_clock.header); 430 436 cmd.set_pixel_clock.pixel_clock.clk = *clk; 431 437 432 - dc_dmub_srv_cmd_queue(dmcub, &cmd); 433 - dc_dmub_srv_cmd_execute(dmcub); 434 - dc_dmub_srv_wait_idle(dmcub); 438 + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 435 439 } 436 440 437 441 static enum bp_result set_pixel_clock_v7( ··· 796 804 sizeof(cmd.enable_disp_power_gating.header); 797 805 cmd.enable_disp_power_gating.power_gating.pwr = *pwr; 798 806 799 - dc_dmub_srv_cmd_queue(dmcub, &cmd); 800 - dc_dmub_srv_cmd_execute(dmcub); 801 - dc_dmub_srv_wait_idle(dmcub); 807 + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 802 808 } 803 809 804 810 static enum bp_result enable_disp_power_gating_v2_1( ··· 1006 1016 panel_instance; 1007 1017 
cmd.lvtma_control.data.bypass_panel_control_wait = 1008 1018 bypass_panel_control_wait; 1009 - dc_dmub_srv_cmd_queue(dmcub, &cmd); 1010 - dc_dmub_srv_cmd_execute(dmcub); 1011 - dc_dmub_srv_wait_idle(dmcub); 1012 - 1019 + dm_execute_dmub_cmd(dmcub->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 1013 1020 } 1014 1021 1015 1022 static enum bp_result enable_lvtma_control(
+1 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
··· 250 250 cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; 251 251 cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; 252 252 253 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 254 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 255 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 253 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 256 254 } 257 255 258 256 static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+1 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
··· 286 286 cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; 287 287 cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; 288 288 289 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 290 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 291 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 289 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 292 290 } 293 291 294 292 static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+1 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
··· 234 234 cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; 235 235 cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; 236 236 237 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 238 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 239 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 237 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 240 238 } 241 239 242 240 static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+1 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
··· 254 254 cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz; 255 255 cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz; 256 256 257 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 258 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 259 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 257 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 260 258 } 261 259 262 260 static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+6 -17
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 515 515 cmd.secure_display.roi_info.y_end = rect->y + rect->height; 516 516 } 517 517 518 - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); 519 - dc_dmub_srv_cmd_execute(dmub_srv); 518 + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 520 519 } 521 520 522 521 static inline void ··· 3308 3309 struct dc_state *context) 3309 3310 { 3310 3311 union dmub_rb_cmd cmd; 3311 - struct dc_context *dc_ctx = dc->ctx; 3312 3312 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect; 3313 3313 unsigned int i, j; 3314 3314 unsigned int panel_inst = 0; ··· 3348 3350 3349 3351 update_dirty_rect->panel_inst = panel_inst; 3350 3352 update_dirty_rect->pipe_idx = j; 3351 - dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd); 3352 - dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv); 3353 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 3353 3354 } 3354 3355 } 3355 3356 } ··· 4603 4606 { 4604 4607 uint8_t action; 4605 4608 union dmub_rb_cmd cmd = {0}; 4606 - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4607 4609 4608 4610 ASSERT(payload->length <= 16); 4609 4611 ··· 4650 4654 ); 4651 4655 } 4652 4656 4653 - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); 4654 - dc_dmub_srv_cmd_execute(dmub_srv); 4655 - dc_dmub_srv_wait_idle(dmub_srv); 4657 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 4656 4658 4657 4659 return true; 4658 4660 } ··· 4694 4700 struct dmub_notification *notify) 4695 4701 { 4696 4702 union dmub_rb_cmd cmd = {0}; 4697 - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4698 4703 bool is_cmd_complete = true; 4699 4704 4700 4705 /* prepare SET_CONFIG command */ ··· 4704 4711 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; 4705 4712 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; 4706 4713 4707 - if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) { 4714 + if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) { 4708 4715 /* command is not processed by dmub */ 
4709 4716 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; 4710 4717 return is_cmd_complete; ··· 4739 4746 uint8_t *mst_slots_in_use) 4740 4747 { 4741 4748 union dmub_rb_cmd cmd = {0}; 4742 - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4743 4749 4744 4750 /* prepare MST_ALLOC_SLOTS command */ 4745 4751 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; ··· 4747 4755 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; 4748 4756 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; 4749 4757 4750 - if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) 4758 + if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) 4751 4759 /* command is not processed by dmub */ 4752 4760 return DC_ERROR_UNEXPECTED; 4753 4761 ··· 4781 4789 uint32_t hpd_int_enable) 4782 4790 { 4783 4791 union dmub_rb_cmd cmd = {0}; 4784 - struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 4785 4792 4786 4793 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE; 4787 4794 cmd.dpia_hpd_int_enable.enable = hpd_int_enable; 4788 4795 4789 - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); 4790 - dc_dmub_srv_cmd_execute(dmub_srv); 4791 - dc_dmub_srv_wait_idle(dmub_srv); 4796 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 4792 4797 4793 4798 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable); 4794 4799 }
+62 -102
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
··· 65 65 } 66 66 } 67 67 68 - void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, 69 - union dmub_rb_cmd *cmd) 70 - { 71 - struct dmub_srv *dmub = dc_dmub_srv->dmub; 72 - struct dc_context *dc_ctx = dc_dmub_srv->ctx; 73 - enum dmub_status status; 74 - 75 - status = dmub_srv_cmd_queue(dmub, cmd); 76 - if (status == DMUB_STATUS_OK) 77 - return; 78 - 79 - if (status != DMUB_STATUS_QUEUE_FULL) 80 - goto error; 81 - 82 - /* Execute and wait for queue to become empty again. */ 83 - dc_dmub_srv_cmd_execute(dc_dmub_srv); 84 - dc_dmub_srv_wait_idle(dc_dmub_srv); 85 - 86 - /* Requeue the command. */ 87 - status = dmub_srv_cmd_queue(dmub, cmd); 88 - if (status == DMUB_STATUS_OK) 89 - return; 90 - 91 - error: 92 - DC_ERROR("Error queuing DMUB command: status=%d\n", status); 93 - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 94 - } 95 - 96 - void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv) 97 - { 98 - struct dmub_srv *dmub = dc_dmub_srv->dmub; 99 - struct dc_context *dc_ctx = dc_dmub_srv->ctx; 100 - enum dmub_status status; 101 - 102 - status = dmub_srv_cmd_execute(dmub); 103 - if (status != DMUB_STATUS_OK) { 104 - DC_ERROR("Error starting DMUB execution: status=%d\n", status); 105 - dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 106 - } 107 - } 108 - 109 68 void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) 110 69 { 111 70 struct dmub_srv *dmub = dc_dmub_srv->dmub; ··· 118 159 } 119 160 } 120 161 121 - bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd) 162 + bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) 122 163 { 164 + return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type); 165 + } 166 + 167 + bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type) 168 + { 169 + struct dc_context *dc_ctx = dc_dmub_srv->ctx; 123 170 struct dmub_srv *dmub; 
124 171 enum dmub_status status; 172 + int i; 125 173 126 174 if (!dc_dmub_srv || !dc_dmub_srv->dmub) 127 175 return false; 128 176 129 177 dmub = dc_dmub_srv->dmub; 130 178 131 - status = dmub_srv_cmd_with_reply_data(dmub, cmd); 179 + for (i = 0 ; i < count; i++) { 180 + // Queue command 181 + status = dmub_srv_cmd_queue(dmub, &cmd_list[i]); 182 + 183 + if (status != DMUB_STATUS_OK) { 184 + DC_ERROR("Error queueing DMUB command: status=%d\n", status); 185 + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 186 + return false; 187 + } 188 + } 189 + 190 + status = dmub_srv_cmd_execute(dmub); 132 191 if (status != DMUB_STATUS_OK) { 133 - DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); 192 + DC_ERROR("Error starting DMUB execution: status=%d\n", status); 193 + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); 134 194 return false; 195 + } 196 + 197 + // Wait for DMUB to process command 198 + if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) { 199 + status = dmub_srv_wait_for_idle(dmub, 100000); 200 + 201 + if (status != DMUB_STATUS_OK) { 202 + DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); 203 + return false; 204 + } 205 + 206 + // Copy data back from ring buffer into command 207 + if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) 208 + dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list); 135 209 } 136 210 137 211 return true; ··· 259 267 cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); 260 268 261 269 // Send the command to the DMCUB. 262 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 263 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 264 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 270 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 265 271 } 266 272 267 273 void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst) ··· 273 283 cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header); 274 284 275 285 // Send the command to the DMCUB. 
276 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 277 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 278 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 286 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 279 287 } 280 288 281 289 static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream) ··· 366 378 sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header); 367 379 368 380 // Send the command to the DMCUB. 369 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 370 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 371 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 381 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 372 382 373 383 return true; 374 384 } 375 385 376 - void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub) 386 + void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv) 377 387 { 378 388 union dmub_rb_cmd cmd = { 0 }; 379 - enum dmub_status status; 380 - 381 - if (!dmub) { 382 - return; 383 - } 384 389 385 390 memset(&cmd, 0, sizeof(cmd)); 386 391 ··· 383 402 cmd.query_feature_caps.header.ret_status = 1; 384 403 cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data); 385 404 386 - /* Send command to fw */ 387 - status = dmub_srv_cmd_with_reply_data(dmub, &cmd); 388 - 389 - ASSERT(status == DMUB_STATUS_OK); 390 - 391 405 /* If command was processed, copy feature caps to dmub srv */ 392 - if (status == DMUB_STATUS_OK && 406 + if (dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && 393 407 cmd.query_feature_caps.header.ret_status == 0) { 394 - memcpy(&dmub->feature_caps, 408 + memcpy(&dc_dmub_srv->dmub->feature_caps, 395 409 &cmd.query_feature_caps.query_feature_caps_data, 396 410 sizeof(struct dmub_feature_caps)); 397 411 } ··· 395 419 void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx) 396 420 { 397 421 union dmub_rb_cmd cmd = { 0 }; 398 - enum dmub_status status; 
399 422 unsigned int panel_inst = 0; 400 423 401 424 dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst); ··· 408 433 cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data); 409 434 cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst; 410 435 411 - // Send command to fw 412 - status = dmub_srv_cmd_with_reply_data(dc->ctx->dmub_srv->dmub, &cmd); 413 - 414 - ASSERT(status == DMUB_STATUS_OK); 415 - 416 436 // If command was processed, copy feature caps to dmub srv 417 - if (status == DMUB_STATUS_OK && 437 + if (dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && 418 438 cmd.visual_confirm_color.header.ret_status == 0) { 419 439 memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color, 420 440 &cmd.visual_confirm_color.visual_confirm_color_data, ··· 767 797 768 798 cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF; 769 799 } 770 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 771 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 772 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 800 + 801 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 773 802 } 774 803 775 804 bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data) ··· 951 982 payload->panel_inst = panel_inst; 952 983 } 953 984 954 - static void dc_send_cmd_to_dmu(struct dc_dmub_srv *dmub_srv, 955 - union dmub_rb_cmd *cmd) 956 - { 957 - dc_dmub_srv_cmd_queue(dmub_srv, cmd); 958 - dc_dmub_srv_cmd_execute(dmub_srv); 959 - dc_dmub_srv_wait_idle(dmub_srv); 960 - } 961 - 962 985 static void dc_build_cursor_position_update_payload0( 963 986 struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx, 964 987 const struct hubp *hubp, const struct dpp *dpp) ··· 993 1032 void dc_send_update_cursor_info_to_dmu( 994 1033 struct pipe_ctx *pCtx, uint8_t pipe_idx) 995 1034 { 996 - union 
dmub_rb_cmd cmd = { 0 }; 997 - union dmub_cmd_update_cursor_info_data *update_cursor_info = 998 - &cmd.update_cursor_info.update_cursor_info_data; 1035 + union dmub_rb_cmd cmd[2]; 1036 + union dmub_cmd_update_cursor_info_data *update_cursor_info_0 = 1037 + &cmd[0].update_cursor_info.update_cursor_info_data; 1038 + 1039 + memset(cmd, 0, sizeof(cmd)); 999 1040 1000 1041 if (!dc_dmub_should_update_cursor_data(pCtx)) 1001 1042 return; ··· 1014 1051 1015 1052 { 1016 1053 /* Build Payload#0 Header */ 1017 - cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; 1018 - cmd.update_cursor_info.header.payload_bytes = 1019 - sizeof(cmd.update_cursor_info.update_cursor_info_data); 1020 - cmd.update_cursor_info.header.multi_cmd_pending = 1; /* To combine multi dmu cmd, 1st cmd */ 1054 + cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; 1055 + cmd[0].update_cursor_info.header.payload_bytes = 1056 + sizeof(cmd[0].update_cursor_info.update_cursor_info_data); 1057 + cmd[0].update_cursor_info.header.multi_cmd_pending = 1; //To combine multi dmu cmd, 1st cmd 1021 1058 1022 1059 /* Prepare Payload */ 1023 - dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info->payload0); 1060 + dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0); 1024 1061 1025 - dc_build_cursor_position_update_payload0(&update_cursor_info->payload0, pipe_idx, 1062 + dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx, 1026 1063 pCtx->plane_res.hubp, pCtx->plane_res.dpp); 1027 - /* Send update_curosr_info to queue */ 1028 - dc_dmub_srv_cmd_queue(pCtx->stream->ctx->dmub_srv, &cmd); 1029 - } 1064 + } 1030 1065 { 1031 1066 /* Build Payload#1 Header */ 1032 - memset(update_cursor_info, 0, sizeof(union dmub_cmd_update_cursor_info_data)); 1033 - cmd.update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; 1034 - cmd.update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg); 1035 - 
cmd.update_cursor_info.header.multi_cmd_pending = 0; /* Indicate it's the last command. */ 1067 + cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO; 1068 + cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg); 1069 + cmd[1].update_cursor_info.header.multi_cmd_pending = 0; //Indicate it's the last command. 1036 1070 1037 1071 dc_build_cursor_attribute_update_payload1( 1038 - &cmd.update_cursor_info.update_cursor_info_data.payload1.attribute_cfg, 1072 + &cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg, 1039 1073 pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp); 1040 1074 1041 1075 /* Combine 2nd cmds update_curosr_info to DMU */ 1042 - dc_send_cmd_to_dmu(pCtx->stream->ctx->dmub_srv, &cmd); 1076 + dm_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT); 1043 1077 } 1044 1078 }
+5 -8
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
··· 26 26 #ifndef _DMUB_DC_SRV_H_ 27 27 #define _DMUB_DC_SRV_H_ 28 28 29 - #include "os_types.h" 29 + #include "dm_services_types.h" 30 30 #include "dmub/dmub_srv.h" 31 31 32 32 struct dmub_srv; ··· 52 52 void *dm; 53 53 }; 54 54 55 - void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, 56 - union dmub_rb_cmd *cmd); 57 - 58 - void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv); 59 - 60 55 void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); 61 56 62 57 void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv); 63 58 64 - bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd); 59 + bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); 60 + 61 + bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type); 65 62 66 63 bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, 67 64 unsigned int stream_mask); ··· 74 77 void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst); 75 78 bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context); 76 79 77 - void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub); 80 + void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv); 78 81 void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx); 79 82 void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv); 80 83 void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
+3 -25
drivers/gpu/drm/amd/display/dc/dc_helper.c
··· 41 41 const struct dc_context *ctx) 42 42 { 43 43 struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write; 44 - bool gather = false; 45 44 46 45 offload->should_burst_write = 47 46 (offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1)); 48 47 cmd_buf->header.payload_bytes = 49 48 sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count; 50 49 51 - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; 52 - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; 53 - 54 - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); 55 - 56 - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; 50 + dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); 57 51 58 52 memset(cmd_buf, 0, sizeof(*cmd_buf)); 59 53 ··· 60 66 const struct dc_context *ctx) 61 67 { 62 68 struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write; 63 - bool gather = false; 64 69 65 70 cmd_buf->header.payload_bytes = 66 71 sizeof(uint32_t) * offload->reg_seq_count; 67 72 68 - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; 69 - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; 70 - 71 - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); 72 - 73 - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; 73 + dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); 74 74 75 75 memset(cmd_buf, 0, sizeof(*cmd_buf)); 76 76 ··· 76 88 const struct dc_context *ctx) 77 89 { 78 90 struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait; 79 - bool gather = false; 80 91 81 - gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress; 82 - ctx->dmub_srv->reg_helper_offload.gather_in_progress = false; 83 - 84 - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data); 92 + dm_execute_dmub_cmd(ctx, &offload->cmd_data, DM_DMUB_WAIT_TYPE_NO_WAIT); 85 93 86 94 memset(cmd_buf, 0, sizeof(*cmd_buf)); 87 95 offload->reg_seq_count = 
0; 88 - 89 - ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather; 90 96 } 91 97 92 98 struct dc_reg_value_masks { ··· 133 151 const struct dc_context *ctx) 134 152 { 135 153 submit_dmub_read_modify_write(offload, ctx); 136 - dc_dmub_srv_cmd_execute(ctx->dmub_srv); 137 154 } 138 155 139 156 static void dmub_flush_burst_write_buffer_execute( ··· 140 159 const struct dc_context *ctx) 141 160 { 142 161 submit_dmub_burst_write(offload, ctx); 143 - dc_dmub_srv_cmd_execute(ctx->dmub_srv); 144 162 } 145 163 146 164 static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr, ··· 671 691 default: 672 692 return; 673 693 } 674 - 675 - dc_dmub_srv_cmd_execute(ctx->dmub_srv); 676 694 } 677 695 } 678 696
+7 -21
drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
··· 75 75 cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.panel_mask = panel_mask; 76 76 cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data); 77 77 78 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 79 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 80 - dc_dmub_srv_wait_idle(dc->dmub_srv); 78 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 81 79 } 82 80 83 81 void dmub_abm_init(struct abm *abm, uint32_t backlight) ··· 154 156 cmd.abm_set_level.abm_set_level_data.panel_mask = panel_mask; 155 157 cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data); 156 158 157 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 158 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 159 - dc_dmub_srv_wait_idle(dc->dmub_srv); 159 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 160 160 161 161 return true; 162 162 } ··· 176 180 cmd.abm_set_ambient_level.abm_set_ambient_level_data.panel_mask = panel_mask; 177 181 cmd.abm_set_ambient_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_ambient_level_data); 178 182 179 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 180 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 181 - dc_dmub_srv_wait_idle(dc->dmub_srv); 183 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 182 184 } 183 185 #endif 184 186 ··· 206 212 207 213 cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data); 208 214 209 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 210 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 211 - dc_dmub_srv_wait_idle(dc->dmub_srv); 215 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 212 216 213 217 } 214 218 ··· 223 231 cmd.abm_pause.abm_pause_data.panel_mask = panel_mask; 224 232 cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data); 225 233 226 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 227 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 228 - dc_dmub_srv_wait_idle(dc->dmub_srv); 234 + dm_execute_dmub_cmd(dc, &cmd, 
DM_DMUB_WAIT_TYPE_WAIT); 229 235 230 236 return true; 231 237 } ··· 243 253 cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; 244 254 cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); 245 255 246 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 247 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 248 - dc_dmub_srv_wait_idle(dc->dmub_srv); 256 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 249 257 250 258 return true; 251 259 } ··· 265 277 cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); 266 278 cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); 267 279 268 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 269 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 270 - dc_dmub_srv_wait_idle(dc->dmub_srv); 280 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 271 281 272 282 return true; 273 283 }
+1 -3
drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
··· 47 47 if (!lock) 48 48 cmd.lock_hw.lock_hw_data.should_release = 1; 49 49 50 - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); 51 - dc_dmub_srv_cmd_execute(dmub_srv); 52 - dc_dmub_srv_wait_idle(dmub_srv); 50 + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 53 51 } 54 52 55 53 void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+1 -3
drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
··· 48 48 sizeof(cmd.outbox1_enable.header); 49 49 cmd.outbox1_enable.enable = true; 50 50 51 - dc_dmub_srv_cmd_queue(dmub_srv, &cmd); 52 - dc_dmub_srv_cmd_execute(dmub_srv); 53 - dc_dmub_srv_wait_idle(dmub_srv); 51 + dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 54 52 }
+7 -21
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
··· 168 168 cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst; 169 169 cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data); 170 170 171 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 172 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 173 - dc_dmub_srv_wait_idle(dc->dmub_srv); 171 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 174 172 175 173 return true; 176 174 } ··· 196 198 197 199 cmd.psr_enable.header.payload_bytes = 0; // Send header only 198 200 199 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 200 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 201 - dc_dmub_srv_wait_idle(dc->dmub_srv); 201 + dm_execute_dmub_cmd(dc->dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 202 202 203 203 /* Below loops 1000 x 500us = 500 ms. 204 204 * Exit PSR may need to wait 1-2 frames to power up. Timeout after at ··· 244 248 cmd.psr_set_level.psr_set_level_data.psr_level = psr_level; 245 249 cmd.psr_set_level.psr_set_level_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; 246 250 cmd.psr_set_level.psr_set_level_data.panel_inst = panel_inst; 247 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 248 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 249 - dc_dmub_srv_wait_idle(dc->dmub_srv); 251 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 250 252 } 251 253 252 254 /* ··· 263 269 cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_idle = psr_vtotal_idle; 264 270 cmd.psr_set_vtotal.psr_set_vtotal_data.psr_vtotal_su = psr_vtotal_su; 265 271 266 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 267 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 268 - dc_dmub_srv_wait_idle(dc->dmub_srv); 272 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 269 273 } 270 274 271 275 /* ··· 282 290 cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt; 283 291 cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst; 284 292 285 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 286 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 287 - 
dc_dmub_srv_wait_idle(dc->dmub_srv); 293 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 288 294 } 289 295 290 296 /* ··· 412 422 copy_settings_data->relock_delay_frame_cnt = 2; 413 423 copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height; 414 424 415 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 416 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 417 - dc_dmub_srv_wait_idle(dc->dmub_srv); 425 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 418 426 419 427 return true; 420 428 } ··· 433 445 cmd.psr_force_static.header.sub_type = DMUB_CMD__PSR_FORCE_STATIC; 434 446 cmd.psr_enable.header.payload_bytes = 0; 435 447 436 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 437 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 438 - dc_dmub_srv_wait_idle(dc->dmub_srv); 448 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 439 449 } 440 450 441 451 /*
+1 -6
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
··· 667 667 static void dmcub_PLAT_54186_wa(struct hubp *hubp, 668 668 struct surface_flip_registers *flip_regs) 669 669 { 670 - struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv; 671 670 struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); 672 671 union dmub_rb_cmd cmd; 673 672 ··· 689 690 cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid; 690 691 691 692 PERF_TRACE(); // TODO: remove after performance is stable. 692 - dc_dmub_srv_cmd_queue(dmcub, &cmd); 693 - PERF_TRACE(); // TODO: remove after performance is stable. 694 - dc_dmub_srv_cmd_execute(dmcub); 695 - PERF_TRACE(); // TODO: remove after performance is stable. 696 - dc_dmub_srv_wait_idle(dmcub); 693 + dm_execute_dmub_cmd(hubp->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 697 694 PERF_TRACE(); // TODO: remove after performance is stable. 698 695 } 699 696
+2 -6
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
··· 152 152 cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary; 153 153 cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data); 154 154 155 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 156 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 157 - dc_dmub_srv_wait_idle(dc->dmub_srv); 155 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 158 156 159 157 return true; 160 158 } ··· 171 173 cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_inst); 172 174 cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data); 173 175 174 - dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); 175 - dc_dmub_srv_cmd_execute(dc->dmub_srv); 176 - dc_dmub_srv_wait_idle(dc->dmub_srv); 176 + dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 177 177 } 178 178 179 179 void dcn21_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+5 -11
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
··· 632 632 dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub); 633 633 634 634 // Get DMCUB capabilities 635 - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); 635 + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); 636 636 dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; 637 637 dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; 638 638 } ··· 736 736 cmd.mall.header.sub_type = DMUB_CMD__MALL_ACTION_NO_DF_REQ; 737 737 cmd.mall.header.payload_bytes = sizeof(cmd.mall) - sizeof(cmd.mall.header); 738 738 739 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 740 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 739 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 741 740 742 741 return true; 743 742 } ··· 858 859 cmd.mall.cursor_height = cursor_attr.height; 859 860 cmd.mall.cursor_pitch = cursor_attr.pitch; 860 861 861 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 862 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 863 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 862 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 864 863 865 864 /* Use copied cursor, and it's okay to not switch back */ 866 865 cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part; ··· 874 877 cmd.mall.tmr_scale = tmr_scale; 875 878 cmd.mall.debug_bits = dc->debug.mall_error_as_fatal; 876 879 877 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 878 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 880 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 879 881 880 882 return true; 881 883 } ··· 891 895 cmd.mall.header.payload_bytes = 892 896 sizeof(cmd.mall) - sizeof(cmd.mall.header); 893 897 894 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 895 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 896 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 898 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 897 899 898 900 return true; 899 901 }
+3 -7
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
··· 297 297 #endif 298 298 299 299 // Get DMCUB capabilities 300 - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); 300 + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); 301 301 dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; 302 302 } 303 303 ··· 442 442 cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; 443 443 cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_SAVE_INIT; 444 444 445 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 446 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 447 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 445 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 448 446 } 449 447 450 448 void dcn31_z10_restore(const struct dc *dc) ··· 460 462 cmd.dcn_restore.header.type = DMUB_CMD__IDLE_OPT; 461 463 cmd.dcn_restore.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_RESTORE; 462 464 463 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 464 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 465 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 465 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 466 466 } 467 467 468 468 void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
+2 -2
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
··· 52 52 cmd->panel_cntl.header.payload_bytes = sizeof(cmd->panel_cntl.data); 53 53 cmd->panel_cntl.data.inst = dcn31_panel_cntl->base.inst; 54 54 55 - return dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd); 55 + return dm_execute_dmub_cmd(dc_dmub_srv->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); 56 56 } 57 57 58 58 static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cntl) ··· 85 85 panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV; 86 86 cmd.panel_cntl.data.bl_pwm_ref_div2 = 87 87 panel_cntl->stored_backlight_registers.PANEL_PWRSEQ_REF_DIV2; 88 - if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd)) 88 + if (!dm_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) 89 89 return 0; 90 90 91 91 panel_cntl->stored_backlight_registers.BL_PWM_CNTL = cmd.panel_cntl.data.bl_pwm_cntl;
+1 -3
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
··· 417 417 cmd.domain_control.data.inst = hubp_inst; 418 418 cmd.domain_control.data.power_gate = !power_on; 419 419 420 - dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd); 421 - dc_dmub_srv_cmd_execute(ctx->dmub_srv); 422 - dc_dmub_srv_wait_idle(ctx->dmub_srv); 420 + dm_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 423 421 424 422 PERF_TRACE(); 425 423 }
+4 -8
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
··· 274 274 cmd.cab.header.sub_type = DMUB_CMD__CAB_NO_DCN_REQ; 275 275 cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); 276 276 277 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 278 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 277 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 279 278 280 279 return true; 281 280 } ··· 308 309 cmd.cab.header.payload_bytes = sizeof(cmd.cab) - sizeof(cmd.cab.header); 309 310 cmd.cab.cab_alloc_ways = ways; 310 311 311 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 312 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 312 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT); 313 313 314 314 return true; 315 315 } ··· 324 326 cmd.cab.header.payload_bytes = 325 327 sizeof(cmd.cab) - sizeof(cmd.cab.header); 326 328 327 - dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd); 328 - dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv); 329 - dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); 329 + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); 330 330 331 331 return true; 332 332 } ··· 942 946 943 947 // Get DMCUB capabilities 944 948 if (dc->ctx->dmub_srv) { 945 - dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub); 949 + dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); 946 950 dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; 947 951 dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; 948 952 }
+7
drivers/gpu/drm/amd/display/dc/dm_services.h
··· 40 40 41 41 struct dmub_srv; 42 42 struct dc_dmub_srv; 43 + union dmub_rb_cmd; 43 44 44 45 irq_handler_idx dm_register_interrupt( 45 46 struct dc_context *ctx, ··· 273 272 274 273 #define PERF_TRACE() dm_perf_trace_timestamp(__func__, __LINE__, CTX) 275 274 #define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX) 275 + 276 + /* 277 + * DMUB Interfaces 278 + */ 279 + bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); 280 + bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); 276 281 277 282 /* 278 283 * Debug and verification hooks
+6
drivers/gpu/drm/amd/display/dc/dm_services_types.h
··· 269 269 uint32_t min_memory_clock_khz; 270 270 }; 271 271 272 + enum dm_dmub_wait_type { 273 + DM_DMUB_WAIT_TYPE_NO_WAIT, 274 + DM_DMUB_WAIT_TYPE_WAIT, 275 + DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY, 276 + }; 277 + 272 278 #endif