Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/display: Support for DMUB AUX

[WHY]
To process AUX transactions with DMUB using inbox1 and outbox1 mailboxes.

[HOW]
1) Added inbox1 command DMUB_CMD__DP_AUX_ACCESS to issue AUX commands
to DMUB in dc_process_dmub_aux_transfer_async(). DMUB processes AUX cmd
with DCN and sends reply back in an outbox1 message triggering an
outbox1 interrupt to driver.
2) In existing driver implementation, AUX commands are processed
synchronously by configuring DCN reg. But in DMUB AUX, driver sends an
inbox1 message and waits for a condition variable (CV) which will be
signaled by outbox1 ISR.
3) DM will retrieve Outbox1 message and send back reply to upper layer
and complete the AUX command

Signed-off-by: Jude Shih <shenshih@amd.com>
Reviewed-by: Hanghong Ma <Hanghong.Ma@amd.com>
Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
Acked-by: Wayne Lin <Wayne.Lin@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Jude Shih and committed by
Alex Deucher
81927e28 7f63d8a1

+241 -66
+147 -48
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 35 35 #include "dc/inc/hw/abm.h" 36 36 #include "dc/dc_dmub_srv.h" 37 37 #include "dc/dc_edid_parser.h" 38 + #include "dc/dc_stat.h" 38 39 #include "amdgpu_dm_trace.h" 39 40 40 41 #include "vid.h" ··· 60 59 61 60 #include "ivsrcid/ivsrcid_vislands30.h" 62 61 62 + #include "i2caux_interface.h" 63 63 #include <linux/module.h> 64 64 #include <linux/moduleparam.h> 65 65 #include <linux/types.h> ··· 622 620 #endif 623 621 #endif 624 622 623 + /** 624 + * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt 625 + * @interrupt_params: used for determining the Outbox instance 626 + * 627 + * Handles the Outbox Interrupt 628 + * event handler. 629 + */ 630 + #define DMUB_TRACE_MAX_READ 64 631 + static void dm_dmub_outbox1_low_irq(void *interrupt_params) 632 + { 633 + struct dmub_notification notify; 634 + struct common_irq_params *irq_params = interrupt_params; 635 + struct amdgpu_device *adev = irq_params->adev; 636 + struct amdgpu_display_manager *dm = &adev->dm; 637 + struct dmcub_trace_buf_entry entry = { 0 }; 638 + uint32_t count = 0; 639 + 640 + if (dc_enable_dmub_notifications(adev->dm.dc)) { 641 + if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { 642 + do { 643 + dc_stat_get_dmub_notification(adev->dm.dc, &notify); 644 + } while (notify.pending_notification); 645 + 646 + if (adev->dm.dmub_notify) 647 + memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification)); 648 + if (notify.type == DMUB_NOTIFICATION_AUX_REPLY) 649 + complete(&adev->dm.dmub_aux_transfer_done); 650 + // TODO : HPD Implementation 651 + 652 + } else { 653 + DRM_ERROR("DM: Failed to receive correct outbox IRQ !"); 654 + } 655 + } 656 + 657 + 658 + do { 659 + if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { 660 + trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, 661 + entry.param0, entry.param1); 662 + 663 + DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", 664 + entry.trace_code, entry.tick_count, entry.param0, entry.param1); 
665 + } else 666 + break; 667 + 668 + count++; 669 + 670 + } while (count <= DMUB_TRACE_MAX_READ); 671 + 672 + ASSERT(count <= DMUB_TRACE_MAX_READ); 673 + } 674 + 625 675 static int dm_set_clockgating_state(void *handle, 626 676 enum amd_clockgating_state state) 627 677 { ··· 992 938 } 993 939 994 940 #if defined(CONFIG_DRM_AMD_DC_DCN) 995 - #define DMUB_TRACE_MAX_READ 64 996 - static void dm_dmub_trace_high_irq(void *interrupt_params) 997 - { 998 - struct common_irq_params *irq_params = interrupt_params; 999 - struct amdgpu_device *adev = irq_params->adev; 1000 - struct amdgpu_display_manager *dm = &adev->dm; 1001 - struct dmcub_trace_buf_entry entry = { 0 }; 1002 - uint32_t count = 0; 1003 - 1004 - do { 1005 - if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { 1006 - trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, 1007 - entry.param0, entry.param1); 1008 - 1009 - DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", 1010 - entry.trace_code, entry.tick_count, entry.param0, entry.param1); 1011 - } else 1012 - break; 1013 - 1014 - count++; 1015 - 1016 - } while (count <= DMUB_TRACE_MAX_READ); 1017 - 1018 - ASSERT(count <= DMUB_TRACE_MAX_READ); 1019 - } 1020 - 1021 941 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) 1022 942 { 1023 943 uint64_t pt_base; ··· 1248 1220 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 1249 1221 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); 1250 1222 #endif 1223 + if (dc_enable_dmub_notifications(adev->dm.dc)) { 1224 + init_completion(&adev->dm.dmub_aux_transfer_done); 1225 + adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); 1226 + if (!adev->dm.dmub_notify) { 1227 + DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); 1228 + goto error; 1229 + } 1230 + amdgpu_dm_outbox_init(adev); 1231 + } 1232 + 1251 1233 if (amdgpu_dm_initialize_drm_device(adev)) { 1252 1234 DRM_ERROR( 1253 1235 
"amdgpu: failed to initialize sw for display support.\n"); ··· 1329 1291 if (adev->dm.dc->ctx->dmub_srv) { 1330 1292 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); 1331 1293 adev->dm.dc->ctx->dmub_srv = NULL; 1294 + } 1295 + 1296 + if (dc_enable_dmub_notifications(adev->dm.dc)) { 1297 + kfree(adev->dm.dmub_notify); 1298 + adev->dm.dmub_notify = NULL; 1332 1299 } 1333 1300 1334 1301 if (adev->dm.dmub_bo) ··· 3195 3152 3196 3153 } 3197 3154 3198 - if (dc->ctx->dmub_srv) { 3199 - i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT; 3200 - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq); 3201 - 3202 - if (r) { 3203 - DRM_ERROR("Failed to add dmub trace irq id!\n"); 3204 - return r; 3205 - } 3206 - 3207 - int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; 3208 - int_params.irq_source = 3209 - dc_interrupt_to_irq_source(dc, i, 0); 3210 - 3211 - c_irq_params = &adev->dm.dmub_trace_params[0]; 3212 - 3213 - c_irq_params->adev = adev; 3214 - c_irq_params->irq_src = int_params.irq_source; 3215 - 3216 - amdgpu_dm_irq_register_interrupt(adev, &int_params, 3217 - dm_dmub_trace_high_irq, c_irq_params); 3218 - } 3219 - 3220 3155 /* HPD */ 3221 3156 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, 3222 3157 &adev->hpd_irq); ··· 3204 3183 } 3205 3184 3206 3185 register_hpd_handlers(adev); 3186 + 3187 + return 0; 3188 + } 3189 + /* Register Outbox IRQ sources and initialize IRQ callbacks */ 3190 + static int register_outbox_irq_handlers(struct amdgpu_device *adev) 3191 + { 3192 + struct dc *dc = adev->dm.dc; 3193 + struct common_irq_params *c_irq_params; 3194 + struct dc_interrupt_params int_params = {0}; 3195 + int r, i; 3196 + 3197 + int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; 3198 + int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; 3199 + 3200 + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, 3201 + &adev->dmub_outbox_irq); 3202 + if 
(r) { 3203 + DRM_ERROR("Failed to add outbox irq id!\n"); 3204 + return r; 3205 + } 3206 + 3207 + if (dc->ctx->dmub_srv) { 3208 + i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; 3209 + int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; 3210 + int_params.irq_source = 3211 + dc_interrupt_to_irq_source(dc, i, 0); 3212 + 3213 + c_irq_params = &adev->dm.dmub_outbox_params[0]; 3214 + 3215 + c_irq_params->adev = adev; 3216 + c_irq_params->irq_src = int_params.irq_source; 3217 + 3218 + amdgpu_dm_irq_register_interrupt(adev, &int_params, 3219 + dm_dmub_outbox1_low_irq, c_irq_params); 3220 + } 3207 3221 3208 3222 return 0; 3209 3223 } ··· 3717 3661 DRM_ERROR("KMS: Failed to initialize crtc\n"); 3718 3662 goto fail; 3719 3663 } 3664 + 3665 + /* Use Outbox interrupt */ 3666 + switch (adev->asic_type) { 3667 + #if defined(CONFIG_DRM_AMD_DC_DCN3_0) 3668 + case CHIP_SIENNA_CICHLID: 3669 + case CHIP_NAVY_FLOUNDER: 3670 + #endif 3671 + case CHIP_RENOIR: 3672 + if (register_outbox_irq_handlers(dm->adev)) { 3673 + DRM_ERROR("DM: Failed to initialize IRQ\n"); 3674 + goto fail; 3675 + } 3676 + break; 3677 + default: 3678 + DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type); 3679 + } 3720 3680 3721 3681 /* loops over all connectors on the board */ 3722 3682 for (i = 0; i < link_cnt; i++) { ··· 10778 10706 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); 10779 10707 10780 10708 return value; 10709 + } 10710 + 10711 + int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex, 10712 + struct aux_payload *payload, enum aux_return_code_type *operation_result) 10713 + { 10714 + struct amdgpu_device *adev = ctx->driver_context; 10715 + int ret = 0; 10716 + 10717 + dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload); 10718 + ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ); 10719 + if (ret == 0) { 10720 + *operation_result = AUX_RET_ERROR_TIMEOUT; 10721 + 
return -1; 10722 + } 10723 + *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result; 10724 + 10725 + if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) { 10726 + (*payload->reply) = adev->dm.dmub_notify->aux_reply.command; 10727 + 10728 + // For read case, Copy data to payload 10729 + if (!payload->write && adev->dm.dmub_notify->aux_reply.length && 10730 + (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK)) 10731 + memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data, 10732 + adev->dm.dmub_notify->aux_reply.length); 10733 + } 10734 + 10735 + return adev->dm.dmub_notify->aux_reply.length; 10781 10736 }
+11
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 55 55 #include "irq_types.h" 56 56 #include "signal_types.h" 57 57 #include "amdgpu_dm_crc.h" 58 + struct aux_payload; 59 + enum aux_return_code_type; 58 60 59 61 /* Forward declarations */ 60 62 struct amdgpu_device; ··· 65 63 struct amdgpu_bo; 66 64 struct dmub_srv; 67 65 struct dc_plane_state; 66 + struct dmub_notification; 68 67 69 68 struct common_irq_params { 70 69 struct amdgpu_device *adev; ··· 182 179 * NULL on hardware that does not support it. 183 180 */ 184 181 struct dmub_srv *dmub_srv; 182 + 183 + struct dmub_notification *dmub_notify; 185 184 186 185 /** 187 186 * @dmub_fb_info: ··· 356 351 struct common_irq_params 357 352 dmub_trace_params[1]; 358 353 354 + struct common_irq_params 355 + dmub_outbox_params[1]; 356 + 359 357 spinlock_t irq_handler_list_table_lock; 360 358 361 359 struct backlight_device *backlight_dev; ··· 431 423 * DAL fb memory allocation list, for communication with SMU. 432 424 */ 433 425 struct list_head da_list; 426 + struct completion dmub_aux_transfer_done; 434 427 }; 435 428 436 429 enum dsc_clock_force_state { ··· 614 605 615 606 extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; 616 607 608 + int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex, 609 + struct aux_payload *payload, enum aux_return_code_type *operation_result); 617 610 #endif /* __AMDGPU_DM_H__ */
+10 -3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 640 640 641 641 return edid_status; 642 642 } 643 - 643 + int dm_helper_dmub_aux_transfer_sync( 644 + struct dc_context *ctx, 645 + const struct dc_link *link, 646 + struct aux_payload *payload, 647 + enum aux_return_code_type *operation_result) 648 + { 649 + return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload, operation_result); 650 + } 644 651 void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks) 645 652 { 646 653 /* TODO: something */ ··· 705 698 } 706 699 } 707 700 708 - bool dm_helpers_dmub_outbox0_interrupt_control(struct dc_context *ctx, bool enable) 701 + bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) 709 702 { 710 703 enum dc_irq_source irq_source; 711 704 bool ret; 712 705 713 - irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0; 706 + irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; 714 707 715 708 ret = dc_interrupt_set(ctx->dc, irq_source, enable); 716 709
+26 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
··· 769 769 __func__); 770 770 } 771 771 772 + static int amdgpu_dm_set_dmub_outbox_irq_state(struct amdgpu_device *adev, 773 + struct amdgpu_irq_src *source, 774 + unsigned int crtc_id, 775 + enum amdgpu_interrupt_state state) 776 + { 777 + enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; 778 + bool st = (state == AMDGPU_IRQ_STATE_ENABLE); 779 + 780 + dc_interrupt_set(adev->dm.dc, irq_source, st); 781 + return 0; 782 + } 783 + 772 784 static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev, 773 785 struct amdgpu_irq_src *source, 774 786 unsigned int crtc_id, ··· 817 805 .process = amdgpu_dm_irq_handler, 818 806 }; 819 807 808 + static const struct amdgpu_irq_src_funcs dm_dmub_outbox_irq_funcs = { 809 + .set = amdgpu_dm_set_dmub_outbox_irq_state, 810 + .process = amdgpu_dm_irq_handler, 811 + }; 812 + 820 813 static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = { 821 814 .set = amdgpu_dm_set_vupdate_irq_state, 822 815 .process = amdgpu_dm_irq_handler, ··· 844 827 845 828 void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) 846 829 { 847 - 848 830 adev->crtc_irq.num_types = adev->mode_info.num_crtc; 849 831 adev->crtc_irq.funcs = &dm_crtc_irq_funcs; 850 832 851 833 adev->vline0_irq.num_types = adev->mode_info.num_crtc; 852 834 adev->vline0_irq.funcs = &dm_vline0_irq_funcs; 835 + 836 + adev->dmub_outbox_irq.num_types = 1; 837 + adev->dmub_outbox_irq.funcs = &dm_dmub_outbox_irq_funcs; 853 838 854 839 adev->vupdate_irq.num_types = adev->mode_info.num_crtc; 855 840 adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs; ··· 864 845 865 846 adev->hpd_irq.num_types = adev->mode_info.num_hpd; 866 847 adev->hpd_irq.funcs = &dm_hpd_irq_funcs; 848 + } 849 + void amdgpu_dm_outbox_init(struct amdgpu_device *adev) 850 + { 851 + dc_interrupt_set(adev->dm.dc, 852 + DC_IRQ_SOURCE_DMCUB_OUTBOX, 853 + true); 867 854 } 868 855 869 856 /**
+1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
··· 82 82 83 83 void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev); 84 84 85 + void amdgpu_dm_outbox_init(struct amdgpu_device *adev); 85 86 void amdgpu_dm_hpd_init(struct amdgpu_device *adev); 86 87 void amdgpu_dm_hpd_fini(struct amdgpu_device *adev); 87 88
+1 -1
drivers/gpu/drm/amd/display/dc/Makefile
··· 54 54 55 55 include $(AMD_DC) 56 56 57 - DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ 57 + DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ 58 58 dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \ 59 59 dc_link_enc_cfg.o 60 60
+1 -1
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
··· 180 180 181 181 void dc_dmub_trace_event_control(struct dc *dc, bool enable) 182 182 { 183 - dm_helpers_dmub_outbox0_interrupt_control(dc->ctx, enable); 183 + dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable); 184 184 }
+19
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
··· 595 595 return res; 596 596 } 597 597 598 + int dce_aux_transfer_dmub_raw(struct ddc_service *ddc, 599 + struct aux_payload *payload, 600 + enum aux_return_code_type *operation_result) 601 + { 602 + struct ddc *ddc_pin = ddc->ddc_pin; 603 + 604 + if (ddc_pin != NULL) { 605 + struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; 606 + /* XXX: Workaround to configure ddc channels for aux transactions */ 607 + if (!acquire(aux_engine, ddc_pin)) { 608 + *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; 609 + return -1; 610 + } 611 + release_engine(aux_engine); 612 + } 613 + 614 + return dm_helper_dmub_aux_transfer_sync(ddc->ctx, ddc->link, payload, operation_result); 615 + } 616 + 598 617 #define AUX_MAX_RETRIES 7 599 618 #define AUX_MAX_DEFER_RETRIES 7 600 619 #define AUX_MAX_I2C_DEFER_RETRIES 7
+3
drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
··· 304 304 struct aux_payload *cmd, 305 305 enum aux_return_code_type *operation_result); 306 306 307 + int dce_aux_transfer_dmub_raw(struct ddc_service *ddc, 308 + struct aux_payload *payload, 309 + enum aux_return_code_type *operation_result); 307 310 bool dce_aux_transfer_with_retries(struct ddc_service *ddc, 308 311 struct aux_payload *cmd); 309 312
+8 -1
drivers/gpu/drm/amd/display/dc/dm_helpers.h
··· 34 34 #include "dc.h" 35 35 36 36 struct dp_mst_stream_allocation_table; 37 + struct aux_payload; 38 + enum aux_return_code_type; 37 39 38 40 /* 39 41 * Allocate memory accessible by the GPU ··· 160 158 struct dc_context *ctx, 161 159 struct dc_clocks *clks); 162 160 163 - bool dm_helpers_dmub_outbox0_interrupt_control(struct dc_context *ctx, bool enable); 161 + bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable); 164 162 163 + int dm_helper_dmub_aux_transfer_sync( 164 + struct dc_context *ctx, 165 + const struct dc_link *link, 166 + struct aux_payload *payload, 167 + enum aux_return_code_type *operation_result); 165 168 #endif /* __DM_HELPERS__ */
+9 -9
drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
··· 58 58 return DC_IRQ_SOURCE_VBLANK5; 59 59 case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP: 60 60 return DC_IRQ_SOURCE_VBLANK6; 61 - case DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT: 62 - return DC_IRQ_SOURCE_DMCUB_OUTBOX0; 61 + case DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT: 62 + return DC_IRQ_SOURCE_DMCUB_OUTBOX; 63 63 case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL: 64 64 return DC_IRQ_SOURCE_DC1_VLINE0; 65 65 case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL: ··· 187 187 .ack = NULL 188 188 }; 189 189 190 - static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = { 190 + static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = { 191 191 .set = NULL, 192 192 .ack = NULL 193 193 }; ··· 301 301 .funcs = &vline0_irq_info_funcs\ 302 302 } 303 303 304 - #define dmub_trace_int_entry()\ 305 - [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\ 306 - IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\ 307 - DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\ 308 - .funcs = &dmub_trace_irq_info_funcs\ 304 + #define dmub_outbox_int_entry()\ 305 + [DC_IRQ_SOURCE_DMCUB_OUTBOX] = {\ 306 + IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX1_READY_INT_EN,\ 307 + DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX1_READY_INT_ACK),\ 308 + .funcs = &dmub_outbox_irq_info_funcs\ 309 309 } 310 310 311 311 #define dummy_irq_entry() \ ··· 426 426 vline0_int_entry(3), 427 427 vline0_int_entry(4), 428 428 vline0_int_entry(5), 429 - dmub_trace_int_entry(), 429 + dmub_outbox_int_entry(), 430 430 }; 431 431 432 432 static const struct irq_service_funcs irq_service_funcs_dcn21 = {
+1 -1
drivers/gpu/drm/amd/display/dc/irq_types.h
··· 150 150 DC_IRQ_SOURCE_DC4_VLINE1, 151 151 DC_IRQ_SOURCE_DC5_VLINE1, 152 152 DC_IRQ_SOURCE_DC6_VLINE1, 153 - DC_IRQ_DMCUB_OUTBOX1, 153 + DC_IRQ_SOURCE_DMCUB_OUTBOX, 154 154 DC_IRQ_SOURCE_DMCUB_OUTBOX0, 155 155 156 156 DAL_IRQ_SOURCES_NUMBER