Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amd/display: Support for DMUB HPD interrupt handling

[WHY]
To add support for HPD interrupt handling from DMUB.
The HPD interrupt can be triggered through outbox1 by DMUB.

[HOW]
1) Use queue_work to handle hpd task from outbox1

2) Add handle_hpd_irq_helper to share interrupt handling code
between legacy and DMUB HPD from outbox1

3) Added DMUB HPD handling in dmub_srv_stat_get_notification(), which
invokes the HPD handling callback function and wakes up the DMUB thread.

Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
Signed-off-by: Jude Shih <shenshih@amd.com>
Tested-by: Daniel Wheeler <Daniel.Wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Jude Shih and committed by
Alex Deucher
e27c41d5 b5ce6fe8

+203 -8
+163 -8
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 215 215 static const struct drm_format_info * 216 216 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); 217 217 218 + static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); 219 + 218 220 static bool 219 221 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, 220 222 struct drm_crtc_state *new_crtc_state); ··· 620 618 } 621 619 #endif 622 620 621 + /** 622 + * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command. 623 + * @adev: amdgpu_device pointer 624 + * @notify: dmub notification structure 625 + * 626 + * Dmub AUX or SET_CONFIG command completion processing callback 627 + * Copies dmub notification to DM which is to be read by AUX command. 628 + * issuing thread and also signals the event to wake up the thread. 629 + */ 630 + void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify) 631 + { 632 + if (adev->dm.dmub_notify) 633 + memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); 634 + if (notify->type == DMUB_NOTIFICATION_AUX_REPLY) 635 + complete(&adev->dm.dmub_aux_transfer_done); 636 + } 637 + 638 + /** 639 + * dmub_hpd_callback - DMUB HPD interrupt processing callback. 640 + * @adev: amdgpu_device pointer 641 + * @notify: dmub notification structure 642 + * 643 + * Dmub Hpd interrupt processing callback. Gets displayindex through the 644 + * ink index and calls helper to do the processing. 
645 + */ 646 + void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify) 647 + { 648 + struct amdgpu_dm_connector *aconnector; 649 + struct drm_connector *connector; 650 + struct drm_connector_list_iter iter; 651 + struct dc_link *link; 652 + uint8_t link_index = 0; 653 + struct drm_device *dev = adev->dm.ddev; 654 + 655 + if (adev == NULL) 656 + return; 657 + 658 + if (notify == NULL) { 659 + DRM_ERROR("DMUB HPD callback notification was NULL"); 660 + return; 661 + } 662 + 663 + if (notify->link_index > adev->dm.dc->link_count) { 664 + DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index); 665 + return; 666 + } 667 + 668 + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 669 + 670 + link_index = notify->link_index; 671 + 672 + link = adev->dm.dc->links[link_index]; 673 + 674 + drm_connector_list_iter_begin(dev, &iter); 675 + drm_for_each_connector_iter(connector, &iter) { 676 + aconnector = to_amdgpu_dm_connector(connector); 677 + if (link && aconnector->dc_link == link) { 678 + DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index); 679 + handle_hpd_irq_helper(aconnector); 680 + break; 681 + } 682 + } 683 + drm_connector_list_iter_end(&iter); 684 + drm_modeset_unlock(&dev->mode_config.connection_mutex); 685 + 686 + } 687 + 688 + /** 689 + * register_dmub_notify_callback - Sets callback for DMUB notify 690 + * @adev: amdgpu_device pointer 691 + * @type: Type of dmub notification 692 + * @callback: Dmub interrupt callback function 693 + * @dmub_int_thread_offload: offload indicator 694 + * 695 + * API to register a dmub callback handler for a dmub notification 696 + * Also sets indicator whether callback processing to be offloaded. 
697 + * to dmub interrupt handling thread 698 + * Return: true if successfully registered, false if there is existing registration 699 + */ 700 + bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type, 701 + dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload) 702 + { 703 + if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { 704 + adev->dm.dmub_callback[type] = callback; 705 + adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload; 706 + } else 707 + return false; 708 + 709 + return true; 710 + } 711 + 712 + static void dm_handle_hpd_work(struct work_struct *work) 713 + { 714 + struct dmub_hpd_work *dmub_hpd_wrk; 715 + 716 + dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); 717 + 718 + if (!dmub_hpd_wrk->dmub_notify) { 719 + DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); 720 + return; 721 + } 722 + 723 + if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) { 724 + dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev, 725 + dmub_hpd_wrk->dmub_notify); 726 + } 727 + kfree(dmub_hpd_wrk); 728 + 729 + } 730 + 623 731 #define DMUB_TRACE_MAX_READ 64 624 732 /** 625 733 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt ··· 746 634 struct amdgpu_display_manager *dm = &adev->dm; 747 635 struct dmcub_trace_buf_entry entry = { 0 }; 748 636 uint32_t count = 0; 637 + struct dmub_hpd_work *dmub_hpd_wrk; 749 638 750 639 if (dc_enable_dmub_notifications(adev->dm.dc)) { 640 + dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); 641 + if (!dmub_hpd_wrk) { 642 + DRM_ERROR("Failed to allocate dmub_hpd_wrk"); 643 + return; 644 + } 645 + INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); 646 + 751 647 if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { 752 648 do { 753 649 dc_stat_get_dmub_notification(adev->dm.dc, &notify); 754 - } while (notify.pending_notification); 650 + if 
(notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) { 651 + DRM_ERROR("DM: notify type %d larger than the array size %ld !", notify.type, 652 + ARRAY_SIZE(dm->dmub_thread_offload)); 653 + continue; 654 + } 655 + if (dm->dmub_thread_offload[notify.type] == true) { 656 + dmub_hpd_wrk->dmub_notify = &notify; 657 + dmub_hpd_wrk->adev = adev; 658 + queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); 659 + } else { 660 + dm->dmub_callback[notify.type](adev, &notify); 661 + } 755 662 756 - if (adev->dm.dmub_notify) 757 - memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification)); 758 - if (notify.type == DMUB_NOTIFICATION_AUX_REPLY) 759 - complete(&adev->dm.dmub_aux_transfer_done); 760 - // TODO : HPD Implementation 663 + } while (notify.pending_notification); 761 664 762 665 } else { 763 666 DRM_ERROR("DM: Failed to receive correct outbox IRQ !"); ··· 1378 1251 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); 1379 1252 goto error; 1380 1253 } 1254 + 1255 + adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); 1256 + if (!adev->dm.delayed_hpd_wq) { 1257 + DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); 1258 + goto error; 1259 + } 1260 + 1381 1261 amdgpu_dm_outbox_init(adev); 1262 + #if defined(CONFIG_DRM_AMD_DC_DCN) 1263 + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, 1264 + dmub_aux_setconfig_callback, false)) { 1265 + DRM_ERROR("amdgpu: fail to register dmub aux callback"); 1266 + goto error; 1267 + } 1268 + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { 1269 + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); 1270 + goto error; 1271 + } 1272 + #endif 1382 1273 } 1383 1274 1384 1275 if (amdgpu_dm_initialize_drm_device(adev)) { ··· 1478 1333 if (dc_enable_dmub_notifications(adev->dm.dc)) { 1479 1334 kfree(adev->dm.dmub_notify); 1480 1335 adev->dm.dmub_notify = NULL; 1336 + destroy_workqueue(adev->dm.delayed_hpd_wq); 
1337 + adev->dm.delayed_hpd_wq = NULL; 1481 1338 } 1482 1339 1483 1340 if (adev->dm.dmub_bo) ··· 2758 2611 dc_sink_release(sink); 2759 2612 } 2760 2613 2761 - static void handle_hpd_irq(void *param) 2614 + static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) 2762 2615 { 2763 - struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 2764 2616 struct drm_connector *connector = &aconnector->base; 2765 2617 struct drm_device *dev = connector->dev; 2766 2618 enum dc_connection_type new_connection_type = dc_connection_none; ··· 2817 2671 mutex_unlock(&aconnector->hpd_lock); 2818 2672 2819 2673 } 2674 + 2675 + static void handle_hpd_irq(void *param) 2676 + { 2677 + struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 2678 + 2679 + handle_hpd_irq_helper(aconnector); 2680 + 2681 + } 2682 + 2820 2683 2821 2684 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector) 2822 2685 {
+40
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 47 47 #define AMDGPU_DM_MAX_CRTC 6 48 48 49 49 #define AMDGPU_DM_MAX_NUM_EDP 2 50 + 51 + #define AMDGPU_DMUB_NOTIFICATION_MAX 5 50 52 /* 51 53 #include "include/amdgpu_dal_power_if.h" 52 54 #include "amdgpu_dm_irq.h" ··· 86 84 void *cpu_addr; 87 85 struct amdgpu_bo *bo_ptr; 88 86 uint64_t gpu_addr; 87 + }; 88 + 89 + typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify); 90 + 91 + /** 92 + * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ 93 + * 94 + * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq 95 + * @dmub_notify: notification for callback function 96 + * @adev: amdgpu_device pointer 97 + */ 98 + struct dmub_hpd_work { 99 + struct work_struct handle_hpd_work; 100 + struct dmub_notification *dmub_notify; 101 + struct amdgpu_device *adev; 89 102 }; 90 103 91 104 /** ··· 207 190 */ 208 191 struct dmub_srv *dmub_srv; 209 192 193 + /** 194 + * @dmub_notify: 195 + * 196 + * Notification from DMUB. 197 + */ 198 + 210 199 struct dmub_notification *dmub_notify; 200 + 201 + /** 202 + * @dmub_callback: 203 + * 204 + * Callback functions to handle notification from DMUB. 205 + */ 206 + 207 + dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX]; 208 + 209 + /** 210 + * @dmub_thread_offload: 211 + * 212 + * Flag to indicate if callback is offload. 213 + */ 214 + 215 + bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX]; 211 216 212 217 /** 213 218 * @dmub_fb_info: ··· 478 439 */ 479 440 struct list_head da_list; 480 441 struct completion dmub_aux_transfer_done; 442 + struct workqueue_struct *delayed_hpd_wq; 481 443 482 444 /** 483 445 * @brightness: