/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <drm/drmP.h>

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

/******************************************************************************
 * Private declarations.
 *****************************************************************************/

struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	/* DM which this handler belongs to */
	struct amdgpu_display_manager *dm;
	/* DAL irq source which registered for this interrupt. */
	enum dc_irq_source irq_source;
};

#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)

/******************************************************************************
 * Private functions.
 *****************************************************************************/

static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}

/**
 * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct
 */
static void dm_irq_work_func(struct work_struct *work)
{
	struct list_head *entry;
	struct irq_list_head *irq_list_head =
		container_of(work, struct irq_list_head, work);
	struct list_head *handler_list = &irq_list_head->head;
	struct amdgpu_dm_irq_handler_data *handler_data;

	/* Call the DAL subcomponents which registered for interrupt
	 * notification at INTERRUPT_LOW_IRQ_CONTEXT.
	 * (The most common use is the HPD interrupt.) */
	list_for_each(entry, handler_list) {
		handler_data = list_entry(entry,
					  struct amdgpu_dm_irq_handler_data,
					  list);

		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
			      handler_data->irq_source);

		handler_data->handler(handler_data->handler_arg);
	}
}
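
/*
 * Note on the two dispatch paths implemented below: handlers registered
 * at INTERRUPT_HIGH_IRQ_CONTEXT are called directly from the interrupt
 * handler via amdgpu_dm_irq_immediate_work(), while handlers registered
 * at INTERRUPT_LOW_IRQ_CONTEXT are deferred: the interrupt handler only
 * schedules the per-source work item, and dm_irq_work_func() above runs
 * them later from a worker thread.
 */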

/**
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
					    void *ih,
					    const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {

		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				     list);

		if (ih == handler) {
			/* Found our handler. Remove it from the list. */
			list_del(&handler->list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (!handler_removed) {
		/* Not necessarily an error - the caller may not
		 * know the context. */
		return NULL;
	}

	kfree(handler);

	DRM_DEBUG_KMS(
		"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
				 void (*ih)(void *))
{
	if (!int_params || !ih) {
		DRM_ERROR("DM_IRQ: invalid input!\n");
		return false;
	}

	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
			  int_params->int_context);
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
			  int_params->irq_source);
		return false;
	}

	return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
					       irq_handler_idx handler_idx)
{
	if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
		DRM_ERROR("DM_IRQ: invalid handler_idx!\n");
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n", irq_source);
		return false;
	}

	return true;
}

/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/
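
/*
 * Illustrative usage sketch (hypothetical caller; "hpd_low_irq_handler"
 * and "ctx" are not part of this file):
 *
 *	struct dc_interrupt_params int_params = {0};
 *
 *	int_params.irq_source = DC_IRQ_SOURCE_HPD1;
 *	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *
 *	hdl = amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					       hpd_low_irq_handler, ctx);
 *
 * The returned pointer is the handle that must later be passed to
 * amdgpu_dm_irq_unregister_interrupt(adev, DC_IRQ_SOURCE_HPD1, hdl).
 */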

void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (!validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	/* kzalloc() already zeroes the allocation; no memset() needed. */
	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

	handler_data->irq_source = irq_source;

	/* Lock the list, add the handler. */
	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	/* This pointer will be stored by the code which requested interrupt
	 * registration.
	 * The same pointer will be needed in order to unregister the
	 * interrupt. */

	DRM_DEBUG_KMS(
		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		handler_data,
		irq_source,
		int_params->int_context);

	return handler_data;
}

void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;
	int i;

	if (!validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {

		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/* If we got here, it means we searched all irq contexts
		 * for this irq source, but the handler was not found. */
		DRM_ERROR(
			"DM_IRQ: failed to find irq handler: %p for irq_source: %d!\n",
			ih, irq_source);
	}
}
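
/**
 * amdgpu_dm_irq_init - Initialize DM IRQ management.
 * @adev: amdgpu_device pointer
 *
 * Initializes the handler-table spinlock and, for each DAL IRQ source, the
 * low-context handler list (with its deferred work item) and the
 * high-context handler list.
 */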
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* low context handler list init */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(&lh->head);
		INIT_WORK(&lh->work, dm_irq_work_func);

		/* high context handler init */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}

/* DM IRQ and timer resource release */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		/* The handlers were removed from the table, which means it
		 * is safe to flush all the 'work' (because no code can
		 * schedule a new one). */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&lh->work);
	}
}

int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/*
	 * Disable the HW interrupt for HPD and HPDRX only, since FLIP and
	 * VBLANK are disabled by manage_dm_interrupts when the CRTC is
	 * disabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);

		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: early resume\n");

	/* Re-enable the HW interrupt for the short-pulse (HPDRX) sources. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}
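
/*
 * Suspend/resume sequencing, as implemented above and below:
 * amdgpu_dm_irq_suspend() disables the HPD and HPDRX sources and flushes
 * pending low-context work; amdgpu_dm_irq_resume_early() re-enables the
 * HPDRX (short pulse) sources; amdgpu_dm_irq_resume_late() re-enables the
 * HPD sources. FLIP and VBLANK are managed separately via
 * manage_dm_interrupts on CRTC disable/enable.
 */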
int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: resume\n");

	/*
	 * Re-enable the HW interrupt for HPD only, since FLIP and VBLANK
	 * are enabled by manage_dm_interrupts when the CRTC is enabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

/**
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	unsigned long irq_table_flags;
	struct work_struct *work = NULL;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (work) {
		if (!schedule_work(work))
			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
				 irq_source);
	}
}

/**
 * amdgpu_dm_irq_immediate_work - call the high-context handlers immediately,
 * without deferring to the work queue.
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	struct list_head *entry;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each(
		entry,
		&adev->dm.irq_handler_list_high_tab[irq_source]) {

		handler_data = list_entry(entry,
					  struct amdgpu_dm_irq_handler_data,
					  list);

		/* Call a subcomponent which registered for immediate
		 * interrupt notification */
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

/*
 * amdgpu_dm_irq_handler
 *
 * Generic IRQ handler: calls all registered high-context handlers
 * immediately and schedules deferred work for the low-context ones.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately */
	amdgpu_dm_irq_immediate_work(adev, src);
	/* Schedule low irq work */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}
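
/*
 * dm_irq_state - common helper for the per-CRTC .set callbacks below.
 * The effective DC IRQ source is computed by offsetting the base
 * dal_irq_type (e.g. IRQ_TYPE_VBLANK) by the CRTC's OTG instance.
 */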
static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;

	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR(
			"%s: crtc is NULL at id: %d\n",
			func,
			crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_PFLIP,
		__func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VBLANK,
		__func__);
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}
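
/*
 * The three amdgpu_irq_src_funcs tables above hook DM into the base
 * driver's interrupt core: amdgpu_irq routes each IV entry to the
 * .process callback (amdgpu_dm_irq_handler), while enable/disable
 * requests from the core go through the respective .set callbacks.
 */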
/*
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);

		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 true);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 true);
		}
	}
}

/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		/* Mirror the validity checks done in amdgpu_dm_hpd_init(). */
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 false);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 false);
		}
	}
}