// SPDX-License-Identifier: MIT
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

/**
 * DOC: overview
 *
 * DM provides another layer of IRQ management on top of what the base driver
 * already provides. This is something that could be cleaned up, and is a
 * future TODO item.
 *
 * The base driver provides IRQ source registration with DRM, handler
 * registration into the base driver's IRQ table, and a handler callback
 * amdgpu_irq_handler(), which DRM calls when an interrupt fires. This generic
 * handler looks up the IRQ table and calls the respective
 * &amdgpu_irq_src_funcs.process hookups.
 *
 * What DM provides on top are two IRQ tables specifically for top-half and
 * bottom-half IRQ handling, with the bottom-half implementing workqueues:
 *
 * - &amdgpu_display_manager.irq_handler_list_high_tab
 * - &amdgpu_display_manager.irq_handler_list_low_tab
 *
 * They override the base driver's IRQ table, and the effect can be seen
 * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
 * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks
 * up DM's IRQ tables. However, in order for the base driver to recognize this
 * hook, DM still needs to register the IRQ with the base driver. See
 * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
 *
 * To expose DC's hardware interrupt toggle to the base driver, DM implements
 * &amdgpu_irq_src_funcs.set hooks. The base driver calls them through
 * amdgpu_irq_update() to enable or disable the interrupt.
 */
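
/*
 * Illustrative call flow (editor's sketch; every name below is a function in
 * this file or in the base driver): when an interrupt that DM registered for
 * fires, it travels as follows:
 *
 *	amdgpu_irq_handler()                    base driver generic handler
 *	  -> amdgpu_dm_irq_handler()            DM's .process hook
 *	     -> amdgpu_dm_irq_immediate_work()  high context handlers, called
 *	                                        directly in ISR context
 *	     -> amdgpu_dm_irq_schedule_work()   low context handlers, deferred
 *	                                        to a workqueue
 */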

/******************************************************************************
 * Private declarations.
 *****************************************************************************/

/**
 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: Argument passed to the handler when triggered
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 * @work: Work struct used to defer low context handlers to a workqueue
 */
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;
	/* DAL IRQ source registered for this interrupt. */
	enum dc_irq_source irq_source;
	struct work_struct work;
};

#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)

/******************************************************************************
 * Private functions.
 *****************************************************************************/

static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}

/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct
 */
static void dm_irq_work_func(struct work_struct *work)
{
	struct amdgpu_dm_irq_handler_data *handler_data =
		container_of(work, struct amdgpu_dm_irq_handler_data, work);

	/* Call the DAL subcomponent which registered for interrupt
	 * notification at INTERRUPT_LOW_IRQ_CONTEXT (the most common use is
	 * the HPD interrupt).
	 */
	handler_data->handler(handler_data->handler_arg);
}

/*
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
					    void *ih,
					    const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {

		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				     list);

		if (handler == NULL)
			continue;

		if (ih == handler->handler) {
			/* Found our handler. Remove it from the list. */
			list_del(&handler->list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (!handler_removed) {
		/* Not necessarily an error - the caller may not
		 * know the context.
		 */
		return NULL;
	}

	kfree(handler);

	DRM_DEBUG_KMS(
		"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}

/**
 * unregister_all_irq_handlers() - Cleans up handlers from the DM IRQ table
 * @adev: The base driver device containing the DM device
 *
 * Go through low and high context IRQ tables and deallocate handlers.
 */
static void unregister_all_irq_handlers(struct amdgpu_device *adev)
{
	struct list_head *hnd_list_low;
	struct list_head *hnd_list_high;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	int i;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) {
		hnd_list_low = &adev->dm.irq_handler_list_low_tab[i];
		hnd_list_high = &adev->dm.irq_handler_list_high_tab[i];

		list_for_each_safe(entry, tmp, hnd_list_low) {

			handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
					     list);

			if (handler == NULL || handler->handler == NULL)
				continue;

			list_del(&handler->list);
			kfree(handler);
		}

		list_for_each_safe(entry, tmp, hnd_list_high) {

			handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
					     list);

			if (handler == NULL || handler->handler == NULL)
				continue;

			list_del(&handler->list);
			kfree(handler);
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
				 void (*ih)(void *))
{
	if (!int_params || !ih) {
		DRM_ERROR("DM_IRQ: invalid input!\n");
		return false;
	}

	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
			  int_params->int_context);
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
			  int_params->irq_source);
		return false;
	}

	return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
					       irq_handler_idx handler_idx)
{
	if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
		DRM_ERROR("DM_IRQ: invalid handler_idx!\n");
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n", irq_source);
		return false;
	}

	return true;
}

/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/

/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
 * @adev: The base driver device containing the DM device.
 * @int_params: Interrupt parameters containing the source and handler context
 * @ih: Function pointer to the interrupt handler to register
 * @handler_args: Arguments passed to the handler when the interrupt occurs
 *
 * Register an interrupt handler for the given IRQ source, under the given
 * context. The context can either be high or low. High context handlers are
 * executed directly within ISR context, while low context handlers are
 * executed within a workqueue, thereby allowing operations that sleep.
 *
 * Registered handlers are called in a FIFO manner, i.e. the earliest
 * registered handler is called first.
 *
 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
 * source, handler function, and args, or DAL_INVALID_IRQ_HANDLER_IDX on
 * failure.
 */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (!validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

	handler_data->irq_source = irq_source;

	/* Lock the list, add the handler. */
	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
		INIT_WORK(&handler_data->work, dm_irq_work_func);
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	/* This pointer will be stored by code which requested interrupt
	 * registration.
	 * The same pointer will be needed in order to unregister the
	 * interrupt.
	 */

	DRM_DEBUG_KMS(
		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		handler_data,
		irq_source,
		int_params->int_context);

	return handler_data;
}
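
/*
 * Usage sketch (editor's illustration, not called anywhere in the driver;
 * my_hpd_handler and its body are hypothetical): register a low context
 * handler for HPD1, then unregister it later using the same function pointer
 * that was registered.
 *
 *	static void my_hpd_handler(void *arg)	// runs in a workqueue, may sleep
 *	{
 *		struct amdgpu_device *adev = arg;
 *		// ... handle the hotplug event ...
 *	}
 *
 *	struct dc_interrupt_params int_params = {
 *		.irq_source = DC_IRQ_SOURCE_HPD1,
 *		.int_context = INTERRUPT_LOW_IRQ_CONTEXT,
 *	};
 *
 *	void *hdl = amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *						     my_hpd_handler, adev);
 *	// ...
 *	amdgpu_dm_irq_unregister_interrupt(adev, DC_IRQ_SOURCE_HPD1,
 *					   my_hpd_handler);
 */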

/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
 * @adev: The base driver device containing the DM device
 * @irq_source: IRQ source to remove the given handler from
 * @ih: Function pointer to the interrupt handler to unregister
 *
 * Go through both low and high context IRQ tables, and find the given handler
 * for the given irq source. If found, remove it. Otherwise, do nothing.
 */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;
	int i;

	if (!validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {

		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/* If we got here, it means we searched all irq contexts
		 * for this irq source, but the handler was not found.
		 */
		DRM_ERROR(
			"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
			ih, irq_source);
	}
}

/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Initialize DM's high and low context IRQ tables.
 *
 * The N by M table contains N IRQ sources, each with up to M
 * &struct amdgpu_dm_irq_handler_data chained together in a linked list. The
 * list_heads are initialized here. When an interrupt n is triggered, all m
 * handlers are called in sequence, FIFO according to registration order.
 *
 * Handlers in the low context table are deferred to a workqueue; each
 * handler's &struct work_struct is initialized at registration time in
 * amdgpu_dm_irq_register_interrupt(), not here.
 */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* Low context handler list init. */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(lh);
		/* High context handler list init. */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}
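
/*
 * Shape of the tables initialized above (editor's sketch): one list head per
 * DC IRQ source, each chaining the &struct amdgpu_dm_irq_handler_data
 * entries registered for that source:
 *
 *	irq_handler_list_high_tab[src] -> handler -> handler -> ...  (ISR)
 *	irq_handler_list_low_tab[src]  -> handler -> handler -> ...  (workqueue)
 */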

/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Flush all work within the low context IRQ table.
 */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct list_head *lh;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		/* Grab the list head under the lock. At this point no new
		 * work can be scheduled, so it is safe to flush any pending
		 * work afterwards, before the handlers are freed.
		 */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

		if (!list_empty(lh)) {
			list_for_each_safe(entry, tmp, lh) {
				handler = list_entry(
					entry,
					struct amdgpu_dm_irq_handler_data,
					list);
				flush_work(&handler->work);
			}
		}
	}
	/* Deallocate handlers from the table. */
	unregister_all_irq_handlers(adev);
}

void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/* Disable HW interrupts for HPD and HPDRX only, since FLIP and VBLANK
	 * interrupts are disabled by manage_dm_interrupts() when a CRTC is
	 * disabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

		if (!list_empty(hnd_list_l)) {
			list_for_each_safe(entry, tmp, hnd_list_l) {
				handler = list_entry(
					entry,
					struct amdgpu_dm_irq_handler_data,
					list);
				flush_work(&handler->work);
			}
		}
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (dev->mode_config.poll_enabled)
		drm_kms_helper_poll_disable(dev);
}

void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	drm_dbg(adev_to_drm(adev), "DM_IRQ: early resume\n");

	/* Re-enable the HW interrupts for the HPD RX (short pulse) sources. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	drm_dbg(adev_to_drm(adev), "DM_IRQ: resume\n");

	/* Re-enable HW interrupts for HPD only, since FLIP and VBLANK
	 * interrupts are enabled by manage_dm_interrupts() when a CRTC is
	 * enabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (dev->mode_config.poll_enabled)
		drm_kms_helper_poll_enable(dev);
}

/*
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
	struct amdgpu_dm_irq_handler_data *handler_data;
	bool work_queued = false;

	if (list_empty(handler_list))
		return;

	list_for_each_entry(handler_data, handler_list, list) {
		if (queue_work(system_highpri_wq, &handler_data->work)) {
			work_queued = true;
			break;
		}
	}

	if (!work_queued) {
		struct amdgpu_dm_irq_handler_data *handler_data_add;

		/* All registered work items are still pending on the
		 * workqueue. Clone the first handler's data and queue the
		 * clone so this interrupt is not lost.
		 */

		/* Get the amdgpu_dm_irq_handler_data of the first item in
		 * handler_list.
		 */
		handler_data = container_of(handler_list->next,
					    struct amdgpu_dm_irq_handler_data,
					    list);

		/* Allocate a new amdgpu_dm_irq_handler_data. */
		handler_data_add = kzalloc(sizeof(*handler_data_add), GFP_ATOMIC);
		if (!handler_data_add) {
			DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
			return;
		}

		/* Copy the members of handler_data into the new entry. */
		handler_data_add->handler = handler_data->handler;
		handler_data_add->handler_arg = handler_data->handler_arg;
		handler_data_add->dm = handler_data->dm;
		handler_data_add->irq_source = irq_source;

		list_add_tail(&handler_data_add->list, handler_list);

		INIT_WORK(&handler_data_add->work, dm_irq_work_func);

		if (queue_work(system_highpri_wq, &handler_data_add->work))
			DRM_DEBUG("Queued work for handling interrupt from display for IRQ source %d\n",
				  irq_source);
		else
			DRM_ERROR("Failed to queue work for handling interrupt from display for IRQ source %d\n",
				  irq_source);
	}
}

/*
 * amdgpu_dm_irq_immediate_work
 * Call the high context (high IRQ) handlers immediately, rather than sending
 * them to a work queue.
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each_entry(handler_data,
			    &adev->dm.irq_handler_list_high_tab[irq_source],
			    list) {
		/* Call a subcomponent which registered for immediate
		 * interrupt notification.
		 */
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

/**
 * amdgpu_dm_irq_handler - Generic DM IRQ handler
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Calls all registered high irq work immediately, and schedules work for low
 * irq. The DM IRQ table is used to find the corresponding handlers.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately. */
	amdgpu_dm_irq_immediate_work(adev, src);
	/* Schedule low irq work. */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned int type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}

static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned int crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;
	struct dc *dc = adev->dm.dc;
	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR("%s: crtc is NULL at id: %d\n", func, crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
		dc_allow_idle_optimizations(dc, false);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned int crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_PFLIP, __func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VBLANK, __func__);
}

static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned int crtc_id,
					  enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VLINE0, __func__);
}

static int amdgpu_dm_set_dmub_outbox_irq_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned int crtc_id,
					       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int crtc_id,
					   enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VUPDATE, __func__);
}

static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned int type,
					      enum amdgpu_interrupt_state state)
{
	enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
	.set = amdgpu_dm_set_vline0_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_outbox_irq_funcs = {
	.set = amdgpu_dm_set_dmub_outbox_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = {
	.set = amdgpu_dm_set_dmub_trace_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->vline0_irq.num_types = adev->mode_info.num_crtc;
	adev->vline0_irq.funcs = &dm_vline0_irq_funcs;

	adev->dmub_outbox_irq.num_types = 1;
	adev->dmub_outbox_irq.funcs = &dm_dmub_outbox_irq_funcs;

	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

	adev->dmub_trace_irq.num_types = 1;
	adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}

void amdgpu_dm_outbox_init(struct amdgpu_device *adev)
{
	dc_interrupt_set(adev->dm.dc, DC_IRQ_SOURCE_DMCUB_OUTBOX, true);
}

/**
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int irq_type;
	int i;
	bool use_polling = false;

	/* First, clear all hpd and hpdrx interrupts. */
	for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
		if (!dc_interrupt_set(adev->dm.dc, i, false))
			drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n",
				i);
	}

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector;
		const struct dc_link *dc_link;

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

		/*
		 * Analog connectors don't support HPD but may still be
		 * hot-plugged, so they are the only connectors that need
		 * polling.
		 */
		use_polling |=
			amdgpu_dm_connector->dc_link &&
			dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id);

		dc_link = amdgpu_dm_connector->dc_link;

		/*
		 * Get a base driver irq reference for hpd ints for the lifetime
		 * of dm. Note that only hpd interrupt types are registered with
		 * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on
		 * hpd_rx isn't available. DM currently controls hpd_rx
		 * explicitly with dc_interrupt_set().
		 */
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
			/*
			 * TODO: There's a mismatch between mode_info.num_hpd
			 * and what bios reports as the # of connectors with hpd
			 * sources. Since the # of hpd source types registered
			 * with base driver == mode_info.num_hpd, we have to
			 * fall back to dc_interrupt_set for the remaining types.
			 */
			if (irq_type < adev->mode_info.num_hpd) {
				if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type))
					drm_err(dev, "DM_IRQ: Failed to get HPD for source=%d!\n",
						dc_link->irq_source_hpd);
			} else {
				dc_interrupt_set(adev->dm.dc,
						 dc_link->irq_source_hpd,
						 true);
			}
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 true);
		}
	}
	drm_connector_list_iter_end(&iter);

	if (use_polling)
		drm_kms_helper_poll_init(dev);
}

/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int irq_type;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector;
		const struct dc_link *dc_link;

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
		dc_link = amdgpu_dm_connector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;

			/* TODO: See same TODO in amdgpu_dm_hpd_init(). */
			if (irq_type < adev->mode_info.num_hpd) {
				if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type))
					drm_err(dev, "DM_IRQ: Failed to put HPD for source=%d!\n",
						dc_link->irq_source_hpd);
			} else {
				dc_interrupt_set(adev->dm.dc,
						 dc_link->irq_source_hpd,
						 false);
			}
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 false);
		}
	}
	drm_connector_list_iter_end(&iter);

	if (dev->mode_config.poll_enabled)
		drm_kms_helper_poll_fini(dev);
}