// SPDX-License-Identifier: MIT
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

/**
 * DOC: overview
 *
 * DM provides another layer of IRQ management on top of what the base driver
 * already provides. This is something that could be cleaned up, and is a
 * future TODO item.
 *
 * The base driver provides IRQ source registration with DRM, handler
 * registration into the base driver's IRQ table, and a handler callback
 * amdgpu_irq_handler(), which DRM calls when an interrupt fires. This generic
 * handler looks up the IRQ table and calls the respective
 * &amdgpu_irq_src_funcs.process hookups.
 *
 * What DM provides on top are two IRQ tables specifically for top-half and
 * bottom-half IRQ handling, with the bottom-half implementing workqueues:
 *
 * - &amdgpu_display_manager.irq_handler_list_high_tab
 * - &amdgpu_display_manager.irq_handler_list_low_tab
 *
 * They override the base driver's IRQ table, and the effect can be seen
 * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
 * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks
 * up DM's IRQ tables. However, in order for the base driver to recognize this
 * hook, DM still needs to register the IRQ with the base driver. See
 * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
 *
 * To expose DC's hardware interrupt toggle to the base driver, DM implements
 * &amdgpu_irq_src_funcs.set hooks. The base driver calls these through
 * amdgpu_irq_update() to enable or disable the interrupt.
 */

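/*
 * Illustrative sketch, not part of the driver: a DM consumer would typically
 * register a deferred (low context) handler like this, where hpd_handler and
 * handler_arg are hypothetical names:
 *
 *	struct dc_interrupt_params int_params = {0};
 *
 *	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *	int_params.irq_source = DC_IRQ_SOURCE_HPD1;
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					 hpd_handler, handler_arg);
 *
 * When the interrupt fires, amdgpu_dm_irq_handler() acks the source, runs any
 * high context handlers directly in ISR context, and defers hpd_handler() to
 * a workqueue via amdgpu_dm_irq_schedule_work().
 */
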
/******************************************************************************
 * Private declarations.
 *****************************************************************************/

/**
 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: Argument passed to the handler when triggered
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 * @work: work struct used to defer low context handlers to a workqueue
 */
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;
	/* DAL irq source which registered for this interrupt. */
	enum dc_irq_source irq_source;
	struct work_struct work;
};

#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)

/******************************************************************************
 * Private functions.
 *****************************************************************************/

static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}

/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct
 */
static void dm_irq_work_func(struct work_struct *work)
{
	struct amdgpu_dm_irq_handler_data *handler_data =
		container_of(work, struct amdgpu_dm_irq_handler_data, work);

	/* Call a DAL subcomponent which registered for interrupt notification
	 * at INTERRUPT_LOW_IRQ_CONTEXT (the most common use is the HPD
	 * interrupt).
	 */
	handler_data->handler(handler_data->handler_arg);
}

/*
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
					    void *ih,
					    const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {

		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				     list);

		if (handler == NULL)
			continue;

		if (ih == handler->handler) {
			/* Found our handler. Remove it from the list. */
			list_del(&handler->list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (!handler_removed) {
		/* Not necessarily an error - caller may not
		 * know the context.
		 */
		return NULL;
	}

	kfree(handler);

	DRM_DEBUG_KMS(
		"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}

/**
 * unregister_all_irq_handlers() - Cleans up handlers from the DM IRQ table
 * @adev: The base driver device containing the DM device
 *
 * Go through low and high context IRQ tables and deallocate handlers.
 */
static void unregister_all_irq_handlers(struct amdgpu_device *adev)
{
	struct list_head *hnd_list_low;
	struct list_head *hnd_list_high;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	int i;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) {
		hnd_list_low = &adev->dm.irq_handler_list_low_tab[i];
		hnd_list_high = &adev->dm.irq_handler_list_high_tab[i];

		list_for_each_safe(entry, tmp, hnd_list_low) {

			handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
					     list);

			if (handler == NULL || handler->handler == NULL)
				continue;

			list_del(&handler->list);
			kfree(handler);
		}

		list_for_each_safe(entry, tmp, hnd_list_high) {

			handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
					     list);

			if (handler == NULL || handler->handler == NULL)
				continue;

			list_del(&handler->list);
			kfree(handler);
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
				 void (*ih)(void *))
{
	if (!int_params || !ih) {
		DRM_ERROR("DM_IRQ: invalid input!\n");
		return false;
	}

	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
			  int_params->int_context);
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
			  int_params->irq_source);
		return false;
	}

	return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
					       irq_handler_idx handler_idx)
{
	if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
		DRM_ERROR("DM_IRQ: invalid handler_idx!\n");
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n", irq_source);
		return false;
	}

	return true;
}

/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/

/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
 * @adev: The base driver device containing the DM device.
 * @int_params: Interrupt parameters containing the source, and handler context
 * @ih: Function pointer to the interrupt handler to register
 * @handler_args: Arguments passed to the handler when the interrupt occurs
 *
 * Register an interrupt handler for the given IRQ source, under the given
 * context. The context can either be high or low. High context handlers are
 * executed directly within ISR context, while low context handlers are
 * executed within a workqueue, thereby allowing operations that sleep.
 *
 * Registered handlers are called in FIFO order, i.e. the handler that was
 * registered first is called first.
 *
 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
 * source, handler function, and args, or DAL_INVALID_IRQ_HANDLER_IDX on
 * failure.
 */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (!validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

	handler_data->irq_source = irq_source;

	/* Lock the list, add the handler. */
	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
		INIT_WORK(&handler_data->work, dm_irq_work_func);
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	/* This pointer will be stored by code which requested interrupt
	 * registration.
	 * The same pointer will be needed in order to unregister the
	 * interrupt.
	 */

	DRM_DEBUG_KMS(
		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		handler_data,
		irq_source,
		int_params->int_context);

	return handler_data;
}

/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
 * @adev: The base driver device containing the DM device
 * @irq_source: IRQ source to remove the given handler from
 * @ih: Function pointer to the interrupt handler to unregister
 *
 * Go through both low and high context IRQ tables, and find the given handler
 * for the given irq source. If found, remove it. Otherwise, do nothing.
 */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;
	int i;

	if (!validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {

		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/* If we got here, it means we searched all irq contexts
		 * for this irq source, but the handler was not found.
		 */
		DRM_ERROR(
			"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
			ih, irq_source);
	}
}

/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Initialize DM's high and low context IRQ tables.
 *
 * The N by M table contains N IRQ sources, with M
 * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The
 * list_heads are initialized here. When an interrupt n is triggered, all m
 * handlers are called in sequence, FIFO according to registration order.
 *
 * Low context handlers are deferred to a workqueue; their &struct work_struct
 * is initialized at registration time in amdgpu_dm_irq_register_interrupt().
 */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* low context handler list init */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(lh);
		/* high context handler init */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}

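/*
 * Illustrative layout of the tables described above (an informal sketch, not
 * a definition): one list_head per DC IRQ source, each chaining zero or more
 * &struct amdgpu_dm_irq_handler_data entries in registration order:
 *
 *	irq_handler_list_low_tab[0]   -> hd -> hd
 *	irq_handler_list_low_tab[1]   -> (empty)
 *	...
 *	irq_handler_list_low_tab[N-1] -> hd
 */
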
/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Flush all work within the low context IRQ table.
 */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct list_head *lh;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		/* At this point no new work can be scheduled for this
		 * source, so it is safe to flush any pending 'work'
		 * once the table lock has been dropped.
		 */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

		if (!list_empty(lh)) {
			list_for_each_safe(entry, tmp, lh) {
				handler = list_entry(
					entry,
					struct amdgpu_dm_irq_handler_data,
					list);
				flush_work(&handler->work);
			}
		}
	}
	/* Deallocate handlers from the table. */
	unregister_all_irq_handlers(adev);
}

void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/* Disable the HW interrupts for HPD and HPDRX only, since FLIP and
	 * VBLANK interrupts are disabled in manage_dm_interrupts() when a
	 * CRTC is disabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

		if (!list_empty(hnd_list_l)) {
			list_for_each_safe(entry, tmp, hnd_list_l) {
				handler = list_entry(
					entry,
					struct amdgpu_dm_irq_handler_data,
					list);
				flush_work(&handler->work);
			}
		}
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	drm_dbg(adev_to_drm(adev), "DM_IRQ: early resume\n");

	/* Re-enable the short-pulse (HPD RX) HW interrupts. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	drm_dbg(adev_to_drm(adev), "DM_IRQ: resume\n");

	/* Re-enable the HW interrupt for HPD only, since FLIP and VBLANK
	 * interrupts are enabled in manage_dm_interrupts() when a CRTC is
	 * enabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

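/*
 * Design note on the function below: queue_work() returns false when the work
 * item is already pending, i.e. a previous interrupt from this source has not
 * been handled yet. To avoid dropping the new event in that case, a clone of
 * the first handler's data is allocated, added to the list, and queued
 * instead.
 */
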
/*
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
	struct amdgpu_dm_irq_handler_data *handler_data;
	bool work_queued = false;

	if (list_empty(handler_list))
		return;

	list_for_each_entry(handler_data, handler_list, list) {
		if (queue_work(system_highpri_wq, &handler_data->work)) {
			work_queued = true;
			break;
		}
	}

	if (!work_queued) {
		struct amdgpu_dm_irq_handler_data *handler_data_add;

		/* Get the amdgpu_dm_irq_handler_data of the first item in
		 * handler_list.
		 */
		handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);

		/* Allocate a new amdgpu_dm_irq_handler_data. */
		handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
		if (!handler_data_add) {
			DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
			return;
		}

		/* Copy the members of handler_data into the new entry. */
		handler_data_add->handler = handler_data->handler;
		handler_data_add->handler_arg = handler_data->handler_arg;
		handler_data_add->dm = handler_data->dm;
		handler_data_add->irq_source = irq_source;

		list_add_tail(&handler_data_add->list, handler_list);

		INIT_WORK(&handler_data_add->work, dm_irq_work_func);

		if (queue_work(system_highpri_wq, &handler_data_add->work))
			DRM_DEBUG("Queued work for handling interrupt from display for IRQ source %d\n",
				  irq_source);
		else
			DRM_ERROR("Failed to queue work for handling interrupt from display for IRQ source %d\n",
				  irq_source);
	}
}

/*
 * amdgpu_dm_irq_immediate_work
 * Call all high context handlers immediately instead of deferring to a
 * workqueue.
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each_entry(handler_data,
			    &adev->dm.irq_handler_list_high_tab[irq_source],
			    list) {
		/* Call a subcomponent which registered for immediate
		 * interrupt notification
		 */
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

/**
 * amdgpu_dm_irq_handler() - Generic DM IRQ handler
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Calls all registered high irq work immediately, and schedules work for low
 * irq. The DM IRQ table is used to find the corresponding handlers.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately */
	amdgpu_dm_irq_immediate_work(adev, src);
	/* Schedule low irq work */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned int type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}

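/*
 * Helper for the per-CRTC .set hooks below. Note on the irq_source math, an
 * observation rather than a guarantee of the dc interface: IRQ_TYPE_* values
 * alias the first DC source of their kind (e.g. IRQ_TYPE_VBLANK ==
 * DC_IRQ_SOURCE_VBLANK1), so adding acrtc->otg_inst selects the source for
 * the CRTC's OTG instance.
 */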
static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned int crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;
	struct dc *dc = adev->dm.dc;
	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR("%s: crtc is NULL at id :%d\n", func, crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
		dc_allow_idle_optimizations(dc, false);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned int crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_PFLIP,
		__func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VBLANK,
		__func__);
}

static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned int crtc_id,
					  enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VLINE0,
		__func__);
}

static int amdgpu_dm_set_dmub_outbox_irq_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned int crtc_id,
					       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX;
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int crtc_id,
					   enum amdgpu_interrupt_state state)
{
	return dm_irq_state(
		adev,
		source,
		crtc_id,
		state,
		IRQ_TYPE_VUPDATE,
		__func__);
}

static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned int type,
					      enum amdgpu_interrupt_state state)
{
	enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
	.set = amdgpu_dm_set_vline0_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_outbox_irq_funcs = {
	.set = amdgpu_dm_set_dmub_outbox_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = {
	.set = amdgpu_dm_set_dmub_trace_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->vline0_irq.num_types = adev->mode_info.num_crtc;
	adev->vline0_irq.funcs = &dm_vline0_irq_funcs;

	adev->dmub_outbox_irq.num_types = 1;
	adev->dmub_outbox_irq.funcs = &dm_dmub_outbox_irq_funcs;

	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

	adev->dmub_trace_irq.num_types = 1;
	adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}
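
/*
 * Illustrative note: with the hooks above installed, a refcounted enable from
 * base driver code such as
 *
 *	amdgpu_irq_get(adev, &adev->crtc_irq, crtc_id);
 *
 * lands in amdgpu_dm_set_crtc_irq_state(), which toggles the interrupt in DC
 * hardware via dc_interrupt_set().
 */
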
void amdgpu_dm_outbox_init(struct amdgpu_device *adev)
{
	dc_interrupt_set(adev->dm.dc,
			 DC_IRQ_SOURCE_DMCUB_OUTBOX,
			 true);
}

/**
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int irq_type;
	int i;

	/* First, clear all hpd and hpdrx interrupts */
	for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
		if (!dc_interrupt_set(adev->dm.dc, i, false))
			drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n",
				i);
	}

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector;
		const struct dc_link *dc_link;

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

		dc_link = amdgpu_dm_connector->dc_link;

		/*
		 * Get a base driver irq reference for hpd ints for the lifetime
		 * of dm. Note that only hpd interrupt types are registered with
		 * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on
		 * hpd_rx isn't available. DM currently controls hpd_rx
		 * explicitly with dc_interrupt_set().
		 */
		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;
			/*
			 * TODO: There's a mismatch between mode_info.num_hpd
			 * and what the bios reports as the # of connectors
			 * with hpd sources. Since the # of hpd source types
			 * registered with the base driver == mode_info.num_hpd,
			 * we have to fall back to dc_interrupt_set for the
			 * remaining types.
			 */
			if (irq_type < adev->mode_info.num_hpd) {
				if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type))
					drm_err(dev, "DM_IRQ: Failed to get HPD for source=%d!\n",
						dc_link->irq_source_hpd);
			} else {
				dc_interrupt_set(adev->dm.dc,
						 dc_link->irq_source_hpd,
						 true);
			}
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 true);
		}
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int irq_type;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector;
		const struct dc_link *dc_link;

		if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
			continue;

		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
		dc_link = amdgpu_dm_connector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1;

			/* TODO: See same TODO in amdgpu_dm_hpd_init() */
			if (irq_type < adev->mode_info.num_hpd) {
				if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type))
					drm_err(dev, "DM_IRQ: Failed to put HPD for source=%d!\n",
						dc_link->irq_source_hpd);
			} else {
				dc_interrupt_set(adev->dm.dc,
						 dc_link->irq_source_hpd,
						 false);
			}
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 false);
		}
	}
	drm_connector_list_iter_end(&iter);
}