/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services_types.h"
#include "dc.h"

#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"

/**
 * DOC: overview
 *
 * DM provides another layer of IRQ management on top of what the base driver
 * already provides. This is something that could be cleaned up, and is a
 * future TODO item.
 *
 * The base driver provides IRQ source registration with DRM, handler
 * registration into the base driver's IRQ table, and a handler callback
 * amdgpu_irq_handler() that DRM invokes when an interrupt fires. This generic
 * handler looks up the IRQ table, and calls the respective
 * &amdgpu_irq_src_funcs.process hookups.
 *
 * What DM provides on top are two IRQ tables specifically for top-half and
 * bottom-half IRQ handling, with the bottom-half implementing workqueues:
 *
 * - &amdgpu_display_manager.irq_handler_list_high_tab
 * - &amdgpu_display_manager.irq_handler_list_low_tab
 *
 * They override the base driver's IRQ table, and the effect can be seen in
 * the hooks that DM provides for &amdgpu_irq_src_funcs.process. They are all
 * set to the DM generic handler amdgpu_dm_irq_handler(), which looks up DM's
 * IRQ tables. However, in order for the base driver to recognize this hook,
 * DM still needs to register the IRQ with the base driver. See
 * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
 *
 * To expose DC's hardware interrupt toggle to the base driver, DM implements
 * &amdgpu_irq_src_funcs.set hooks. The base driver calls them through
 * amdgpu_irq_update() to enable or disable the interrupt.
 */
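
/*
 * As an illustration of the flow described above, a minimal sketch of how a
 * DM component might hook a bottom-half handler to an HPD source (the
 * handler name and argument here are hypothetical; the real registrations
 * live in dce110_register_irq_handlers() and dcn10_register_irq_handlers()):
 *
 *	struct dc_interrupt_params int_params = {0};
 *
 *	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
 *	int_params.irq_source = DC_IRQ_SOURCE_HPD1;
 *
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					 my_hpd_handler, my_hpd_handler_arg);
 *
 * When DC_IRQ_SOURCE_HPD1 fires, amdgpu_dm_irq_handler() acks the interrupt
 * and schedules the low context work, so my_hpd_handler(my_hpd_handler_arg)
 * runs later from a workqueue, where it may sleep.
 */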

/******************************************************************************
 * Private declarations.
 *****************************************************************************/

/**
 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: Argument passed to the handler when triggered
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 */
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;
	/* DAL irq source which registered for this interrupt. */
	enum dc_irq_source irq_source;
};

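/*
 * The IRQ tables are shared between interrupt and process context; these
 * helpers keep every table access below under the same irqsave spinlock.
 */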
#define DM_IRQ_TABLE_LOCK(adev, flags) \
	spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)

#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
	spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)

/******************************************************************************
 * Private functions.
 *****************************************************************************/

static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
				     void (*ih)(void *),
				     void *args,
				     struct amdgpu_display_manager *dm)
{
	hcd->handler = ih;
	hcd->handler_arg = args;
	hcd->dm = dm;
}

/**
 * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
 *
 * @work: work struct
 */
static void dm_irq_work_func(struct work_struct *work)
{
	struct irq_list_head *irq_list_head =
		container_of(work, struct irq_list_head, work);
	struct list_head *handler_list = &irq_list_head->head;
	struct amdgpu_dm_irq_handler_data *handler_data;

	/*
	 * Call the DAL subcomponents which registered for interrupt
	 * notification at INTERRUPT_LOW_IRQ_CONTEXT.
	 * (The most common use is the HPD interrupt.)
	 */
	list_for_each_entry(handler_data, handler_list, list) {
		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
			      handler_data->irq_source);

		handler_data->handler(handler_data->handler_arg);
	}
}

/*
 * Remove a handler and return a pointer to the handler list from which the
 * handler was removed.
 */
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
					    void *ih,
					    const struct dc_interrupt_params *int_params)
{
	struct list_head *hnd_list;
	struct list_head *entry, *tmp;
	struct amdgpu_dm_irq_handler_data *handler;
	unsigned long irq_table_flags;
	bool handler_removed = false;
	enum dc_irq_source irq_source;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	irq_source = int_params->irq_source;

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_for_each_safe(entry, tmp, hnd_list) {
		handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
				     list);

		if (ih == handler->handler) {
			/* Found our handler. Remove it from the list. */
			list_del(&handler->list);
			handler_removed = true;
			break;
		}
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	if (!handler_removed) {
		/*
		 * Not necessarily an error - the caller may not know the
		 * context in which the handler was registered.
		 */
		return NULL;
	}

	kfree(handler);

	DRM_DEBUG_KMS(
		"DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
		ih, int_params->irq_source, int_params->int_context);

	return hnd_list;
}

static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
				 void (*ih)(void *))
{
	if (!int_params || !ih) {
		DRM_ERROR("DM_IRQ: invalid input!\n");
		return false;
	}

	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
			  int_params->int_context);
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
			  int_params->irq_source);
		return false;
	}

	return true;
}

static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
					       irq_handler_idx handler_idx)
{
	if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
		DRM_ERROR("DM_IRQ: invalid handler_idx!\n");
		return false;
	}

	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n", irq_source);
		return false;
	}

	return true;
}

/******************************************************************************
 * Public functions.
 *
 * Note: caller is responsible for input validation.
 *****************************************************************************/

/**
 * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
 * @adev: The base driver device containing the DM device
 * @int_params: Interrupt parameters containing the source, and handler context
 * @ih: Function pointer to the interrupt handler to register
 * @handler_args: Arguments passed to the handler when the interrupt occurs
 *
 * Register an interrupt handler for the given IRQ source, under the given
 * context. The context can either be high or low. High context handlers are
 * executed directly within ISR context, while low context handlers are
 * executed within a workqueue, thereby allowing operations that sleep.
 *
 * Registered handlers are called in FIFO order, i.e. the earliest registered
 * handler is called first.
 *
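 * A minimal sketch of a high context registration (the handler and argument
 * names here are illustrative, in the spirit of the DCE/DCN registration
 * code, and not defined in this file):
 *
 *	int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
 *	int_params.irq_source = DC_IRQ_SOURCE_PFLIP1;
 *	amdgpu_dm_irq_register_interrupt(adev, &int_params,
 *					 dm_pflip_high_irq, c_irq_params);
 *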
 * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
 * source, handler function, and args
 */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
				       struct dc_interrupt_params *int_params,
				       void (*ih)(void *),
				       void *handler_args)
{
	struct list_head *hnd_list;
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;
	enum dc_irq_source irq_source;

	if (!validate_irq_registration_params(int_params, ih))
		return DAL_INVALID_IRQ_HANDLER_IDX;

	handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
	if (!handler_data) {
		DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
		return DAL_INVALID_IRQ_HANDLER_IDX;
	}

	init_handler_common_data(handler_data, ih, handler_args, &adev->dm);

	irq_source = int_params->irq_source;

	handler_data->irq_source = irq_source;

	/* Lock the list, add the handler. */
	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	switch (int_params->int_context) {
	case INTERRUPT_HIGH_IRQ_CONTEXT:
		hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
		break;
	case INTERRUPT_LOW_IRQ_CONTEXT:
	default:
		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
		break;
	}

	list_add_tail(&handler_data->list, hnd_list);

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	/*
	 * This pointer will be stored by the code which requested interrupt
	 * registration. The same pointer will be needed in order to
	 * unregister the interrupt.
	 */

	DRM_DEBUG_KMS(
		"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
		handler_data,
		irq_source,
		int_params->int_context);

	return handler_data;
}

/**
 * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
 * @adev: The base driver device containing the DM device
 * @irq_source: IRQ source to remove the given handler from
 * @ih: Function pointer to the interrupt handler to unregister
 *
 * Go through both low and high context IRQ tables, and find the given handler
 * for the given irq source. If found, remove it. Otherwise, do nothing.
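 *
 * A minimal usage sketch; pass the same handler function that was used at
 * registration time (my_hpd_handler is illustrative, not defined here):
 *
 *	amdgpu_dm_irq_unregister_interrupt(adev, DC_IRQ_SOURCE_HPD1,
 *					   my_hpd_handler);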
 */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
					enum dc_irq_source irq_source,
					void *ih)
{
	struct list_head *handler_list;
	struct dc_interrupt_params int_params;
	int i;

	if (!validate_irq_unregistration_params(irq_source, ih))
		return;

	memset(&int_params, 0, sizeof(int_params));

	int_params.irq_source = irq_source;

	for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
		int_params.int_context = i;

		handler_list = remove_irq_handler(adev, ih, &int_params);

		if (handler_list != NULL)
			break;
	}

	if (handler_list == NULL) {
		/*
		 * If we got here, it means we searched all irq contexts
		 * for this irq source, but the handler was not found.
		 */
		DRM_ERROR(
			"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
			ih, irq_source);
	}
}

/**
 * amdgpu_dm_irq_init() - Initialize DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Initialize DM's high and low context IRQ tables.
 *
 * The N by M table contains N IRQ sources, with M
 * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The
 * list_heads are initialized here. When an interrupt n is triggered, all m
 * handlers are called in sequence, FIFO according to registration order.
 *
 * The low context table requires special steps to initialize, since handlers
 * will be deferred to a workqueue. See &struct irq_list_head.
 */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;

	DRM_DEBUG_KMS("DM_IRQ\n");

	spin_lock_init(&adev->dm.irq_handler_list_table_lock);

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* low context handler list init */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		INIT_LIST_HEAD(&lh->head);
		INIT_WORK(&lh->work, dm_irq_work_func);

		/* high context handler init */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}

	return 0;
}

/**
 * amdgpu_dm_irq_fini() - Tear down DM IRQ management
 * @adev: The base driver device containing the DM device
 *
 * Flush all work within the low context IRQ table.
 */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
	int src;
	struct irq_list_head *lh;
	unsigned long irq_table_flags;

	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
		/*
		 * At this point no new work can be scheduled for this
		 * source, so it is safe to flush any pending 'work'.
		 */
		lh = &adev->dm.irq_handler_list_low_tab[src];
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&lh->work);
	}
}

int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h;
	struct list_head *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: suspend\n");

	/*
	 * Disable HW interrupts for HPD and HPDRX only, since FLIP and VBLANK
	 * will be disabled from manage_dm_interrupts when a CRTC is disabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, false);

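		/*
		 * The table lock is taken with spin_lock_irqsave(), so it
		 * must be dropped before flush_work(), which may sleep.
		 */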
		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);

		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: early resume\n");

	/* Re-enable the HW interrupt for the HPD RX (short pulse) sources. */
	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

	return 0;
}

int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
	int src;
	struct list_head *hnd_list_h, *hnd_list_l;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	DRM_DEBUG_KMS("DM_IRQ: resume\n");

	/*
	 * Re-enable HW interrupts for HPD only, since FLIP and VBLANK will
	 * be enabled from manage_dm_interrupts when a CRTC is enabled.
	 */
	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
			dc_interrupt_set(adev->dm.dc, src, true);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
	return 0;
}

/*
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	unsigned long irq_table_flags;
	struct work_struct *work = NULL;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);

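	/* schedule_work() returns false if the work item was already queued. */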
	if (work) {
		if (!schedule_work(work))
			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
				 irq_source);
	}
}

/*
 * amdgpu_dm_irq_immediate_work
 * Call the high context handlers immediately; do not defer to a workqueue.
 */
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
					 enum dc_irq_source irq_source)
{
	struct amdgpu_dm_irq_handler_data *handler_data;
	unsigned long irq_table_flags;

	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);

	list_for_each_entry(handler_data,
			    &adev->dm.irq_handler_list_high_tab[irq_source],
			    list) {
		/*
		 * Call a subcomponent which registered for immediate
		 * interrupt notification.
		 */
		handler_data->handler(handler_data->handler_arg);
	}

	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}

/**
 * amdgpu_dm_irq_handler - Generic DM IRQ handler
 * @adev: amdgpu base driver device containing the DM device
 * @source: Unused
 * @entry: Data about the triggered interrupt
 *
 * Calls all registered high irq work immediately, and schedules work for low
 * irq. The DM IRQ table is used to find the corresponding handlers.
 */
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	enum dc_irq_source src =
		dc_interrupt_to_irq_source(
			adev->dm.dc,
			entry->src_id,
			entry->src_data[0]);

	dc_interrupt_ack(adev->dm.dc, src);

	/* Call high irq work immediately */
	amdgpu_dm_irq_immediate_work(adev, src);
	/* Schedule low irq work */
	amdgpu_dm_irq_schedule_work(adev, src);

	return 0;
}

static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type)
{
	switch (type) {
	case AMDGPU_HPD_1:
		return DC_IRQ_SOURCE_HPD1;
	case AMDGPU_HPD_2:
		return DC_IRQ_SOURCE_HPD2;
	case AMDGPU_HPD_3:
		return DC_IRQ_SOURCE_HPD3;
	case AMDGPU_HPD_4:
		return DC_IRQ_SOURCE_HPD4;
	case AMDGPU_HPD_5:
		return DC_IRQ_SOURCE_HPD5;
	case AMDGPU_HPD_6:
		return DC_IRQ_SOURCE_HPD6;
	default:
		return DC_IRQ_SOURCE_INVALID;
	}
}

static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned int type,
				       enum amdgpu_interrupt_state state)
{
	enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
	bool st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, src, st);
	return 0;
}

static inline int dm_irq_state(struct amdgpu_device *adev,
			       struct amdgpu_irq_src *source,
			       unsigned int crtc_id,
			       enum amdgpu_interrupt_state state,
			       const enum irq_type dal_irq_type,
			       const char *func)
{
	bool st;
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];

	if (!acrtc) {
		DRM_ERROR("%s: crtc is NULL at id :%d\n", func, crtc_id);
		return 0;
	}

	if (acrtc->otg_inst == -1)
		return 0;

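	/*
	 * DC IRQ sources of a given type are laid out contiguously, one per
	 * OTG instance, so the target source is the base type offset by the
	 * CRTC's OTG instance number.
	 */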
	irq_source = dal_irq_type + acrtc->otg_inst;

	st = (state == AMDGPU_IRQ_STATE_ENABLE);

	dc_interrupt_set(adev->dm.dc, irq_source, st);
	return 0;
}

static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 unsigned int crtc_id,
					 enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_PFLIP, __func__);
}

static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int crtc_id,
					enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VBLANK, __func__);
}

static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned int crtc_id,
					  enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VLINE0, __func__);
}

static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
					   struct amdgpu_irq_src *source,
					   unsigned int crtc_id,
					   enum amdgpu_interrupt_state state)
{
	return dm_irq_state(adev, source, crtc_id, state,
			    IRQ_TYPE_VUPDATE, __func__);
}

static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = {
	.set = amdgpu_dm_set_vline0_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
	.set = amdgpu_dm_set_pflip_irq_state,
	.process = amdgpu_dm_irq_handler,
};

static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
	.set = amdgpu_dm_set_hpd_irq_state,
	.process = amdgpu_dm_irq_handler,
};

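/*
 * amdgpu_dm_set_irq_funcs() - Plug DM's IRQ hooks into the base driver.
 *
 * This is where the override described in the DOC comment above takes
 * effect: each source type gets amdgpu_dm_irq_handler() as its .process
 * hook, plus a DM .set hook that forwards to dc_interrupt_set().
 */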
void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->vline0_irq.num_types = adev->mode_info.num_crtc;
	adev->vline0_irq.funcs = &dm_vline0_irq_funcs;

	adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
	adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;

	adev->hpd_irq.num_types = adev->mode_info.num_hpd;
	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}

/**
 * amdgpu_dm_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 true);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 true);
		}
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * amdgpu_dm_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;

		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd,
					 false);
		}

		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
			dc_interrupt_set(adev->dm.dc,
					 dc_link->irq_source_hpd_rx,
					 false);
		}
	}
	drm_connector_list_iter_end(&iter);
}