/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/kernel.h>

#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_connector.h"
#include "intel_display_core.h"
#include "intel_display_power.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"
/**
 * DOC: Hotplug
 *
 * Simply put, hotplug occurs when a display is connected to or disconnected
 * from the system. However, there may be adapters and docking stations and
 * Display Port short pulses and MST devices involved, complicating matters.
 *
 * Hotplug in i915 is handled at many different levels of abstraction.
 *
 * The platform dependent interrupt handling code in i915_irq.c enables,
 * disables, and does preliminary handling of the interrupts. The interrupt
 * handlers gather the hotplug detect (HPD) information from relevant registers
 * into a platform independent mask of hotplug pins that have fired.
 *
 * The platform independent interrupt handler intel_hpd_irq_handler() in
 * intel_hotplug.c does hotplug irq storm detection and mitigation, and passes
 * further processing to appropriate bottom halves (Display Port specific and
 * regular hotplug).
 *
 * The Display Port work function i915_digport_work_func() calls into
 * intel_dp_hpd_pulse() via hooks, which handles DP short pulses and DP MST long
 * pulses, with failures and non-MST long pulses triggering regular hotplug
 * processing on the connector.
 *
 * The regular hotplug work function i915_hotplug_work_func() calls connector
 * detect hooks, and, if the connector status changes, triggers sending of a
 * hotplug uevent to userspace via drm_kms_helper_hotplug_event().
 *
 * Finally, userspace is responsible for triggering a modeset upon receiving
 * the hotplug uevent, disabling or enabling the crtc as needed.
 *
 * The hotplug interrupt storm detection and mitigation code keeps track of the
 * number of interrupts per hotplug pin over a period of time, and if the number
 * of interrupts exceeds a certain threshold, the interrupt is disabled for a
 * while before being re-enabled. The intention is to mitigate issues arising
 * from broken hardware triggering massive amounts of interrupts and grinding
 * the system to a halt.
 *
 * The current implementation expects that an HPD interrupt storm will not be
 * seen while a Display Port sink is connected, hence on platforms whose DP
 * callback is handled by i915_digport_work_func() re-enabling of HPD is not
 * performed (it was never expected to be disabled in the first place ;) ).
 * This is specific to DP sinks handled by that routine; any other display,
 * such as HDMI or DVI, enabled on the same port will use
 * i915_hotplug_work_func(), where this logic is handled.
 */
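
/*
 * Rough flow of the above, as an illustrative sketch (summarizing this file;
 * not a normative diagram):
 *
 *   platform irq handler (i915_irq.c)
 *     -> intel_hpd_irq_handler()       (storm detection/mitigation)
 *        -> i915_digport_work_func()   -> ->hpd_pulse() (DP short/MST)
 *        -> i915_hotplug_work_func()   -> connector detect hooks
 *           -> drm_kms_helper_hotplug_event() -> uevent to userspace
 */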

/**
 * intel_hpd_pin_default - return the default pin associated with a certain port.
 * @port: the hpd port to get the associated pin for
 *
 * It is only valid for, and used by, digital port encoders.
 *
 * Return: the pin that is associated with @port.
 */
enum hpd_pin intel_hpd_pin_default(enum port port)
{
	return HPD_PORT_A + port - PORT_A;
}
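
/*
 * For example, the mapping above yields HPD_PORT_B for PORT_B, HPD_PORT_C
 * for PORT_C, and so on; it relies on the hpd_pin and port enum values
 * being declared in the same order.
 */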

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
#define HPD_RETRY_DELAY 1000
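
/*
 * Worked example (illustrative): with the default threshold of 50,
 * intel_hpd_irq_storm_detect() below counts long IRQs as +10 and short
 * IRQs as +1, so a 6th long pulse (60 > 50) or a 51st short pulse within
 * one HPD_STORM_DETECT_PERIOD (1000 ms) window is flagged as a storm,
 * matching the "5 long, 50 short" note above.
 */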

static enum hpd_pin
intel_connector_hpd_pin(struct intel_connector *connector)
{
	struct intel_encoder *encoder = intel_attached_encoder(connector);

	/*
	 * MST connectors get their encoder attached dynamically
	 * so need to make sure we have an encoder here. But since
	 * MST encoders have their hpd_pin set to HPD_NONE we don't
	 * have to special case them beyond that.
	 */
	return encoder ? encoder->hpd_pin : HPD_NONE;
}

/**
 * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
 * @display: display device
 * @pin: the pin to gather stats on
 * @long_hpd: whether the HPD IRQ was long or short
 *
 * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
 * storms. Only the pin specific stats and state are changed, the caller is
 * responsible for further action.
 *
 * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
 * stored in @display->hotplug.hpd_storm_threshold which defaults to
 * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
 * short IRQs count as +1. If this threshold is exceeded, it's considered an
 * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
 *
 * By default, most systems will only count long IRQs towards
 * &display->hotplug.hpd_storm_threshold. However, some older systems also
 * suffer from short IRQ storms and must also track these. Because short IRQ
 * storms are naturally caused by sideband interactions with DP MST devices,
 * short IRQ detection is only enabled for systems without DP MST support.
 * Systems which are new enough to support DP MST are far less likely to
 * suffer from IRQ storms at all, so this is fine.
 *
 * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
 * and should only be adjusted for automated hotplug testing.
 *
 * Return true if an IRQ storm was detected on @pin.
 */
static bool intel_hpd_irq_storm_detect(struct intel_display *display,
				       enum hpd_pin pin, bool long_hpd)
{
	struct intel_hotplug *hpd = &display->hotplug;
	unsigned long start = hpd->stats[pin].last_jiffies;
	unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
	const int increment = long_hpd ? 10 : 1;
	const int threshold = hpd->hpd_storm_threshold;
	bool storm = false;

	if (!threshold ||
	    (!long_hpd && !display->hotplug.hpd_short_storm_enabled))
		return false;

	if (!time_in_range(jiffies, start, end)) {
		hpd->stats[pin].last_jiffies = jiffies;
		hpd->stats[pin].count = 0;
	}

	hpd->stats[pin].count += increment;
	if (hpd->stats[pin].count > threshold) {
		hpd->stats[pin].state = HPD_MARK_DISABLED;
		drm_dbg_kms(display->drm,
			    "HPD interrupt storm detected on PIN %d\n", pin);
		storm = true;
	} else {
		drm_dbg_kms(display->drm,
			    "Received HPD interrupt on PIN %d - cnt: %d\n",
			    pin,
			    hpd->stats[pin].count);
	}

	return storm;
}

static bool detection_work_enabled(struct intel_display *display)
{
	lockdep_assert_held(&display->irq.lock);

	return display->hotplug.detection_work_enabled;
}

static bool
mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return mod_delayed_work(display->wq.unordered, work, delay);
}

static bool
queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay)
{
	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return queue_delayed_work(display->wq.unordered, work, delay);
}

static bool
queue_detection_work(struct intel_display *display, struct work_struct *work)
{
	lockdep_assert_held(&display->irq.lock);

	if (!detection_work_enabled(display))
		return false;

	return queue_work(display->wq.unordered, work);
}

static void
intel_hpd_irq_storm_switch_to_polling(struct intel_display *display)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	bool hpd_disabled = false;

	lockdep_assert_held(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
			continue;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    display->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		drm_info(display->drm,
			 "HPD interrupt storm detected on connector %s: "
			 "switching from hotplug detection to polling\n",
			 connector->base.name);

		display->hotplug.stats[pin].state = HPD_DISABLED;
		connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			DRM_CONNECTOR_POLL_DISCONNECT;
		hpd_disabled = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_reschedule(display->drm);
		mod_delayed_detection_work(display,
					   &display->hotplug.reenable_work,
					   msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
	}
}

static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), hotplug.reenable_work.work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	struct ref_tracker *wakeref;
	enum hpd_pin pin;

	wakeref = intel_display_rpm_get(display);

	spin_lock_irq(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE ||
		    display->hotplug.stats[pin].state != HPD_DISABLED)
			continue;

		if (connector->base.polled != connector->polled)
			drm_dbg(display->drm,
				"Reenabling HPD on connector %s\n",
				connector->base.name);
		connector->base.polled = connector->polled;
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_hpd_pin(pin) {
		if (display->hotplug.stats[pin].state == HPD_DISABLED)
			display->hotplug.stats[pin].state = HPD_ENABLED;
	}

	intel_hpd_irq_setup(display);

	spin_unlock_irq(&display->irq.lock);

	intel_display_rpm_put(display, wakeref);
}

static enum intel_hotplug_state
intel_hotplug_detect_connector(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	enum drm_connector_status old_status;
	u64 old_epoch_counter;
	int status;
	bool ret = false;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->base.status;
	old_epoch_counter = connector->base.epoch_counter;

	status = drm_helper_probe_detect(&connector->base, NULL, false);
	if (!connector->base.force)
		connector->base.status = status;

	if (old_epoch_counter != connector->base.epoch_counter)
		ret = true;

	if (ret) {
		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.base.id,
			    connector->base.name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->base.status),
			    old_epoch_counter,
			    connector->base.epoch_counter);
		return INTEL_HOTPLUG_CHANGED;
	}
	return INTEL_HOTPLUG_UNCHANGED;
}

enum intel_hotplug_state
intel_encoder_hotplug(struct intel_encoder *encoder,
		      struct intel_connector *connector)
{
	return intel_hotplug_detect_connector(connector);
}

static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
{
	return intel_encoder_is_dig_port(encoder) &&
		enc_to_dig_port(encoder)->hpd_pulse != NULL;
}

static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(display->drm, encoder) {
		if (encoder->hpd_pin != pin)
			continue;

		if (intel_encoder_has_hpd_pulse(encoder))
			return true;
	}

	return false;
}

static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin)
{
	lockdep_assert_held(&display->irq.lock);

	return display->hotplug.stats[pin].blocked_count;
}

static u32 get_blocked_hpd_pin_mask(struct intel_display *display)
{
	enum hpd_pin pin;
	u32 hpd_pin_mask = 0;

	for_each_hpd_pin(pin) {
		if (hpd_pin_is_blocked(display, pin))
			hpd_pin_mask |= BIT(pin);
	}

	return hpd_pin_mask;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, struct intel_display, hotplug.dig_port_work);
	struct intel_hotplug *hotplug = &display->hotplug;
	u32 long_hpd_pin_mask, short_hpd_pin_mask;
	struct intel_encoder *encoder;
	u32 blocked_hpd_pin_mask;
	u32 old_bits = 0;

	spin_lock_irq(&display->irq.lock);

	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
	long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask;
	hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask;
	short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask;
	hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask;

	spin_unlock_irq(&display->irq.lock);

	for_each_intel_encoder(display->drm, encoder) {
		struct intel_digital_port *dig_port;
		enum hpd_pin pin = encoder->hpd_pin;
		bool long_hpd, short_hpd;
		enum irqreturn ret;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_hpd_pin_mask & BIT(pin);
		short_hpd = short_hpd_pin_mask & BIT(pin);

		if (!long_hpd && !short_hpd)
			continue;

		dig_port = enc_to_dig_port(encoder);

		ret = dig_port->hpd_pulse(dig_port, long_hpd);
		if (ret == IRQ_NONE) {
			/* fall back to old school hpd */
			old_bits |= BIT(pin);
		}
	}

	if (old_bits) {
		spin_lock_irq(&display->irq.lock);
		display->hotplug.event_bits |= old_bits;
		queue_delayed_detection_work(display,
					     &display->hotplug.hotplug_work, 0);
		spin_unlock_irq(&display->irq.lock);
	}
}

/**
 * intel_hpd_trigger_irq - trigger an hpd irq event for a port
 * @dig_port: digital port
 *
 * Trigger an HPD interrupt event for the given port, emulating a short pulse
 * generated by the sink, and schedule the dig port work to handle it.
 */
void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
{
	struct intel_display *display = to_intel_display(dig_port);
	struct intel_hotplug *hotplug = &display->hotplug;
	struct intel_encoder *encoder = &dig_port->base;

	spin_lock_irq(&display->irq.lock);

	hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin);
	if (!hpd_pin_is_blocked(display, encoder->hpd_pin))
		queue_work(hotplug->dp_wq, &hotplug->dig_port_work);

	spin_unlock_irq(&display->irq.lock);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, struct intel_display, hotplug.hotplug_work.work);
	struct intel_hotplug *hotplug = &display->hotplug;
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	u32 changed = 0, retry = 0;
	u32 hpd_event_bits;
	u32 hpd_retry_bits;
	struct drm_connector *first_changed_connector = NULL;
	int changed_connectors = 0;
	u32 blocked_hpd_pin_mask;

	mutex_lock(&display->drm->mode_config.mutex);
	drm_dbg_kms(display->drm, "running encoder hotplug functions\n");

	spin_lock_irq(&display->irq.lock);

	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
	hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask;
	hotplug->event_bits &= ~hpd_event_bits;
	hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask;
	hotplug->retry_bits &= ~hpd_retry_bits;

	/* Enable polling for connectors which had HPD IRQ storms */
	intel_hpd_irq_storm_switch_to_polling(display);

	spin_unlock_irq(&display->irq.lock);

	/* Skip calling the encoder hotplug handlers if the ignore-long-HPD flag is set */
	if (display->hotplug.ignore_long_hpd) {
		drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n");
		mutex_unlock(&display->drm->mode_config.mutex);
		return;
	}

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;
		u32 hpd_bit;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		hpd_bit = BIT(pin);
		if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
			struct intel_encoder *encoder =
				intel_attached_encoder(connector);

			if (hpd_event_bits & hpd_bit)
				connector->hotplug_retries = 0;
			else
				connector->hotplug_retries++;

			drm_dbg_kms(display->drm,
				    "Connector %s (pin %i) received hotplug event. (retry %d)\n",
				    connector->base.name, pin,
				    connector->hotplug_retries);

			switch (encoder->hotplug(encoder, connector)) {
			case INTEL_HOTPLUG_UNCHANGED:
				break;
			case INTEL_HOTPLUG_CHANGED:
				changed |= hpd_bit;
				changed_connectors++;
				if (!first_changed_connector) {
					drm_connector_get(&connector->base);
					first_changed_connector = &connector->base;
				}
				break;
			case INTEL_HOTPLUG_RETRY:
				retry |= hpd_bit;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&display->drm->mode_config.mutex);

	if (changed_connectors == 1)
		drm_kms_helper_connector_hotplug_event(first_changed_connector);
	else if (changed_connectors > 0)
		drm_kms_helper_hotplug_event(display->drm);

	if (first_changed_connector)
		drm_connector_put(first_changed_connector);

	/* Remove shared HPD pins that have changed */
	retry &= ~changed;
	if (retry) {
		spin_lock_irq(&display->irq.lock);
		display->hotplug.retry_bits |= retry;

		mod_delayed_detection_work(display,
					   &display->hotplug.hotplug_work,
					   msecs_to_jiffies(HPD_RETRY_DELAY));
		spin_unlock_irq(&display->irq.lock);
	}
}

/**
 * intel_hpd_irq_handler - main hotplug irq handler
 * @display: display device
 * @pin_mask: a mask of hpd pins that have triggered the irq
 * @long_mask: a mask of hpd pins that may be long hpd pulses
 *
 * This is the main hotplug irq handler for all platforms. The platform specific
 * irq handlers call the platform specific hotplug irq handlers, which read and
 * decode the appropriate registers into bitmasks about hpd pins that have
 * triggered (@pin_mask), and which of those pins may be long pulses
 * (@long_mask). The @long_mask is ignored if the port corresponding to the pin
 * is not a digital port.
 *
 * Here, we do hotplug irq storm detection and mitigation, and pass further
 * processing to appropriate bottom halves.
 */
void intel_hpd_irq_handler(struct intel_display *display,
			   u32 pin_mask, u32 long_mask)
{
	struct intel_encoder *encoder;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 long_hpd_pulse_mask = 0;
	u32 short_hpd_pulse_mask = 0;
	enum hpd_pin pin;

	if (!pin_mask)
		return;

	spin_lock(&display->irq.lock);

	/*
	 * Determine whether ->hpd_pulse() exists for each pin, and
	 * whether we have a short or a long pulse. This is needed
	 * as each pin may have up to two encoders (HDMI and DP) and
	 * only one of them (DP) will have ->hpd_pulse().
	 */
	for_each_intel_encoder(display->drm, encoder) {
		bool long_hpd;

		pin = encoder->hpd_pin;
		if (!(BIT(pin) & pin_mask))
			continue;

		if (!intel_encoder_has_hpd_pulse(encoder))
			continue;

		long_hpd = long_mask & BIT(pin);

		drm_dbg(display->drm,
			"digital hpd on [ENCODER:%d:%s] - %s\n",
			encoder->base.base.id, encoder->base.name,
			long_hpd ? "long" : "short");

		if (!hpd_pin_is_blocked(display, pin))
			queue_dig = true;

		if (long_hpd) {
			long_hpd_pulse_mask |= BIT(pin);
			display->hotplug.long_hpd_pin_mask |= BIT(pin);
		} else {
			short_hpd_pulse_mask |= BIT(pin);
			display->hotplug.short_hpd_pin_mask |= BIT(pin);
		}
	}

	/* Now process each pin just once */
	for_each_hpd_pin(pin) {
		bool long_hpd;

		if (!(BIT(pin) & pin_mask))
			continue;

		if (display->hotplug.stats[pin].state == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			drm_WARN_ONCE(display->drm, !HAS_GMCH(display),
				      "Received HPD interrupt on pin %d although disabled\n",
				      pin);
			continue;
		}

		if (display->hotplug.stats[pin].state != HPD_ENABLED)
			continue;

		/*
		 * Delegate to ->hpd_pulse() if one of the encoders for this
		 * pin has it, otherwise let the hotplug_work deal with this
		 * pin directly.
		 */
		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
			long_hpd = long_hpd_pulse_mask & BIT(pin);
		} else {
			display->hotplug.event_bits |= BIT(pin);
			long_hpd = true;

			if (!hpd_pin_is_blocked(display, pin))
				queue_hp = true;
		}

		if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) {
			display->hotplug.event_bits &= ~BIT(pin);
			storm_detected = true;
			queue_hp = true;
		}
	}

	/*
	 * Disable any IRQs that storms were detected on. Polling enablement
	 * happens later in our hotplug work.
	 */
	if (storm_detected)
		intel_hpd_irq_setup(display);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work);
	if (queue_hp)
		queue_delayed_detection_work(display,
					     &display->hotplug.hotplug_work, 0);

	spin_unlock(&display->irq.lock);
}
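
/*
 * Illustrative example of the mask encoding: a long pulse on HPD_PORT_B
 * arrives as pin_mask = BIT(HPD_PORT_B) with long_mask = BIT(HPD_PORT_B);
 * a short pulse on the same pin sets the pin_mask bit but leaves the
 * long_mask bit clear.
 */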

/**
 * intel_hpd_init - initializes and enables hpd support
 * @display: display device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 *
 * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
 */
void intel_hpd_init(struct intel_display *display)
{
	int i;

	if (!HAS_DISPLAY(display))
		return;

	for_each_hpd_pin(i) {
		display->hotplug.stats[i].count = 0;
		display->hotplug.stats[i].state = HPD_ENABLED;
	}

	/*
	 * Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy.
	 */
	spin_lock_irq(&display->irq.lock);
	intel_hpd_irq_setup(display);
	spin_unlock_irq(&display->irq.lock);
}

static void i915_hpd_poll_detect_connectors(struct intel_display *display)
{
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	struct intel_connector *first_changed_connector = NULL;
	int changed = 0;

	mutex_lock(&display->drm->mode_config.mutex);

	if (!display->drm->mode_config.poll_enabled)
		goto out;

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD))
			continue;

		if (intel_hotplug_detect_connector(connector) != INTEL_HOTPLUG_CHANGED)
			continue;

		changed++;

		if (changed == 1) {
			drm_connector_get(&connector->base);
			first_changed_connector = connector;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

out:
	mutex_unlock(&display->drm->mode_config.mutex);

	if (!changed)
		return;

	if (changed == 1)
		drm_kms_helper_connector_hotplug_event(&first_changed_connector->base);
	else
		drm_kms_helper_hotplug_event(display->drm);

	drm_connector_put(&first_changed_connector->base);
}

static void i915_hpd_poll_init_work(struct work_struct *work)
{
	struct intel_display *display =
		container_of(work, typeof(*display), hotplug.poll_init_work);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	intel_wakeref_t wakeref;
	bool enabled;

	mutex_lock(&display->drm->mode_config.mutex);

	enabled = READ_ONCE(display->hotplug.poll_enabled);
	/*
	 * Prevent taking a power reference from this sequence of
	 * i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() ->
	 * connector detect which would requeue i915_hpd_poll_init_work()
	 * and so risk an endless loop of this same sequence.
	 */
	if (!enabled) {
		wakeref = intel_display_power_get(display,
						  POWER_DOMAIN_DISPLAY_CORE);
		drm_WARN_ON(display->drm,
			    READ_ONCE(display->hotplug.poll_enabled));
		cancel_work(&display->hotplug.poll_init_work);
	}

	spin_lock_irq(&display->irq.lock);

	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		enum hpd_pin pin;

		pin = intel_connector_hpd_pin(connector);
		if (pin == HPD_NONE)
			continue;

		if (display->hotplug.stats[pin].state == HPD_DISABLED)
			continue;

		connector->base.polled = connector->polled;

		if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
			connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
				DRM_CONNECTOR_POLL_DISCONNECT;
	}
	drm_connector_list_iter_end(&conn_iter);

	spin_unlock_irq(&display->irq.lock);

	if (enabled)
		drm_kms_helper_poll_reschedule(display->drm);

	mutex_unlock(&display->drm->mode_config.mutex);

	/*
	 * We might have missed any hotplugs that happened while we were
	 * in the middle of disabling polling
	 */
	if (!enabled) {
		i915_hpd_poll_detect_connectors(display);

		intel_display_power_put(display,
					POWER_DOMAIN_DISPLAY_CORE,
					wakeref);
	}
}

/**
 * intel_hpd_poll_enable - enable polling for connectors with hpd
 * @display: display device instance
 *
 * This function enables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
 * worker.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_disable().
 */
void intel_hpd_poll_enable(struct intel_display *display)
{
	if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display))
		return;

	WRITE_ONCE(display->hotplug.poll_enabled, true);

	/*
	 * We might already be holding dev->mode_config.mutex, so do this in a
	 * separate worker.
	 * As well, there's no issue if we race here since we always reschedule
	 * this worker anyway.
	 */
	spin_lock_irq(&display->irq.lock);
	queue_detection_work(display,
			     &display->hotplug.poll_init_work);
	spin_unlock_irq(&display->irq.lock);
}

/**
 * intel_hpd_poll_disable - disable polling for connectors with hpd
 * @display: display device instance
 *
 * This function disables polling for all connectors which support HPD.
 * Under certain conditions HPD may not be functional. On most Intel GPUs,
 * this happens when we enter runtime suspend.
 * On Valleyview and Cherryview systems, this also happens when we shut off all
 * of the powerwells.
 *
 * Since this function can get called in contexts where we're already holding
 * dev->mode_config.mutex, we do the actual hotplug disabling in a separate
 * worker.
 *
 * Also used during driver init to initialize connector->polled
 * appropriately for all connectors.
 *
 * Also see: intel_hpd_init() and intel_hpd_poll_enable().
 */
void intel_hpd_poll_disable(struct intel_display *display)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(display))
		return;

	for_each_intel_dp(display->drm, encoder)
		intel_dp_dpcd_set_probe(enc_to_intel_dp(encoder), true);

	WRITE_ONCE(display->hotplug.poll_enabled, false);

	spin_lock_irq(&display->irq.lock);
	queue_detection_work(display,
			     &display->hotplug.poll_init_work);
	spin_unlock_irq(&display->irq.lock);
}

void intel_hpd_poll_fini(struct intel_display *display)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(display->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		intel_connector_cancel_modeset_retry_work(connector);
		intel_hdcp_cancel_works(connector);
	}
	drm_connector_list_iter_end(&conn_iter);
}

void intel_hpd_init_early(struct intel_display *display)
{
	INIT_DELAYED_WORK(&display->hotplug.hotplug_work,
			  i915_hotplug_work_func);
	INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func);
	INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work);
	INIT_DELAYED_WORK(&display->hotplug.reenable_work,
			  intel_hpd_irq_storm_reenable_work);

	display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/*
	 * If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display);
}

static bool cancel_all_detection_work(struct intel_display *display)
{
	bool was_pending = false;

	if (cancel_delayed_work_sync(&display->hotplug.hotplug_work))
		was_pending = true;
	if (cancel_work_sync(&display->hotplug.poll_init_work))
		was_pending = true;
	if (cancel_delayed_work_sync(&display->hotplug.reenable_work))
		was_pending = true;

	return was_pending;
}

void intel_hpd_cancel_work(struct intel_display *display)
{
	if (!HAS_DISPLAY(display))
		return;

	spin_lock_irq(&display->irq.lock);

	display->hotplug.long_hpd_pin_mask = 0;
	display->hotplug.short_hpd_pin_mask = 0;
	display->hotplug.event_bits = 0;
	display->hotplug.retry_bits = 0;

	spin_unlock_irq(&display->irq.lock);

	cancel_work_sync(&display->hotplug.dig_port_work);

	/*
	 * All other work triggered by hotplug events should be canceled by
	 * now.
	 */
	if (cancel_all_detection_work(display))
		drm_dbg_kms(display->drm, "Hotplug detection work still active\n");
}

static void queue_work_for_missed_irqs(struct intel_display *display)
{
	struct intel_hotplug *hotplug = &display->hotplug;
	bool queue_hp_work = false;
	u32 blocked_hpd_pin_mask;
	enum hpd_pin pin;

	lockdep_assert_held(&display->irq.lock);

	blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display);
	if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask)
		queue_hp_work = true;

	for_each_hpd_pin(pin) {
		switch (display->hotplug.stats[pin].state) {
		case HPD_MARK_DISABLED:
			queue_hp_work = true;
			break;
		case HPD_DISABLED:
		case HPD_ENABLED:
			break;
		default:
			MISSING_CASE(display->hotplug.stats[pin].state);
		}
	}

	if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask)
		queue_work(hotplug->dp_wq, &hotplug->dig_port_work);

	if (queue_hp_work)
		queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
}

static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_hotplug *hotplug = &display->hotplug;

	lockdep_assert_held(&display->irq.lock);

	hotplug->stats[pin].blocked_count++;

	return hotplug->stats[pin].blocked_count == 1;
}

static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin)
{
	struct intel_hotplug *hotplug = &display->hotplug;

	lockdep_assert_held(&display->irq.lock);

	if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0))
		return true;

	hotplug->stats[pin].blocked_count--;

	return hotplug->stats[pin].blocked_count == 0;
}

/**
 * intel_hpd_block - Block handling of HPD IRQs on an HPD pin
 * @encoder: Encoder to block the HPD handling for
 *
 * Blocks the handling of HPD IRQs on the HPD pin of @encoder.
 *
 * On return:
 *
 * - It's guaranteed that the blocked encoders' HPD pulse handler
 *   (via intel_digital_port::hpd_pulse()) is not running.
 * - The hotplug event handling (via intel_encoder::hotplug()) of an
 *   HPD IRQ pending at the time this function is called may still be
 *   running.
 * - Detection on the encoder's connector (via
 *   drm_connector_helper_funcs::detect_ctx(),
 *   drm_connector_funcs::detect()) remains allowed, for instance as part of
 *   userspace connector probing, or DRM core's connector polling.
 *
 * The call must be followed by calling intel_hpd_unblock(), or
 * intel_hpd_clear_and_unblock().
 *
 * Note that the handling of HPD IRQs for another encoder using the same HPD
 * pin as that of @encoder will also be blocked.
 */
void intel_hpd_block(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_hotplug *hotplug = &display->hotplug;
	bool do_flush = false;

	if (encoder->hpd_pin == HPD_NONE)
		return;

	spin_lock_irq(&display->irq.lock);

	if (block_hpd_pin(display, encoder->hpd_pin))
		do_flush = true;

	spin_unlock_irq(&display->irq.lock);

	if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin))
		flush_work(&hotplug->dig_port_work);
}

/**
 * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin
 * @encoder: Encoder to unblock the HPD handling for
 *
 * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
 * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
 * HPD pin while it was blocked will be handled for @encoder and for any
 * other encoder sharing the same HPD pin.
 */
void intel_hpd_unblock(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	if (encoder->hpd_pin == HPD_NONE)
		return;

	spin_lock_irq(&display->irq.lock);

	if (unblock_hpd_pin(display, encoder->hpd_pin))
		queue_work_for_missed_irqs(display);

	spin_unlock_irq(&display->irq.lock);
}

/**
 * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin
 * @encoder: Encoder to unblock the HPD handling for
 *
 * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was
 * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the
 * HPD pin while it was blocked will be cleared, handling only new IRQs.
 */
void intel_hpd_clear_and_unblock(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_hotplug *hotplug = &display->hotplug;
	enum hpd_pin pin = encoder->hpd_pin;

	if (pin == HPD_NONE)
		return;

	spin_lock_irq(&display->irq.lock);

	if (unblock_hpd_pin(display, pin)) {
		hotplug->event_bits &= ~BIT(pin);
		hotplug->retry_bits &= ~BIT(pin);
		hotplug->short_hpd_pin_mask &= ~BIT(pin);
		hotplug->long_hpd_pin_mask &= ~BIT(pin);
	}

	spin_unlock_irq(&display->irq.lock);
}
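
/*
 * Illustrative usage sketch (not an actual caller in this file): quiesce
 * HPD handling around an operation that must not race with the encoder's
 * HPD work, e.g.:
 *
 *	intel_hpd_block(encoder);
 *	// ... touch the port; ->hpd_pulse() is guaranteed not to run ...
 *	intel_hpd_clear_and_unblock(encoder);
 *
 * Use intel_hpd_unblock() instead if HPD IRQs raised while blocked should
 * still be serviced afterwards.
 */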

void intel_hpd_enable_detection_work(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	display->hotplug.detection_work_enabled = true;
	queue_work_for_missed_irqs(display);
	spin_unlock_irq(&display->irq.lock);
}

void intel_hpd_disable_detection_work(struct intel_display *display)
{
	spin_lock_irq(&display->irq.lock);
	display->hotplug.detection_work_enabled = false;
	spin_unlock_irq(&display->irq.lock);

	cancel_all_detection_work(display);
}

bool intel_hpd_schedule_detection(struct intel_display *display)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&display->irq.lock, flags);
	ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0);
	spin_unlock_irqrestore(&display->irq.lock, flags);

	return ret;
}

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct intel_display *display = m->private;
	struct drm_i915_private *dev_priv = to_i915(display->drm);
	struct intel_hotplug *hotplug = &display->hotplug;

	/*
	 * Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&display->hotplug.dig_port_work);
	flush_delayed_work(&display->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   str_yes_no(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_display *display = m->private;
	struct intel_hotplug *hotplug = &display->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(display->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(display->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&display->irq.lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&display->irq.lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&display->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct intel_display *display = m->private;

	seq_printf(m, "Enabled: %s\n",
		   str_yes_no(display->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct intel_display *display = m->private;
	struct intel_hotplug *hotplug = &display->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(display);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n",
		    new_state ? "En" : "Dis");

	spin_lock_irq(&display->irq.lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&display->irq.lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&display->hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};

void intel_hpd_debugfs_register(struct intel_display *display)
{
	struct dentry *debugfs_root = display->drm->debugfs_root;

	debugfs_create_file("i915_hpd_storm_ctl", 0644, debugfs_root,
			    display, &i915_hpd_storm_ctl_fops);
	debugfs_create_file("i915_hpd_short_storm_ctl", 0644, debugfs_root,
			    display, &i915_hpd_short_storm_ctl_fops);
	debugfs_create_bool("i915_ignore_long_hpd", 0644, debugfs_root,
			    &display->hotplug.ignore_long_hpd);
}
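
/*
 * Example debugfs usage (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug and the card is DRM minor 0, which depends on the
 * system):
 *
 *	cat /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	echo 2 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *	echo 1 > /sys/kernel/debug/dri/0/i915_ignore_long_hpd
 */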