/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/pm_runtime.h>

#include "gt/intel_lrc_reg.h" /* for shadow reg list */

#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS	 10

#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))

static void
fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
}

void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
	spin_lock_init(&mmio_debug->lock);
	mmio_debug->unclaimed_mmio_check = 1;
}

static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	/* Save and disable mmio debugging for the user bypass */
	if (!mmio_debug->suspend_count++) {
		mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
		mmio_debug->unclaimed_mmio_check = 0;
	}
}

static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
{
	lockdep_assert_held(&mmio_debug->lock);

	if (!--mmio_debug->suspend_count)
		mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
}

static const char * const forcewake_domain_names[] = {
	"render",
	"gt",
	"media",
	"vdbox0",
	"vdbox1",
	"vdbox2",
	"vdbox3",
	"vdbox4",
	"vdbox5",
	"vdbox6",
	"vdbox7",
	"vebox0",
	"vebox1",
	"vebox2",
	"vebox3",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

#define fw_ack(d) readl((d)->reg_ack)
#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	/*
	 * We don't really know if the powerwell for the forcewake domain we are
	 * trying to reset here does exist at this point (engines could be fused
	 * off in ICL+), so no waiting for acks
	 */
	/* WaRsClearFWBitsAtReset:bdw,skl */
	fw_clear(d, 0xffff);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
	d->uncore->fw_domains_timer |= d->mask;
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static inline int
__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack,
	       const u32 value)
{
	return wait_for_atomic((fw_ack(d) & ack) == value,
			       FORCEWAKE_ACK_TIMEOUT_MS);
}

static inline int
wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
	       const u32 ack)
{
	return __wait_for_ack(d, ack, 0);
}

static inline int
wait_ack_set(const struct intel_uncore_forcewake_domain *d,
	     const u32 ack)
{
	return __wait_for_ack(d, ack, ack);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

enum ack_type {
	ACK_CLEAR = 0,
	ACK_SET
};

static int
fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
				 const enum ack_type type)
{
	const u32 ack_bit = FORCEWAKE_KERNEL;
	const u32 value = type == ACK_SET ? ack_bit : 0;
	unsigned int pass;
	bool ack_detected;

	/*
	 * There is a possibility of driver's wake request colliding
	 * with hardware's own wake requests and that can cause
	 * hardware to not deliver the driver's ack message.
	 *
	 * Use a fallback bit toggle to kick the gpu state machine
	 * in the hope that the original ack will be delivered along with
	 * the fallback ack.
	 *
	 * This workaround is described in HSDES #1604254524 and it's known as:
	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
	 * although the name is a bit misleading.
	 */

	pass = 1;
	do {
		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);

		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
		/* Give gt some time to relax before the polling frenzy */
		udelay(10 * pass);
		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);

		ack_detected = (fw_ack(d) & ack_bit) == value;

		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
	} while (!ack_detected && pass++ < 10);

	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
			 intel_uncore_forcewake_domain_to_str(d->id),
			 type == ACK_SET ? "set" : "clear",
			 fw_ack(d),
			 pass);

	return ack_detected ? 0 : -ETIMEDOUT;
}

static inline void
fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
		fw_domain_wait_ack_clear(d);
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	fw_set(d, FORCEWAKE_KERNEL);
}

static inline void
fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
	}
}

static inline void
fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
		return;

	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
		fw_domain_wait_ack_set(d);
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	fw_clear(d, FORCEWAKE_KERNEL);
}

static void
fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_get_with_fallback(struct intel_uncore *uncore,
			     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
		fw_domain_wait_ack_clear_fallback(d);
		fw_domain_get(d);
	}

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_wait_ack_set_fallback(d);

	uncore->fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_put(d);

	uncore->fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct intel_uncore *uncore,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
		fw_domain_reset(d);
}

static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;

	return val;
}

static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
	/*
	 * w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	drm_WARN_ONCE(&uncore->i915->drm,
		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
		      "GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get_normal(uncore, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(uncore);
}

static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
	u32 n;

	/*
	 * On VLV, the FIFO is shared by both SW and HW, so we need to
	 * read the free entries every time.
	 */
	if (IS_VALLEYVIEW(uncore->i915))
		n = fifo_free_entries(uncore);
	else
		n = uncore->fifo_count;

	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
				    GT_FIFO_NUM_RESERVED_ENTRIES,
				    GT_FIFO_TIMEOUT_MS)) {
			drm_dbg(&uncore->i915->drm,
				"GT_FIFO timeout, entries: %u\n", n);
			return;
		}
	}

	uncore->fifo_count = n - 1;
}

static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct intel_uncore *uncore = domain->uncore;
	unsigned long irqflags;

	assert_rpm_device_not_suspended(uncore->rpm);

	if (xchg(&domain->active, false))
		return HRTIMER_RESTART;

	spin_lock_irqsave(&uncore->lock, irqflags);

	uncore->fw_domains_timer &= ~domain->mask;

	GEM_BUG_ON(!domain->wake_count);
	if (--domain->wake_count == 0)
		fw_domains_put(uncore, domain->mask);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return HRTIMER_NORESTART;
}

/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;

	iosf_mbi_assert_punit_acquired();

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		unsigned int tmp;

		active_domains = 0;

		for_each_fw_domain(domain, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&uncore->lock, irqflags);

		for_each_fw_domain(domain, uncore, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&uncore->lock, irqflags);
		cond_resched();
	}

	drm_WARN_ON(&uncore->i915->drm, active_domains);

	fw = uncore->fw_domains_active;
	if (fw)
		fw_domains_put(uncore, fw);

	fw_domains_reset(uncore, uncore->fw_domains);
	assert_forcewakes_inactive(uncore);

	spin_unlock_irqrestore(&uncore->lock, irqflags);

	return fw; /* track the lost user forcewake domains */
}

static bool
fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 dbg;

	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	/*
	 * Bugs in PCI programming (or failing hardware) can occasionally cause
	 * us to lose access to the MMIO BAR. When this happens, register
	 * reads will come back with 0xFFFFFFFF for every register and things
	 * go bad very quickly. Let's try to detect that special case and at
	 * least try to print a more informative message about what has
	 * happened.
	 *
	 * During normal operation the FPGA_DBG register has several unused
	 * bits that will always read back as 0's so we can use them as canaries
	 * to recognize when MMIO accesses are just busted.
	 */
	if (unlikely(dbg == ~0))
		drm_err(&uncore->i915->drm,
			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");

	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	u32 cer;

	cer = __raw_uncore_read32(uncore, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
	u32 fifodbg;

	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);

	if (unlikely(fifodbg)) {
		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
	}

	return fifodbg;
}

static bool
check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret = false;

	lockdep_assert_held(&uncore->debug->lock);

	if (uncore->debug->suspend_count)
		return false;

	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
		ret |= fpga_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_dbg_unclaimed(uncore))
		ret |= vlv_check_for_unclaimed_mmio(uncore);

	if (intel_uncore_has_fifo(uncore))
		ret |= gen6_check_for_fifo_debug(uncore);

	return ret;
}

static void forcewake_early_sanitize(struct intel_uncore *uncore,
				     unsigned int restore_forcewake)
{
	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(uncore->i915)) {
		__raw_uncore_write32(uncore, GTFIFOCTL,
				     __raw_uncore_read32(uncore, GTFIFOCTL) |
				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				     GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	iosf_mbi_punit_acquire();
	intel_uncore_forcewake_reset(uncore);
	if (restore_forcewake) {
		spin_lock_irq(&uncore->lock);
		fw_domains_get(uncore, restore_forcewake);

		if (intel_uncore_has_fifo(uncore))
			uncore->fifo_count = fifo_free_entries(uncore);
		spin_unlock_irq(&uncore->lock);
	}
	iosf_mbi_punit_release();
}

void intel_uncore_suspend(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_punit_acquire();
	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
		&uncore->pmic_bus_access_nb);
	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
	iosf_mbi_punit_release();
}

void intel_uncore_resume_early(struct intel_uncore *uncore)
{
	unsigned int restore_forcewake;

	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");

	if (!intel_uncore_has_forcewake(uncore))
		return;

	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
	forcewake_early_sanitize(uncore, restore_forcewake);

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
	if (!intel_uncore_has_forcewake(uncore))
		return;

	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}

static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		if (domain->wake_count++) {
			fw_domains &= ~domain->mask;
			domain->active = true;
		}
	}

	if (fw_domains)
		fw_domains_get(uncore, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down particular
 * forcewake domains, this function should be called at the beginning of the
 * sequence, and the references should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	assert_rpm_wakelock_held(uncore->rpm);

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_get(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}
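
/*
 * Example usage (illustrative sketch only, not compiled into this file):
 * a caller that must keep the render power well up across a multi-register
 * sequence would bracket it with a symmetric get/put pair:
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	... MMIO sequence that must not race with a power-down ...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */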

/**
 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
 * the GT powerwell and in the process disable our debugging for the
 * duration of userspace's bypass.
 */
void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!uncore->user_forcewake_count++) {
		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
		spin_lock(&uncore->debug->lock);
		mmio_debug_suspend(uncore->debug);
		spin_unlock(&uncore->debug->lock);
	}
	spin_unlock_irq(&uncore->lock);
}

/**
 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
 * @uncore: the intel_uncore structure
 *
 * This function complements intel_uncore_forcewake_user_get() and releases
 * the GT powerwell taken on behalf of the userspace bypass.
 */
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
	spin_lock_irq(&uncore->lock);
	if (!--uncore->user_forcewake_count) {
		spin_lock(&uncore->debug->lock);
		mmio_debug_resume(uncore->debug);

		if (check_for_unclaimed_mmio(uncore))
			drm_info(&uncore->i915->drm,
				 "Invalid mmio detected during user access\n");
		spin_unlock(&uncore->debug->lock);

		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
	}
	spin_unlock_irq(&uncore->lock);
}
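
/*
 * Example pairing (illustrative only): a debugfs-style userspace bypass
 * would wrap its register-poking session as
 *
 *	intel_uncore_forcewake_user_get(uncore);
 *	... userspace reads/writes registers directly ...
 *	intel_uncore_forcewake_user_put(uncore);
 *
 * keeping all power wells up and the unclaimed-mmio debugging quiesced
 * for the duration of the bypass.
 */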

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_get(uncore, fw_domains);
}

static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
					 enum forcewake_domains fw_domains,
					 bool delayed)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= uncore->fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		GEM_BUG_ON(!domain->wake_count);

		if (--domain->wake_count) {
			domain->active = true;
			continue;
		}

		if (delayed &&
		    !(domain->uncore->fw_domains_timer & domain->mask))
			fw_domain_arm_timer(domain);
		else
			fw_domains_put(uncore, domain->mask);
	}
}

/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, false);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irqsave(&uncore->lock, irqflags);
	__intel_uncore_forcewake_put(uncore, fw_domains, true);
	spin_unlock_irqrestore(&uncore->lock, irqflags);
}

/**
 * intel_uncore_forcewake_flush - flush the delayed release
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to flush
 */
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!uncore->fw_get_funcs)
		return;

	fw_domains &= uncore->fw_domains;
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		WRITE_ONCE(domain->active, false);
		if (hrtimer_cancel(&domain->timer))
			intel_uncore_fw_release_timer(&domain->timer);
	}
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @uncore: the intel_uncore structure
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&uncore->lock);

	if (!uncore->fw_get_funcs)
		return;

	__intel_uncore_forcewake_put(uncore, fw_domains, false);
}

void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
	if (!uncore->fw_get_funcs)
		return;

	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
		 "Expected all fw_domains to be inactive, but %08x are still on\n",
		 uncore->fw_domains_active);
}

void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		return;

	if (!uncore->fw_get_funcs)
		return;

	spin_lock_irq(&uncore->lock);

	assert_rpm_wakelock_held(uncore->rpm);

	fw_domains &= uncore->fw_domains;
	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
		 "Expected %08x fw_domains to be active, but %08x are off\n",
		 fw_domains, fw_domains & ~uncore->fw_domains_active);

	/*
	 * Check that the caller has an explicit wakeref and we don't mistake
	 * it for the auto wakeref.
	 */
	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
		unsigned int actual = READ_ONCE(domain->wake_count);
		unsigned int expect = 1;

		if (uncore->fw_domains_timer & domain->mask)
			expect++; /* pending automatic release */

		if (drm_WARN(&uncore->i915->drm, actual < expect,
			     "Expected domain %d to be held awake by caller, count=%d\n",
			     domain->id, actual))
			break;
	}

	spin_unlock_irq(&uncore->lock);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ({ \
	u32 __reg = (reg); \
	__reg < 0x40000 || __reg >= GEN11_BSD_RING_BASE; \
})
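
/*
 * For example (illustrative values only): NEEDS_FORCE_WAKE(0x2030) and
 * NEEDS_FORCE_WAKE(0x1c0000) both evaluate to true (below 0x40000, and at
 * or above GEN11_BSD_RING_BASE, respectively), while a display offset such
 * as 0x70000 evaluates to false and skips the forcewake lookup entirely.
 */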

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({ \
	unsigned int start__ = 0, end__ = (num); \
	typeof(base) result__ = NULL; \
	while (start__ < end__) { \
		unsigned int mid__ = start__ + (end__ - start__) / 2; \
		int ret__ = (cmp)((key), (base) + mid__); \
		if (ret__ < 0) { \
			end__ = mid__; \
		} else if (ret__ > 0) { \
			start__ = mid__ + 1; \
		} else { \
			result__ = (base) + mid__; \
			break; \
		} \
	} \
	result__; \
})
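
/*
 * Usage sketch (see find_fw_domain() below for the real caller): with a
 * table sorted by offset, a lookup is simply
 *
 *	entry = BSEARCH(offset, table, num_entries, fw_range_cmp);
 *
 * yielding NULL when no range in the table contains the given offset.
 */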

static enum forcewake_domains
find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			uncore->fw_domains_table,
			uncore->fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	/*
	 * The list of FW domains depends on the SKU in gen11+ so we
	 * can't determine it statically. We use FORCEWAKE_ALL and
	 * translate it here to the list of available domains.
	 */
	if (entry->domains == FORCEWAKE_ALL)
		return uncore->fw_domains;

	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
		 entry->domains & ~uncore->fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(uncore, offset); \
	__fwd; \
})

/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const struct i915_range gen8_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0x12030, .end = 0x12030 },
	{ .start = 0x1a030, .end = 0x1a030 },
	{ .start = 0x22030, .end = 0x22030 },
	/* TODO: Other registers are not yet used */
};

static const struct i915_range gen11_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2550, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22230, .end = 0x22230 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0230, .end = 0x1C0230 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4230, .end = 0x1C4230 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8230, .end = 0x1C8230 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0230, .end = 0x1D0230 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4230, .end = 0x1D4230 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8230, .end = 0x1D8230 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
};

static const struct i915_range gen12_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4D4, .end = 0xC4D4 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },

	/*
	 * The rest of these ranges are specific to Xe_HP and beyond, but
	 * are reserved/unused ranges on earlier gen12 platforms, so they can
	 * be safely added to the gen12 table.
	 */
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static const struct i915_range dg2_shadowed_regs[] = {
	{ .start = 0x2030, .end = 0x2030 },
	{ .start = 0x2510, .end = 0x2550 },
	{ .start = 0xA008, .end = 0xA00C },
	{ .start = 0xA188, .end = 0xA188 },
	{ .start = 0xA278, .end = 0xA278 },
	{ .start = 0xA540, .end = 0xA56C },
	{ .start = 0xC4C8, .end = 0xC4C8 },
	{ .start = 0xC4E0, .end = 0xC4E0 },
	{ .start = 0xC600, .end = 0xC600 },
	{ .start = 0xC658, .end = 0xC658 },
	{ .start = 0x22030, .end = 0x22030 },
	{ .start = 0x22510, .end = 0x22550 },
	{ .start = 0x1C0030, .end = 0x1C0030 },
	{ .start = 0x1C0510, .end = 0x1C0550 },
	{ .start = 0x1C4030, .end = 0x1C4030 },
	{ .start = 0x1C4510, .end = 0x1C4550 },
	{ .start = 0x1C8030, .end = 0x1C8030 },
	{ .start = 0x1C8510, .end = 0x1C8550 },
	{ .start = 0x1D0030, .end = 0x1D0030 },
	{ .start = 0x1D0510, .end = 0x1D0550 },
	{ .start = 0x1D4030, .end = 0x1D4030 },
	{ .start = 0x1D4510, .end = 0x1D4550 },
	{ .start = 0x1D8030, .end = 0x1D8030 },
	{ .start = 0x1D8510, .end = 0x1D8550 },
	{ .start = 0x1E0030, .end = 0x1E0030 },
	{ .start = 0x1E0510, .end = 0x1E0550 },
	{ .start = 0x1E4030, .end = 0x1E4030 },
	{ .start = 0x1E4510, .end = 0x1E4550 },
	{ .start = 0x1E8030, .end = 0x1E8030 },
	{ .start = 0x1E8510, .end = 0x1E8550 },
	{ .start = 0x1F0030, .end = 0x1F0030 },
	{ .start = 0x1F0510, .end = 0x1F0550 },
	{ .start = 0x1F4030, .end = 0x1F4030 },
	{ .start = 0x1F4510, .end = 0x1F4550 },
	{ .start = 0x1F8030, .end = 0x1F8030 },
	{ .start = 0x1F8510, .end = 0x1F8550 },
};

static int mmio_range_cmp(u32 key, const struct i915_range *range)
{
	if (key < range->start)
		return -1;
	else if (key > range->end)
		return 1;
	else
		return 0;
}

static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
{
	if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
		return false;

	return BSEARCH(offset,
		       uncore->shadowed_reg_table,
		       uncore->shadowed_reg_table_entries,
		       mmio_range_cmp);
}

static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return FORCEWAKE_RENDER;
}

static const struct intel_forcewake_range __gen6_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
};

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	const u32 __offset = (offset); \
	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
		__fwd = find_fw_domain(uncore, __offset); \
	__fwd; \
})

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen11_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8800, 0x8bff, 0),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x95ff, 0),
	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2407f, 0),
	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
};

/*
 * *Must* be sorted by offset ranges! See intel_fw_table_check().
 *
 * Note that the spec lists several reserved/unused ranges that don't
 * actually contain any registers. In the table below we'll combine those
 * reserved ranges with either the preceding or following range to keep the
 * table small and lookups fast.
 */
static const struct intel_forcewake_range __gen12_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
		0x0 - 0xaff: reserved
		0xb00 - 0x1fff: always on */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
		0x4000 - 0x48ff: gt
		0x4900 - 0x51ff: reserved */
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
		0x5200 - 0x53ff: render
		0x5400 - 0x54ff: reserved
		0x5500 - 0x7fff: render */
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
		0x8160 - 0x817f: reserved
		0x8180 - 0x81ff: always on */
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
		0x8500 - 0x87ff: gt
		0x8800 - 0x8fff: reserved
		0x9000 - 0x947f: gt
		0x9480 - 0x94cf: reserved */
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
		0x9560 - 0x95ff: always on
		0x9600 - 0x97ff: reserved */
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
		0xb400 - 0xbf7f: gt
		0xb480 - 0xbfff: reserved
		0xc000 - 0xcfff: gt */
	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
		0xdc00 - 0xddff: render
		0xde00 - 0xde7f: reserved
		0xde80 - 0xe8ff: render
		0xe900 - 0xefff: reserved */
	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
		0xf000 - 0xffff: gt
		0x10000 - 0x147ff: reserved */
	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
		0x14800 - 0x14fff: render
		0x15000 - 0x16dff: reserved
		0x16e00 - 0x1bfff: render
		0x1c000 - 0x1ffff: reserved */
	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
		0x24000 - 0x2407f: always on
		0x24080 - 0x2417f: reserved */
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
		0x24180 - 0x241ff: gt
		0x24200 - 0x249ff: reserved */
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
		0x24a00 - 0x24a7f: render
		0x24a80 - 0x251ff: reserved */
	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
		0x25200 - 0x252ff: gt
		0x25300 - 0x255ff: reserved */
	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25680 - 0x256ff: VD2
		0x25700 - 0x259ff: reserved */
	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x25a80 - 0x25aff: VD2
		0x25b00 - 0x2ffff: reserved */
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1c0000 - 0x1c2bff: VD0
		0x1c2c00 - 0x1c2cff: reserved
		0x1c2d00 - 0x1c2dff: VD0
		0x1c2e00 - 0x1c3eff: reserved
		0x1c3f00 - 0x1c3fff: VD0 */
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
		0x1c8000 - 0x1ca0ff: VE0
		0x1ca100 - 0x1cbeff: reserved
		0x1cbf00 - 0x1cbfff: VE0 */
	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
		0x1cc000 - 0x1ccfff: VD0
		0x1cd000 - 0x1cffff: reserved */
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
		0x1d0000 - 0x1d2bff: VD2
		0x1d2c00 - 0x1d2cff: reserved
		0x1d2d00 - 0x1d2dff: VD2
		0x1d2e00 - 0x1d3eff: reserved
		0x1d3f00 - 0x1d3fff: VD2 */
};

/*
 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
 * switching it from the GT domain to the render domain.
 *
 * *Must* be sorted by offset ranges! See intel_fw_table_check().
 */
#define XEHP_FWRANGES(FW_RANGE_D800) \
	GEN_FW_RANGE(0x0, 0x1fff, 0), /* \
		0x0 - 0xaff: reserved \
		0xb00 - 0x1fff: always on */ \
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x4b00, 0x51ff, 0), /* \
		0x4b00 - 0x4fff: reserved \
		0x5000 - 0x51ff: always on */ \
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8160, 0x81ff, 0), /* \
		0x8160 - 0x817f: reserved \
		0x8180 - 0x81ff: always on */ \
	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /* \
		0x8500 - 0x87ff: gt \
		0x8800 - 0x8c7f: reserved \
		0x8c80 - 0x8cff: gt (DG2 only) */ \
	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /* \
		0x8d00 - 0x8dff: render (DG2 only) \
		0x8e00 - 0x8fff: reserved */ \
	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /* \
		0x9000 - 0x947f: gt \
		0x9480 - 0x94cf: reserved */ \
	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x9560, 0x967f, 0), /* \
		0x9560 - 0x95ff: always on \
		0x9600 - 0x967f: reserved */ \
	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /* \
		0x9680 - 0x96ff: render (DG2 only) \
		0x9700 - 0x97ff: reserved */ \
	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /* \
		0x9800 - 0xb4ff: gt \
		0xb500 - 0xbfff: reserved \
		0xc000 - 0xcfff: gt */ \
	GEN_FW_RANGE(0xd000, 0xd7ff, 0), \
	GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800), \
	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /* \
		0xdd00 - 0xddff: gt \
		0xde00 - 0xde7f: reserved */ \
	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /* \
		0xde80 - 0xdfff: render \
		0xe000 - 0xe0ff: reserved \
		0xe100 - 0xe8ff: render */ \
	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /* \
		0xe900 - 0xe9ff: gt \
		0xea00 - 0xefff: reserved \
		0xf000 - 0xffff: gt */ \
	GEN_FW_RANGE(0x10000, 0x12fff, 0), /* \
		0x10000 - 0x11fff: reserved \
		0x12000 - 0x127ff: always on \
		0x12800 - 0x12fff: reserved */ \
	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \
	GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \
		0x13200 - 0x133ff: VD2 (DG2 only) \
		0x13400 - 0x13fff: reserved */ \
	GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \
	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \
		0x15000 - 0x15fff: gt (DG2 only) \
		0x16000 - 0x16dff: reserved */ \
	GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \
	GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \
		0x20000 - 0x20fff: VD0 (XEHPSDV only) \
		0x21000 - 0x21fff: reserved */ \
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \
		0x24000 - 0x2407f: always on \
		0x24080 - 0x2417f: reserved */ \
	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /* \
		0x24180 - 0x241ff: gt \
		0x24200 - 0x249ff: reserved */ \
	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /* \
		0x24a00 - 0x24a7f: render \
		0x24a80 - 0x251ff: reserved */ \
	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /* \
		0x25200 - 0x252ff: gt \
		0x25300 - 0x25fff: reserved */ \
	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /* \
		0x26000 - 0x27fff: render \
		0x28000 - 0x29fff: reserved \
		0x2a000 - 0x2ffff: undocumented */ \
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT), \
	GEN_FW_RANGE(0x40000, 0x1bffff, 0), \
	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /* \
		0x1c0000 - 0x1c2bff: VD0 \
		0x1c2c00 - 0x1c2cff: reserved \
		0x1c2d00 - 0x1c2dff: VD0 \
		0x1c2e00 - 0x1c3eff: VD0 (DG2 only) \
		0x1c3f00 - 0x1c3fff: VD0 */ \
	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /* \
		0x1c4000 - 0x1c6bff: VD1 \
		0x1c6c00 - 0x1c6cff: reserved \
		0x1c6d00 - 0x1c6dff: VD1 \
		0x1c6e00 - 0x1c7fff: reserved */ \
	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /* \
		0x1c8000 - 0x1ca0ff: VE0 \
		0x1ca100 - 0x1cbfff: reserved */ \
	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0), \
	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2), \
	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4), \
	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6), \
	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /* \
		0x1d0000 - 0x1d2bff: VD2 \
		0x1d2c00 - 0x1d2cff: reserved \
		0x1d2d00 - 0x1d2dff: VD2 \
		0x1d2e00 - 0x1d3dff: VD2 (DG2 only) \
		0x1d3e00 - 0x1d3eff: reserved \
		0x1d3f00 - 0x1d3fff: VD2 */ \
	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /* \
		0x1d4000 - 0x1d6bff: VD3 \
		0x1d6c00 - 0x1d6cff: reserved \
		0x1d6d00 - 0x1d6dff: VD3 \
		0x1d6e00 - 0x1d7fff: reserved */ \
	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /* \
		0x1d8000 - 0x1da0ff: VE1 \
		0x1da100 - 0x1dffff: reserved */ \
	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /* \
		0x1e0000 - 0x1e2bff: VD4 \
		0x1e2c00 - 0x1e2cff: reserved \
		0x1e2d00 - 0x1e2dff: VD4 \
		0x1e2e00 - 0x1e3eff: reserved \
		0x1e3f00 - 0x1e3fff: VD4 */ \
	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /* \
		0x1e4000 - 0x1e6bff: VD5 \
		0x1e6c00 - 0x1e6cff: reserved \
		0x1e6d00 - 0x1e6dff: VD5 \
		0x1e6e00 - 0x1e7fff: reserved */ \
	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /* \
		0x1e8000 - 0x1ea0ff: VE2 \
		0x1ea100 - 0x1effff: reserved */ \
	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /* \
		0x1f0000 - 0x1f2bff: VD6 \
		0x1f2c00 - 0x1f2cff: reserved \
		0x1f2d00 - 0x1f2dff: VD6 \
		0x1f2e00 - 0x1f3eff: reserved \
		0x1f3f00 - 0x1f3fff: VD6 */ \
	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /* \
		0x1f4000 - 0x1f6bff: VD7 \
		0x1f6c00 - 0x1f6cff: reserved \
		0x1f6d00 - 0x1f6dff: VD7 \
		0x1f6e00 - 0x1f7fff: reserved */ \
	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),

static const struct intel_forcewake_range __xehp_fw_ranges[] = {
	XEHP_FWRANGES(FORCEWAKE_GT)
};

static const struct intel_forcewake_range __dg2_fw_ranges[] = {
	XEHP_FWRANGES(FORCEWAKE_RENDER)
};

static void
ilk_dummy_write(struct intel_uncore *uncore)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_uncore_write32(uncore, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct intel_uncore *uncore,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (drm_WARN(&uncore->i915->drm,
		     check_for_unclaimed_mmio(uncore) && !before,
		     "Unclaimed %s register 0x%x\n",
		     read ? "read from" : "write to",
		     i915_mmio_reg_offset(reg)))
		/* Only report the first N failures */
		uncore->i915->params.mmio_debug--;
}

static inline void
unclaimed_reg_debug(struct intel_uncore *uncore,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!uncore->i915->params.mmio_debug))
		return;

	/* interrupts are disabled and re-enabled around uncore->lock usage */
	lockdep_assert_held(&uncore->lock);

	if (before)
		spin_lock(&uncore->debug->lock);

	__unclaimed_reg_debug(uncore, reg, read, before);

	if (!before)
		spin_unlock(&uncore->debug->lock);
}

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	u##x val = __raw_uncore_read##x(uncore, reg); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val; \
}
__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(uncore); \
	val = __raw_uncore_read##x(uncore, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(uncore, reg, true, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static noinline void ___force_wake_auto(struct intel_uncore *uncore,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
		fw_domain_arm_timer(domain);

	fw_domains_get(uncore, fw_domains);
}

static inline void __force_wake_auto(struct intel_uncore *uncore,
				     enum forcewake_domains fw_domains)
{
	GEM_BUG_ON(!fw_domains);

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= uncore->fw_domains;
	fw_domains &= ~uncore->fw_domains_active;

	if (fw_domains)
		___force_wake_auto(uncore, fw_domains);
}
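
/*
 * Illustrative flow (sketch only, not compiled): an auto-forcewake read of
 * a render-domain register with no explicit wakeref held goes through
 *
 *	__force_wake_auto(uncore, FORCEWAKE_RENDER);
 *		-> fw_domain_arm_timer(d);	(1ms auto-release armed)
 *		-> fw_domains_get(uncore, FORCEWAKE_RENDER);
 *
 * and roughly a millisecond later intel_uncore_fw_release_timer() drops the
 * reference again, unless another access has marked the domain active.
 */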
1629
1630#define __gen_fwtable_read(x) \
1631static u##x \
1632fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
1633{ \
1634 enum forcewake_domains fw_engine; \
1635 GEN6_READ_HEADER(x); \
1636 fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
1637 if (fw_engine) \
1638 __force_wake_auto(uncore, fw_engine); \
1639 val = __raw_uncore_read##x(uncore, reg); \
1640 GEN6_READ_FOOTER; \
1641}
1642
1643static enum forcewake_domains
1644fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
1645 return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
1646}
1647
1648__gen_fwtable_read(8)
1649__gen_fwtable_read(16)
1650__gen_fwtable_read(32)
1651__gen_fwtable_read(64)
1652
1653#undef __gen_fwtable_read
1654#undef GEN6_READ_FOOTER
1655#undef GEN6_READ_HEADER
1656
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm);

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(uncore->rpm); \
	spin_lock_irqsave(&uncore->lock, irqflags); \
	unclaimed_reg_debug(uncore, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(uncore, reg, false, false); \
	spin_unlock_irqrestore(&uncore->lock, irqflags)

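/*
 * gen6/gen7 have a limited GT write FIFO: writes to registers that need
 * forcewake must first wait for FIFO space via __gen6_gt_wait_for_fifo()
 * or risk being dropped.
 */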
#define __gen6_write(x) \
static void \
gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__gen6_gt_wait_for_fifo(uncore); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#define __gen_fwtable_write(x) \
static void \
fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
	if (fw_engine) \
		__force_wake_auto(uncore, fw_engine); \
	__raw_uncore_write##x(uncore, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static enum forcewake_domains
fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
{
	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
}

__gen_fwtable_write(8)
__gen_fwtable_write(16)
__gen_fwtable_write(32)

#undef __gen_fwtable_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

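/*
 * Under a vGPU the hypervisor traps and emulates MMIO accesses, so the
 * guest needs no forcewake or FIFO management; writes only get traced.
 */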
#define __vgpu_write(x) \
static void \
vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	__raw_uncore_write##x(uncore, reg, val); \
}
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)

#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_writeb = x##_write8; \
	(uncore)->funcs.mmio_writew = x##_write16; \
	(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
	(uncore)->funcs.mmio_readb = x##_read8; \
	(uncore)->funcs.mmio_readw = x##_read16; \
	(uncore)->funcs.mmio_readl = x##_read32; \
	(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)

#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
} while (0)

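/*
 * A forcewake domain ties a set/ack register pair to a domain id and its
 * auto-release timer. __fw_domain_init() allocates and registers one,
 * fw_domain_fini() tears it down again.
 */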
static int __fw_domain_init(struct intel_uncore *uncore,
			    enum forcewake_domain_id domain_id,
			    i915_reg_t reg_set,
			    i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
	GEM_BUG_ON(uncore->fw_domain[domain_id]);

	if (i915_inject_probe_failure(uncore->i915))
		return -ENOMEM;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));

	d->uncore = uncore;
	d->wake_count = 0;
	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	uncore->fw_domains |= BIT(domain_id);

	fw_domain_reset(d);

	uncore->fw_domain[domain_id] = d;

	return 0;
}

static void fw_domain_fini(struct intel_uncore *uncore,
			   enum forcewake_domain_id domain_id)
{
	struct intel_uncore_forcewake_domain *d;

	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);

	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
	if (!d)
		return;

	uncore->fw_domains &= ~BIT(domain_id);
	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
	kfree(d);
}

static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
{
	struct intel_uncore_forcewake_domain *d;
	int tmp;

	for_each_fw_domain(d, uncore, tmp)
		fw_domain_fini(uncore, d->id);
}

static const struct intel_uncore_fw_get uncore_get_fallback = {
	.force_wake_get = fw_domains_get_with_fallback
};

static const struct intel_uncore_fw_get uncore_get_normal = {
	.force_wake_get = fw_domains_get_normal,
};

static const struct intel_uncore_fw_get uncore_get_thread_status = {
	.force_wake_get = fw_domains_get_with_thread_status
};

static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret = 0;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

#define fw_domain_init(uncore__, id__, set__, ack__) \
	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))

	if (GRAPHICS_VER(i915) >= 11) {
		/* we'll prune the domains of missing engines later */
		intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
		int i;

		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);

		for (i = 0; i < I915_MAX_VCS; i++) {
			if (!__HAS_ENGINE(emask, _VCS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
		}
		for (i = 0; i < I915_MAX_VECS; i++) {
			if (!__HAS_ENGINE(emask, _VECS(i)))
				continue;

			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
		}
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		uncore->fw_get_funcs = &uncore_get_fallback;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
			       FORCEWAKE_GT_GEN9,
			       FORCEWAKE_ACK_GT_GEN9);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		uncore->fw_get_funcs = &uncore_get_normal;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(i915)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		uncore->fw_get_funcs = &uncore_get_thread_status;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to also reset the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_uncore_write32(uncore, FORCEWAKE, 0);
		__raw_posting_read(uncore, ECOBUS);

		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
		if (ret)
			goto out;

		spin_lock_irq(&uncore->lock);
		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
		ecobus = __raw_uncore_read32(uncore, ECOBUS);
		fw_domains_put(uncore, FORCEWAKE_RENDER);
		spin_unlock_irq(&uncore->lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (GRAPHICS_VER(i915) == 6) {
		uncore->fw_get_funcs = &uncore_get_thread_status;
		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

#undef fw_domain_init

	/* All future platforms are expected to require complex power gating */
	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);

out:
	if (ret)
		intel_uncore_fw_domains_fini(uncore);

	return ret;
}

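/*
 * The forcewake range table maps MMIO offset ranges to the domains that
 * must be awake for an access; the shadow table lists registers whose
 * writes the hardware handles even while asleep, so they need no
 * explicit forcewake.
 */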
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
	(uncore)->fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}

#define ASSIGN_SHADOW_TABLE(uncore, d) \
{ \
	(uncore)->shadowed_reg_table = d; \
	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct intel_uncore *uncore = container_of(nb,
						   struct intel_uncore, pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later, which on systems where this notifier gets
		 * called requires the punit to access the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 *
		 * The notifier is unregistered during intel_runtime_suspend(),
		 * so it's ok to access the HW here without holding an RPM
		 * wake reference -> disable wakeref asserts for the time of
		 * the access.
		 */
		disable_rpm_wakeref_asserts(uncore->rpm);
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		enable_rpm_wakeref_asserts(uncore->rpm);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}

int intel_uncore_setup_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	int mmio_bar;
	int mmio_size;

	mmio_bar = GRAPHICS_VER(i915) == 2 ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 * For dgfx chips the register range is expanded to 4MB.
	 */
	if (GRAPHICS_VER(i915) < 5)
		mmio_size = 512 * 1024;
	else if (IS_DGFX(i915))
		mmio_size = 4 * 1024 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;

	uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
	if (uncore->regs == NULL) {
		drm_err(&i915->drm, "failed to map registers\n");
		return -EIO;
	}

	return 0;
}

void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
{
	struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev);

	pci_iounmap(pdev, uncore->regs);
}

void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct intel_gt *gt)
{
	spin_lock_init(&uncore->lock);
	uncore->i915 = gt->i915;
	uncore->gt = gt;
	uncore->rpm = &gt->i915->runtime_pm;
	uncore->debug = &gt->i915->mmio_debug;
}

static void uncore_raw_init(struct intel_uncore *uncore)
{
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));

	if (intel_vgpu_active(uncore->i915)) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
	} else if (GRAPHICS_VER(uncore->i915) == 5) {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
	} else {
		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
	}
}

static int uncore_forcewake_init(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));

	ret = intel_uncore_fw_domains_init(uncore);
	if (ret)
		return ret;
	forcewake_early_sanitize(uncore, 0);

	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) >= 12) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 11) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_CHERRYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (GRAPHICS_VER(i915) == 8) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
	} else if (IS_VALLEYVIEW(i915)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
	}

	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);

	return 0;
}

int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	int ret;

	/*
	 * The boot firmware initializes local memory and assesses its health.
	 * If memory training fails, the punit will have been instructed to
	 * keep the GT powered down; we won't be able to communicate with it
	 * and we should not continue with driver initialization.
	 */
	if (IS_DGFX(i915) &&
	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
		return -ENODEV;
	}

	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
		uncore->flags |= UNCORE_HAS_FORCEWAKE;

	if (!intel_uncore_has_forcewake(uncore)) {
		uncore_raw_init(uncore);
	} else {
		ret = uncore_forcewake_init(uncore);
		if (ret)
			return ret;
	}

	/* make sure fw funcs are set if and only if we have fw */
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);

	if (HAS_FPGA_DBG_UNCLAIMED(i915))
		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;

	if (IS_GRAPHICS_VER(i915, 6, 7))
		uncore->flags |= UNCORE_HAS_FIFO;

	/* clear out unclaimed reg detection bit */
	if (intel_uncore_unclaimed_mmio(uncore))
		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");

	return 0;
}

/*
 * We might have detected that some engines are fused off after we initialized
 * the forcewake domains. Prune them, to make sure they only reference existing
 * engines.
 */
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt)
{
	enum forcewake_domains fw_domains = uncore->fw_domains;
	enum forcewake_domain_id domain_id;
	int i;

	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
		return;

	for (i = 0; i < I915_MAX_VCS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;

		if (HAS_ENGINE(gt, _VCS(i)))
			continue;

		/*
		 * Starting with XeHP, the power well for an even-numbered
		 * VDBOX is also used for shared units within the
		 * media slice such as SFC. So even if the engine
		 * itself is fused off, we still need to initialize
		 * the forcewake domain if any of the other engines
		 * in the same media slice are present.
		 */
		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
				continue;

			if (HAS_ENGINE(gt, _VECS(i / 2)))
				continue;
		}

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}

	for (i = 0; i < I915_MAX_VECS; i++) {
		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;

		if (HAS_ENGINE(gt, _VECS(i)))
			continue;

		if (fw_domains & BIT(domain_id))
			fw_domain_fini(uncore, domain_id);
	}
}

void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
	if (intel_uncore_has_forcewake(uncore)) {
		iosf_mbi_punit_acquire();
		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
			&uncore->pmic_bus_access_nb);
		intel_uncore_forcewake_reset(uncore);
		intel_uncore_fw_domains_fini(uncore);
		iosf_mbi_punit_release();
	}
}

static const struct reg_whitelist {
	i915_reg_t offset_ldw;
	i915_reg_t offset_udw;
	u8 min_graphics_ver;
	u8 max_graphics_ver;
	u8 size;
} reg_read_whitelist[] = { {
	.offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	.min_graphics_ver = 4,
	.max_graphics_ver = 12,
	.size = 8
} };

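/*
 * Whitelist entries are size-aligned, so the low bits of the offset
 * passed in by userspace are free to carry flags; currently only
 * I915_REG_READ_8B_WA, selecting the 2x32 read of a 64b register.
 */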
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_uncore *uncore = &i915->uncore;
	struct drm_i915_reg_read *reg = data;
	struct reg_whitelist const *entry;
	intel_wakeref_t wakeref;
	unsigned int flags;
	int remain;
	int ret = 0;

	entry = reg_read_whitelist;
	remain = ARRAY_SIZE(reg_read_whitelist);
	while (remain) {
		u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw);

		GEM_BUG_ON(!is_power_of_2(entry->size));
		GEM_BUG_ON(entry->size > 8);
		GEM_BUG_ON(entry_offset & (entry->size - 1));

		if (IS_GRAPHICS_VER(i915, entry->min_graphics_ver, entry->max_graphics_ver) &&
		    entry_offset == (reg->offset & -entry->size))
			break;
		entry++;
		remain--;
	}

	if (!remain)
		return -EINVAL;

	flags = reg->offset & (entry->size - 1);

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
			reg->val = intel_uncore_read64_2x32(uncore,
							    entry->offset_ldw,
							    entry->offset_udw);
		else if (entry->size == 8 && flags == 0)
			reg->val = intel_uncore_read64(uncore,
						       entry->offset_ldw);
		else if (entry->size == 4 && flags == 0)
			reg->val = intel_uncore_read(uncore, entry->offset_ldw);
		else if (entry->size == 2 && flags == 0)
			reg->val = intel_uncore_read16(uncore,
						       entry->offset_ldw);
		else if (entry->size == 1 && flags == 0)
			reg->val = intel_uncore_read8(uncore,
						      entry->offset_ldw);
		else
			ret = -EINVAL;
	}

	return ret;
}

/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value = 0;
#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}

/**
 * __intel_wait_for_register - wait until register matches expected state
 * @uncore: the struct intel_uncore
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
 * @slow_timeout_ms: slow timeout in millisecond
 * @out_value: optional placeholder to hold register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_uncore_read(uncore, reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value)
{
	unsigned int fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	u32 reg_value;
	int ret;

	might_sleep_if(slow_timeout_ms);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	ret = __intel_wait_for_register_fw(uncore,
					   reg, mask, value,
					   fast_timeout_us, 0, &reg_value);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irq(&uncore->lock);

	if (ret && slow_timeout_ms)
		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
								       reg),
				 (reg_value & mask) == value,
				 slow_timeout_ms * 1000, 10, 1000);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	if (out_value)
		*out_value = reg_value;

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
	bool ret;

	spin_lock_irq(&uncore->debug->lock);
	ret = check_for_unclaimed_mmio(uncore);
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
	bool ret = false;

	spin_lock_irq(&uncore->debug->lock);

	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
		goto out;

	if (unlikely(check_for_unclaimed_mmio(uncore))) {
		if (!uncore->i915->params.mmio_debug) {
			drm_dbg(&uncore->i915->drm,
				"Unclaimed register detected, "
				"enabling oneshot unclaimed register reporting. "
				"Please use i915.mmio_debug=N for more information.\n");
			uncore->i915->params.mmio_debug++;
		}
		uncore->debug->unclaimed_mmio_check--;
		ret = true;
	}

out:
	spin_unlock_irq(&uncore->debug->lock);

	return ret;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 * a register
 * @uncore: pointer to struct intel_uncore
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains that must be taken with, for example,
 * intel_uncore_forcewake_get() for the specified register to be accessible in
 * the specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: on Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
 * requires the callers to do FIFO management on their own or risk losing
 * writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	drm_WARN_ON(&uncore->i915->drm, !op);

	if (!intel_uncore_has_forcewake(uncore))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);

	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);

	return fw_domains;
}

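/*
 * Multicast/replicated (MCR) registers return data from one hardware-
 * selected instance unless the selector is steered first; the helpers
 * below temporarily steer GEN8_MCR_SELECTOR to the requested
 * slice/subslice around the read and then restore the previous value.
 */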
u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
					   i915_reg_t reg,
					   int slice, int subslice)
{
	u32 mcr_mask, mcr_ss, mcr, old_mcr, val;

	lockdep_assert_held(&uncore->lock);

	if (GRAPHICS_VER(uncore->i915) >= 11) {
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
		mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
		mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	}

	old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);

	mcr &= ~mcr_mask;
	mcr |= mcr_ss;
	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	val = intel_uncore_read_fw(uncore, reg);

	mcr &= ~mcr_mask;
	mcr |= old_mcr & mcr_mask;

	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	return val;
}

u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
					i915_reg_t reg, int slice, int subslice)
{
	enum forcewake_domains fw_domains;
	u32 val;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	val = intel_uncore_read_with_mcr_steering_fw(uncore, reg, slice, subslice);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irq(&uncore->lock);

	return val;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif