Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Record GT workarounds in a list

To enable later verification of GT workaround state at various stages of
driver lifetime, we record the list of applicable ones per platform to a
list, from which they are also applied.

The added data structure is a simple array of register, mask and value
items, which is allocated on demand as workarounds are added to the list.

This is a temporary implementation which later in the series gets fused
with the existing per context workaround list handling. It is separated at
this stage since the following patch fixes a bug which needs to be as easy
to backport as possible.

Also, since in the following patch we will be adding a new class of
workarounds (per engine) which can be applied from interrupt context, we
straight away make the provision for safe read-modify-write cycle.

v2:
* Change dev_priv to i915 along the init path. (Chris Wilson)
* API rename. (Chris Wilson)

v3:
* Remove explicit list size tracking in favour of growing the allocation
in power of two chunks. (Chris Wilson)

v4:
Chris Wilson:
* Change wa_list_finish to early return.
* Copy workarounds using the compiler for static checking.
* Do not bother zeroing unused entries.
* Re-order struct i915_wa_list.

v5:
* kmalloc_array.
* Whitespace cleanup.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20181203133319.10174-1-tvrtko.ursulin@linux.intel.com
(cherry picked from commit 25d140faaa25f728159eb8c304eae53d88a7f14e)
Fixes: 59b449d5c82a ("drm/i915: Split out functions for different kinds of workarounds")
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

authored by

Tvrtko Ursulin and committed by
Joonas Lahtinen
00936779 25956467

+352 -160
+1
drivers/gpu/drm/i915/i915_drv.c
··· 1444 1444 1445 1445 intel_uncore_sanitize(dev_priv); 1446 1446 1447 + intel_gt_init_workarounds(dev_priv); 1447 1448 i915_gem_load_init_fences(dev_priv); 1448 1449 1449 1450 /* On the 945G/GM, the chipset reports the MSI capability on the
+2
drivers/gpu/drm/i915/i915_drv.h
··· 67 67 #include "intel_ringbuffer.h" 68 68 #include "intel_uncore.h" 69 69 #include "intel_wopcm.h" 70 + #include "intel_workarounds.h" 70 71 #include "intel_uc.h" 71 72 72 73 #include "i915_gem.h" ··· 1806 1805 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 1807 1806 1808 1807 struct i915_workarounds workarounds; 1808 + struct i915_wa_list gt_wa_list; 1809 1809 1810 1810 struct i915_frontbuffer_tracking fb_tracking; 1811 1811
+3 -1
drivers/gpu/drm/i915/i915_gem.c
··· 5305 5305 } 5306 5306 } 5307 5307 5308 - intel_gt_workarounds_apply(dev_priv); 5308 + intel_gt_apply_workarounds(dev_priv); 5309 5309 5310 5310 i915_gem_init_swizzling(dev_priv); 5311 5311 ··· 5676 5676 i915_gem_cleanup_engines(dev_priv); 5677 5677 i915_gem_contexts_fini(dev_priv); 5678 5678 mutex_unlock(&dev_priv->drm.struct_mutex); 5679 + 5680 + intel_wa_list_free(&dev_priv->gt_wa_list); 5679 5681 5680 5682 intel_cleanup_gt_powersave(dev_priv); 5681 5683
+324 -158
drivers/gpu/drm/i915/intel_workarounds.c
··· 48 48 * - Public functions to init or apply the given workaround type. 49 49 */ 50 50 51 + static void wa_init_start(struct i915_wa_list *wal, const char *name) 52 + { 53 + wal->name = name; 54 + } 55 + 56 + static void wa_init_finish(struct i915_wa_list *wal) 57 + { 58 + if (!wal->count) 59 + return; 60 + 61 + DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n", 62 + wal->count, wal->name); 63 + } 64 + 51 65 static void wa_add(struct drm_i915_private *i915, 52 66 i915_reg_t reg, const u32 mask, const u32 val) 53 67 { ··· 594 580 return 0; 595 581 } 596 582 597 - static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv) 583 + static void 584 + wal_add(struct i915_wa_list *wal, const struct i915_wa *wa) 598 585 { 586 + const unsigned int grow = 1 << 4; 587 + 588 + GEM_BUG_ON(!is_power_of_2(grow)); 589 + 590 + if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */ 591 + struct i915_wa *list; 592 + 593 + list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa), 594 + GFP_KERNEL); 595 + if (!list) { 596 + DRM_ERROR("No space for workaround init!\n"); 597 + return; 598 + } 599 + 600 + if (wal->list) 601 + memcpy(list, wal->list, sizeof(*wa) * wal->count); 602 + 603 + wal->list = list; 604 + } 605 + 606 + wal->list[wal->count++] = *wa; 599 607 } 600 608 601 - static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv) 609 + static void 610 + wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) 602 611 { 612 + struct i915_wa wa = { 613 + .reg = reg, 614 + .mask = val, 615 + .val = _MASKED_BIT_ENABLE(val) 616 + }; 617 + 618 + wal_add(wal, &wa); 603 619 } 604 620 605 - static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv) 621 + static void 622 + wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, 623 + u32 val) 606 624 { 625 + struct i915_wa wa = { 626 + .reg = reg, 627 + .mask = mask, 628 + .val = val 629 + }; 630 + 631 + wal_add(wal, &wa); 632 + } 633 + 634 + static void 
635 + wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val) 636 + { 637 + wa_write_masked_or(wal, reg, ~0, val); 638 + } 639 + 640 + static void 641 + wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val) 642 + { 643 + wa_write_masked_or(wal, reg, val, val); 644 + } 645 + 646 + static void gen9_gt_workarounds_init(struct drm_i915_private *i915) 647 + { 648 + struct i915_wa_list *wal = &i915->gt_wa_list; 649 + 607 650 /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ 608 - I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, 609 - _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE)); 651 + wa_masked_en(wal, 652 + GEN9_CSFE_CHICKEN1_RCS, 653 + GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE); 654 + 610 655 611 656 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ 612 - I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | 613 - GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); 657 + wa_write_or(wal, 658 + BDW_SCRATCH1, 659 + GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); 614 660 615 661 /* WaDisableKillLogic:bxt,skl,kbl */ 616 - if (!IS_COFFEELAKE(dev_priv)) 617 - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 618 - ECOCHK_DIS_TLB); 662 + if (!IS_COFFEELAKE(i915)) 663 + wa_write_or(wal, 664 + GAM_ECOCHK, 665 + ECOCHK_DIS_TLB); 619 666 620 - if (HAS_LLC(dev_priv)) { 667 + if (HAS_LLC(i915)) { 621 668 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl 622 669 * 623 670 * Must match Display Engine. See 624 671 * WaCompressedResourceDisplayNewHashMode. 
625 672 */ 626 - I915_WRITE(MMCD_MISC_CTRL, 627 - I915_READ(MMCD_MISC_CTRL) | 628 - MMCD_PCLA | 629 - MMCD_HOTSPOT_EN); 673 + wa_write_or(wal, 674 + MMCD_MISC_CTRL, 675 + MMCD_PCLA | MMCD_HOTSPOT_EN); 630 676 } 631 677 632 678 /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ 633 - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 634 - BDW_DISABLE_HDC_INVALIDATION); 679 + wa_write_or(wal, 680 + GAM_ECOCHK, 681 + BDW_DISABLE_HDC_INVALIDATION); 635 682 636 683 /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ 637 - if (IS_GEN9_LP(dev_priv)) { 638 - u32 val = I915_READ(GEN8_L3SQCREG1); 639 - 640 - val &= ~L3_PRIO_CREDITS_MASK; 641 - val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2); 642 - I915_WRITE(GEN8_L3SQCREG1, val); 643 - } 684 + if (IS_GEN9_LP(i915)) 685 + wa_write_masked_or(wal, 686 + GEN8_L3SQCREG1, 687 + L3_PRIO_CREDITS_MASK, 688 + L3_GENERAL_PRIO_CREDITS(62) | 689 + L3_HIGH_PRIO_CREDITS(2)); 644 690 645 691 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ 646 - I915_WRITE(GEN8_L3SQCREG4, 647 - I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES); 692 + wa_write_or(wal, 693 + GEN8_L3SQCREG4, 694 + GEN8_LQSC_FLUSH_COHERENT_LINES); 648 695 649 696 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */ 650 - I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, 651 - _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); 697 + wa_masked_en(wal, 698 + GEN7_FF_SLICE_CS_CHICKEN1, 699 + GEN9_FFSC_PERCTX_PREEMPT_CTRL); 652 700 } 653 701 654 - static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv) 702 + static void skl_gt_workarounds_init(struct drm_i915_private *i915) 655 703 { 656 - gen9_gt_workarounds_apply(dev_priv); 704 + struct i915_wa_list *wal = &i915->gt_wa_list; 705 + 706 + gen9_gt_workarounds_init(i915); 657 707 658 708 /* WaEnableGapsTsvCreditFix:skl */ 659 - I915_WRITE(GEN8_GARBCNTL, 660 - I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); 709 + wa_write_or(wal, 710 + GEN8_GARBCNTL, 711 + GEN9_GAPS_TSV_CREDIT_DISABLE); 661 712 
662 713 /* WaDisableGafsUnitClkGating:skl */ 663 - I915_WRITE(GEN7_UCGCTL4, 664 - I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 714 + wa_write_or(wal, 715 + GEN7_UCGCTL4, 716 + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 665 717 666 718 /* WaInPlaceDecompressionHang:skl */ 667 - if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) 668 - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, 669 - I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 670 - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 719 + if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER)) 720 + wa_write_or(wal, 721 + GEN9_GAMT_ECO_REG_RW_IA, 722 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 671 723 } 672 724 673 - static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv) 725 + static void bxt_gt_workarounds_init(struct drm_i915_private *i915) 674 726 { 675 - gen9_gt_workarounds_apply(dev_priv); 727 + struct i915_wa_list *wal = &i915->gt_wa_list; 728 + 729 + gen9_gt_workarounds_init(i915); 676 730 677 731 /* WaDisablePooledEuLoadBalancingFix:bxt */ 678 - I915_WRITE(FF_SLICE_CS_CHICKEN2, 679 - _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE)); 732 + wa_masked_en(wal, 733 + FF_SLICE_CS_CHICKEN2, 734 + GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); 680 735 681 736 /* WaInPlaceDecompressionHang:bxt */ 682 - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, 683 - I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 684 - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 737 + wa_write_or(wal, 738 + GEN9_GAMT_ECO_REG_RW_IA, 739 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 685 740 } 686 741 687 - static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv) 742 + static void kbl_gt_workarounds_init(struct drm_i915_private *i915) 688 743 { 689 - gen9_gt_workarounds_apply(dev_priv); 744 + struct i915_wa_list *wal = &i915->gt_wa_list; 745 + 746 + gen9_gt_workarounds_init(i915); 690 747 691 748 /* WaEnableGapsTsvCreditFix:kbl */ 692 - I915_WRITE(GEN8_GARBCNTL, 693 - I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); 749 + wa_write_or(wal, 750 + 
GEN8_GARBCNTL, 751 + GEN9_GAPS_TSV_CREDIT_DISABLE); 694 752 695 753 /* WaDisableDynamicCreditSharing:kbl */ 696 - if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) 697 - I915_WRITE(GAMT_CHKN_BIT_REG, 698 - I915_READ(GAMT_CHKN_BIT_REG) | 699 - GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); 754 + if (IS_KBL_REVID(i915, 0, KBL_REVID_B0)) 755 + wa_write_or(wal, 756 + GAMT_CHKN_BIT_REG, 757 + GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); 700 758 701 759 /* WaDisableGafsUnitClkGating:kbl */ 702 - I915_WRITE(GEN7_UCGCTL4, 703 - I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 760 + wa_write_or(wal, 761 + GEN7_UCGCTL4, 762 + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 704 763 705 764 /* WaInPlaceDecompressionHang:kbl */ 706 - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, 707 - I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 708 - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 765 + wa_write_or(wal, 766 + GEN9_GAMT_ECO_REG_RW_IA, 767 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 709 768 710 769 /* WaKBLVECSSemaphoreWaitPoll:kbl */ 711 - if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) { 770 + if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) { 712 771 struct intel_engine_cs *engine; 713 772 unsigned int tmp; 714 773 715 - for_each_engine(engine, dev_priv, tmp) { 774 + for_each_engine(engine, i915, tmp) { 716 775 if (engine->id == RCS) 717 776 continue; 718 777 719 - I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1); 778 + wa_write(wal, 779 + RING_SEMA_WAIT_POLL(engine->mmio_base), 780 + 1); 720 781 } 721 782 } 722 783 } 723 784 724 - static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv) 785 + static void glk_gt_workarounds_init(struct drm_i915_private *i915) 725 786 { 726 - gen9_gt_workarounds_apply(dev_priv); 787 + gen9_gt_workarounds_init(i915); 727 788 } 728 789 729 - static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv) 790 + static void cfl_gt_workarounds_init(struct drm_i915_private *i915) 730 791 { 731 - gen9_gt_workarounds_apply(dev_priv); 792 + struct 
i915_wa_list *wal = &i915->gt_wa_list; 793 + 794 + gen9_gt_workarounds_init(i915); 732 795 733 796 /* WaEnableGapsTsvCreditFix:cfl */ 734 - I915_WRITE(GEN8_GARBCNTL, 735 - I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE); 797 + wa_write_or(wal, 798 + GEN8_GARBCNTL, 799 + GEN9_GAPS_TSV_CREDIT_DISABLE); 736 800 737 801 /* WaDisableGafsUnitClkGating:cfl */ 738 - I915_WRITE(GEN7_UCGCTL4, 739 - I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 802 + wa_write_or(wal, 803 + GEN7_UCGCTL4, 804 + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 740 805 741 806 /* WaInPlaceDecompressionHang:cfl */ 742 - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, 743 - I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 744 - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 807 + wa_write_or(wal, 808 + GEN9_GAMT_ECO_REG_RW_IA, 809 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 745 810 } 746 811 747 812 static void wa_init_mcr(struct drm_i915_private *dev_priv) 748 813 { 749 814 const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); 750 - u32 mcr; 815 + struct i915_wa_list *wal = &dev_priv->gt_wa_list; 751 816 u32 mcr_slice_subslice_mask; 752 817 753 818 /* ··· 863 770 WARN_ON((enabled_mask & disabled_mask) != enabled_mask); 864 771 } 865 772 866 - mcr = I915_READ(GEN8_MCR_SELECTOR); 867 - 868 773 if (INTEL_GEN(dev_priv) >= 11) 869 774 mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | 870 775 GEN11_MCR_SUBSLICE_MASK; ··· 880 789 * occasions, such as INSTDONE, where this value is dependent 881 790 * on s/ss combo, the read should be done with read_subslice_reg. 
882 791 */ 883 - mcr &= ~mcr_slice_subslice_mask; 884 - mcr |= intel_calculate_mcr_s_ss_select(dev_priv); 885 - I915_WRITE(GEN8_MCR_SELECTOR, mcr); 792 + wa_write_masked_or(wal, 793 + GEN8_MCR_SELECTOR, 794 + mcr_slice_subslice_mask, 795 + intel_calculate_mcr_s_ss_select(dev_priv)); 886 796 } 887 797 888 - static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv) 798 + static void cnl_gt_workarounds_init(struct drm_i915_private *i915) 889 799 { 890 - wa_init_mcr(dev_priv); 800 + struct i915_wa_list *wal = &i915->gt_wa_list; 801 + 802 + wa_init_mcr(i915); 891 803 892 804 /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ 893 - if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) 894 - I915_WRITE(GAMT_CHKN_BIT_REG, 895 - I915_READ(GAMT_CHKN_BIT_REG) | 896 - GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); 805 + if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0)) 806 + wa_write_or(wal, 807 + GAMT_CHKN_BIT_REG, 808 + GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT); 897 809 898 810 /* WaInPlaceDecompressionHang:cnl */ 899 - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, 900 - I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 901 - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 811 + wa_write_or(wal, 812 + GEN9_GAMT_ECO_REG_RW_IA, 813 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 902 814 903 815 /* WaEnablePreemptionGranularityControlByUMD:cnl */ 904 - I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, 905 - _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); 816 + wa_masked_en(wal, 817 + GEN7_FF_SLICE_CS_CHICKEN1, 818 + GEN9_FFSC_PERCTX_PREEMPT_CTRL); 906 819 } 907 820 908 - static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv) 821 + static void icl_gt_workarounds_init(struct drm_i915_private *i915) 909 822 { 910 - wa_init_mcr(dev_priv); 823 + struct i915_wa_list *wal = &i915->gt_wa_list; 824 + 825 + wa_init_mcr(i915); 911 826 912 827 /* This is not an Wa. 
Enable for better image quality */ 913 - I915_WRITE(_3D_CHICKEN3, 914 - _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); 828 + wa_masked_en(wal, 829 + _3D_CHICKEN3, 830 + _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE); 915 831 916 832 /* WaInPlaceDecompressionHang:icl */ 917 - I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | 918 - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 833 + wa_write_or(wal, 834 + GEN9_GAMT_ECO_REG_RW_IA, 835 + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); 919 836 920 837 /* WaPipelineFlushCoherentLines:icl */ 921 - I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 922 - GEN8_LQSC_FLUSH_COHERENT_LINES); 838 + wa_write_or(wal, 839 + GEN8_L3SQCREG4, 840 + GEN8_LQSC_FLUSH_COHERENT_LINES); 923 841 924 842 /* Wa_1405543622:icl 925 843 * Formerly known as WaGAPZPriorityScheme 926 844 */ 927 - I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) | 928 - GEN11_ARBITRATION_PRIO_ORDER_MASK); 845 + wa_write_or(wal, 846 + GEN8_GARBCNTL, 847 + GEN11_ARBITRATION_PRIO_ORDER_MASK); 929 848 930 849 /* Wa_1604223664:icl 931 850 * Formerly known as WaL3BankAddressHashing 932 851 */ 933 - I915_WRITE(GEN8_GARBCNTL, 934 - (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) | 935 - GEN11_HASH_CTRL_EXCL_BIT0); 936 - I915_WRITE(GEN11_GLBLINVL, 937 - (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) | 938 - GEN11_BANK_HASH_ADDR_EXCL_BIT0); 852 + wa_write_masked_or(wal, 853 + GEN8_GARBCNTL, 854 + GEN11_HASH_CTRL_EXCL_MASK, 855 + GEN11_HASH_CTRL_EXCL_BIT0); 856 + wa_write_masked_or(wal, 857 + GEN11_GLBLINVL, 858 + GEN11_BANK_HASH_ADDR_EXCL_MASK, 859 + GEN11_BANK_HASH_ADDR_EXCL_BIT0); 939 860 940 861 /* WaModifyGamTlbPartitioning:icl */ 941 - I915_WRITE(GEN11_GACB_PERF_CTRL, 942 - (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) | 943 - GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); 862 + wa_write_masked_or(wal, 863 + GEN11_GACB_PERF_CTRL, 864 + GEN11_HASH_CTRL_MASK, 865 + GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); 944 866 
945 867 /* Wa_1405733216:icl 946 868 * Formerly known as WaDisableCleanEvicts 947 869 */ 948 - I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 949 - GEN11_LQSC_CLEAN_EVICT_DISABLE); 870 + wa_write_or(wal, 871 + GEN8_L3SQCREG4, 872 + GEN11_LQSC_CLEAN_EVICT_DISABLE); 950 873 951 874 /* Wa_1405766107:icl 952 875 * Formerly known as WaCL2SFHalfMaxAlloc 953 876 */ 954 - I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) | 955 - GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | 956 - GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); 877 + wa_write_or(wal, 878 + GEN11_LSN_UNSLCVC, 879 + GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | 880 + GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); 957 881 958 882 /* Wa_220166154:icl 959 883 * Formerly known as WaDisCtxReload 960 884 */ 961 - I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) | 962 - GAMW_ECO_DEV_CTX_RELOAD_DISABLE); 885 + wa_write_or(wal, 886 + GEN8_GAMW_ECO_DEV_RW_IA, 887 + GAMW_ECO_DEV_CTX_RELOAD_DISABLE); 963 888 964 889 /* Wa_1405779004:icl (pre-prod) */ 965 - if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) 966 - I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, 967 - I915_READ(SLICE_UNIT_LEVEL_CLKGATE) | 968 - MSCUNIT_CLKGATE_DIS); 890 + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) 891 + wa_write_or(wal, 892 + SLICE_UNIT_LEVEL_CLKGATE, 893 + MSCUNIT_CLKGATE_DIS); 969 894 970 895 /* Wa_1406680159:icl */ 971 - I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, 972 - I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) | 973 - GWUNIT_CLKGATE_DIS); 896 + wa_write_or(wal, 897 + SUBSLICE_UNIT_LEVEL_CLKGATE, 898 + GWUNIT_CLKGATE_DIS); 974 899 975 900 /* Wa_1604302699:icl */ 976 - I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER, 977 - I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) | 978 - GEN11_I2M_WRITE_DISABLE); 901 + wa_write_or(wal, 902 + GEN10_L3_CHICKEN_MODE_REGISTER, 903 + GEN11_I2M_WRITE_DISABLE); 979 904 980 905 /* Wa_1406838659:icl (pre-prod) */ 981 - if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0)) 982 - 
I915_WRITE(INF_UNIT_LEVEL_CLKGATE, 983 - I915_READ(INF_UNIT_LEVEL_CLKGATE) | 984 - CGPSF_CLKGATE_DIS); 906 + if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) 907 + wa_write_or(wal, 908 + INF_UNIT_LEVEL_CLKGATE, 909 + CGPSF_CLKGATE_DIS); 985 910 986 911 /* WaForwardProgressSoftReset:icl */ 987 - I915_WRITE(GEN10_SCRATCH_LNCF2, 988 - I915_READ(GEN10_SCRATCH_LNCF2) | 989 - PMFLUSHDONE_LNICRSDROP | 990 - PMFLUSH_GAPL3UNBLOCK | 991 - PMFLUSHDONE_LNEBLK); 912 + wa_write_or(wal, 913 + GEN10_SCRATCH_LNCF2, 914 + PMFLUSHDONE_LNICRSDROP | 915 + PMFLUSH_GAPL3UNBLOCK | 916 + PMFLUSHDONE_LNEBLK); 992 917 993 918 /* Wa_1406463099:icl 994 919 * Formerly known as WaGamTlbPendError 995 920 */ 996 - I915_WRITE(GAMT_CHKN_BIT_REG, 997 - I915_READ(GAMT_CHKN_BIT_REG) | 998 - GAMT_CHKN_DISABLE_L3_COH_PIPE); 921 + wa_write_or(wal, 922 + GAMT_CHKN_BIT_REG, 923 + GAMT_CHKN_DISABLE_L3_COH_PIPE); 999 924 } 1000 925 1001 - void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv) 926 + void intel_gt_init_workarounds(struct drm_i915_private *i915) 1002 927 { 1003 - if (INTEL_GEN(dev_priv) < 8) 928 + struct i915_wa_list *wal = &i915->gt_wa_list; 929 + 930 + wa_init_start(wal, "GT"); 931 + 932 + if (INTEL_GEN(i915) < 8) 1004 933 return; 1005 - else if (IS_BROADWELL(dev_priv)) 1006 - bdw_gt_workarounds_apply(dev_priv); 1007 - else if (IS_CHERRYVIEW(dev_priv)) 1008 - chv_gt_workarounds_apply(dev_priv); 1009 - else if (IS_SKYLAKE(dev_priv)) 1010 - skl_gt_workarounds_apply(dev_priv); 1011 - else if (IS_BROXTON(dev_priv)) 1012 - bxt_gt_workarounds_apply(dev_priv); 1013 - else if (IS_KABYLAKE(dev_priv)) 1014 - kbl_gt_workarounds_apply(dev_priv); 1015 - else if (IS_GEMINILAKE(dev_priv)) 1016 - glk_gt_workarounds_apply(dev_priv); 1017 - else if (IS_COFFEELAKE(dev_priv)) 1018 - cfl_gt_workarounds_apply(dev_priv); 1019 - else if (IS_CANNONLAKE(dev_priv)) 1020 - cnl_gt_workarounds_apply(dev_priv); 1021 - else if (IS_ICELAKE(dev_priv)) 1022 - icl_gt_workarounds_apply(dev_priv); 934 + else if 
(IS_BROADWELL(i915)) 935 + return; 936 + else if (IS_CHERRYVIEW(i915)) 937 + return; 938 + else if (IS_SKYLAKE(i915)) 939 + skl_gt_workarounds_init(i915); 940 + else if (IS_BROXTON(i915)) 941 + bxt_gt_workarounds_init(i915); 942 + else if (IS_KABYLAKE(i915)) 943 + kbl_gt_workarounds_init(i915); 944 + else if (IS_GEMINILAKE(i915)) 945 + glk_gt_workarounds_init(i915); 946 + else if (IS_COFFEELAKE(i915)) 947 + cfl_gt_workarounds_init(i915); 948 + else if (IS_CANNONLAKE(i915)) 949 + cnl_gt_workarounds_init(i915); 950 + else if (IS_ICELAKE(i915)) 951 + icl_gt_workarounds_init(i915); 1023 952 else 1024 - MISSING_CASE(INTEL_GEN(dev_priv)); 953 + MISSING_CASE(INTEL_GEN(i915)); 954 + 955 + wa_init_finish(wal); 956 + } 957 + 958 + static enum forcewake_domains 959 + wal_get_fw_for_rmw(struct drm_i915_private *dev_priv, 960 + const struct i915_wa_list *wal) 961 + { 962 + enum forcewake_domains fw = 0; 963 + struct i915_wa *wa; 964 + unsigned int i; 965 + 966 + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) 967 + fw |= intel_uncore_forcewake_for_reg(dev_priv, 968 + wa->reg, 969 + FW_REG_READ | 970 + FW_REG_WRITE); 971 + 972 + return fw; 973 + } 974 + 975 + static void 976 + wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal) 977 + { 978 + enum forcewake_domains fw; 979 + unsigned long flags; 980 + struct i915_wa *wa; 981 + unsigned int i; 982 + 983 + if (!wal->count) 984 + return; 985 + 986 + fw = wal_get_fw_for_rmw(dev_priv, wal); 987 + 988 + spin_lock_irqsave(&dev_priv->uncore.lock, flags); 989 + intel_uncore_forcewake_get__locked(dev_priv, fw); 990 + 991 + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { 992 + u32 val = I915_READ_FW(wa->reg); 993 + 994 + val &= ~wa->mask; 995 + val |= wa->val; 996 + 997 + I915_WRITE_FW(wa->reg, val); 998 + } 999 + 1000 + intel_uncore_forcewake_put__locked(dev_priv, fw); 1001 + spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); 1002 + 1003 + DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", 
wal->count, wal->name); 1004 + } 1005 + 1006 + void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv) 1007 + { 1008 + wa_list_apply(dev_priv, &dev_priv->gt_wa_list); 1025 1009 } 1026 1010 1027 1011 struct whitelist {
+22 -1
drivers/gpu/drm/i915/intel_workarounds.h
··· 7 7 #ifndef _I915_WORKAROUNDS_H_ 8 8 #define _I915_WORKAROUNDS_H_ 9 9 10 + #include <linux/slab.h> 11 + 12 + struct i915_wa { 13 + i915_reg_t reg; 14 + u32 mask; 15 + u32 val; 16 + }; 17 + 18 + struct i915_wa_list { 19 + const char *name; 20 + struct i915_wa *list; 21 + unsigned int count; 22 + }; 23 + 24 + static inline void intel_wa_list_free(struct i915_wa_list *wal) 25 + { 26 + kfree(wal->list); 27 + memset(wal, 0, sizeof(*wal)); 28 + } 29 + 10 30 int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv); 11 31 int intel_ctx_workarounds_emit(struct i915_request *rq); 12 32 13 - void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv); 33 + void intel_gt_init_workarounds(struct drm_i915_private *dev_priv); 34 + void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv); 14 35 15 36 void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine); 16 37