Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Use the gt in HAS_ENGINE

A follow up patch will move the engine mask under the gt structure,
so get ready for that.

v2: switch the remaining gvt case using dev_priv->gt to gvt->gt (Chris)

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Andi Shyti <andi.shyti@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> #v1
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-3-daniele.ceraolospurio@intel.com

Authored by Daniele Ceraolo Spurio; committed by Chris Wilson.
242613af fdeb6d02

+38 -31
+1 -1
drivers/gpu/drm/i915/gt/intel_engine_cs.c
··· 473 473 return -ENODEV; 474 474 475 475 for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { 476 - if (!HAS_ENGINE(i915, i)) 476 + if (!HAS_ENGINE(gt, i)) 477 477 continue; 478 478 479 479 err = intel_engine_setup(gt, i);
+1 -1
drivers/gpu/drm/i915/gt/intel_gt_irq.c
··· 457 457 * RPS interrupts will get enabled/disabled on demand when RPS 458 458 * itself is enabled/disabled. 459 459 */ 460 - if (HAS_ENGINE(gt->i915, VECS0)) { 460 + if (HAS_ENGINE(gt, VECS0)) { 461 461 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 462 462 gt->pm_ier |= PM_VEBOX_USER_INTERRUPT; 463 463 }
+4 -3
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
··· 67 67 68 68 static void __guc_ads_init(struct intel_guc *guc) 69 69 { 70 - struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; 70 + struct intel_gt *gt = guc_to_gt(guc); 71 + struct drm_i915_private *dev_priv = gt->i915; 71 72 struct __guc_ads_blob *blob = guc->ads_blob; 72 73 const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; 73 74 u32 base; ··· 104 103 blob->system_info.rcs_enabled = 1; 105 104 blob->system_info.bcs_enabled = 1; 106 105 107 - blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv); 108 - blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv); 106 + blob->system_info.vdbox_enable_mask = VDBOX_MASK(gt); 107 + blob->system_info.vebox_enable_mask = VEBOX_MASK(gt); 109 108 blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; 110 109 111 110 base = intel_guc_ggtt_offset(guc, guc->ads_vma);
+1 -1
drivers/gpu/drm/i915/gvt/handlers.c
··· 1867 1867 MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ 1868 1868 MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ 1869 1869 MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ 1870 - if (HAS_ENGINE(dev_priv, VCS1)) \ 1870 + if (HAS_ENGINE(gvt->gt, VCS1)) \ 1871 1871 MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ 1872 1872 } while (0) 1873 1873
+1 -1
drivers/gpu/drm/i915/gvt/interrupt.c
··· 540 540 SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); 541 541 SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); 542 542 543 - if (HAS_ENGINE(gvt->gt->i915, VCS1)) { 543 + if (HAS_ENGINE(gvt->gt, VCS1)) { 544 544 SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, 545 545 INTEL_GVT_IRQ_INFO_GT1); 546 546 SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
+1 -1
drivers/gpu/drm/i915/gvt/mmio_context.c
··· 171 171 return; 172 172 173 173 for (ring_id = 0; ring_id < cnt; ring_id++) { 174 - if (!HAS_ENGINE(engine->i915, ring_id)) 174 + if (!HAS_ENGINE(engine->gt, ring_id)) 175 175 continue; 176 176 177 177 offset.reg = regs[ring_id];
+1 -1
drivers/gpu/drm/i915/i915_drv.c
··· 533 533 534 534 intel_device_info_init_mmio(dev_priv); 535 535 536 - intel_uncore_prune_mmio_domains(&dev_priv->uncore); 536 + intel_uncore_prune_engine_fw_domains(&dev_priv->uncore, &dev_priv->gt); 537 537 538 538 intel_uc_init_mmio(&dev_priv->gt.uc); 539 539
+8 -7
drivers/gpu/drm/i915/i915_drv.h
··· 1562 1562 #define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv)) 1563 1563 #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) 1564 1564 1565 - #define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id)) 1565 + #define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id)) 1566 + #define HAS_ENGINE(gt, id) __HAS_ENGINE(INTEL_INFO((gt)->i915)->engine_mask, id) 1566 1567 1567 - #define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \ 1568 + #define ENGINE_INSTANCES_MASK(gt, first, count) ({ \ 1568 1569 unsigned int first__ = (first); \ 1569 1570 unsigned int count__ = (count); \ 1570 - (INTEL_INFO(dev_priv)->engine_mask & \ 1571 + (INTEL_INFO((gt)->i915)->engine_mask & \ 1571 1572 GENMASK(first__ + count__ - 1, first__)) >> first__; \ 1572 1573 }) 1573 - #define VDBOX_MASK(dev_priv) \ 1574 - ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS) 1575 - #define VEBOX_MASK(dev_priv) \ 1576 - ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS) 1574 + #define VDBOX_MASK(gt) \ 1575 + ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS) 1576 + #define VEBOX_MASK(gt) \ 1577 + ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS) 1577 1578 1578 1579 /* 1579 1580 * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
+7 -6
drivers/gpu/drm/i915/intel_device_info.c
··· 1100 1100 { 1101 1101 struct intel_device_info *info = mkwrite_device_info(dev_priv); 1102 1102 struct intel_uncore *uncore = &dev_priv->uncore; 1103 + struct intel_gt *gt = &dev_priv->gt; 1103 1104 unsigned int logical_vdbox = 0; 1104 1105 unsigned int i; 1105 1106 u32 media_fuse; ··· 1117 1116 GEN11_GT_VEBOX_DISABLE_SHIFT; 1118 1117 1119 1118 for (i = 0; i < I915_MAX_VCS; i++) { 1120 - if (!HAS_ENGINE(dev_priv, _VCS(i))) { 1119 + if (!HAS_ENGINE(gt, _VCS(i))) { 1121 1120 vdbox_mask &= ~BIT(i); 1122 1121 continue; 1123 1122 } ··· 1137 1136 RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i); 1138 1137 } 1139 1138 drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n", 1140 - vdbox_mask, VDBOX_MASK(dev_priv)); 1141 - GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv)); 1139 + vdbox_mask, VDBOX_MASK(gt)); 1140 + GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt)); 1142 1141 1143 1142 for (i = 0; i < I915_MAX_VECS; i++) { 1144 - if (!HAS_ENGINE(dev_priv, _VECS(i))) { 1143 + if (!HAS_ENGINE(gt, _VECS(i))) { 1145 1144 vebox_mask &= ~BIT(i); 1146 1145 continue; 1147 1146 } ··· 1152 1151 } 1153 1152 } 1154 1153 drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n", 1155 - vebox_mask, VEBOX_MASK(dev_priv)); 1156 - GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv)); 1154 + vebox_mask, VEBOX_MASK(gt)); 1155 + GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt)); 1157 1156 }
+1 -1
drivers/gpu/drm/i915/intel_pm.c
··· 7114 7114 7115 7115 /* This is not a WA. Enable VD HCP & MFX_ENC powergate */ 7116 7116 for (i = 0; i < I915_MAX_VCS; i++) { 7117 - if (HAS_ENGINE(dev_priv, _VCS(i))) 7117 + if (HAS_ENGINE(&dev_priv->gt, _VCS(i))) 7118 7118 vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) | 7119 7119 VDN_MFX_POWERGATE_ENABLE(i); 7120 7120 }
+9 -7
drivers/gpu/drm/i915/intel_uncore.c
··· 1529 1529 (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__)))) 1530 1530 1531 1531 if (INTEL_GEN(i915) >= 11) { 1532 + /* we'll prune the domains of missing engines later */ 1533 + intel_engine_mask_t emask = INTEL_INFO(i915)->engine_mask; 1532 1534 int i; 1533 1535 1534 1536 uncore->funcs.force_wake_get = fw_domains_get_with_fallback; ··· 1543 1541 FORCEWAKE_ACK_BLITTER_GEN9); 1544 1542 1545 1543 for (i = 0; i < I915_MAX_VCS; i++) { 1546 - if (!HAS_ENGINE(i915, _VCS(i))) 1544 + if (!__HAS_ENGINE(emask, _VCS(i))) 1547 1545 continue; 1548 1546 1549 1547 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, ··· 1551 1549 FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i)); 1552 1550 } 1553 1551 for (i = 0; i < I915_MAX_VECS; i++) { 1554 - if (!HAS_ENGINE(i915, _VECS(i))) 1552 + if (!__HAS_ENGINE(emask, _VECS(i))) 1555 1553 continue; 1556 1554 1557 1555 fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, ··· 1846 1844 * the forcewake domains. Prune them, to make sure they only reference existing 1847 1845 * engines. 
1848 1846 */ 1849 - void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore) 1847 + void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, 1848 + struct intel_gt *gt) 1850 1849 { 1851 - struct drm_i915_private *i915 = uncore->i915; 1852 1850 enum forcewake_domains fw_domains = uncore->fw_domains; 1853 1851 enum forcewake_domain_id domain_id; 1854 1852 int i; 1855 1853 1856 - if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11) 1854 + if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(uncore->i915) < 11) 1857 1855 return; 1858 1856 1859 1857 for (i = 0; i < I915_MAX_VCS; i++) { 1860 1858 domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; 1861 1859 1862 - if (HAS_ENGINE(i915, _VCS(i))) 1860 + if (HAS_ENGINE(gt, _VCS(i))) 1863 1861 continue; 1864 1862 1865 1863 if (fw_domains & BIT(domain_id)) ··· 1869 1867 for (i = 0; i < I915_MAX_VECS; i++) { 1870 1868 domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; 1871 1869 1872 - if (HAS_ENGINE(i915, _VECS(i))) 1870 + if (HAS_ENGINE(gt, _VECS(i))) 1873 1871 continue; 1874 1872 1875 1873 if (fw_domains & BIT(domain_id))
+3 -1
drivers/gpu/drm/i915/intel_uncore.h
··· 35 35 struct drm_i915_private; 36 36 struct intel_runtime_pm; 37 37 struct intel_uncore; 38 + struct intel_gt; 38 39 39 40 struct intel_uncore_mmio_debug { 40 41 spinlock_t lock; /** lock is also taken in irq contexts. */ ··· 187 186 void intel_uncore_init_early(struct intel_uncore *uncore, 188 187 struct drm_i915_private *i915); 189 188 int intel_uncore_init_mmio(struct intel_uncore *uncore); 190 - void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore); 189 + void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, 190 + struct intel_gt *gt); 191 191 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); 192 192 bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore); 193 193 void intel_uncore_fini_mmio(struct intel_uncore *uncore);