Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Use to_gt() helper for GGTT accesses

GGTT is currently available both through i915->ggtt and gt->ggtt, and we
eventually want to get rid of the i915->ggtt one.
Use to_gt() for all i915->ggtt accesses to help with the future
refactoring.

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220104223550.56135-1-andi.shyti@linux.intel.com

Authored by Michał Winiarski and committed by Matt Roper.
204129a2 848915c3

+25 -24
+1 -1
drivers/gpu/drm/i915/gvt/dmabuf.c
··· 84 84 kfree(st); 85 85 return ret; 86 86 } 87 - gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + 87 + gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm + 88 88 (fb_info->start >> PAGE_SHIFT); 89 89 for_each_sg(st->sgl, sg, page_num, i) { 90 90 dma_addr_t dma_addr =
+2 -2
drivers/gpu/drm/i915/i915_debugfs.c
··· 391 391 intel_wakeref_t wakeref; 392 392 393 393 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 394 - swizzle_string(dev_priv->ggtt.bit_6_swizzle_x)); 394 + swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_x)); 395 395 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 396 - swizzle_string(dev_priv->ggtt.bit_6_swizzle_y)); 396 + swizzle_string(to_gt(dev_priv)->ggtt->bit_6_swizzle_y)); 397 397 398 398 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 399 399 seq_puts(m, "L-shaped memory detected\n");
+2 -2
drivers/gpu/drm/i915/i915_drv.c
··· 1142 1142 1143 1143 intel_suspend_hw(dev_priv); 1144 1144 1145 - i915_ggtt_suspend(&dev_priv->ggtt); 1145 + i915_ggtt_suspend(to_gt(dev_priv)->ggtt); 1146 1146 1147 1147 i915_save_display(dev_priv); 1148 1148 ··· 1257 1257 if (ret) 1258 1258 drm_err(&dev_priv->drm, "failed to re-enable GGTT\n"); 1259 1259 1260 - i915_ggtt_resume(&dev_priv->ggtt); 1260 + i915_ggtt_resume(to_gt(dev_priv)->ggtt); 1261 1261 1262 1262 intel_dmc_ucode_resume(dev_priv); 1263 1263
+1 -1
drivers/gpu/drm/i915/i915_drv.h
··· 1958 1958 { 1959 1959 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1960 1960 1961 - return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 1961 + return to_gt(i915)->ggtt->bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 1962 1962 i915_gem_object_is_tiled(obj); 1963 1963 } 1964 1964
+12 -11
drivers/gpu/drm/i915/i915_gem.c
··· 88 88 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 89 89 struct drm_file *file) 90 90 { 91 - struct i915_ggtt *ggtt = &to_i915(dev)->ggtt; 91 + struct drm_i915_private *i915 = to_i915(dev); 92 + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 92 93 struct drm_i915_gem_get_aperture *args = data; 93 94 struct i915_vma *vma; 94 95 u64 pinned; ··· 290 289 bool write) 291 290 { 292 291 struct drm_i915_private *i915 = to_i915(obj->base.dev); 293 - struct i915_ggtt *ggtt = &i915->ggtt; 292 + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 294 293 struct i915_vma *vma; 295 294 struct i915_gem_ww_ctx ww; 296 295 int ret; ··· 351 350 struct i915_vma *vma) 352 351 { 353 352 struct drm_i915_private *i915 = to_i915(obj->base.dev); 354 - struct i915_ggtt *ggtt = &i915->ggtt; 353 + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 355 354 356 355 i915_gem_object_unpin_pages(obj); 357 356 if (drm_mm_node_allocated(node)) { ··· 367 366 const struct drm_i915_gem_pread *args) 368 367 { 369 368 struct drm_i915_private *i915 = to_i915(obj->base.dev); 370 - struct i915_ggtt *ggtt = &i915->ggtt; 369 + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 371 370 intel_wakeref_t wakeref; 372 371 struct drm_mm_node node; 373 372 void __user *user_data; ··· 523 522 const struct drm_i915_gem_pwrite *args) 524 523 { 525 524 struct drm_i915_private *i915 = to_i915(obj->base.dev); 526 - struct i915_ggtt *ggtt = &i915->ggtt; 525 + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 527 526 struct intel_runtime_pm *rpm = &i915->runtime_pm; 528 527 intel_wakeref_t wakeref; 529 528 struct drm_mm_node node; ··· 824 823 */ 825 824 826 825 list_for_each_entry_safe(obj, on, 827 - &i915->ggtt.userfault_list, userfault_link) 826 + &to_gt(i915)->ggtt->userfault_list, userfault_link) 828 827 __i915_gem_object_release_mmap_gtt(obj); 829 828 830 829 /* ··· 832 831 * in use by hardware (i.e. they are pinned), we should not be powering 833 832 * down! All other fences will be reacquired by the user upon waking. 
834 833 */ 835 - for (i = 0; i < i915->ggtt.num_fences; i++) { 836 - struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i]; 834 + for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) { 835 + struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i]; 837 836 838 837 /* 839 838 * Ideally we want to assert that the fence register is not ··· 874 873 u64 size, u64 alignment, u64 flags) 875 874 { 876 875 struct drm_i915_private *i915 = to_i915(obj->base.dev); 877 - struct i915_ggtt *ggtt = &i915->ggtt; 876 + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 878 877 struct i915_vma *vma; 879 878 int ret; 880 879 ··· 1124 1123 1125 1124 /* Minimal basic recovery for KMS */ 1126 1125 ret = i915_ggtt_enable_hw(dev_priv); 1127 - i915_ggtt_resume(&dev_priv->ggtt); 1126 + i915_ggtt_resume(to_gt(dev_priv)->ggtt); 1128 1127 intel_init_clock_gating(dev_priv); 1129 1128 } 1130 1129 ··· 1147 1146 1148 1147 void i915_gem_driver_remove(struct drm_i915_private *dev_priv) 1149 1148 { 1150 - intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref); 1149 + intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref); 1151 1150 1152 1151 i915_gem_suspend_late(dev_priv); 1153 1152 intel_gt_driver_remove(to_gt(dev_priv));
+3 -3
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 56 56 struct sg_table *pages) 57 57 { 58 58 struct drm_i915_private *i915 = to_i915(obj->base.dev); 59 - struct i915_ggtt *ggtt = &i915->ggtt; 59 + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 60 60 61 61 /* XXX This does not prevent more requests being submitted! */ 62 62 if (unlikely(ggtt->do_idle_maps)) ··· 103 103 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); 104 104 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT)); 105 105 GEM_BUG_ON(range_overflows(offset, size, vm->total)); 106 - GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm); 106 + GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm); 107 107 GEM_BUG_ON(drm_mm_node_allocated(node)); 108 108 109 109 node->size = size; ··· 201 201 GEM_BUG_ON(start >= end); 202 202 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); 203 203 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); 204 - GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm); 204 + GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm); 205 205 GEM_BUG_ON(drm_mm_node_allocated(node)); 206 206 207 207 if (unlikely(range_overflows(start, size, end)))
+1 -1
drivers/gpu/drm/i915/i915_getparam.c
··· 31 31 value = pdev->revision; 32 32 break; 33 33 case I915_PARAM_NUM_FENCES_AVAIL: 34 - value = i915->ggtt.num_fences; 34 + value = to_gt(i915)->ggtt->num_fences; 35 35 break; 36 36 case I915_PARAM_HAS_OVERLAY: 37 37 value = !!i915->overlay;
+3 -3
drivers/gpu/drm/i915/i915_perf.c
··· 1630 1630 struct drm_i915_gem_object *bo; 1631 1631 struct i915_vma *vma; 1632 1632 const u64 delay_ticks = 0xffffffffffffffff - 1633 - intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt, 1634 - atomic64_read(&stream->perf->noa_programming_delay)); 1633 + intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915), 1634 + atomic64_read(&stream->perf->noa_programming_delay)); 1635 1635 const u32 base = stream->engine->mmio_base; 1636 1636 #define CS_GPR(x) GEN8_RING_CS_GPR(base, x) 1637 1637 u32 *batch, *ts0, *cs, *jump; ··· 3542 3542 3543 3543 static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent) 3544 3544 { 3545 - return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt, 3545 + return intel_gt_clock_interval_to_ns(to_gt(perf->i915), 3546 3546 2ULL << exponent); 3547 3547 } 3548 3548