Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915/gem: Use to_gt() helper for GGTT accesses

GGTT is currently available both through i915->ggtt and gt->ggtt, and we
eventually want to get rid of the i915->ggtt one.
Use to_gt() for all i915->ggtt accesses to help with the future
refactoring.

Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Reviewed-by: Sujaritha Sundaresan <sujaritha.sundaresan@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211219212500.61432-4-andi.shyti@linux.intel.com

Authored by Michał Winiarski and committed by Matt Roper
5c24c9d2 204129a2

+42 -37
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_context.h
··· 174 174 175 175 vm = ctx->vm; 176 176 if (!vm) 177 - vm = &ctx->i915->ggtt.vm; 177 + vm = &to_gt(ctx->i915)->ggtt->vm; 178 178 vm = i915_vm_get(vm); 179 179 180 180 return vm;
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 1095 1095 { 1096 1096 struct drm_i915_private *i915 = 1097 1097 container_of(cache, struct i915_execbuffer, reloc_cache)->i915; 1098 - return &i915->ggtt; 1098 + return to_gt(i915)->ggtt; 1099 1099 } 1100 1100 1101 1101 static void reloc_cache_unmap(struct reloc_cache *cache)
+10 -9
drivers/gpu/drm/i915/gem/i915_gem_mman.c
··· 294 294 struct drm_device *dev = obj->base.dev; 295 295 struct drm_i915_private *i915 = to_i915(dev); 296 296 struct intel_runtime_pm *rpm = &i915->runtime_pm; 297 - struct i915_ggtt *ggtt = &i915->ggtt; 297 + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 298 298 bool write = area->vm_flags & VM_WRITE; 299 299 struct i915_gem_ww_ctx ww; 300 300 intel_wakeref_t wakeref; ··· 387 387 assert_rpm_wakelock_held(rpm); 388 388 389 389 /* Mark as being mmapped into userspace for later revocation */ 390 - mutex_lock(&i915->ggtt.vm.mutex); 390 + mutex_lock(&to_gt(i915)->ggtt->vm.mutex); 391 391 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) 392 - list_add(&obj->userfault_link, &i915->ggtt.userfault_list); 393 - mutex_unlock(&i915->ggtt.vm.mutex); 392 + list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list); 393 + mutex_unlock(&to_gt(i915)->ggtt->vm.mutex); 394 394 395 395 /* Track the mmo associated with the fenced vma */ 396 396 vma->mmo = mmo; 397 397 398 398 if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) 399 - intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 399 + intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 400 400 msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); 401 401 402 402 if (write) { ··· 511 511 * wakeref. 
512 512 */ 513 513 wakeref = intel_runtime_pm_get(&i915->runtime_pm); 514 - mutex_lock(&i915->ggtt.vm.mutex); 514 + mutex_lock(&to_gt(i915)->ggtt->vm.mutex); 515 515 516 516 if (!obj->userfault_count) 517 517 goto out; ··· 529 529 wmb(); 530 530 531 531 out: 532 - mutex_unlock(&i915->ggtt.vm.mutex); 532 + mutex_unlock(&to_gt(i915)->ggtt->vm.mutex); 533 533 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 534 534 } 535 535 ··· 732 732 u32 handle, 733 733 u64 *offset) 734 734 { 735 + struct drm_i915_private *i915 = to_i915(dev); 735 736 enum i915_mmap_type mmap_type; 736 737 737 738 if (HAS_LMEM(to_i915(dev))) 738 739 mmap_type = I915_MMAP_TYPE_FIXED; 739 740 else if (pat_enabled()) 740 741 mmap_type = I915_MMAP_TYPE_WC; 741 - else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt)) 742 + else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) 742 743 return -ENODEV; 743 744 else 744 745 mmap_type = I915_MMAP_TYPE_GTT; ··· 787 786 788 787 switch (args->flags) { 789 788 case I915_MMAP_OFFSET_GTT: 790 - if (!i915_ggtt_has_aperture(&i915->ggtt)) 789 + if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) 791 790 return -ENODEV; 792 791 type = I915_MMAP_TYPE_GTT; 793 792 break;
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_pm.c
··· 23 23 { 24 24 GEM_TRACE("%s\n", dev_name(i915->drm.dev)); 25 25 26 - intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0); 26 + intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0); 27 27 flush_workqueue(i915->wq); 28 28 29 29 /*
+3 -3
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
··· 401 401 I915_SHRINK_VMAPS); 402 402 403 403 /* We also want to clear any cached iomaps as they wrap vmap */ 404 - mutex_lock(&i915->ggtt.vm.mutex); 404 + mutex_lock(&to_gt(i915)->ggtt->vm.mutex); 405 405 list_for_each_entry_safe(vma, next, 406 - &i915->ggtt.vm.bound_list, vm_link) { 406 + &to_gt(i915)->ggtt->vm.bound_list, vm_link) { 407 407 unsigned long count = vma->node.size >> PAGE_SHIFT; 408 408 struct drm_i915_gem_object *obj = vma->obj; 409 409 ··· 418 418 419 419 i915_gem_object_unlock(obj); 420 420 } 421 - mutex_unlock(&i915->ggtt.vm.mutex); 421 + mutex_unlock(&to_gt(i915)->ggtt->vm.mutex); 422 422 423 423 *(unsigned long *)ptr += freed_pages; 424 424 return NOTIFY_DONE;
+5 -3
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
··· 71 71 static int i915_adjust_stolen(struct drm_i915_private *i915, 72 72 struct resource *dsm) 73 73 { 74 - struct i915_ggtt *ggtt = &i915->ggtt; 74 + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 75 75 struct intel_uncore *uncore = ggtt->vm.gt->uncore; 76 76 struct resource *r; 77 77 ··· 582 582 583 583 static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) 584 584 { 585 + struct drm_i915_private *i915 = to_i915(obj->base.dev); 585 586 struct sg_table *pages = 586 587 i915_pages_create_for_stolen(obj->base.dev, 587 588 obj->stolen->start, ··· 590 589 if (IS_ERR(pages)) 591 590 return PTR_ERR(pages); 592 591 593 - dbg_poison(&to_i915(obj->base.dev)->ggtt, 592 + dbg_poison(to_gt(i915)->ggtt, 594 593 sg_dma_address(pages->sgl), 595 594 sg_dma_len(pages->sgl), 596 595 POISON_INUSE); ··· 603 602 static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj, 604 603 struct sg_table *pages) 605 604 { 605 + struct drm_i915_private *i915 = to_i915(obj->base.dev); 606 606 /* Should only be called from i915_gem_object_release_stolen() */ 607 607 608 - dbg_poison(&to_i915(obj->base.dev)->ggtt, 608 + dbg_poison(to_gt(i915)->ggtt, 609 609 sg_dma_address(pages->sgl), 610 610 sg_dma_len(pages->sgl), 611 611 POISON_FREE);
+8 -7
drivers/gpu/drm/i915/gem/i915_gem_tiling.c
··· 181 181 i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, 182 182 int tiling_mode, unsigned int stride) 183 183 { 184 - struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; 184 + struct drm_i915_private *i915 = to_i915(obj->base.dev); 185 + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 185 186 struct i915_vma *vma, *vn; 186 187 LIST_HEAD(unbind); 187 188 int ret = 0; ··· 337 336 struct drm_i915_gem_object *obj; 338 337 int err; 339 338 340 - if (!dev_priv->ggtt.num_fences) 339 + if (!to_gt(dev_priv)->ggtt->num_fences) 341 340 return -EOPNOTSUPP; 342 341 343 342 obj = i915_gem_object_lookup(file, args->handle); ··· 363 362 args->stride = 0; 364 363 } else { 365 364 if (args->tiling_mode == I915_TILING_X) 366 - args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x; 365 + args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x; 367 366 else 368 - args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y; 367 + args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y; 369 368 370 369 /* Hide bit 17 swizzling from the user. This prevents old Mesa 371 370 * from aborting the application on sw fallbacks to bit 17, ··· 420 419 struct drm_i915_gem_object *obj; 421 420 int err = -ENOENT; 422 421 423 - if (!dev_priv->ggtt.num_fences) 422 + if (!to_gt(dev_priv)->ggtt->num_fences) 424 423 return -EOPNOTSUPP; 425 424 426 425 rcu_read_lock(); ··· 436 435 437 436 switch (args->tiling_mode) { 438 437 case I915_TILING_X: 439 - args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x; 438 + args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x; 440 439 break; 441 440 case I915_TILING_Y: 442 - args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y; 441 + args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y; 443 442 break; 444 443 default: 445 444 case I915_TILING_NONE:
+1 -1
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
··· 543 543 544 544 static bool bad_swizzling(struct drm_i915_private *i915) 545 545 { 546 - struct i915_ggtt *ggtt = &i915->ggtt; 546 + struct i915_ggtt *ggtt = to_gt(i915)->ggtt; 547 547 548 548 if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) 549 549 return true;
+1 -1
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
··· 1374 1374 goto out_file; 1375 1375 } 1376 1376 1377 - vm = ctx->vm ?: &i915->ggtt.alias->vm; 1377 + vm = ctx->vm ?: &to_gt(i915)->ggtt->alias->vm; 1378 1378 if (!vm || !vm->has_read_only) { 1379 1379 err = 0; 1380 1380 goto out_file;
+10 -9
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
··· 307 307 int tiling; 308 308 int err; 309 309 310 - if (!i915_ggtt_has_aperture(&i915->ggtt)) 310 + if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) 311 311 return 0; 312 312 313 313 /* We want to check the page mapping and fencing of a large object ··· 320 320 321 321 obj = huge_gem_object(i915, 322 322 nreal << PAGE_SHIFT, 323 - (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT); 323 + (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT); 324 324 if (IS_ERR(obj)) 325 325 return PTR_ERR(obj); 326 326 ··· 366 366 tile.tiling = tiling; 367 367 switch (tiling) { 368 368 case I915_TILING_X: 369 - tile.swizzle = i915->ggtt.bit_6_swizzle_x; 369 + tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x; 370 370 break; 371 371 case I915_TILING_Y: 372 - tile.swizzle = i915->ggtt.bit_6_swizzle_y; 372 + tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y; 373 373 break; 374 374 } 375 375 ··· 440 440 IGT_TIMEOUT(end); 441 441 int err; 442 442 443 - if (!i915_ggtt_has_aperture(&i915->ggtt)) 443 + if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) 444 444 return 0; 445 445 446 446 /* ··· 457 457 458 458 obj = huge_gem_object(i915, 459 459 nreal << PAGE_SHIFT, 460 - (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT); 460 + (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT); 461 461 if (IS_ERR(obj)) 462 462 return PTR_ERR(obj); 463 463 ··· 486 486 break; 487 487 488 488 case I915_TILING_X: 489 - tile.swizzle = i915->ggtt.bit_6_swizzle_x; 489 + tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x; 490 490 break; 491 491 case I915_TILING_Y: 492 - tile.swizzle = i915->ggtt.bit_6_swizzle_y; 492 + tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y; 493 493 break; 494 494 } 495 495 ··· 856 856 857 857 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type) 858 858 { 859 + struct drm_i915_private *i915 = to_i915(obj->base.dev); 859 860 bool no_map; 860 861 861 862 if 
(obj->ops->mmap_offset) ··· 865 864 return false; 866 865 867 866 if (type == I915_MMAP_TYPE_GTT && 868 - !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt)) 867 + !i915_ggtt_has_aperture(to_gt(i915)->ggtt)) 869 868 return false; 870 869 871 870 i915_gem_object_lock(obj, NULL);
+1 -1
drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
··· 43 43 44 44 obj = huge_gem_object(i915, 45 45 nreal * PAGE_SIZE, 46 - i915->ggtt.vm.total + PAGE_SIZE); 46 + to_gt(i915)->ggtt->vm.total + PAGE_SIZE); 47 47 if (IS_ERR(obj)) 48 48 return PTR_ERR(obj); 49 49