
drm/i915: vma is always backed by an object.

vma->obj and vma->resv are now never NULL, and some checks can be removed.
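
As background, a minimal, self-contained sketch of the invariant this relies on is included below; the toy_* types and helpers are purely illustrative and are not the driver's real structures or API. The point is simply that a vma is only ever created from a GEM object, so vma->obj (and the dma_resv it shares with that object) cannot be NULL by the time the call sites touched here run, which is what makes the removed checks dead code.

/*
 * Toy model of the invariant (hypothetical names, not i915 code):
 * creation requires a backing object, so callers never see a NULL
 * vma->obj or vma->resv.
 */
#include <assert.h>
#include <stdlib.h>

struct toy_resv { int locked; };
struct toy_object { struct toy_resv resv; };

struct toy_vma {
	struct toy_object *obj;	/* never NULL after creation */
	struct toy_resv *resv;	/* shared with the backing object */
};

static struct toy_vma *toy_vma_create(struct toy_object *obj)
{
	struct toy_vma *vma;

	assert(obj);		/* a vma is always backed by an object */

	vma = calloc(1, sizeof(*vma));
	if (!vma)
		return NULL;

	vma->obj = obj;
	vma->resv = &obj->resv;
	return vma;
}

int main(void)
{
	struct toy_object obj = { .resv = { .locked = 0 } };
	struct toy_vma *vma = toy_vma_create(&obj);

	assert(vma);
	/*
	 * Callers may lock vma->obj / vma->resv unconditionally,
	 * mirroring the "if (!err && ...->vma->obj)" -> "if (!err)"
	 * simplifications in this patch.
	 */
	assert(vma->obj && vma->resv);

	free(vma);
	return 0;
}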

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211117142024.1043017-4-matthew.auld@intel.com

Authored by Maarten Lankhorst, committed by Matthew Auld
e6e1a304 d03a29e0

+22 -33
+1 -1
drivers/gpu/drm/i915/gt/intel_context.c
@@ -219,7 +219,7 @@
 	 */

 	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
-	if (!err && ce->ring->vma->obj)
+	if (!err)
 		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
 	if (!err && ce->state)
 		err = i915_gem_object_lock(ce->state->obj, ww);
+1 -1
drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1357,7 +1357,7 @@
 	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
 	if (!err && gen7_wa_vma)
 		err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
-	if (!err && engine->legacy.ring->vma->obj)
+	if (!err)
 		err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
 	if (!err)
 		err = intel_timeline_pin(timeline, &ww);
+20 -28
drivers/gpu/drm/i915/i915_vma.c
@@ -40,12 +40,12 @@

 static struct kmem_cache *slab_vmas;

-struct i915_vma *i915_vma_alloc(void)
+static struct i915_vma *i915_vma_alloc(void)
 {
 	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
 }

-void i915_vma_free(struct i915_vma *vma)
+static void i915_vma_free(struct i915_vma *vma)
 {
 	return kmem_cache_free(slab_vmas, vma);
 }
@@ -426,10 +426,8 @@

 		work->base.dma.error = 0; /* enable the queue_work() */

-		if (vma->obj) {
-			__i915_gem_object_pin_pages(vma->obj);
-			work->pinned = i915_gem_object_get(vma->obj);
-		}
+		__i915_gem_object_pin_pages(vma->obj);
+		work->pinned = i915_gem_object_get(vma->obj);
 	} else {
 		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
 	}
@@ -668,7 +670,7 @@
 	}

 	color = 0;
-	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
+	if (i915_vm_has_cache_coloring(vma->vm))
 		color = vma->obj->cache_level;

 	if (flags & PIN_OFFSET_FIXED) {
@@ -793,17 +795,14 @@
 static int vma_get_pages(struct i915_vma *vma)
 {
 	int err = 0;
-	bool pinned_pages = false;
+	bool pinned_pages = true;

 	if (atomic_add_unless(&vma->pages_count, 1, 0))
 		return 0;

-	if (vma->obj) {
-		err = i915_gem_object_pin_pages(vma->obj);
-		if (err)
-			return err;
-		pinned_pages = true;
-	}
+	err = i915_gem_object_pin_pages(vma->obj);
+	if (err)
+		return err;

 	/* Allocations ahoy! */
 	if (mutex_lock_interruptible(&vma->pages_mutex)) {
@@ -833,8 +838,8 @@
 	if (atomic_sub_return(count, &vma->pages_count) == 0) {
 		vma->ops->clear_pages(vma);
 		GEM_BUG_ON(vma->pages);
-		if (vma->obj)
-			i915_gem_object_unpin_pages(vma->obj);
+
+		i915_gem_object_unpin_pages(vma->obj);
 	}
 	mutex_unlock(&vma->pages_mutex);
 }
@@ -870,7 +875,7 @@
 	int err;

 #ifdef CONFIG_PROVE_LOCKING
-	if (debug_locks && !WARN_ON(!ww) && vma->resv)
+	if (debug_locks && !WARN_ON(!ww))
 		assert_vma_held(vma);
 #endif

@@ -978,7 +983,7 @@

 	GEM_BUG_ON(!vma->pages);
 	err = i915_vma_bind(vma,
-			    vma->obj ? vma->obj->cache_level : 0,
+			    vma->obj->cache_level,
 			    flags, work);
 	if (err)
 		goto err_remove;
@@ -1032,7 +1037,7 @@
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

 #ifdef CONFIG_LOCKDEP
-	WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv));
+	WARN_ON(!ww && dma_resv_held(vma->resv));
 #endif

 	do {
@@ -1111,6 +1116,7 @@
 void i915_vma_release(struct kref *ref)
 {
 	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+	struct drm_i915_gem_object *obj = vma->obj;

 	if (drm_mm_node_allocated(&vma->node)) {
 		mutex_lock(&vma->vm->mutex);
@@ -1122,15 +1126,11 @@
 	}
 	GEM_BUG_ON(i915_vma_is_active(vma));

-	if (vma->obj) {
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		spin_lock(&obj->vma.lock);
-		list_del(&vma->obj_link);
-		if (!RB_EMPTY_NODE(&vma->obj_node))
-			rb_erase(&vma->obj_node, &obj->vma.tree);
-		spin_unlock(&obj->vma.lock);
-	}
+	spin_lock(&obj->vma.lock);
+	list_del(&vma->obj_link);
+	if (!RB_EMPTY_NODE(&vma->obj_node))
+		rb_erase(&vma->obj_node, &obj->vma.tree);
+	spin_unlock(&obj->vma.lock);

 	__i915_vma_remove_closed(vma);
 	i915_vm_put(vma->vm);
-3
drivers/gpu/drm/i915/i915_vma.h
@@ -418,9 +418,6 @@
 	list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
 		for_each_until(!i915_vma_is_ggtt(V))

-struct i915_vma *i915_vma_alloc(void);
-void i915_vma_free(struct i915_vma *vma);
-
 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
 void i915_vma_make_shrinkable(struct i915_vma *vma);
 void i915_vma_make_purgeable(struct i915_vma *vma);