Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Double check the active status on the batch pool

We should not rely on obj->active being up to date unless we manually
flush it. Instead, we can verify that the next available batch object is
idle by looking at its last active request (and checking it for
completion).

v2: remove the struct drm_device forward declaration added in the
process of removing its necessity

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-3-git-send-email-chris@chris-wilson.co.uk

+13 -10
+8 -7
drivers/gpu/drm/i915/i915_gem_batch_pool.c
··· 41 41 42 42 /** 43 43 * i915_gem_batch_pool_init() - initialize a batch buffer pool 44 - * @dev: the drm device 44 + * @engine: the associated request submission engine 45 45 * @pool: the batch buffer pool 46 46 */ 47 - void i915_gem_batch_pool_init(struct drm_device *dev, 47 + void i915_gem_batch_pool_init(struct intel_engine_cs *engine, 48 48 struct i915_gem_batch_pool *pool) 49 49 { 50 50 int n; 51 51 52 - pool->dev = dev; 52 + pool->engine = engine; 53 53 54 54 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) 55 55 INIT_LIST_HEAD(&pool->cache_list[n]); ··· 65 65 { 66 66 int n; 67 67 68 - WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex)); 68 + lockdep_assert_held(&pool->engine->i915->drm.struct_mutex); 69 69 70 70 for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { 71 71 struct drm_i915_gem_object *obj, *next; ··· 101 101 struct list_head *list; 102 102 int n; 103 103 104 - WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex)); 104 + lockdep_assert_held(&pool->engine->i915->drm.struct_mutex); 105 105 106 106 /* Compute a power-of-two bucket, but throw everything greater than 107 107 * 16KiB into the same bucket: i.e. the the buckets hold objects of ··· 114 114 115 115 list_for_each_entry_safe(tmp, next, list, batch_pool_link) { 116 116 /* The batches are strictly LRU ordered */ 117 - if (tmp->active) 117 + if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id], 118 + &tmp->base.dev->struct_mutex)) 118 119 break; 119 120 120 121 /* While we're looping, do some clean up */ ··· 134 133 if (obj == NULL) { 135 134 int ret; 136 135 137 - obj = i915_gem_object_create(pool->dev, size); 136 + obj = i915_gem_object_create(&pool->engine->i915->drm, size); 138 137 if (IS_ERR(obj)) 139 138 return obj; 140 139
+4 -2
drivers/gpu/drm/i915/i915_gem_batch_pool.h
··· 27 27 28 28 #include "i915_drv.h" 29 29 30 + struct intel_engine_cs; 31 + 30 32 struct i915_gem_batch_pool { 31 - struct drm_device *dev; 33 + struct intel_engine_cs *engine; 32 34 struct list_head cache_list[4]; 33 35 }; 34 36 35 37 /* i915_gem_batch_pool.c */ 36 - void i915_gem_batch_pool_init(struct drm_device *dev, 38 + void i915_gem_batch_pool_init(struct intel_engine_cs *engine, 37 39 struct i915_gem_batch_pool *pool); 38 40 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool); 39 41 struct drm_i915_gem_object*
+1 -1
drivers/gpu/drm/i915/intel_engine_cs.c
··· 185 185 engine->fence_context = fence_context_alloc(1); 186 186 187 187 intel_engine_init_hangcheck(engine); 188 - i915_gem_batch_pool_init(&engine->i915->drm, &engine->batch_pool); 188 + i915_gem_batch_pool_init(engine, &engine->batch_pool); 189 189 } 190 190 191 191 /**