Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915/pool: constrain pool objects by mapping type

In a few places we always end up mapping the pool object with the FORCE
constraint (to prevent hitting -EBUSY) which will destroy the cached
mapping if it has a different type. As a simple first step, make the
mapping type part of the pool interface, where the behaviour is to only
give out pool objects which match the requested mapping type.

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20210119133106.66294-4-matthew.auld@intel.com

Authored by Matthew Auld and committed by Chris Wilson.
8f47c8c3 e2f4367a

+25 -17
+7 -6
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 1276 1276 int err; 1277 1277 1278 1278 if (!pool) { 1279 - pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE); 1279 + pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE, 1280 + cache->has_llc ? 1281 + I915_MAP_WB : 1282 + I915_MAP_WC); 1280 1283 if (IS_ERR(pool)) 1281 1284 return PTR_ERR(pool); 1282 1285 } ··· 1289 1286 if (err) 1290 1287 goto err_pool; 1291 1288 1292 - cmd = i915_gem_object_pin_map(pool->obj, 1293 - cache->has_llc ? 1294 - I915_MAP_FORCE_WB : 1295 - I915_MAP_FORCE_WC); 1289 + cmd = i915_gem_object_pin_map(pool->obj, pool->type); 1296 1290 if (IS_ERR(cmd)) { 1297 1291 err = PTR_ERR(cmd); 1298 1292 goto err_pool; ··· 2458 2458 return -EINVAL; 2459 2459 2460 2460 if (!pool) { 2461 - pool = intel_gt_get_buffer_pool(eb->engine->gt, len); 2461 + pool = intel_gt_get_buffer_pool(eb->engine->gt, len, 2462 + I915_MAP_WB); 2462 2463 if (IS_ERR(pool)) 2463 2464 return PTR_ERR(pool); 2464 2465 eb->batch_pool = pool;
+4 -4
drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
··· 35 35 count = div_u64(round_up(vma->size, block_size), block_size); 36 36 size = (1 + 8 * count) * sizeof(u32); 37 37 size = round_up(size, PAGE_SIZE); 38 - pool = intel_gt_get_buffer_pool(ce->engine->gt, size); 38 + pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC); 39 39 if (IS_ERR(pool)) { 40 40 err = PTR_ERR(pool); 41 41 goto out_pm; ··· 55 55 if (unlikely(err)) 56 56 goto out_put; 57 57 58 - cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC); 58 + cmd = i915_gem_object_pin_map(pool->obj, pool->type); 59 59 if (IS_ERR(cmd)) { 60 60 err = PTR_ERR(cmd); 61 61 goto out_unpin; ··· 257 257 count = div_u64(round_up(dst->size, block_size), block_size); 258 258 size = (1 + 11 * count) * sizeof(u32); 259 259 size = round_up(size, PAGE_SIZE); 260 - pool = intel_gt_get_buffer_pool(ce->engine->gt, size); 260 + pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC); 261 261 if (IS_ERR(pool)) { 262 262 err = PTR_ERR(pool); 263 263 goto out_pm; ··· 277 277 if (unlikely(err)) 278 278 goto out_put; 279 279 280 - cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC); 280 + cmd = i915_gem_object_pin_map(pool->obj, pool->type); 281 281 if (IS_ERR(cmd)) { 282 282 err = PTR_ERR(cmd); 283 283 goto out_unpin;
+9 -3
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
··· 145 145 } 146 146 147 147 static struct intel_gt_buffer_pool_node * 148 - node_create(struct intel_gt_buffer_pool *pool, size_t sz) 148 + node_create(struct intel_gt_buffer_pool *pool, size_t sz, 149 + enum i915_map_type type) 149 150 { 150 151 struct intel_gt *gt = to_gt(pool); 151 152 struct intel_gt_buffer_pool_node *node; ··· 170 169 171 170 i915_gem_object_set_readonly(obj); 172 171 172 + node->type = type; 173 173 node->obj = obj; 174 174 return node; 175 175 } 176 176 177 177 struct intel_gt_buffer_pool_node * 178 - intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size) 178 + intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size, 179 + enum i915_map_type type) 179 180 { 180 181 struct intel_gt_buffer_pool *pool = &gt->buffer_pool; 181 182 struct intel_gt_buffer_pool_node *node; ··· 194 191 if (node->obj->base.size < size) 195 192 continue; 196 193 194 + if (node->type != type) 195 + continue; 196 + 197 197 age = READ_ONCE(node->age); 198 198 if (!age) 199 199 continue; ··· 211 205 rcu_read_unlock(); 212 206 213 207 if (&node->link == list) { 214 - node = node_create(pool, size); 208 + node = node_create(pool, size, type); 215 209 if (IS_ERR(node)) 216 210 return node; 217 211 }
+2 -1
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
··· 15 15 struct i915_request; 16 16 17 17 struct intel_gt_buffer_pool_node * 18 - intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size); 18 + intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size, 19 + enum i915_map_type type); 19 20 20 21 static inline int 21 22 intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
+2 -2
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
··· 11 11 #include <linux/spinlock.h> 12 12 #include <linux/workqueue.h> 13 13 14 + #include "gem/i915_gem_object_types.h" 14 15 #include "i915_active_types.h" 15 - 16 - struct drm_i915_gem_object; 17 16 18 17 struct intel_gt_buffer_pool { 19 18 spinlock_t lock; ··· 30 31 struct rcu_head rcu; 31 32 }; 32 33 unsigned long age; 34 + enum i915_map_type type; 33 35 }; 34 36 35 37 #endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
+1 -1
drivers/gpu/drm/i915/i915_cmd_parser.c
··· 1143 1143 void *dst, *src; 1144 1144 int ret; 1145 1145 1146 - dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB); 1146 + dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB); 1147 1147 if (IS_ERR(dst)) 1148 1148 return dst; 1149 1149