Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/i915/gem: Zap the i915_gem_object_blt code

It's unused with the exception of the selftests. Replace the call in the
memory_region live selftest with a call into the corresponding function
in the new migrate code.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210617063018.92802-13-thomas.hellstrom@linux.intel.com
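
For orientation before the diff: the replacement call pattern in the memory_region live selftest looks roughly like the sketch below. It is condensed from the intel_memory_region.c hunk at the end of this commit; obj and engine come from the surrounding test, and error handling is trimmed.

	struct i915_request *rq;
	int err;

	i915_gem_object_lock(obj, NULL);
	intel_engine_pm_get(engine);
	/* Clear the object's backing pages from the GPU via the GT's migrate context. */
	err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
					  obj->mm.pages->sgl, I915_CACHE_NONE,
					  true, 0xdeadbeaf, &rq);
	if (rq) {
		/* Publish the clear as the exclusive fence before dropping the request. */
		dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
		i915_request_put(rq);
	}
	intel_engine_pm_put(engine);
	i915_gem_object_unlock(obj);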

Authored by Thomas Hellström, committed by Matthew Auld
99919be7 57143f2e

14 insertions(+), 1107 deletions(-)
drivers/gpu/drm/i915/Makefile (-1)
···
 	gem/i915_gem_execbuffer.o \
 	gem/i915_gem_internal.o \
 	gem/i915_gem_object.o \
-	gem/i915_gem_object_blt.o \
 	gem/i915_gem_lmem.o \
 	gem/i915_gem_mman.o \
 	gem/i915_gem_pages.o \
drivers/gpu/drm/i915/gem/i915_gem_object_blt.c (deleted, -461)
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "gt/intel_context.h"
-#include "gt/intel_engine_pm.h"
-#include "gt/intel_gpu_commands.h"
-#include "gt/intel_gt.h"
-#include "gt/intel_gt_buffer_pool.h"
-#include "gt/intel_ring.h"
-#include "i915_gem_clflush.h"
-#include "i915_gem_object_blt.h"
-
-struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
-					 struct i915_vma *vma,
-					 struct i915_gem_ww_ctx *ww,
-					 u32 value)
-{
-	struct drm_i915_private *i915 = ce->vm->i915;
-	const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
-	struct intel_gt_buffer_pool_node *pool;
-	struct i915_vma *batch;
-	u64 offset;
-	u64 count;
-	u64 rem;
-	u32 size;
-	u32 *cmd;
-	int err;
-
-	GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
-	intel_engine_pm_get(ce->engine);
-
-	count = div_u64(round_up(vma->size, block_size), block_size);
-	size = (1 + 8 * count) * sizeof(u32);
-	size = round_up(size, PAGE_SIZE);
-	pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
-	if (IS_ERR(pool)) {
-		err = PTR_ERR(pool);
-		goto out_pm;
-	}
-
-	err = i915_gem_object_lock(pool->obj, ww);
-	if (err)
-		goto out_put;
-
-	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto out_put;
-	}
-
-	err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER);
-	if (unlikely(err))
-		goto out_put;
-
-	/* we pinned the pool, mark it as such */
-	intel_gt_buffer_pool_mark_used(pool);
-
-	cmd = i915_gem_object_pin_map(pool->obj, pool->type);
-	if (IS_ERR(cmd)) {
-		err = PTR_ERR(cmd);
-		goto out_unpin;
-	}
-
-	rem = vma->size;
-	offset = vma->node.start;
-
-	do {
-		u32 size = min_t(u64, rem, block_size);
-
-		GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
-
-		if (GRAPHICS_VER(i915) >= 8) {
-			*cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
-			*cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
-			*cmd++ = 0;
-			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
-			*cmd++ = lower_32_bits(offset);
-			*cmd++ = upper_32_bits(offset);
-			*cmd++ = value;
-		} else {
-			*cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
-			*cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
-			*cmd++ = 0;
-			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
-			*cmd++ = offset;
-			*cmd++ = value;
-		}
-
-		/* Allow ourselves to be preempted in between blocks. */
-		*cmd++ = MI_ARB_CHECK;
-
-		offset += size;
-		rem -= size;
-	} while (rem);
-
-	*cmd = MI_BATCH_BUFFER_END;
-
-	i915_gem_object_flush_map(pool->obj);
-	i915_gem_object_unpin_map(pool->obj);
-
-	intel_gt_chipset_flush(ce->vm->gt);
-
-	batch->private = pool;
-	return batch;
-
-out_unpin:
-	i915_vma_unpin(batch);
-out_put:
-	intel_gt_buffer_pool_put(pool);
-out_pm:
-	intel_engine_pm_put(ce->engine);
-	return ERR_PTR(err);
-}
-
-int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
-{
-	int err;
-
-	err = i915_request_await_object(rq, vma->obj, false);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, 0);
-	if (unlikely(err))
-		return err;
-
-	return intel_gt_buffer_pool_mark_active(vma->private, rq);
-}
-
-void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
-{
-	i915_vma_unpin(vma);
-	intel_gt_buffer_pool_put(vma->private);
-	intel_engine_pm_put(ce->engine);
-}
-
-static int
-move_obj_to_gpu(struct drm_i915_gem_object *obj,
-		struct i915_request *rq,
-		bool write)
-{
-	if (obj->cache_dirty & ~obj->cache_coherent)
-		i915_gem_clflush_object(obj, 0);
-
-	return i915_request_await_object(rq, obj, write);
-}
-
-int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
-			     struct intel_context *ce,
-			     u32 value)
-{
-	struct i915_gem_ww_ctx ww;
-	struct i915_request *rq;
-	struct i915_vma *batch;
-	struct i915_vma *vma;
-	int err;
-
-	vma = i915_vma_instance(obj, ce->vm, NULL);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	i915_gem_ww_ctx_init(&ww, true);
-	intel_engine_pm_get(ce->engine);
-retry:
-	err = i915_gem_object_lock(obj, &ww);
-	if (err)
-		goto out;
-
-	err = intel_context_pin_ww(ce, &ww);
-	if (err)
-		goto out;
-
-	err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
-	if (err)
-		goto out_ctx;
-
-	batch = intel_emit_vma_fill_blt(ce, vma, &ww, value);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto out_vma;
-	}
-
-	rq = i915_request_create(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto out_batch;
-	}
-
-	err = intel_emit_vma_mark_active(batch, rq);
-	if (unlikely(err))
-		goto out_request;
-
-	err = move_obj_to_gpu(vma->obj, rq, true);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	if (unlikely(err))
-		goto out_request;
-
-	if (ce->engine->emit_init_breadcrumb)
-		err = ce->engine->emit_init_breadcrumb(rq);
-
-	if (likely(!err))
-		err = ce->engine->emit_bb_start(rq,
-						batch->node.start,
-						batch->node.size,
-						0);
-out_request:
-	if (unlikely(err))
-		i915_request_set_error_once(rq, err);
-
-	i915_request_add(rq);
-out_batch:
-	intel_emit_vma_release(ce, batch);
-out_vma:
-	i915_vma_unpin(vma);
-out_ctx:
-	intel_context_unpin(ce);
-out:
-	if (err == -EDEADLK) {
-		err = i915_gem_ww_ctx_backoff(&ww);
-		if (!err)
-			goto retry;
-	}
-	i915_gem_ww_ctx_fini(&ww);
-	intel_engine_pm_put(ce->engine);
-	return err;
-}
-
-/* Wa_1209644611:icl,ehl */
-static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
-{
-	u32 height = size >> PAGE_SHIFT;
-
-	if (GRAPHICS_VER(i915) != 11)
-		return false;
-
-	return height % 4 == 3 && height <= 8;
-}
-
-struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
-					 struct i915_gem_ww_ctx *ww,
-					 struct i915_vma *src,
-					 struct i915_vma *dst)
-{
-	struct drm_i915_private *i915 = ce->vm->i915;
-	const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
-	struct intel_gt_buffer_pool_node *pool;
-	struct i915_vma *batch;
-	u64 src_offset, dst_offset;
-	u64 count, rem;
-	u32 size, *cmd;
-	int err;
-
-	GEM_BUG_ON(src->size != dst->size);
-
-	GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
-	intel_engine_pm_get(ce->engine);
-
-	count = div_u64(round_up(dst->size, block_size), block_size);
-	size = (1 + 11 * count) * sizeof(u32);
-	size = round_up(size, PAGE_SIZE);
-	pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
-	if (IS_ERR(pool)) {
-		err = PTR_ERR(pool);
-		goto out_pm;
-	}
-
-	err = i915_gem_object_lock(pool->obj, ww);
-	if (err)
-		goto out_put;
-
-	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto out_put;
-	}
-
-	err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER);
-	if (unlikely(err))
-		goto out_put;
-
-	/* we pinned the pool, mark it as such */
-	intel_gt_buffer_pool_mark_used(pool);
-
-	cmd = i915_gem_object_pin_map(pool->obj, pool->type);
-	if (IS_ERR(cmd)) {
-		err = PTR_ERR(cmd);
-		goto out_unpin;
-	}
-
-	rem = src->size;
-	src_offset = src->node.start;
-	dst_offset = dst->node.start;
-
-	do {
-		size = min_t(u64, rem, block_size);
-		GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
-
-		if (GRAPHICS_VER(i915) >= 9 &&
-		    !wa_1209644611_applies(i915, size)) {
-			*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
-			*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
-			*cmd++ = 0;
-			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
-			*cmd++ = lower_32_bits(dst_offset);
-			*cmd++ = upper_32_bits(dst_offset);
-			*cmd++ = 0;
-			*cmd++ = PAGE_SIZE;
-			*cmd++ = lower_32_bits(src_offset);
-			*cmd++ = upper_32_bits(src_offset);
-		} else if (GRAPHICS_VER(i915) >= 8) {
-			*cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
-			*cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
-			*cmd++ = 0;
-			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
-			*cmd++ = lower_32_bits(dst_offset);
-			*cmd++ = upper_32_bits(dst_offset);
-			*cmd++ = 0;
-			*cmd++ = PAGE_SIZE;
-			*cmd++ = lower_32_bits(src_offset);
-			*cmd++ = upper_32_bits(src_offset);
-		} else {
-			*cmd++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
-			*cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
-			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
-			*cmd++ = dst_offset;
-			*cmd++ = PAGE_SIZE;
-			*cmd++ = src_offset;
-		}
-
-		/* Allow ourselves to be preempted in between blocks. */
-		*cmd++ = MI_ARB_CHECK;
-
-		src_offset += size;
-		dst_offset += size;
-		rem -= size;
-	} while (rem);
-
-	*cmd = MI_BATCH_BUFFER_END;
-
-	i915_gem_object_flush_map(pool->obj);
-	i915_gem_object_unpin_map(pool->obj);
-
-	intel_gt_chipset_flush(ce->vm->gt);
-	batch->private = pool;
-	return batch;
-
-out_unpin:
-	i915_vma_unpin(batch);
-out_put:
-	intel_gt_buffer_pool_put(pool);
-out_pm:
-	intel_engine_pm_put(ce->engine);
-	return ERR_PTR(err);
-}
-
-int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
-			     struct drm_i915_gem_object *dst,
-			     struct intel_context *ce)
-{
-	struct i915_address_space *vm = ce->vm;
-	struct i915_vma *vma[2], *batch;
-	struct i915_gem_ww_ctx ww;
-	struct i915_request *rq;
-	int err, i;
-
-	vma[0] = i915_vma_instance(src, vm, NULL);
-	if (IS_ERR(vma[0]))
-		return PTR_ERR(vma[0]);
-
-	vma[1] = i915_vma_instance(dst, vm, NULL);
-	if (IS_ERR(vma[1]))
-		return PTR_ERR(vma[1]);
-
-	i915_gem_ww_ctx_init(&ww, true);
-	intel_engine_pm_get(ce->engine);
-retry:
-	err = i915_gem_object_lock(src, &ww);
-	if (!err)
-		err = i915_gem_object_lock(dst, &ww);
-	if (!err)
-		err = intel_context_pin_ww(ce, &ww);
-	if (err)
-		goto out;
-
-	err = i915_vma_pin_ww(vma[0], &ww, 0, 0, PIN_USER);
-	if (err)
-		goto out_ctx;
-
-	err = i915_vma_pin_ww(vma[1], &ww, 0, 0, PIN_USER);
-	if (unlikely(err))
-		goto out_unpin_src;
-
-	batch = intel_emit_vma_copy_blt(ce, &ww, vma[0], vma[1]);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto out_unpin_dst;
-	}
-
-	rq = i915_request_create(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto out_batch;
-	}
-
-	err = intel_emit_vma_mark_active(batch, rq);
-	if (unlikely(err))
-		goto out_request;
-
-	for (i = 0; i < ARRAY_SIZE(vma); i++) {
-		err = move_obj_to_gpu(vma[i]->obj, rq, i);
-		if (unlikely(err))
-			goto out_request;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(vma); i++) {
-		unsigned int flags = i ? EXEC_OBJECT_WRITE : 0;
-
-		err = i915_vma_move_to_active(vma[i], rq, flags);
-		if (unlikely(err))
-			goto out_request;
-	}
-
-	if (rq->engine->emit_init_breadcrumb) {
-		err = rq->engine->emit_init_breadcrumb(rq);
-		if (unlikely(err))
-			goto out_request;
-	}
-
-	err = rq->engine->emit_bb_start(rq,
-					batch->node.start, batch->node.size,
-					0);
-
-out_request:
-	if (unlikely(err))
-		i915_request_set_error_once(rq, err);
-
-	i915_request_add(rq);
-out_batch:
-	intel_emit_vma_release(ce, batch);
-out_unpin_dst:
-	i915_vma_unpin(vma[1]);
-out_unpin_src:
-	i915_vma_unpin(vma[0]);
-out_ctx:
-	intel_context_unpin(ce);
-out:
-	if (err == -EDEADLK) {
-		err = i915_gem_ww_ctx_backoff(&ww);
-		if (!err)
-			goto retry;
-	}
-	i915_gem_ww_ctx_fini(&ww);
-	intel_engine_pm_put(ce->engine);
-	return err;
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/i915_gem_object_blt.c"
-#endif
drivers/gpu/drm/i915/gem/i915_gem_object_blt.h (deleted, -39)
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __I915_GEM_OBJECT_BLT_H__
-#define __I915_GEM_OBJECT_BLT_H__
-
-#include <linux/types.h>
-
-#include "gt/intel_context.h"
-#include "gt/intel_engine_pm.h"
-#include "i915_vma.h"
-
-struct drm_i915_gem_object;
-struct i915_gem_ww_ctx;
-
-struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
-					 struct i915_vma *vma,
-					 struct i915_gem_ww_ctx *ww,
-					 u32 value);
-
-struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
-					 struct i915_gem_ww_ctx *ww,
-					 struct i915_vma *src,
-					 struct i915_vma *dst);
-
-int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq);
-void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma);
-
-int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
-			     struct intel_context *ce,
-			     u32 value);
-
-int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
-			     struct drm_i915_gem_object *dst,
-			     struct intel_context *ce);
-
-#endif
drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c (deleted, -597)
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include <linux/sort.h>
-
-#include "gt/intel_gt.h"
-#include "gt/intel_engine_user.h"
-
-#include "i915_selftest.h"
-
-#include "gem/i915_gem_context.h"
-#include "selftests/igt_flush_test.h"
-#include "selftests/i915_random.h"
-#include "selftests/mock_drm.h"
-#include "huge_gem_object.h"
-#include "mock_context.h"
-
-static int wrap_ktime_compare(const void *A, const void *B)
-{
-	const ktime_t *a = A, *b = B;
-
-	return ktime_compare(*a, *b);
-}
-
-static int __perf_fill_blt(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	int inst = 0;
-
-	do {
-		struct intel_engine_cs *engine;
-		ktime_t t[5];
-		int pass;
-		int err;
-
-		engine = intel_engine_lookup_user(i915,
-						  I915_ENGINE_CLASS_COPY,
-						  inst++);
-		if (!engine)
-			return 0;
-
-		intel_engine_pm_get(engine);
-		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
-			struct intel_context *ce = engine->kernel_context;
-			ktime_t t0, t1;
-
-			t0 = ktime_get();
-
-			err = i915_gem_object_fill_blt(obj, ce, 0);
-			if (err)
-				break;
-
-			err = i915_gem_object_wait(obj,
-						   I915_WAIT_ALL,
-						   MAX_SCHEDULE_TIMEOUT);
-			if (err)
-				break;
-
-			t1 = ktime_get();
-			t[pass] = ktime_sub(t1, t0);
-		}
-		intel_engine_pm_put(engine);
-		if (err)
-			return err;
-
-		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
-		pr_info("%s: blt %zd KiB fill: %lld MiB/s\n",
-			engine->name,
-			obj->base.size >> 10,
-			div64_u64(mul_u32_u32(4 * obj->base.size,
-					      1000 * 1000 * 1000),
-				  t[1] + 2 * t[2] + t[3]) >> 20);
-	} while (1);
-}
-
-static int perf_fill_blt(void *arg)
-{
-	struct drm_i915_private *i915 = arg;
-	static const unsigned long sizes[] = {
-		SZ_4K,
-		SZ_64K,
-		SZ_2M,
-		SZ_64M
-	};
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(sizes); i++) {
-		struct drm_i915_gem_object *obj;
-		int err;
-
-		obj = i915_gem_object_create_internal(i915, sizes[i]);
-		if (IS_ERR(obj))
-			return PTR_ERR(obj);
-
-		err = __perf_fill_blt(obj);
-		i915_gem_object_put(obj);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static int __perf_copy_blt(struct drm_i915_gem_object *src,
-			   struct drm_i915_gem_object *dst)
-{
-	struct drm_i915_private *i915 = to_i915(src->base.dev);
-	int inst = 0;
-
-	do {
-		struct intel_engine_cs *engine;
-		ktime_t t[5];
-		int pass;
-		int err = 0;
-
-		engine = intel_engine_lookup_user(i915,
-						  I915_ENGINE_CLASS_COPY,
-						  inst++);
-		if (!engine)
-			return 0;
-
-		intel_engine_pm_get(engine);
-		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
-			struct intel_context *ce = engine->kernel_context;
-			ktime_t t0, t1;
-
-			t0 = ktime_get();
-
-			err = i915_gem_object_copy_blt(src, dst, ce);
-			if (err)
-				break;
-
-			err = i915_gem_object_wait(dst,
-						   I915_WAIT_ALL,
-						   MAX_SCHEDULE_TIMEOUT);
-			if (err)
-				break;
-
-			t1 = ktime_get();
-			t[pass] = ktime_sub(t1, t0);
-		}
-		intel_engine_pm_put(engine);
-		if (err)
-			return err;
-
-		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
-		pr_info("%s: blt %zd KiB copy: %lld MiB/s\n",
-			engine->name,
-			src->base.size >> 10,
-			div64_u64(mul_u32_u32(4 * src->base.size,
-					      1000 * 1000 * 1000),
-				  t[1] + 2 * t[2] + t[3]) >> 20);
-	} while (1);
-}
-
-static int perf_copy_blt(void *arg)
-{
-	struct drm_i915_private *i915 = arg;
-	static const unsigned long sizes[] = {
-		SZ_4K,
-		SZ_64K,
-		SZ_2M,
-		SZ_64M
-	};
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(sizes); i++) {
-		struct drm_i915_gem_object *src, *dst;
-		int err;
-
-		src = i915_gem_object_create_internal(i915, sizes[i]);
-		if (IS_ERR(src))
-			return PTR_ERR(src);
-
-		dst = i915_gem_object_create_internal(i915, sizes[i]);
-		if (IS_ERR(dst)) {
-			err = PTR_ERR(dst);
-			goto err_src;
-		}
-
-		err = __perf_copy_blt(src, dst);
-
-		i915_gem_object_put(dst);
-err_src:
-		i915_gem_object_put(src);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-struct igt_thread_arg {
-	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
-	struct file *file;
-	struct rnd_state prng;
-	unsigned int n_cpus;
-};
-
-static int igt_fill_blt_thread(void *arg)
-{
-	struct igt_thread_arg *thread = arg;
-	struct intel_engine_cs *engine = thread->engine;
-	struct rnd_state *prng = &thread->prng;
-	struct drm_i915_gem_object *obj;
-	struct i915_gem_context *ctx;
-	struct intel_context *ce;
-	unsigned int prio;
-	IGT_TIMEOUT(end);
-	u64 total, max;
-	int err;
-
-	ctx = thread->ctx;
-	if (!ctx) {
-		ctx = live_context_for_engine(engine, thread->file);
-		if (IS_ERR(ctx))
-			return PTR_ERR(ctx);
-
-		prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
-		ctx->sched.priority = prio;
-	}
-
-	ce = i915_gem_context_get_engine(ctx, 0);
-	GEM_BUG_ON(IS_ERR(ce));
-
-	/*
-	 * If we have a tiny shared address space, like for the GGTT
-	 * then we can't be too greedy.
-	 */
-	max = ce->vm->total;
-	if (i915_is_ggtt(ce->vm) || thread->ctx)
-		max = div_u64(max, thread->n_cpus);
-	max >>= 4;
-
-	total = PAGE_SIZE;
-	do {
-		/* Aim to keep the runtime under reasonable bounds! */
-		const u32 max_phys_size = SZ_64K;
-		u32 val = prandom_u32_state(prng);
-		u32 phys_sz;
-		u32 sz;
-		u32 *vaddr;
-		u32 i;
-
-		total = min(total, max);
-		sz = i915_prandom_u32_max_state(total, prng) + 1;
-		phys_sz = sz % max_phys_size + 1;
-
-		sz = round_up(sz, PAGE_SIZE);
-		phys_sz = round_up(phys_sz, PAGE_SIZE);
-		phys_sz = min(phys_sz, sz);
-
-		pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
-			 phys_sz, sz, val);
-
-		obj = huge_gem_object(engine->i915, phys_sz, sz);
-		if (IS_ERR(obj)) {
-			err = PTR_ERR(obj);
-			goto err_flush;
-		}
-
-		vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
-		if (IS_ERR(vaddr)) {
-			err = PTR_ERR(vaddr);
-			goto err_put;
-		}
-
-		/*
-		 * Make sure the potentially async clflush does its job, if
-		 * required.
-		 */
-		memset32(vaddr, val ^ 0xdeadbeaf,
-			 huge_gem_object_phys_size(obj) / sizeof(u32));
-
-		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
-			obj->cache_dirty = true;
-
-		err = i915_gem_object_fill_blt(obj, ce, val);
-		if (err)
-			goto err_unpin;
-
-		err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
-		if (err)
-			goto err_unpin;
-
-		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
-			if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-				drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
-
-			if (vaddr[i] != val) {
-				pr_err("vaddr[%u]=%x, expected=%x\n", i,
-				       vaddr[i], val);
-				err = -EINVAL;
-				goto err_unpin;
-			}
-		}
-
-		i915_gem_object_unpin_map(obj);
-		i915_gem_object_put(obj);
-
-		total <<= 1;
-	} while (!time_after(jiffies, end));
-
-	goto err_flush;
-
-err_unpin:
-	i915_gem_object_unpin_map(obj);
-err_put:
-	i915_gem_object_put(obj);
-err_flush:
-	if (err == -ENOMEM)
-		err = 0;
-
-	intel_context_put(ce);
-	return err;
-}
-
-static int igt_copy_blt_thread(void *arg)
-{
-	struct igt_thread_arg *thread = arg;
-	struct intel_engine_cs *engine = thread->engine;
-	struct rnd_state *prng = &thread->prng;
-	struct drm_i915_gem_object *src, *dst;
-	struct i915_gem_context *ctx;
-	struct intel_context *ce;
-	unsigned int prio;
-	IGT_TIMEOUT(end);
-	u64 total, max;
-	int err;
-
-	ctx = thread->ctx;
-	if (!ctx) {
-		ctx = live_context_for_engine(engine, thread->file);
-		if (IS_ERR(ctx))
-			return PTR_ERR(ctx);
-
-		prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
-		ctx->sched.priority = prio;
-	}
-
-	ce = i915_gem_context_get_engine(ctx, 0);
-	GEM_BUG_ON(IS_ERR(ce));
-
-	/*
-	 * If we have a tiny shared address space, like for the GGTT
-	 * then we can't be too greedy.
-	 */
-	max = ce->vm->total;
-	if (i915_is_ggtt(ce->vm) || thread->ctx)
-		max = div_u64(max, thread->n_cpus);
-	max >>= 4;
-
-	total = PAGE_SIZE;
-	do {
-		/* Aim to keep the runtime under reasonable bounds! */
-		const u32 max_phys_size = SZ_64K;
-		u32 val = prandom_u32_state(prng);
-		u32 phys_sz;
-		u32 sz;
-		u32 *vaddr;
-		u32 i;
-
-		total = min(total, max);
-		sz = i915_prandom_u32_max_state(total, prng) + 1;
-		phys_sz = sz % max_phys_size + 1;
-
-		sz = round_up(sz, PAGE_SIZE);
-		phys_sz = round_up(phys_sz, PAGE_SIZE);
-		phys_sz = min(phys_sz, sz);
-
-		pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
-			 phys_sz, sz, val);
-
-		src = huge_gem_object(engine->i915, phys_sz, sz);
-		if (IS_ERR(src)) {
-			err = PTR_ERR(src);
-			goto err_flush;
-		}
-
-		vaddr = i915_gem_object_pin_map_unlocked(src, I915_MAP_WB);
-		if (IS_ERR(vaddr)) {
-			err = PTR_ERR(vaddr);
-			goto err_put_src;
-		}
-
-		memset32(vaddr, val,
-			 huge_gem_object_phys_size(src) / sizeof(u32));
-
-		i915_gem_object_unpin_map(src);
-
-		if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-			src->cache_dirty = true;
-
-		dst = huge_gem_object(engine->i915, phys_sz, sz);
-		if (IS_ERR(dst)) {
-			err = PTR_ERR(dst);
-			goto err_put_src;
-		}
-
-		vaddr = i915_gem_object_pin_map_unlocked(dst, I915_MAP_WB);
-		if (IS_ERR(vaddr)) {
-			err = PTR_ERR(vaddr);
-			goto err_put_dst;
-		}
-
-		memset32(vaddr, val ^ 0xdeadbeaf,
-			 huge_gem_object_phys_size(dst) / sizeof(u32));
-
-		if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
-			dst->cache_dirty = true;
-
-		err = i915_gem_object_copy_blt(src, dst, ce);
-		if (err)
-			goto err_unpin;
-
-		err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
-		if (err)
-			goto err_unpin;
-
-		for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
-			if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-				drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
-
-			if (vaddr[i] != val) {
-				pr_err("vaddr[%u]=%x, expected=%x\n", i,
-				       vaddr[i], val);
-				err = -EINVAL;
-				goto err_unpin;
-			}
-		}
-
-		i915_gem_object_unpin_map(dst);
-
-		i915_gem_object_put(src);
-		i915_gem_object_put(dst);
-
-		total <<= 1;
-	} while (!time_after(jiffies, end));
-
-	goto err_flush;
-
-err_unpin:
-	i915_gem_object_unpin_map(dst);
-err_put_dst:
-	i915_gem_object_put(dst);
-err_put_src:
-	i915_gem_object_put(src);
-err_flush:
-	if (err == -ENOMEM)
-		err = 0;
-
-	intel_context_put(ce);
-	return err;
-}
-
-static int igt_threaded_blt(struct intel_engine_cs *engine,
-			    int (*blt_fn)(void *arg),
-			    unsigned int flags)
-#define SINGLE_CTX BIT(0)
-{
-	struct igt_thread_arg *thread;
-	struct task_struct **tsk;
-	unsigned int n_cpus, i;
-	I915_RND_STATE(prng);
-	int err = 0;
-
-	n_cpus = num_online_cpus() + 1;
-
-	tsk = kcalloc(n_cpus, sizeof(struct task_struct *), GFP_KERNEL);
-	if (!tsk)
-		return 0;
-
-	thread = kcalloc(n_cpus, sizeof(struct igt_thread_arg), GFP_KERNEL);
-	if (!thread)
-		goto out_tsk;
-
-	thread[0].file = mock_file(engine->i915);
-	if (IS_ERR(thread[0].file)) {
-		err = PTR_ERR(thread[0].file);
-		goto out_thread;
-	}
-
-	if (flags & SINGLE_CTX) {
-		thread[0].ctx = live_context_for_engine(engine, thread[0].file);
-		if (IS_ERR(thread[0].ctx)) {
-			err = PTR_ERR(thread[0].ctx);
-			goto out_file;
-		}
-	}
-
-	for (i = 0; i < n_cpus; ++i) {
-		thread[i].engine = engine;
-		thread[i].file = thread[0].file;
-		thread[i].ctx = thread[0].ctx;
-		thread[i].n_cpus = n_cpus;
-		thread[i].prng =
-			I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
-
-		tsk[i] = kthread_run(blt_fn, &thread[i], "igt/blt-%d", i);
-		if (IS_ERR(tsk[i])) {
-			err = PTR_ERR(tsk[i]);
-			break;
-		}
-
-		get_task_struct(tsk[i]);
-	}
-
-	yield(); /* start all threads before we kthread_stop() */
-
-	for (i = 0; i < n_cpus; ++i) {
-		int status;
-
-		if (IS_ERR_OR_NULL(tsk[i]))
-			continue;
-
-		status = kthread_stop(tsk[i]);
-		if (status && !err)
-			err = status;
-
-		put_task_struct(tsk[i]);
-	}
-
-out_file:
-	fput(thread[0].file);
-out_thread:
-	kfree(thread);
-out_tsk:
-	kfree(tsk);
-	return err;
-}
-
-static int test_copy_engines(struct drm_i915_private *i915,
-			     int (*fn)(void *arg),
-			     unsigned int flags)
-{
-	struct intel_engine_cs *engine;
-	int ret;
-
-	for_each_uabi_class_engine(engine, I915_ENGINE_CLASS_COPY, i915) {
-		ret = igt_threaded_blt(engine, fn, flags);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int igt_fill_blt(void *arg)
-{
-	return test_copy_engines(arg, igt_fill_blt_thread, 0);
-}
-
-static int igt_fill_blt_ctx0(void *arg)
-{
-	return test_copy_engines(arg, igt_fill_blt_thread, SINGLE_CTX);
-}
-
-static int igt_copy_blt(void *arg)
-{
-	return test_copy_engines(arg, igt_copy_blt_thread, 0);
-}
-
-static int igt_copy_blt_ctx0(void *arg)
-{
-	return test_copy_engines(arg, igt_copy_blt_thread, SINGLE_CTX);
-}
-
-int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
-{
-	static const struct i915_subtest tests[] = {
-		SUBTEST(igt_fill_blt),
-		SUBTEST(igt_fill_blt_ctx0),
-		SUBTEST(igt_copy_blt),
-		SUBTEST(igt_copy_blt_ctx0),
-	};
-
-	if (intel_gt_is_wedged(&i915->gt))
-		return 0;
-
-	return i915_live_subtests(tests, i915);
-}
-
-int i915_gem_object_blt_perf_selftests(struct drm_i915_private *i915)
-{
-	static const struct i915_subtest tests[] = {
-		SUBTEST(perf_fill_blt),
-		SUBTEST(perf_copy_blt),
-	};
-
-	if (intel_gt_is_wedged(&i915->gt))
-		return 0;
-
-	return i915_live_subtests(tests, i915);
-}
drivers/gpu/drm/i915/selftests/i915_live_selftests.h (-1)
···
 selftest(hugepages, i915_gem_huge_page_live_selftests)
 selftest(gem_contexts, i915_gem_context_live_selftests)
 selftest(gem_execbuf, i915_gem_execbuffer_live_selftests)
-selftest(blt, i915_gem_object_blt_live_selftests)
 selftest(client, i915_gem_client_blt_live_selftests)
 selftest(reset, intel_reset_live_selftests)
 selftest(memory_region, intel_memory_region_live_selftests)
drivers/gpu/drm/i915/selftests/i915_perf_selftests.h (-1)
···
 selftest(engine_cs, intel_engine_cs_perf_selftests)
 selftest(request, i915_request_perf_selftests)
 selftest(migrate, intel_migrate_perf_selftests)
-selftest(blt, i915_gem_object_blt_perf_selftests)
 selftest(region, intel_memory_region_perf_selftests)
drivers/gpu/drm/i915/selftests/intel_memory_region.c (+14 -7)
···
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
-#include "gem/i915_gem_object_blt.h"
 #include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_pm.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
 #include "i915_buddy.h"
+#include "gt/intel_migrate.h"
 #include "i915_memcpy.h"
 #include "i915_ttm_buddy_manager.h"
 #include "selftests/igt_flush_test.h"
···
 		PAGE_SIZE - 64,
 	};
 	struct intel_engine_cs *engine;
+	struct i915_request *rq;
 	u32 *vaddr;
 	u32 sz;
 	u32 i;
···
 		goto out_put;
 	}

+	i915_gem_object_lock(obj, NULL);
 	/* Put the pages into a known state -- from the gpu for added fun */
 	intel_engine_pm_get(engine);
-	err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
-	intel_engine_pm_put(engine);
-	if (err)
-		goto out_unpin;
+	err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
+					  obj->mm.pages->sgl, I915_CACHE_NONE,
+					  true, 0xdeadbeaf, &rq);
+	if (rq) {
+		dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+		i915_request_put(rq);
+	}

-	i915_gem_object_lock(obj, NULL);
-	err = i915_gem_object_set_to_wc_domain(obj, true);
+	intel_engine_pm_put(engine);
+	if (!err)
+		err = i915_gem_object_set_to_wc_domain(obj, true);
 	i915_gem_object_unlock(obj);
 	if (err)
 		goto out_unpin;
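
Worth noting about the hunk above: intel_context_migrate_clear() hands back the last request of the clear, and the selftest publishes that request's fence as the exclusive fence on the object's dma_resv before dropping its reference. Any later wait on the object, including the i915_gem_object_set_to_wc_domain() call that follows, is therefore ordered after the GPU clear, matching the implicit write tracking the removed i915_gem_object_fill_blt() performed internally via i915_vma_move_to_active().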