Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-gt-next-2021-01-21-1' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Cross-subsystem Changes:

- Includes gvt-gt-next-2021-01-18 + header check fix for GVT

Driver Changes:

- Fix for #2955: Clear potentially malicious register state before
executing clear residuals security mitigation (Chris)
- Fixes that lead to marking per-engine-reset as supported on Gen7
(Chris)
- Remove per-client stats from debugfs/i915_gem_objects) (Tvrtko, Chris)
- Add arbitration check before semaphore wait (Chris)
- Apply interactive priority to explicit flip fences (Chris)
- Make GEM errors non-fatal by default to help capturing logs during
development (Chris)
- Fix object page offset within a region in error capture (CQ, Matt A)
- Close race between enable_breadcrumbs and cancel_breadcrumbs (Chris)
- Almagamate clflushes on suspend/freeze to speed up S/R (Chris)
- Protect used framebuffers from casual eviction (Chris)

- Fix the sgt.pfn sanity check (Kui, Matt A)
- Reduce locking around i915_request.lock and ctx->engines_mutex (Chris)
- Simplify tracking for engine->fw_active and stats.active (Chris)
- Constrain pool objects by mapping type (Chris, Matt A)
- Use shrinkable status for unknown swizzle quirks (Chris)
- Do not suspend bonded requests if one hangs (Chris)
- Restore "Skip over completed active execlists" optimization (Chris)

- Move stolen node into GEM object union (Chris)
. Split gem_create into own file (Matt A)
- Convert object_create into object_init in LMEM region code (Matt A)
- Reduce test_and_set_bit to set_bit in i915_request_submit() (Chris)
- Mark up protected uses of 'i915_request_completed' (Chris)
- Remove extraneous inline modifiers (Chris)
- Add function to define defaults for GuC/HuC enable (John)

- Improve code locality by moving closer to single user (Matt A, Chris)
- Compiler warning fixes (Matt A, Chris)
- Selftest / CI improvements (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210121150747.GA58732@jlahtine-mobl.ger.corp.intel.com

+1190 -996
+20 -2
drivers/gpu/drm/i915/Kconfig.debug
··· 31 31 select DRM_DEBUG_SELFTEST 32 32 select DMABUF_SELFTESTS 33 33 select SW_SYNC # signaling validation framework (igt/syncobj*) 34 + select DRM_I915_WERROR 35 + select DRM_I915_DEBUG_GEM 36 + select DRM_I915_DEBUG_GEM_ONCE 37 + select DRM_I915_DEBUG_MMIO 38 + select DRM_I915_DEBUG_RUNTIME_PM 34 39 select DRM_I915_SW_FENCE_DEBUG_OBJECTS 35 40 select DRM_I915_SELFTEST 36 - select DRM_I915_DEBUG_RUNTIME_PM 37 - select DRM_I915_DEBUG_MMIO 38 41 default n 39 42 help 40 43 Choose this option to turn on extra driver debugging that may affect ··· 67 64 help 68 65 Enable extra sanity checks (including BUGs) along the GEM driver 69 66 paths that may slow the system down and if hit hang the machine. 67 + 68 + Recommended for driver developers only. 69 + 70 + If in doubt, say "N". 71 + 72 + config DRM_I915_DEBUG_GEM_ONCE 73 + bool "Make a GEM debug failure fatal" 74 + default n 75 + depends on DRM_I915_DEBUG_GEM 76 + help 77 + During development, we often only want the very first failure 78 + as that would otherwise be lost in the deluge of subsequent 79 + failures. However, more casual testers may not want to trigger 80 + a hard BUG_ON and hope that the system remains sufficiently usable 81 + to capture a bug report in situ. 70 82 71 83 Recommended for driver developers only. 72 84
+1
drivers/gpu/drm/i915/Makefile
··· 136 136 gem/i915_gem_clflush.o \ 137 137 gem/i915_gem_client_blt.o \ 138 138 gem/i915_gem_context.o \ 139 + gem/i915_gem_create.o \ 139 140 gem/i915_gem_dmabuf.o \ 140 141 gem/i915_gem_domain.o \ 141 142 gem/i915_gem_execbuffer.o \
+8 -15
drivers/gpu/drm/i915/display/intel_display.c
··· 2247 2247 */ 2248 2248 ret = i915_vma_pin_fence(vma); 2249 2249 if (ret != 0 && INTEL_GEN(dev_priv) < 4) { 2250 - i915_gem_object_unpin_from_display_plane(vma); 2250 + i915_vma_unpin(vma); 2251 2251 vma = ERR_PTR(ret); 2252 2252 goto err; 2253 2253 } ··· 2265 2265 2266 2266 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) 2267 2267 { 2268 - i915_gem_object_lock(vma->obj, NULL); 2269 2268 if (flags & PLANE_HAS_FENCE) 2270 2269 i915_vma_unpin_fence(vma); 2271 - i915_gem_object_unpin_from_display_plane(vma); 2272 - i915_gem_object_unlock(vma->obj); 2273 - 2270 + i915_vma_unpin(vma); 2274 2271 i915_vma_put(vma); 2275 2272 } 2276 2273 ··· 15628 15631 intel_unpin_fb_vma(vma, old_plane_state->flags); 15629 15632 } 15630 15633 15631 - static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) 15632 - { 15633 - struct i915_sched_attr attr = { 15634 - .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY), 15635 - }; 15636 - 15637 - i915_gem_object_wait_priority(obj, 0, &attr); 15638 - } 15639 - 15640 15634 /** 15641 15635 * intel_prepare_plane_fb - Prepare fb for usage on plane 15642 15636 * @_plane: drm plane to prepare for ··· 15644 15656 intel_prepare_plane_fb(struct drm_plane *_plane, 15645 15657 struct drm_plane_state *_new_plane_state) 15646 15658 { 15659 + struct i915_sched_attr attr = { 15660 + .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY), 15661 + }; 15647 15662 struct intel_plane *plane = to_intel_plane(_plane); 15648 15663 struct intel_plane_state *new_plane_state = 15649 15664 to_intel_plane_state(_new_plane_state); ··· 15686 15695 } 15687 15696 15688 15697 if (new_plane_state->uapi.fence) { /* explicit fencing */ 15698 + i915_gem_fence_wait_priority(new_plane_state->uapi.fence, 15699 + &attr); 15689 15700 ret = i915_sw_fence_await_dma_fence(&state->commit_ready, 15690 15701 new_plane_state->uapi.fence, 15691 15702 i915_fence_timeout(dev_priv), ··· 15709 15716 if (ret) 15710 15717 return ret; 15711 15718 15712 - fb_obj_bump_render_priority(obj); 15719 + i915_gem_object_wait_priority(obj, 0, &attr); 15713 15720 i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB); 15714 15721 15715 15722 if (!new_plane_state->uapi.fence) { /* implicit fencing */
+2 -2
drivers/gpu/drm/i915/display/intel_fbdev.c
··· 256 256 * If the object is stolen however, it will be full of whatever 257 257 * garbage was left in there. 258 258 */ 259 - if (vma->obj->stolen && !prealloc) 259 + if (!i915_gem_object_is_shmem(vma->obj) && !prealloc) 260 260 memset_io(info->screen_base, 0, info->screen_size); 261 261 262 262 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ ··· 595 595 * full of whatever garbage was left in there. 596 596 */ 597 597 if (state == FBINFO_STATE_RUNNING && 598 - intel_fb_obj(&ifbdev->fb->base)->stolen) 598 + !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base))) 599 599 memset_io(info->screen_base, 0, info->screen_size); 600 600 601 601 drm_fb_helper_set_suspend(&ifbdev->helper, state);
+3 -1
drivers/gpu/drm/i915/display/intel_frontbuffer.c
··· 225 225 struct i915_vma *vma; 226 226 227 227 spin_lock(&obj->vma.lock); 228 - for_each_ggtt_vma(vma, obj) 228 + for_each_ggtt_vma(vma, obj) { 229 + i915_vma_clear_scanout(vma); 229 230 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; 231 + } 230 232 spin_unlock(&obj->vma.lock); 231 233 232 234 RCU_INIT_POINTER(obj->frontbuffer, NULL);
+2 -2
drivers/gpu/drm/i915/display/intel_overlay.c
··· 360 360 intel_frontbuffer_flip_complete(overlay->i915, 361 361 INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); 362 362 363 - i915_gem_object_unpin_from_display_plane(vma); 363 + i915_vma_unpin(vma); 364 364 i915_vma_put(vma); 365 365 } 366 366 ··· 861 861 return 0; 862 862 863 863 out_unpin: 864 - i915_gem_object_unpin_from_display_plane(vma); 864 + i915_vma_unpin(vma); 865 865 out_pin_section: 866 866 atomic_dec(&dev_priv->gpu_error.pending_fb_pin); 867 867
+24 -41
drivers/gpu/drm/i915/gem/i915_gem_context.c
··· 408 408 } 409 409 410 410 if (i915_request_is_active(rq)) { 411 - if (!i915_request_completed(rq)) 411 + if (!__i915_request_is_complete(rq)) 412 412 *active = locked; 413 413 ret = true; 414 414 } ··· 717 717 } 718 718 719 719 static inline struct i915_gem_engines * 720 - __context_engines_await(const struct i915_gem_context *ctx) 720 + __context_engines_await(const struct i915_gem_context *ctx, 721 + bool *user_engines) 721 722 { 722 723 struct i915_gem_engines *engines; 723 724 ··· 727 726 engines = rcu_dereference(ctx->engines); 728 727 GEM_BUG_ON(!engines); 729 728 729 + if (user_engines) 730 + *user_engines = i915_gem_context_user_engines(ctx); 731 + 732 + /* successful await => strong mb */ 730 733 if (unlikely(!i915_sw_fence_await(&engines->fence))) 731 734 continue; 732 735 ··· 754 749 struct intel_context *ce; 755 750 int err = 0; 756 751 757 - e = __context_engines_await(ctx); 752 + e = __context_engines_await(ctx, NULL); 758 753 for_each_gem_engine(ce, e, it) { 759 754 err = fn(ce, data); 760 755 if (err) ··· 1080 1075 return err; 1081 1076 } 1082 1077 1083 - e = __context_engines_await(ctx); 1078 + e = __context_engines_await(ctx, NULL); 1084 1079 if (!e) { 1085 1080 i915_active_release(&cb->base); 1086 1081 return -ENOENT; ··· 1843 1838 return 0; 1844 1839 } 1845 1840 1846 - static struct i915_gem_engines * 1847 - __copy_engines(struct i915_gem_engines *e) 1848 - { 1849 - struct i915_gem_engines *copy; 1850 - unsigned int n; 1851 - 1852 - copy = alloc_engines(e->num_engines); 1853 - if (!copy) 1854 - return ERR_PTR(-ENOMEM); 1855 - 1856 - for (n = 0; n < e->num_engines; n++) { 1857 - if (e->engines[n]) 1858 - copy->engines[n] = intel_context_get(e->engines[n]); 1859 - else 1860 - copy->engines[n] = NULL; 1861 - } 1862 - copy->num_engines = n; 1863 - 1864 - return copy; 1865 - } 1866 - 1867 1841 static int 1868 1842 get_engines(struct i915_gem_context *ctx, 1869 1843 struct drm_i915_gem_context_param *args) ··· 1850 1866 struct i915_context_param_engines __user *user; 1851 1867 struct i915_gem_engines *e; 1852 1868 size_t n, count, size; 1869 + bool user_engines; 1853 1870 int err = 0; 1854 1871 1855 - err = mutex_lock_interruptible(&ctx->engines_mutex); 1856 - if (err) 1857 - return err; 1872 + e = __context_engines_await(ctx, &user_engines); 1873 + if (!e) 1874 + return -ENOENT; 1858 1875 1859 - e = NULL; 1860 - if (i915_gem_context_user_engines(ctx)) 1861 - e = __copy_engines(i915_gem_context_engines(ctx)); 1862 - mutex_unlock(&ctx->engines_mutex); 1863 - if (IS_ERR_OR_NULL(e)) { 1876 + if (!user_engines) { 1877 + i915_sw_fence_complete(&e->fence); 1864 1878 args->size = 0; 1865 - return PTR_ERR_OR_ZERO(e); 1879 + return 0; 1866 1880 } 1867 1881 1868 1882 count = e->num_engines; ··· 1911 1929 args->size = size; 1912 1930 1913 1931 err_free: 1914 - free_engines(e); 1932 + i915_sw_fence_complete(&e->fence); 1915 1933 return err; 1916 1934 } 1917 1935 ··· 2077 2095 static int clone_engines(struct i915_gem_context *dst, 2078 2096 struct i915_gem_context *src) 2079 2097 { 2080 - struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 2081 - struct i915_gem_engines *clone; 2098 + struct i915_gem_engines *clone, *e; 2082 2099 bool user_engines; 2083 2100 unsigned long n; 2101 + 2102 + e = __context_engines_await(src, &user_engines); 2103 + if (!e) 2104 + return -ENOENT; 2084 2105 2085 2106 clone = alloc_engines(e->num_engines); 2086 2107 if (!clone) ··· 2126 2141 } 2127 2142 } 2128 2143 clone->num_engines = n; 2129 - 2130 - user_engines = 
i915_gem_context_user_engines(src); 2131 - i915_gem_context_unlock_engines(src); 2144 + i915_sw_fence_complete(&e->fence); 2132 2145 2133 2146 /* Serialised by constructor */ 2134 2147 engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1)); ··· 2137 2154 return 0; 2138 2155 2139 2156 err_unlock: 2140 - i915_gem_context_unlock_engines(src); 2157 + i915_sw_fence_complete(&e->fence); 2141 2158 return -ENOMEM; 2142 2159 } 2143 2160
+113
drivers/gpu/drm/i915/gem/i915_gem_create.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2020 Intel Corporation 4 + */ 5 + 6 + #include "gem/i915_gem_ioctls.h" 7 + #include "gem/i915_gem_region.h" 8 + 9 + #include "i915_drv.h" 10 + 11 + static int 12 + i915_gem_create(struct drm_file *file, 13 + struct intel_memory_region *mr, 14 + u64 *size_p, 15 + u32 *handle_p) 16 + { 17 + struct drm_i915_gem_object *obj; 18 + u32 handle; 19 + u64 size; 20 + int ret; 21 + 22 + GEM_BUG_ON(!is_power_of_2(mr->min_page_size)); 23 + size = round_up(*size_p, mr->min_page_size); 24 + if (size == 0) 25 + return -EINVAL; 26 + 27 + /* For most of the ABI (e.g. mmap) we think in system pages */ 28 + GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); 29 + 30 + /* Allocate the new object */ 31 + obj = i915_gem_object_create_region(mr, size, 0); 32 + if (IS_ERR(obj)) 33 + return PTR_ERR(obj); 34 + 35 + GEM_BUG_ON(size != obj->base.size); 36 + 37 + ret = drm_gem_handle_create(file, &obj->base, &handle); 38 + /* drop reference from allocate - handle holds it now */ 39 + i915_gem_object_put(obj); 40 + if (ret) 41 + return ret; 42 + 43 + *handle_p = handle; 44 + *size_p = size; 45 + return 0; 46 + } 47 + 48 + int 49 + i915_gem_dumb_create(struct drm_file *file, 50 + struct drm_device *dev, 51 + struct drm_mode_create_dumb *args) 52 + { 53 + enum intel_memory_type mem_type; 54 + int cpp = DIV_ROUND_UP(args->bpp, 8); 55 + u32 format; 56 + 57 + switch (cpp) { 58 + case 1: 59 + format = DRM_FORMAT_C8; 60 + break; 61 + case 2: 62 + format = DRM_FORMAT_RGB565; 63 + break; 64 + case 4: 65 + format = DRM_FORMAT_XRGB8888; 66 + break; 67 + default: 68 + return -EINVAL; 69 + } 70 + 71 + /* have to work out size/pitch and return them */ 72 + args->pitch = ALIGN(args->width * cpp, 64); 73 + 74 + /* align stride to page size so that we can remap */ 75 + if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format, 76 + DRM_FORMAT_MOD_LINEAR)) 77 + args->pitch = ALIGN(args->pitch, 4096); 78 + 79 + if (args->pitch < args->width) 80 + return -EINVAL; 81 + 82 + args->size = mul_u32_u32(args->pitch, args->height); 83 + 84 + mem_type = INTEL_MEMORY_SYSTEM; 85 + if (HAS_LMEM(to_i915(dev))) 86 + mem_type = INTEL_MEMORY_LOCAL; 87 + 88 + return i915_gem_create(file, 89 + intel_memory_region_by_type(to_i915(dev), 90 + mem_type), 91 + &args->size, &args->handle); 92 + } 93 + 94 + /** 95 + * Creates a new mm object and returns a handle to it. 96 + * @dev: drm device pointer 97 + * @data: ioctl data blob 98 + * @file: drm file pointer 99 + */ 100 + int 101 + i915_gem_create_ioctl(struct drm_device *dev, void *data, 102 + struct drm_file *file) 103 + { 104 + struct drm_i915_private *i915 = to_i915(dev); 105 + struct drm_i915_gem_create *args = data; 106 + 107 + i915_gem_flush_free_objects(i915); 108 + 109 + return i915_gem_create(file, 110 + intel_memory_region_by_type(i915, 111 + INTEL_MEMORY_SYSTEM), 112 + &args->size, &args->handle); 113 + }
+53 -51
drivers/gpu/drm/i915/gem/i915_gem_domain.c
··· 5 5 */ 6 6 7 7 #include "display/intel_frontbuffer.h" 8 + #include "gt/intel_gt.h" 8 9 9 10 #include "i915_drv.h" 10 11 #include "i915_gem_clflush.h" ··· 16 15 #include "i915_gem_lmem.h" 17 16 #include "i915_gem_mman.h" 18 17 18 + static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) 19 + { 20 + return !(obj->cache_level == I915_CACHE_NONE || 21 + obj->cache_level == I915_CACHE_WT); 22 + } 23 + 24 + static void 25 + flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) 26 + { 27 + struct i915_vma *vma; 28 + 29 + assert_object_held(obj); 30 + 31 + if (!(obj->write_domain & flush_domains)) 32 + return; 33 + 34 + switch (obj->write_domain) { 35 + case I915_GEM_DOMAIN_GTT: 36 + spin_lock(&obj->vma.lock); 37 + for_each_ggtt_vma(vma, obj) { 38 + if (i915_vma_unset_ggtt_write(vma)) 39 + intel_gt_flush_ggtt_writes(vma->vm->gt); 40 + } 41 + spin_unlock(&obj->vma.lock); 42 + 43 + i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); 44 + break; 45 + 46 + case I915_GEM_DOMAIN_WC: 47 + wmb(); 48 + break; 49 + 50 + case I915_GEM_DOMAIN_CPU: 51 + i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 52 + break; 53 + 54 + case I915_GEM_DOMAIN_RENDER: 55 + if (gpu_write_needs_clflush(obj)) 56 + obj->cache_dirty = true; 57 + break; 58 + } 59 + 60 + obj->write_domain = 0; 61 + } 62 + 19 63 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) 20 64 { 21 65 /* 22 66 * We manually flush the CPU domain so that we can override and 23 67 * force the flush for the display, and perform it asyncrhonously. 24 68 */ 25 - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 69 + flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 26 70 if (obj->cache_dirty) 27 71 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); 28 72 obj->write_domain = 0; ··· 126 80 if (ret) 127 81 return ret; 128 82 129 - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); 83 + flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); 130 84 131 85 /* Serialise direct access to this object with the barriers for 132 86 * coherent writes from the GPU, by effectively invalidating the ··· 187 141 if (ret) 188 142 return ret; 189 143 190 - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); 144 + flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); 191 145 192 146 /* Serialise direct access to this object with the barriers for 193 147 * coherent writes from the GPU, by effectively invalidating the ··· 416 370 } 417 371 418 372 vma->display_alignment = max_t(u64, vma->display_alignment, alignment); 373 + i915_vma_mark_scanout(vma); 419 374 420 375 i915_gem_object_flush_if_display_locked(obj); 421 376 ··· 432 385 return ERR_PTR(ret); 433 386 434 387 return vma; 435 - } 436 - 437 - static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) 438 - { 439 - struct drm_i915_private *i915 = to_i915(obj->base.dev); 440 - struct i915_vma *vma; 441 - 442 - if (list_empty(&obj->vma.list)) 443 - return; 444 - 445 - mutex_lock(&i915->ggtt.vm.mutex); 446 - spin_lock(&obj->vma.lock); 447 - for_each_ggtt_vma(vma, obj) { 448 - if (!drm_mm_node_allocated(&vma->node)) 449 - continue; 450 - 451 - GEM_BUG_ON(vma->vm != &i915->ggtt.vm); 452 - list_move_tail(&vma->vm_link, &vma->vm->bound_list); 453 - } 454 - spin_unlock(&obj->vma.lock); 455 - mutex_unlock(&i915->ggtt.vm.mutex); 456 - 457 - if (i915_gem_object_is_shrinkable(obj)) { 458 - unsigned long flags; 459 - 460 - spin_lock_irqsave(&i915->mm.obj_lock, flags); 461 - 462 - if (obj->mm.madv == I915_MADV_WILLNEED && 463 - 
!atomic_read(&obj->mm.shrink_pin)) 464 - list_move_tail(&obj->mm.link, &i915->mm.shrink_list); 465 - 466 - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); 467 - } 468 - } 469 - 470 - void 471 - i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) 472 - { 473 - /* Bump the LRU to try and avoid premature eviction whilst flipping */ 474 - i915_gem_object_bump_inactive_ggtt(vma->obj); 475 - 476 - i915_vma_unpin(vma); 477 388 } 478 389 479 390 /** ··· 456 451 if (ret) 457 452 return ret; 458 453 459 - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 454 + flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 460 455 461 456 /* Flush the CPU cache if it's still invalid. */ 462 457 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { ··· 574 569 else 575 570 err = i915_gem_object_set_to_cpu_domain(obj, write_domain); 576 571 577 - /* And bump the LRU for this access */ 578 - i915_gem_object_bump_inactive_ggtt(obj); 579 - 580 572 i915_gem_object_unlock(obj); 581 573 582 574 if (write_domain) ··· 621 619 goto out; 622 620 } 623 621 624 - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 622 + flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 625 623 626 624 /* If we're not in the cpu read domain, set ourself into the gtt 627 625 * read domain and manually flush cachelines (if required). This ··· 672 670 goto out; 673 671 } 674 672 675 - i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 673 + flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 676 674 677 675 /* If we're not in the cpu write domain, set ourself into the 678 676 * gtt write domain and manually flush cachelines (as required).
+7 -6
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 1276 1276 int err; 1277 1277 1278 1278 if (!pool) { 1279 - pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE); 1279 + pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE, 1280 + cache->has_llc ? 1281 + I915_MAP_WB : 1282 + I915_MAP_WC); 1280 1283 if (IS_ERR(pool)) 1281 1284 return PTR_ERR(pool); 1282 1285 } ··· 1289 1286 if (err) 1290 1287 goto err_pool; 1291 1288 1292 - cmd = i915_gem_object_pin_map(pool->obj, 1293 - cache->has_llc ? 1294 - I915_MAP_FORCE_WB : 1295 - I915_MAP_FORCE_WC); 1289 + cmd = i915_gem_object_pin_map(pool->obj, pool->type); 1296 1290 if (IS_ERR(cmd)) { 1297 1291 err = PTR_ERR(cmd); 1298 1292 goto err_pool; ··· 2458 2458 return -EINVAL; 2459 2459 2460 2460 if (!pool) { 2461 - pool = intel_gt_get_buffer_pool(eb->engine->gt, len); 2461 + pool = intel_gt_get_buffer_pool(eb->engine->gt, len, 2462 + I915_MAP_WB); 2462 2463 if (IS_ERR(pool)) 2463 2464 return PTR_ERR(pool); 2464 2465 eb->batch_pool = pool;
+5 -10
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
··· 31 31 size, flags); 32 32 } 33 33 34 - struct drm_i915_gem_object * 35 - __i915_gem_lmem_object_create(struct intel_memory_region *mem, 36 - resource_size_t size, 37 - unsigned int flags) 34 + int __i915_gem_lmem_object_init(struct intel_memory_region *mem, 35 + struct drm_i915_gem_object *obj, 36 + resource_size_t size, 37 + unsigned int flags) 38 38 { 39 39 static struct lock_class_key lock_class; 40 40 struct drm_i915_private *i915 = mem->i915; 41 - struct drm_i915_gem_object *obj; 42 - 43 - obj = i915_gem_object_alloc(); 44 - if (!obj) 45 - return ERR_PTR(-ENOMEM); 46 41 47 42 drm_gem_private_object_init(&i915->drm, &obj->base, size); 48 43 i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class); ··· 48 53 49 54 i915_gem_object_init_memory_region(obj, mem, flags); 50 55 51 - return obj; 56 + return 0; 52 57 }
+4 -4
drivers/gpu/drm/i915/gem/i915_gem_lmem.h
··· 21 21 resource_size_t size, 22 22 unsigned int flags); 23 23 24 - struct drm_i915_gem_object * 25 - __i915_gem_lmem_object_create(struct intel_memory_region *mem, 26 - resource_size_t size, 27 - unsigned int flags); 24 + int __i915_gem_lmem_object_init(struct intel_memory_region *mem, 25 + struct drm_i915_gem_object *obj, 26 + resource_size_t size, 27 + unsigned int flags); 28 28 29 29 #endif /* !__I915_GEM_LMEM_H */
-47
drivers/gpu/drm/i915/gem/i915_gem_object.c
··· 25 25 #include <linux/sched/mm.h> 26 26 27 27 #include "display/intel_frontbuffer.h" 28 - #include "gt/intel_gt.h" 29 28 #include "i915_drv.h" 30 29 #include "i915_gem_clflush.h" 31 30 #include "i915_gem_context.h" ··· 310 311 */ 311 312 if (llist_add(&obj->freed, &i915->mm.free_list)) 312 313 queue_work(i915->wq, &i915->mm.free_work); 313 - } 314 - 315 - static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) 316 - { 317 - return !(obj->cache_level == I915_CACHE_NONE || 318 - obj->cache_level == I915_CACHE_WT); 319 - } 320 - 321 - void 322 - i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, 323 - unsigned int flush_domains) 324 - { 325 - struct i915_vma *vma; 326 - 327 - assert_object_held(obj); 328 - 329 - if (!(obj->write_domain & flush_domains)) 330 - return; 331 - 332 - switch (obj->write_domain) { 333 - case I915_GEM_DOMAIN_GTT: 334 - spin_lock(&obj->vma.lock); 335 - for_each_ggtt_vma(vma, obj) { 336 - if (i915_vma_unset_ggtt_write(vma)) 337 - intel_gt_flush_ggtt_writes(vma->vm->gt); 338 - } 339 - spin_unlock(&obj->vma.lock); 340 - 341 - i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU); 342 - break; 343 - 344 - case I915_GEM_DOMAIN_WC: 345 - wmb(); 346 - break; 347 - 348 - case I915_GEM_DOMAIN_CPU: 349 - i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 350 - break; 351 - 352 - case I915_GEM_DOMAIN_RENDER: 353 - if (gpu_write_needs_clflush(obj)) 354 - obj->cache_dirty = true; 355 - break; 356 - } 357 - 358 - obj->write_domain = 0; 359 314 } 360 315 361 316 void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+23 -13
drivers/gpu/drm/i915/gem/i915_gem_object.h
··· 188 188 } 189 189 190 190 static inline bool 191 + i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj) 192 + { 193 + return test_bit(I915_TILING_QUIRK_BIT, &obj->flags); 194 + } 195 + 196 + static inline void 197 + i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj) 198 + { 199 + set_bit(I915_TILING_QUIRK_BIT, &obj->flags); 200 + } 201 + 202 + static inline void 203 + i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj) 204 + { 205 + clear_bit(I915_TILING_QUIRK_BIT, &obj->flags); 206 + } 207 + 208 + static inline bool 191 209 i915_gem_object_type_has(const struct drm_i915_gem_object *obj, 192 210 unsigned long flags) 193 211 { ··· 402 384 void i915_gem_object_truncate(struct drm_i915_gem_object *obj); 403 385 void i915_gem_object_writeback(struct drm_i915_gem_object *obj); 404 386 405 - enum i915_map_type { 406 - I915_MAP_WB = 0, 407 - I915_MAP_WC, 408 - #define I915_MAP_OVERRIDE BIT(31) 409 - I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE, 410 - I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE, 411 - }; 412 - 413 387 /** 414 388 * i915_gem_object_pin_map - return a contiguous mapping of the entire object 415 389 * @obj: the object to map into kernel address space ··· 444 434 } 445 435 446 436 void __i915_gem_object_release_map(struct drm_i915_gem_object *obj); 447 - 448 - void 449 - i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, 450 - unsigned int flush_domains); 451 437 452 438 int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj, 453 439 unsigned int *needs_clflush); ··· 492 486 u32 alignment, 493 487 const struct i915_ggtt_view *view, 494 488 unsigned int flags); 495 - void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); 496 489 497 490 void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj); 498 491 void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); ··· 516 511 if (cpu_write_needs_clflush(obj)) 517 512 obj->cache_dirty = true; 518 513 } 514 + 515 + void i915_gem_fence_wait_priority(struct dma_fence *fence, 516 + const struct i915_sched_attr *attr); 519 517 520 518 int i915_gem_object_wait(struct drm_i915_gem_object *obj, 521 519 unsigned int flags, ··· 547 539 if (unlikely(rcu_access_pointer(obj->frontbuffer))) 548 540 __i915_gem_object_invalidate_frontbuffer(obj, origin); 549 541 } 542 + 543 + bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj); 550 544 551 545 #endif
+4 -4
drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
··· 35 35 count = div_u64(round_up(vma->size, block_size), block_size); 36 36 size = (1 + 8 * count) * sizeof(u32); 37 37 size = round_up(size, PAGE_SIZE); 38 - pool = intel_gt_get_buffer_pool(ce->engine->gt, size); 38 + pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC); 39 39 if (IS_ERR(pool)) { 40 40 err = PTR_ERR(pool); 41 41 goto out_pm; ··· 55 55 if (unlikely(err)) 56 56 goto out_put; 57 57 58 - cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC); 58 + cmd = i915_gem_object_pin_map(pool->obj, pool->type); 59 59 if (IS_ERR(cmd)) { 60 60 err = PTR_ERR(cmd); 61 61 goto out_unpin; ··· 257 257 count = div_u64(round_up(dst->size, block_size), block_size); 258 258 size = (1 + 11 * count) * sizeof(u32); 259 259 size = round_up(size, PAGE_SIZE); 260 - pool = intel_gt_get_buffer_pool(ce->engine->gt, size); 260 + pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC); 261 261 if (IS_ERR(pool)) { 262 262 err = PTR_ERR(pool); 263 263 goto out_pm; ··· 277 277 if (unlikely(err)) 278 278 goto out_put; 279 279 280 - cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC); 280 + cmd = i915_gem_object_pin_map(pool->obj, pool->type); 281 281 if (IS_ERR(cmd)) { 282 282 err = PTR_ERR(cmd); 283 283 goto out_unpin;
+11 -8
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
··· 67 67 const char *name; /* friendly name for debug, e.g. lockdep classes */ 68 68 }; 69 69 70 + enum i915_map_type { 71 + I915_MAP_WB = 0, 72 + I915_MAP_WC, 73 + #define I915_MAP_OVERRIDE BIT(31) 74 + I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE, 75 + I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE, 76 + }; 77 + 70 78 enum i915_mmap_type { 71 79 I915_MMAP_TYPE_GTT = 0, 72 80 I915_MMAP_TYPE_WC, ··· 150 142 */ 151 143 struct list_head obj_link; 152 144 153 - /** Stolen memory for this object, instead of being backed by shmem. */ 154 - struct drm_mm_node *stolen; 155 145 union { 156 146 struct rcu_head rcu; 157 147 struct llist_node freed; ··· 173 167 #define I915_BO_ALLOC_VOLATILE BIT(1) 174 168 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE) 175 169 #define I915_BO_READONLY BIT(2) 170 + #define I915_TILING_QUIRK_BIT 3 /* unknown swizzling; do not release! */ 176 171 177 172 /* 178 173 * Is the object to be mapped as read-only to the GPU ··· 282 275 * pages were last acquired. 283 276 */ 284 277 bool dirty:1; 285 - 286 - /** 287 - * This is set if the object has been pinned due to unknown 288 - * swizzling. 289 - */ 290 - bool quirked:1; 291 278 } mm; 292 279 293 280 /** Record of address bit 17 of each page at last unbind. */ ··· 295 294 struct i915_mmu_object *mmu_object; 296 295 struct work_struct *work; 297 296 } userptr; 297 + 298 + struct drm_mm_node *stolen; 298 299 299 300 unsigned long scratch; 300 301 u64 encode;
+11 -8
drivers/gpu/drm/i915/gem/i915_gem_pages.c
··· 16 16 { 17 17 struct drm_i915_private *i915 = to_i915(obj->base.dev); 18 18 unsigned long supported = INTEL_INFO(i915)->page_sizes; 19 + bool shrinkable; 19 20 int i; 20 21 21 22 lockdep_assert_held(&obj->mm.lock); ··· 39 38 40 39 obj->mm.pages = pages; 41 40 42 - if (i915_gem_object_is_tiled(obj) && 43 - i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 44 - GEM_BUG_ON(obj->mm.quirked); 45 - __i915_gem_object_pin_pages(obj); 46 - obj->mm.quirked = true; 47 - } 48 - 49 41 GEM_BUG_ON(!sg_page_sizes); 50 42 obj->mm.page_sizes.phys = sg_page_sizes; 51 43 ··· 57 63 } 58 64 GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); 59 65 60 - if (i915_gem_object_is_shrinkable(obj)) { 66 + shrinkable = i915_gem_object_is_shrinkable(obj); 67 + 68 + if (i915_gem_object_is_tiled(obj) && 69 + i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 70 + GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj)); 71 + i915_gem_object_set_tiling_quirk(obj); 72 + shrinkable = false; 73 + } 74 + 75 + if (shrinkable) { 61 76 struct list_head *list; 62 77 unsigned long flags; 63 78
+2 -2
drivers/gpu/drm/i915/gem/i915_gem_phys.c
··· 213 213 if (obj->ops == &i915_gem_phys_ops) 214 214 return 0; 215 215 216 - if (obj->ops != &i915_gem_shmem_ops) 216 + if (!i915_gem_object_is_shmem(obj)) 217 217 return -EINVAL; 218 218 219 219 err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); ··· 227 227 goto err_unlock; 228 228 } 229 229 230 - if (obj->mm.quirked) { 230 + if (i915_gem_object_has_tiling_quirk(obj)) { 231 231 err = -EFAULT; 232 232 goto err_unlock; 233 233 }
+14 -27
drivers/gpu/drm/i915/gem/i915_gem_pm.c
··· 11 11 12 12 #include "i915_drv.h" 13 13 14 + #if defined(CONFIG_X86) 15 + #include <asm/smp.h> 16 + #else 17 + #define wbinvd_on_all_cpus() \ 18 + pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__) 19 + #endif 20 + 14 21 void i915_gem_suspend(struct drm_i915_private *i915) 15 22 { 16 23 GEM_TRACE("%s\n", dev_name(i915->drm.dev)); ··· 39 32 i915_gem_drain_freed_objects(i915); 40 33 } 41 34 42 - static struct drm_i915_gem_object *first_mm_object(struct list_head *list) 43 - { 44 - return list_first_entry_or_null(list, 45 - struct drm_i915_gem_object, 46 - mm.link); 47 - } 48 - 49 35 void i915_gem_suspend_late(struct drm_i915_private *i915) 50 36 { 51 37 struct drm_i915_gem_object *obj; ··· 48 48 NULL 49 49 }, **phase; 50 50 unsigned long flags; 51 + bool flush = false; 51 52 52 53 /* 53 54 * Neither the BIOS, ourselves or any other kernel ··· 74 73 75 74 spin_lock_irqsave(&i915->mm.obj_lock, flags); 76 75 for (phase = phases; *phase; phase++) { 77 - LIST_HEAD(keep); 78 - 79 - while ((obj = first_mm_object(*phase))) { 80 - list_move_tail(&obj->mm.link, &keep); 81 - 82 - /* Beware the background _i915_gem_free_objects */ 83 - if (!kref_get_unless_zero(&obj->base.refcount)) 84 - continue; 85 - 86 - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); 87 - 88 - i915_gem_object_lock(obj, NULL); 89 - drm_WARN_ON(&i915->drm, 90 - i915_gem_object_set_to_gtt_domain(obj, false)); 91 - i915_gem_object_unlock(obj); 92 - i915_gem_object_put(obj); 93 - 94 - spin_lock_irqsave(&i915->mm.obj_lock, flags); 76 + list_for_each_entry(obj, *phase, mm.link) { 77 + if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) 78 + flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0; 79 + __start_cpu_write(obj); /* presume auto-hibernate */ 95 80 } 96 - 97 - list_splice_tail(&keep, *phase); 98 81 } 99 82 spin_unlock_irqrestore(&i915->mm.obj_lock, flags); 83 + if (flush) 84 + wbinvd_on_all_cpus(); 100 85 } 101 86 102 87 void i915_gem_resume(struct drm_i915_private *i915)
+13 -3
drivers/gpu/drm/i915/gem/i915_gem_region.c
··· 143 143 unsigned int flags) 144 144 { 145 145 struct drm_i915_gem_object *obj; 146 + int err; 146 147 147 148 /* 148 149 * NB: Our use of resource_size_t for the size stems from using struct ··· 174 173 if (overflows_type(size, obj->base.size)) 175 174 return ERR_PTR(-E2BIG); 176 175 177 - obj = mem->ops->create_object(mem, size, flags); 178 - if (!IS_ERR(obj)) 179 - trace_i915_gem_object_create(obj); 176 + obj = i915_gem_object_alloc(); 177 + if (!obj) 178 + return ERR_PTR(-ENOMEM); 180 179 180 + err = mem->ops->init_object(mem, obj, size, flags); 181 + if (err) 182 + goto err_object_free; 183 + 184 + trace_i915_gem_object_create(obj); 181 185 return obj; 186 + 187 + err_object_free: 188 + i915_gem_object_free(obj); 189 + return ERR_PTR(err); 182 190 }
+12 -16
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
··· 464 464 return 0; 465 465 } 466 466 467 - static struct drm_i915_gem_object * 468 - create_shmem(struct intel_memory_region *mem, 469 - resource_size_t size, 470 - unsigned int flags) 467 + static int shmem_object_init(struct intel_memory_region *mem, 468 + struct drm_i915_gem_object *obj, 469 + resource_size_t size, 470 + unsigned int flags) 471 471 { 472 472 static struct lock_class_key lock_class; 473 473 struct drm_i915_private *i915 = mem->i915; 474 - struct drm_i915_gem_object *obj; 475 474 struct address_space *mapping; 476 475 unsigned int cache_level; 477 476 gfp_t mask; 478 477 int ret; 479 478 480 - obj = i915_gem_object_alloc(); 481 - if (!obj) 482 - return ERR_PTR(-ENOMEM); 483 - 484 479 ret = __create_shmem(i915, &obj->base, size); 485 480 if (ret) 486 - goto fail; 481 + return ret; 487 482 488 483 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; 489 484 if (IS_I965GM(i915) || IS_I965G(i915)) { ··· 517 522 518 523 i915_gem_object_init_memory_region(obj, mem, 0); 519 524 520 - return obj; 521 - 522 - fail: 523 - i915_gem_object_free(obj); 524 - return ERR_PTR(ret); 525 + return 0; 525 526 } 526 527 527 528 struct drm_i915_gem_object * ··· 602 611 static const struct intel_memory_region_ops shmem_region_ops = { 603 612 .init = init_shmem, 604 613 .release = release_shmem, 605 - .create_object = create_shmem, 614 + .init_object = shmem_object_init, 606 615 }; 607 616 608 617 struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915) ··· 611 620 totalram_pages() << PAGE_SHIFT, 612 621 PAGE_SIZE, 0, 613 622 &shmem_region_ops); 623 + } 624 + 625 + bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj) 626 + { 627 + return obj->ops == &i915_gem_shmem_ops; 614 628 }
+36 -34
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
··· 621 621 .release = i915_gem_object_release_stolen, 622 622 }; 623 623 624 - static struct drm_i915_gem_object * 625 - __i915_gem_object_create_stolen(struct intel_memory_region *mem, 626 - struct drm_mm_node *stolen) 624 + static int __i915_gem_object_create_stolen(struct intel_memory_region *mem, 625 + struct drm_i915_gem_object *obj, 626 + struct drm_mm_node *stolen) 627 627 { 628 628 static struct lock_class_key lock_class; 629 - struct drm_i915_gem_object *obj; 630 629 unsigned int cache_level; 631 - int err = -ENOMEM; 632 - 633 - obj = i915_gem_object_alloc(); 634 - if (!obj) 635 - goto err; 630 + int err; 636 631 637 632 drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size); 638 633 i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class); ··· 639 644 640 645 err = i915_gem_object_pin_pages(obj); 641 646 if (err) 642 - goto cleanup; 647 + return err; 643 648 644 649 i915_gem_object_init_memory_region(obj, mem, 0); 645 650 646 - return obj; 647 - 648 - cleanup: 649 - i915_gem_object_free(obj); 650 - err: 651 - return ERR_PTR(err); 651 + return 0; 652 652 } 653 653 654 - static struct drm_i915_gem_object * 655 - _i915_gem_object_create_stolen(struct intel_memory_region *mem, 656 - resource_size_t size, 657 - unsigned int flags) 654 + static int _i915_gem_object_stolen_init(struct intel_memory_region *mem, 655 + struct drm_i915_gem_object *obj, 656 + resource_size_t size, 657 + unsigned int flags) 658 658 { 659 659 struct drm_i915_private *i915 = mem->i915; 660 - struct drm_i915_gem_object *obj; 661 660 struct drm_mm_node *stolen; 662 661 int ret; 663 662 664 663 if (!drm_mm_initialized(&i915->mm.stolen)) 665 - return ERR_PTR(-ENODEV); 664 + return -ENODEV; 666 665 667 666 if (size == 0) 668 - return ERR_PTR(-EINVAL); 667 + return -EINVAL; 669 668 670 669 stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); 671 670 if (!stolen) 672 - return ERR_PTR(-ENOMEM); 671 + return -ENOMEM; 673 672 674 673 ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096); 675 - if (ret) { 676 - obj = ERR_PTR(ret); 674 + if (ret) 677 675 goto err_free; 678 - } 679 676 680 - obj = __i915_gem_object_create_stolen(mem, stolen); 681 - if (IS_ERR(obj)) 677 + ret = __i915_gem_object_create_stolen(mem, obj, stolen); 678 + if (ret) 682 679 goto err_remove; 683 680 684 - return obj; 681 + return 0; 685 682 686 683 err_remove: 687 684 i915_gem_stolen_remove_node(i915, stolen); 688 685 err_free: 689 686 kfree(stolen); 690 - return obj; 687 + return ret; 691 688 } 692 689 693 690 struct drm_i915_gem_object * ··· 709 722 static const struct intel_memory_region_ops i915_region_stolen_ops = { 710 723 .init = init_stolen, 711 724 .release = release_stolen, 712 - .create_object = _i915_gem_object_create_stolen, 725 + .init_object = _i915_gem_object_stolen_init, 713 726 }; 714 727 715 728 struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915) ··· 758 771 goto err_free; 759 772 } 760 773 761 - obj = __i915_gem_object_create_stolen(mem, stolen); 762 - if (IS_ERR(obj)) 774 + obj = i915_gem_object_alloc(); 775 + if (!obj) { 776 + obj = ERR_PTR(-ENOMEM); 763 777 goto err_stolen; 778 + } 779 + 780 + ret = __i915_gem_object_create_stolen(mem, obj, stolen); 781 + if (ret) { 782 + obj = ERR_PTR(ret); 783 + goto err_object_free; 784 + } 764 785 765 786 i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); 766 787 return obj; 767 788 789 + err_object_free: 790 + i915_gem_object_free(obj); 768 791 err_stolen: 769 792 i915_gem_stolen_remove_node(i915, stolen); 770 793 err_free: 
771 794 kfree(stolen); 772 795 return obj; 796 + } 797 + 798 + bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj) 799 + { 800 + return obj->ops == &i915_gem_object_stolen_ops; 773 801 }
+2
drivers/gpu/drm/i915/gem/i915_gem_stolen.h
··· 30 30 resource_size_t stolen_offset, 31 31 resource_size_t size); 32 32 33 + bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj); 34 + 33 35 #define I915_GEM_STOLEN_BIAS SZ_128K 34 36 35 37 #endif /* __I915_GEM_STOLEN_H__ */
+6 -6
drivers/gpu/drm/i915/gem/i915_gem_tiling.c
··· 270 270 obj->mm.madv == I915_MADV_WILLNEED && 271 271 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 272 272 if (tiling == I915_TILING_NONE) { 273 - GEM_BUG_ON(!obj->mm.quirked); 274 - __i915_gem_object_unpin_pages(obj); 275 - obj->mm.quirked = false; 273 + GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj)); 274 + i915_gem_object_clear_tiling_quirk(obj); 275 + i915_gem_object_make_shrinkable(obj); 276 276 } 277 277 if (!i915_gem_object_is_tiled(obj)) { 278 - GEM_BUG_ON(obj->mm.quirked); 279 - __i915_gem_object_pin_pages(obj); 280 - obj->mm.quirked = true; 278 + GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj)); 279 + i915_gem_object_make_unshrinkable(obj); 280 + i915_gem_object_set_tiling_quirk(obj); 281 281 } 282 282 } 283 283 mutex_unlock(&obj->mm.lock);
+33 -13
drivers/gpu/drm/i915/gem/i915_gem_wait.c
··· 5 5 */ 6 6 7 7 #include <linux/dma-fence-array.h> 8 + #include <linux/dma-fence-chain.h> 8 9 #include <linux/jiffies.h> 9 10 10 11 #include "gt/intel_engine.h" ··· 45 44 unsigned int count, i; 46 45 int ret; 47 46 48 - ret = dma_resv_get_fences_rcu(resv, 49 - &excl, &count, &shared); 47 + ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared); 50 48 if (ret) 51 49 return ret; 52 50 ··· 91 91 return timeout; 92 92 } 93 93 94 - static void __fence_set_priority(struct dma_fence *fence, 95 - const struct i915_sched_attr *attr) 94 + static void fence_set_priority(struct dma_fence *fence, 95 + const struct i915_sched_attr *attr) 96 96 { 97 97 struct i915_request *rq; 98 98 struct intel_engine_cs *engine; ··· 103 103 rq = to_request(fence); 104 104 engine = rq->engine; 105 105 106 - local_bh_disable(); 107 106 rcu_read_lock(); /* RCU serialisation for set-wedged protection */ 108 107 if (engine->schedule) 109 108 engine->schedule(rq, attr); 110 109 rcu_read_unlock(); 111 - local_bh_enable(); /* kick the tasklets if queues were reprioritised */ 112 110 } 113 111 114 - static void fence_set_priority(struct dma_fence *fence, 115 - const struct i915_sched_attr *attr) 112 + static inline bool __dma_fence_is_chain(const struct dma_fence *fence) 116 113 { 114 + return fence->ops == &dma_fence_chain_ops; 115 + } 116 + 117 + void i915_gem_fence_wait_priority(struct dma_fence *fence, 118 + const struct i915_sched_attr *attr) 119 + { 120 + if (dma_fence_is_signaled(fence)) 121 + return; 122 + 123 + local_bh_disable(); 124 + 117 125 /* Recurse once into a fence-array */ 118 126 if (dma_fence_is_array(fence)) { 119 127 struct dma_fence_array *array = to_dma_fence_array(fence); 120 128 int i; 121 129 122 130 for (i = 0; i < array->num_fences; i++) 123 - __fence_set_priority(array->fences[i], attr); 131 + fence_set_priority(array->fences[i], attr); 132 + } else if (__dma_fence_is_chain(fence)) { 133 + struct dma_fence *iter; 134 + 135 + /* The chain is ordered; if we boost the last, we boost all */ 136 + dma_fence_chain_for_each(iter, fence) { 137 + fence_set_priority(to_dma_fence_chain(iter)->fence, 138 + attr); 139 + break; 140 + } 141 + dma_fence_put(iter); 124 142 } else { 125 - __fence_set_priority(fence, attr); 143 + fence_set_priority(fence, attr); 126 144 } 145 + 146 + local_bh_enable(); /* kick the tasklets if queues were reprioritised */ 127 147 } 128 148 129 149 int ··· 159 139 int ret; 160 140 161 141 ret = dma_resv_get_fences_rcu(obj->base.resv, 162 - &excl, &count, &shared); 142 + &excl, &count, &shared); 163 143 if (ret) 164 144 return ret; 165 145 166 146 for (i = 0; i < count; i++) { 167 - fence_set_priority(shared[i], attr); 147 + i915_gem_fence_wait_priority(shared[i], attr); 168 148 dma_fence_put(shared[i]); 169 149 } 170 150 ··· 174 154 } 175 155 176 156 if (excl) { 177 - fence_set_priority(excl, attr); 157 + i915_gem_fence_wait_priority(excl, attr); 178 158 dma_fence_put(excl); 179 159 } 180 160 return 0;
+3 -12
drivers/gpu/drm/i915/gt/gen6_ppgtt.c
··· 12 12 #include "intel_gt.h" 13 13 14 14 /* Write pde (index) from the page directory @pd to the page table @pt */ 15 - static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt, 16 - const unsigned int pde, 17 - const struct i915_page_table *pt) 15 + static void gen6_write_pde(const struct gen6_ppgtt *ppgtt, 16 + const unsigned int pde, 17 + const struct i915_page_table *pt) 18 18 { 19 19 dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]); 20 20 ··· 27 27 { 28 28 struct drm_i915_private *i915 = gt->i915; 29 29 struct intel_uncore *uncore = gt->uncore; 30 - struct intel_engine_cs *engine; 31 - enum intel_engine_id id; 32 30 u32 ecochk; 33 31 34 32 intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B); ··· 39 41 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; 40 42 } 41 43 intel_uncore_write(uncore, GAM_ECOCHK, ecochk); 42 - 43 - for_each_engine(engine, gt, id) { 44 - /* GFX_MODE is per-ring on gen7+ */ 45 - ENGINE_WRITE(engine, 46 - RING_MODE_GEN7, 47 - _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 48 - } 49 44 } 50 45 51 46 void gen6_ppgtt_enable(struct intel_gt *gt)
+19 -4
drivers/gpu/drm/i915/gt/gen7_renderclear.c
··· 40 40 u32 size; 41 41 }; 42 42 43 - static inline int num_primitives(const struct batch_vals *bv) 43 + static int num_primitives(const struct batch_vals *bv) 44 44 { 45 45 /* 46 46 * We need to saturate the GPU with work in order to dispatch ··· 353 353 354 354 static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch) 355 355 { 356 - u32 *cs = batch_alloc_items(batch, 0, 8); 356 + u32 *cs = batch_alloc_items(batch, 0, 10); 357 357 358 358 /* ivb: Stall before STATE_CACHE_INVALIDATE */ 359 - *cs++ = GFX_OP_PIPE_CONTROL(4); 359 + *cs++ = GFX_OP_PIPE_CONTROL(5); 360 360 *cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD | 361 361 PIPE_CONTROL_CS_STALL; 362 362 *cs++ = 0; 363 363 *cs++ = 0; 364 + *cs++ = 0; 364 365 365 - *cs++ = GFX_OP_PIPE_CONTROL(4); 366 + *cs++ = GFX_OP_PIPE_CONTROL(5); 366 367 *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE; 368 + *cs++ = 0; 367 369 *cs++ = 0; 368 370 *cs++ = 0; 369 371 ··· 392 390 &cb_kernel_ivb, 393 391 desc_count); 394 392 393 + /* Reset inherited context registers */ 394 + gen7_emit_pipeline_invalidate(&cmds); 395 + batch_add(&cmds, MI_LOAD_REGISTER_IMM(2)); 396 + batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7)); 397 + batch_add(&cmds, 0xffff0000); 398 + batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1)); 399 + batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); 400 + gen7_emit_pipeline_invalidate(&cmds); 401 + gen7_emit_pipeline_flush(&cmds); 402 + 403 + /* Switch to the media pipeline and our base address */ 395 404 gen7_emit_pipeline_invalidate(&cmds); 396 405 batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA); 397 406 batch_add(&cmds, MI_NOOP); ··· 412 399 gen7_emit_state_base_address(&cmds, descriptors); 413 400 gen7_emit_pipeline_invalidate(&cmds); 414 401 402 + /* Set the clear-residual kernel state */ 415 403 gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0); 416 404 gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count); 417 405 406 + /* Execute the kernel on all HW threads */ 418 407 for (i = 0; i < num_primitives(bv); i++) 419 408 gen7_emit_media_object(&cmds, i); 420 409
+3 -1
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
··· 330 330 return 0; 331 331 } 332 332 333 - static inline u32 preempt_address(struct intel_engine_cs *engine) 333 + static u32 preempt_address(struct intel_engine_cs *engine) 334 334 { 335 335 return (i915_ggtt_offset(engine->status_page.vma) + 336 336 I915_GEM_HWS_PREEMPT_ADDR); ··· 488 488 489 489 static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs) 490 490 { 491 + *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */ 491 492 *cs++ = MI_SEMAPHORE_WAIT | 492 493 MI_SEMAPHORE_GLOBAL_GTT | 493 494 MI_SEMAPHORE_POLL | ··· 496 495 *cs++ = 0; 497 496 *cs++ = preempt_address(rq->engine); 498 497 *cs++ = 0; 498 + *cs++ = MI_NOOP; 499 499 500 500 return cs; 501 501 }
+6 -7
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
··· 109 109 110 110 #define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt) 111 111 112 - static inline unsigned int 112 + static unsigned int 113 113 gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx) 114 114 { 115 115 const int shift = gen8_pd_shift(lvl); ··· 125 125 return i915_pde_index(end, shift) - *idx; 126 126 } 127 127 128 - static inline bool gen8_pd_contains(u64 start, u64 end, int lvl) 128 + static bool gen8_pd_contains(u64 start, u64 end, int lvl) 129 129 { 130 130 const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); 131 131 ··· 133 133 return (start ^ end) & mask && (start & ~mask) == 0; 134 134 } 135 135 136 - static inline unsigned int gen8_pt_count(u64 start, u64 end) 136 + static unsigned int gen8_pt_count(u64 start, u64 end) 137 137 { 138 138 GEM_BUG_ON(start >= end); 139 139 if ((start ^ end) >> gen8_pd_shift(1)) ··· 142 142 return end - start; 143 143 } 144 144 145 - static inline unsigned int 146 - gen8_pd_top_count(const struct i915_address_space *vm) 145 + static unsigned int gen8_pd_top_count(const struct i915_address_space *vm) 147 146 { 148 147 unsigned int shift = __gen8_pte_shift(vm->top); 149 148 return (vm->total + (1ull << shift) - 1) >> shift; 150 149 } 151 150 152 - static inline struct i915_page_directory * 151 + static struct i915_page_directory * 153 152 gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx) 154 153 { 155 154 struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm); ··· 159 160 return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top)); 160 161 } 161 162 162 - static inline struct i915_page_directory * 163 + static struct i915_page_directory * 163 164 gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr) 164 165 { 165 166 return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
+7 -6
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
··· 453 453 { 454 454 struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs; 455 455 struct intel_context *ce = rq->context; 456 - unsigned long flags; 457 456 bool release; 458 457 459 - if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) 458 + spin_lock(&ce->signal_lock); 459 + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { 460 + spin_unlock(&ce->signal_lock); 460 461 return; 462 + } 461 463 462 - spin_lock_irqsave(&ce->signal_lock, flags); 463 464 list_del_rcu(&rq->signal_link); 464 465 release = remove_signaling_context(b, ce); 465 - spin_unlock_irqrestore(&ce->signal_lock, flags); 466 + spin_unlock(&ce->signal_lock); 466 467 if (release) 467 468 intel_context_put(ce); 468 469 ··· 518 517 list_for_each_entry_rcu(rq, &ce->signals, signal_link) 519 518 drm_printf(p, "\t[%llx:%llx%s] @ %dms\n", 520 519 rq->fence.context, rq->fence.seqno, 521 - i915_request_completed(rq) ? "!" : 522 - i915_request_started(rq) ? "*" : 520 + __i915_request_is_complete(rq) ? "!" : 521 + __i915_request_has_started(rq) ? "*" : 523 522 "", 524 523 jiffies_to_msecs(jiffies - rq->emitted_jiffies)); 525 524 }
+8 -8
drivers/gpu/drm/i915/gt/intel_engine_cs.c
··· 342 342 engine->schedule = NULL; 343 343 344 344 ewma__engine_latency_init(&engine->latency); 345 - seqlock_init(&engine->stats.lock); 345 + seqcount_init(&engine->stats.lock); 346 346 347 347 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); 348 348 ··· 1676 1676 ktime_to_ms(intel_engine_get_busy_time(engine, 1677 1677 &dummy))); 1678 1678 drm_printf(m, "\tForcewake: %x domains, %d active\n", 1679 - engine->fw_domain, atomic_read(&engine->fw_active)); 1679 + engine->fw_domain, READ_ONCE(engine->fw_active)); 1680 1680 1681 1681 rcu_read_lock(); 1682 1682 rq = READ_ONCE(engine->heartbeat.systole); ··· 1754 1754 * add it to the total. 1755 1755 */ 1756 1756 *now = ktime_get(); 1757 - if (atomic_read(&engine->stats.active)) 1757 + if (READ_ONCE(engine->stats.active)) 1758 1758 total = ktime_add(total, ktime_sub(*now, engine->stats.start)); 1759 1759 1760 1760 return total; ··· 1773 1773 ktime_t total; 1774 1774 1775 1775 do { 1776 - seq = read_seqbegin(&engine->stats.lock); 1776 + seq = read_seqcount_begin(&engine->stats.lock); 1777 1777 total = __intel_engine_get_busy_time(engine, now); 1778 - } while (read_seqretry(&engine->stats.lock, seq)); 1778 + } while (read_seqcount_retry(&engine->stats.lock, seq)); 1779 1779 1780 1780 return total; 1781 1781 } ··· 1811 1811 struct intel_timeline *tl = request->context->timeline; 1812 1812 1813 1813 list_for_each_entry_from_reverse(request, &tl->requests, link) { 1814 - if (i915_request_completed(request)) 1814 + if (__i915_request_is_complete(request)) 1815 1815 break; 1816 1816 1817 1817 active = request; ··· 1822 1822 return active; 1823 1823 1824 1824 list_for_each_entry(request, &engine->active.requests, sched.link) { 1825 - if (i915_request_completed(request)) 1825 + if (__i915_request_is_complete(request)) 1826 1826 continue; 1827 1827 1828 - if (!i915_request_started(request)) 1828 + if (!__i915_request_has_started(request)) 1829 1829 continue; 1830 1830 1831 1831 /* More than one preemptible request may match! */
+6 -6
drivers/gpu/drm/i915/gt/intel_engine_pm.c
··· 79 79 80 80 #if IS_ENABLED(CONFIG_LOCKDEP) 81 81 82 - static inline unsigned long __timeline_mark_lock(struct intel_context *ce) 82 + static unsigned long __timeline_mark_lock(struct intel_context *ce) 83 83 { 84 84 unsigned long flags; 85 85 ··· 89 89 return flags; 90 90 } 91 91 92 - static inline void __timeline_mark_unlock(struct intel_context *ce, 93 - unsigned long flags) 92 + static void __timeline_mark_unlock(struct intel_context *ce, 93 + unsigned long flags) 94 94 { 95 95 mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_); 96 96 local_irq_restore(flags); ··· 98 98 99 99 #else 100 100 101 - static inline unsigned long __timeline_mark_lock(struct intel_context *ce) 101 + static unsigned long __timeline_mark_lock(struct intel_context *ce) 102 102 { 103 103 return 0; 104 104 } 105 105 106 - static inline void __timeline_mark_unlock(struct intel_context *ce, 107 - unsigned long flags) 106 + static void __timeline_mark_unlock(struct intel_context *ce, 107 + unsigned long flags) 108 108 { 109 109 } 110 110
+60
drivers/gpu/drm/i915/gt/intel_engine_stats.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2020 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_ENGINE_STATS_H__ 7 + #define __INTEL_ENGINE_STATS_H__ 8 + 9 + #include <linux/atomic.h> 10 + #include <linux/ktime.h> 11 + #include <linux/seqlock.h> 12 + 13 + #include "i915_gem.h" /* GEM_BUG_ON */ 14 + #include "intel_engine.h" 15 + 16 + static inline void intel_engine_context_in(struct intel_engine_cs *engine) 17 + { 18 + unsigned long flags; 19 + 20 + if (engine->stats.active) { 21 + engine->stats.active++; 22 + return; 23 + } 24 + 25 + /* The writer is serialised; but the pmu reader may be from hardirq */ 26 + local_irq_save(flags); 27 + write_seqcount_begin(&engine->stats.lock); 28 + 29 + engine->stats.start = ktime_get(); 30 + engine->stats.active++; 31 + 32 + write_seqcount_end(&engine->stats.lock); 33 + local_irq_restore(flags); 34 + 35 + GEM_BUG_ON(!engine->stats.active); 36 + } 37 + 38 + static inline void intel_engine_context_out(struct intel_engine_cs *engine) 39 + { 40 + unsigned long flags; 41 + 42 + GEM_BUG_ON(!engine->stats.active); 43 + if (engine->stats.active > 1) { 44 + engine->stats.active--; 45 + return; 46 + } 47 + 48 + local_irq_save(flags); 49 + write_seqcount_begin(&engine->stats.lock); 50 + 51 + engine->stats.active--; 52 + engine->stats.total = 53 + ktime_add(engine->stats.total, 54 + ktime_sub(ktime_get(), engine->stats.start)); 55 + 56 + write_seqcount_end(&engine->stats.lock); 57 + local_irq_restore(flags); 58 + } 59 + 60 + #endif /* __INTEL_ENGINE_STATS_H__ */
+3 -3
drivers/gpu/drm/i915/gt/intel_engine_types.h
··· 319 319 * as possible. 320 320 */ 321 321 enum forcewake_domains fw_domain; 322 - atomic_t fw_active; 322 + unsigned int fw_active; 323 323 324 324 unsigned long context_tag; 325 325 ··· 516 516 /** 517 517 * @active: Number of contexts currently scheduled in. 518 518 */ 519 - atomic_t active; 519 + unsigned int active; 520 520 521 521 /** 522 522 * @lock: Lock protecting the below fields. 523 523 */ 524 - seqlock_t lock; 524 + seqcount_t lock; 525 525 526 526 /** 527 527 * @total: Total time this engine was busy.
+56 -89
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
··· 115 115 #include "intel_breadcrumbs.h" 116 116 #include "intel_context.h" 117 117 #include "intel_engine_pm.h" 118 + #include "intel_engine_stats.h" 118 119 #include "intel_execlists_submission.h" 119 120 #include "intel_gt.h" 120 121 #include "intel_gt_pm.h" ··· 231 230 return __active_request(tl, rq, 0); 232 231 } 233 232 234 - static inline void 235 - ring_set_paused(const struct intel_engine_cs *engine, int state) 233 + static void ring_set_paused(const struct intel_engine_cs *engine, int state) 236 234 { 237 235 /* 238 236 * We inspect HWS_PREEMPT with a semaphore inside ··· 244 244 wmb(); 245 245 } 246 246 247 - static inline struct i915_priolist *to_priolist(struct rb_node *rb) 247 + static struct i915_priolist *to_priolist(struct rb_node *rb) 248 248 { 249 249 return rb_entry(rb, struct i915_priolist, node); 250 250 } 251 251 252 - static inline int rq_prio(const struct i915_request *rq) 252 + static int rq_prio(const struct i915_request *rq) 253 253 { 254 254 return READ_ONCE(rq->sched.attr.priority); 255 255 } ··· 299 299 return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN; 300 300 } 301 301 302 - static inline bool need_preempt(const struct intel_engine_cs *engine, 303 - const struct i915_request *rq) 302 + static bool need_preempt(const struct intel_engine_cs *engine, 303 + const struct i915_request *rq) 304 304 { 305 305 int last_prio; 306 306 ··· 351 351 queue_prio(&engine->execlists)) > last_prio; 352 352 } 353 353 354 - __maybe_unused static inline bool 354 + __maybe_unused static bool 355 355 assert_priority_queue(const struct i915_request *prev, 356 356 const struct i915_request *next) 357 357 { ··· 418 418 return __unwind_incomplete_requests(engine); 419 419 } 420 420 421 - static inline void 421 + static void 422 422 execlists_context_status_change(struct i915_request *rq, unsigned long status) 423 423 { 424 424 /* ··· 430 430 431 431 atomic_notifier_call_chain(&rq->engine->context_status_notifier, 432 432 status, rq); 433 - } 434 - 435 - static void intel_engine_context_in(struct intel_engine_cs *engine) 436 - { 437 - unsigned long flags; 438 - 439 - if (atomic_add_unless(&engine->stats.active, 1, 0)) 440 - return; 441 - 442 - write_seqlock_irqsave(&engine->stats.lock, flags); 443 - if (!atomic_add_unless(&engine->stats.active, 1, 0)) { 444 - engine->stats.start = ktime_get(); 445 - atomic_inc(&engine->stats.active); 446 - } 447 - write_sequnlock_irqrestore(&engine->stats.lock, flags); 448 - } 449 - 450 - static void intel_engine_context_out(struct intel_engine_cs *engine) 451 - { 452 - unsigned long flags; 453 - 454 - GEM_BUG_ON(!atomic_read(&engine->stats.active)); 455 - 456 - if (atomic_add_unless(&engine->stats.active, -1, 1)) 457 - return; 458 - 459 - write_seqlock_irqsave(&engine->stats.lock, flags); 460 - if (atomic_dec_and_test(&engine->stats.active)) { 461 - engine->stats.total = 462 - ktime_add(engine->stats.total, 463 - ktime_sub(ktime_get(), engine->stats.start)); 464 - } 465 - write_sequnlock_irqrestore(&engine->stats.lock, flags); 466 433 } 467 434 468 435 static void reset_active(struct i915_request *rq, ··· 470 503 ce->lrc.lrca = lrc_update_regs(ce, engine, head); 471 504 } 472 505 473 - static inline struct intel_engine_cs * 506 + static struct intel_engine_cs * 474 507 __execlists_schedule_in(struct i915_request *rq) 475 508 { 476 509 struct intel_engine_cs * const engine = rq->engine; ··· 506 539 ce->lrc.ccid |= engine->execlists.ccid; 507 540 508 541 __intel_gt_pm_get(engine->gt); 509 - if (engine->fw_domain && 
!atomic_fetch_inc(&engine->fw_active)) 542 + if (engine->fw_domain && !engine->fw_active++) 510 543 intel_uncore_forcewake_get(engine->uncore, engine->fw_domain); 511 544 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); 512 545 intel_engine_context_in(engine); ··· 516 549 return engine; 517 550 } 518 551 519 - static inline void execlists_schedule_in(struct i915_request *rq, int idx) 552 + static void execlists_schedule_in(struct i915_request *rq, int idx) 520 553 { 521 554 struct intel_context * const ce = rq->context; 522 555 struct intel_engine_cs *old; ··· 575 608 tasklet_hi_schedule(&ve->base.execlists.tasklet); 576 609 } 577 610 578 - static inline void __execlists_schedule_out(struct i915_request *rq) 611 + static void __execlists_schedule_out(struct i915_request * const rq, 612 + struct intel_context * const ce) 579 613 { 580 - struct intel_context * const ce = rq->context; 581 614 struct intel_engine_cs * const engine = rq->engine; 582 615 unsigned int ccid; 583 616 ··· 588 621 */ 589 622 590 623 CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid); 624 + GEM_BUG_ON(ce->inflight != engine); 591 625 592 626 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 593 627 lrc_check_regs(ce, engine, "after"); ··· 613 645 lrc_update_runtime(ce); 614 646 intel_engine_context_out(engine); 615 647 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); 616 - if (engine->fw_domain && !atomic_dec_return(&engine->fw_active)) 648 + if (engine->fw_domain && !--engine->fw_active) 617 649 intel_uncore_forcewake_put(engine->uncore, engine->fw_domain); 618 650 intel_gt_pm_put_async(engine->gt); 619 651 ··· 628 660 */ 629 661 if (ce->engine != engine) 630 662 kick_siblings(rq, ce); 663 + 664 + WRITE_ONCE(ce->inflight, NULL); 665 + intel_context_put(ce); 631 666 } 632 667 633 - static inline void 634 - execlists_schedule_out(struct i915_request *rq) 668 + static inline void execlists_schedule_out(struct i915_request *rq) 635 669 { 636 670 struct intel_context * const ce = rq->context; 637 671 ··· 641 671 642 672 GEM_BUG_ON(!ce->inflight); 643 673 ce->inflight = ptr_dec(ce->inflight); 644 - if (!__intel_context_inflight_count(ce->inflight)) { 645 - GEM_BUG_ON(ce->inflight != rq->engine); 646 - __execlists_schedule_out(rq); 647 - WRITE_ONCE(ce->inflight, NULL); 648 - intel_context_put(ce); 649 - } 674 + if (!__intel_context_inflight_count(ce->inflight)) 675 + __execlists_schedule_out(rq, ce); 650 676 651 677 i915_request_put(rq); 652 678 } ··· 694 728 return desc; 695 729 } 696 730 697 - static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) 731 + static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) 698 732 { 699 733 if (execlists->ctrl_reg) { 700 734 writel(lower_32_bits(desc), execlists->submit_reg + port * 2); ··· 723 757 return buf; 724 758 } 725 759 726 - static __maybe_unused void 760 + static __maybe_unused noinline void 727 761 trace_ports(const struct intel_engine_execlists *execlists, 728 762 const char *msg, 729 763 struct i915_request * const *ports) ··· 740 774 dump_port(p1, sizeof(p1), ", ", ports[1])); 741 775 } 742 776 743 - static inline bool 777 + static bool 744 778 reset_in_progress(const struct intel_engine_execlists *execlists) 745 779 { 746 780 return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); 747 781 } 748 782 749 - static __maybe_unused bool 783 + static __maybe_unused noinline bool 750 784 assert_pending_valid(const struct intel_engine_execlists *execlists, 751 785 const char *msg) 752 
786 { ··· 1224 1258 active_preempt_timeout(engine, rq)); 1225 1259 } 1226 1260 1261 + static bool completed(const struct i915_request *rq) 1262 + { 1263 + if (i915_request_has_sentinel(rq)) 1264 + return false; 1265 + 1266 + return __i915_request_is_complete(rq); 1267 + } 1268 + 1227 1269 static void execlists_dequeue(struct intel_engine_cs *engine) 1228 1270 { 1229 1271 struct intel_engine_execlists * const execlists = &engine->execlists; 1230 1272 struct i915_request **port = execlists->pending; 1231 1273 struct i915_request ** const last_port = port + execlists->port_mask; 1232 - struct i915_request *last = *execlists->active; 1274 + struct i915_request *last, * const *active; 1233 1275 struct virtual_engine *ve; 1234 1276 struct rb_node *rb; 1235 1277 bool submit = false; ··· 1274 1300 * i.e. we will retrigger preemption following the ack in case 1275 1301 * of trouble. 1276 1302 * 1277 - * In theory we can skip over completed contexts that have not 1278 - * yet been processed by events (as those events are in flight): 1279 - * 1280 - * while ((last = *active) && i915_request_completed(last)) 1281 - * active++; 1282 - * 1283 - * However, the GPU cannot handle this as it will ultimately 1284 - * find itself trying to jump back into a context it has just 1285 - * completed and barf. 1286 1303 */ 1304 + active = execlists->active; 1305 + while ((last = *active) && completed(last)) 1306 + active++; 1287 1307 1288 1308 if (last) { 1289 - if (__i915_request_is_complete(last)) { 1290 - goto check_secondary; 1291 - } else if (need_preempt(engine, last)) { 1309 + if (need_preempt(engine, last)) { 1292 1310 ENGINE_TRACE(engine, 1293 1311 "preempting last=%llx:%lld, prio=%d, hint=%d\n", 1294 1312 last->fence.context, ··· 1359 1393 * we hopefully coalesce several updates into a single 1360 1394 * submission. 1361 1395 */ 1362 - check_secondary: 1363 - if (!list_is_last(&last->sched.link, 1364 - &engine->active.requests)) { 1396 + if (active[1]) { 1365 1397 /* 1366 1398 * Even if ELSP[1] is occupied and not worthy 1367 1399 * of timeslices, our queue might be. ··· 1560 1596 * of ordered contexts. 1561 1597 */ 1562 1598 if (submit && 1563 - memcmp(execlists->active, 1599 + memcmp(active, 1564 1600 execlists->pending, 1565 1601 (port - execlists->pending) * sizeof(*port))) { 1566 1602 *port = NULL; ··· 1568 1604 execlists_schedule_in(*port, port - execlists->pending); 1569 1605 1570 1606 WRITE_ONCE(execlists->yield, -1); 1571 - set_preempt_timeout(engine, *execlists->active); 1607 + set_preempt_timeout(engine, *active); 1572 1608 execlists_submit_ports(engine); 1573 1609 } else { 1574 1610 ring_set_paused(engine, 0); ··· 1585 1621 local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */ 1586 1622 } 1587 1623 1588 - static inline void clear_ports(struct i915_request **ports, int count) 1624 + static void clear_ports(struct i915_request **ports, int count) 1589 1625 { 1590 1626 memset_p((void **)ports, NULL, count); 1591 1627 } 1592 1628 1593 - static inline void 1629 + static void 1594 1630 copy_ports(struct i915_request **dst, struct i915_request **src, int count) 1595 1631 { 1596 1632 /* A memcpy_p() would be very useful here! 
*/ ··· 1624 1660 return inactive; 1625 1661 } 1626 1662 1627 - static inline void 1628 - invalidate_csb_entries(const u64 *first, const u64 *last) 1663 + static void invalidate_csb_entries(const u64 *first, const u64 *last) 1629 1664 { 1630 1665 clflush((void *)first); 1631 1666 clflush((void *)last); ··· 1656 1693 * bits 47-57: sw context id of the lrc the GT switched away from 1657 1694 * bits 58-63: sw counter of the lrc the GT switched away from 1658 1695 */ 1659 - static inline bool gen12_csb_parse(const u64 csb) 1696 + static bool gen12_csb_parse(const u64 csb) 1660 1697 { 1661 1698 bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb)); 1662 1699 bool new_queue = ··· 1683 1720 return false; 1684 1721 } 1685 1722 1686 - static inline bool gen8_csb_parse(const u64 csb) 1723 + static bool gen8_csb_parse(const u64 csb) 1687 1724 { 1688 1725 return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); 1689 1726 } ··· 1722 1759 return entry; 1723 1760 } 1724 1761 1725 - static inline u64 1726 - csb_read(const struct intel_engine_cs *engine, u64 * const csb) 1762 + static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb) 1727 1763 { 1728 1764 u64 entry = READ_ONCE(*csb); 1729 1765 ··· 1988 2026 struct i915_request *w = 1989 2027 container_of(p->waiter, typeof(*w), sched); 1990 2028 2029 + if (p->flags & I915_DEPENDENCY_WEAK) 2030 + continue; 2031 + 1991 2032 /* Leave semaphores spinning on the other engines */ 1992 2033 if (w->engine != rq->engine) 1993 2034 continue; ··· 2088 2123 for_each_waiter(p, rq) { 2089 2124 struct i915_request *w = 2090 2125 container_of(p->waiter, typeof(*w), sched); 2126 + 2127 + if (p->flags & I915_DEPENDENCY_WEAK) 2128 + continue; 2091 2129 2092 2130 /* Propagate any change in error status */ 2093 2131 if (rq->fence.error) ··· 3148 3180 } 3149 3181 } 3150 3182 3151 - static inline void 3152 - logical_ring_default_irqs(struct intel_engine_cs *engine) 3183 + static void logical_ring_default_irqs(struct intel_engine_cs *engine) 3153 3184 { 3154 3185 unsigned int shift = 0; 3155 3186 ··· 3263 3296 3264 3297 old = fetch_and_zero(&ve->request); 3265 3298 if (old) { 3266 - GEM_BUG_ON(!i915_request_completed(old)); 3299 + GEM_BUG_ON(!__i915_request_is_complete(old)); 3267 3300 __i915_request_submit(old); 3268 3301 i915_request_put(old); 3269 3302 } ··· 3540 3573 } 3541 3574 3542 3575 if (ve->request) { /* background completion from preempt-to-busy */ 3543 - GEM_BUG_ON(!i915_request_completed(ve->request)); 3576 + GEM_BUG_ON(!__i915_request_is_complete(ve->request)); 3544 3577 __i915_request_submit(ve->request); 3545 3578 i915_request_put(ve->request); 3546 3579 }
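The intel_engine_context_in()/out() helpers deleted from this file are not dropped from the driver; the new #include "intel_engine_stats.h" at the top of the hunk suggests they simply move into that header (the "Simplify tracking for engine->fw_active and stats.active" item). A sketch of the relocated helper, assuming the body carries over essentially unchanged from the lines removed above:

/* intel_engine_stats.h (assumed): busy-stats accounting moved out of
 * intel_execlists_submission.c; body taken from the removed helper.
 */
static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
        unsigned long flags;

        if (atomic_add_unless(&engine->stats.active, 1, 0))
                return;

        write_seqlock_irqsave(&engine->stats.lock, flags);
        if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
                engine->stats.start = ktime_get();
                atomic_inc(&engine->stats.active);
        }
        write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

The matching intel_engine_context_out() would move the same way. Relatedly, engine->fw_active loses its atomic accessors in this hunk, presumably because schedule-in/out for a given engine already run from that engine's submission tasklet and so are serialised, making a plain counter sufficient.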
+9 -3
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
··· 145 145 } 146 146 147 147 static struct intel_gt_buffer_pool_node * 148 - node_create(struct intel_gt_buffer_pool *pool, size_t sz) 148 + node_create(struct intel_gt_buffer_pool *pool, size_t sz, 149 + enum i915_map_type type) 149 150 { 150 151 struct intel_gt *gt = to_gt(pool); 151 152 struct intel_gt_buffer_pool_node *node; ··· 170 169 171 170 i915_gem_object_set_readonly(obj); 172 171 172 + node->type = type; 173 173 node->obj = obj; 174 174 return node; 175 175 } 176 176 177 177 struct intel_gt_buffer_pool_node * 178 - intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size) 178 + intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size, 179 + enum i915_map_type type) 179 180 { 180 181 struct intel_gt_buffer_pool *pool = &gt->buffer_pool; 181 182 struct intel_gt_buffer_pool_node *node; ··· 194 191 if (node->obj->base.size < size) 195 192 continue; 196 193 194 + if (node->type != type) 195 + continue; 196 + 197 197 age = READ_ONCE(node->age); 198 198 if (!age) 199 199 continue; ··· 211 205 rcu_read_unlock(); 212 206 213 207 if (&node->link == list) { 214 - node = node_create(pool, size); 208 + node = node_create(pool, size, type); 215 209 if (IS_ERR(node)) 216 210 return node; 217 211 }
+2 -1
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.h
··· 15 15 struct i915_request; 16 16 17 17 struct intel_gt_buffer_pool_node * 18 - intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size); 18 + intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size, 19 + enum i915_map_type type); 19 20 20 21 static inline int 21 22 intel_gt_buffer_pool_mark_active(struct intel_gt_buffer_pool_node *node,
+2 -2
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool_types.h
··· 11 11 #include <linux/spinlock.h> 12 12 #include <linux/workqueue.h> 13 13 14 + #include "gem/i915_gem_object_types.h" 14 15 #include "i915_active_types.h" 15 - 16 - struct drm_i915_gem_object; 17 16 18 17 struct intel_gt_buffer_pool { 19 18 spinlock_t lock; ··· 30 31 struct rcu_head rcu; 31 32 }; 32 33 unsigned long age; 34 + enum i915_map_type type; 33 35 }; 34 36 35 37 #endif /* INTEL_GT_BUFFER_POOL_TYPES_H */
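With the pool node now carrying an i915_map_type, a cached object is only reused when the requested mapping matches, so a recycled buffer never needs to be force-remapped. A minimal caller sketch (example_get_wc_scratch is a hypothetical caller; the pool helpers and error handling are the usual ones, abbreviated):

/* Sketch only: a caller that wants a small write-combined scratch buffer. */
static int example_get_wc_scratch(struct intel_gt *gt)
{
        struct intel_gt_buffer_pool_node *node;
        void *ptr;

        node = intel_gt_get_buffer_pool(gt, PAGE_SIZE, I915_MAP_WC);
        if (IS_ERR(node))
                return PTR_ERR(node);

        /* The node's type matches the request, so no forced remap is needed. */
        ptr = i915_gem_object_pin_map(node->obj, I915_MAP_WC);
        if (IS_ERR(ptr)) {
                intel_gt_buffer_pool_put(node);
                return PTR_ERR(ptr);
        }

        /* ... use ptr, then unpin and return the node to the pool ... */
        i915_gem_object_unpin_map(node->obj);
        intel_gt_buffer_pool_put(node);
        return 0;
}

This is presumably also why the command-parser hunk further down can relax I915_MAP_FORCE_WB to plain I915_MAP_WB: the shadow object it pulls from the pool is already constrained to the right mapping type.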
+2 -2
drivers/gpu/drm/i915/gt/intel_lrc.c
··· 1035 1035 return cs; 1036 1036 } 1037 1037 1038 - static inline u32 context_wa_bb_offset(const struct intel_context *ce) 1038 + static u32 context_wa_bb_offset(const struct intel_context *ce) 1039 1039 { 1040 1040 return PAGE_SIZE * ce->wa_bb_page; 1041 1041 } ··· 1098 1098 * engine info, SW context ID and SW counter need to form a unique number 1099 1099 * (Context ID) per lrc. 1100 1100 */ 1101 - static inline u32 lrc_descriptor(const struct intel_context *ce) 1101 + static u32 lrc_descriptor(const struct intel_context *ce) 1102 1102 { 1103 1103 u32 desc; 1104 1104
+1 -1
drivers/gpu/drm/i915/gt/intel_mocs.c
··· 472 472 return table->table[I915_MOCS_PTE].l3cc_value; 473 473 } 474 474 475 - static inline u32 l3cc_combine(u16 low, u16 high) 475 + static u32 l3cc_combine(u16 low, u16 high) 476 476 { 477 477 return low | (u32)high << 16; 478 478 }
+1 -1
drivers/gpu/drm/i915/gt/intel_ppgtt.c
··· 80 80 kfree(pt); 81 81 } 82 82 83 - static inline void 83 + static void 84 84 write_dma_entry(struct drm_i915_gem_object * const pdma, 85 85 const unsigned short idx, 86 86 const u64 encoded_entry)
+1 -1
drivers/gpu/drm/i915/gt/intel_rc6.c
··· 49 49 return rc6_to_gt(rc)->i915; 50 50 } 51 51 52 - static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) 52 + static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) 53 53 { 54 54 intel_uncore_write_fw(uncore, reg, val); 55 55 }
+1 -1
drivers/gpu/drm/i915/gt/intel_region_lmem.c
··· 98 98 static const struct intel_memory_region_ops intel_region_lmem_ops = { 99 99 .init = region_lmem_init, 100 100 .release = region_lmem_release, 101 - .create_object = __i915_gem_lmem_object_create, 101 + .init_object = __i915_gem_lmem_object_init, 102 102 }; 103 103 104 104 struct intel_memory_region *
+2 -3
drivers/gpu/drm/i915/gt/intel_reset.c
··· 151 151 void __i915_request_reset(struct i915_request *rq, bool guilty) 152 152 { 153 153 RQ_TRACE(rq, "guilty? %s\n", yesno(guilty)); 154 - 155 - GEM_BUG_ON(i915_request_completed(rq)); 154 + GEM_BUG_ON(__i915_request_is_complete(rq)); 156 155 157 156 rcu_read_lock(); /* protect the GEM context */ 158 157 if (guilty) { ··· 1109 1110 goto finish; 1110 1111 } 1111 1112 1112 - static inline int intel_gt_reset_engine(struct intel_engine_cs *engine) 1113 + static int intel_gt_reset_engine(struct intel_engine_cs *engine) 1113 1114 { 1114 1115 return __intel_gt_reset(engine->gt, engine->mask); 1115 1116 }
+1 -1
drivers/gpu/drm/i915/gt/intel_ring.c
··· 42 42 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ 43 43 flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); 44 44 45 - if (vma->obj->stolen) 45 + if (i915_gem_object_is_stolen(vma->obj)) 46 46 flags |= PIN_MAPPABLE; 47 47 else 48 48 flags |= PIN_HIGH;
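obj->stolen can no longer be tested directly here because the stolen node now sits inside a union on the GEM object ("Move stolen node into GEM object union" in the commit message), so a dedicated predicate is used instead. Its implementation is not part of this diff; a plausible sketch, with i915_gem_object_stolen_ops assumed as the identifying ops table:

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
        /* Assumed: stolen objects are identified by their ops rather than
         * by peeking at the (now union-shared) obj->stolen field. */
        return obj->ops == &i915_gem_object_stolen_ops;
}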
+93 -114
drivers/gpu/drm/i915/gt/intel_ring_submission.c
··· 122 122 hwsp = RING_HWS_PGA(engine->mmio_base); 123 123 } 124 124 125 - intel_uncore_write(engine->uncore, hwsp, offset); 126 - intel_uncore_posting_read(engine->uncore, hwsp); 125 + intel_uncore_write_fw(engine->uncore, hwsp, offset); 126 + intel_uncore_posting_read_fw(engine->uncore, hwsp); 127 127 } 128 128 129 129 static void flush_cs_tlb(struct intel_engine_cs *engine) 130 130 { 131 - struct drm_i915_private *dev_priv = engine->i915; 132 - 133 - if (!IS_GEN_RANGE(dev_priv, 6, 7)) 131 + if (!IS_GEN_RANGE(engine->i915, 6, 7)) 134 132 return; 135 133 136 134 /* ring should be idle before issuing a sync flush*/ 137 - drm_WARN_ON(&dev_priv->drm, 138 - (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); 135 + GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); 139 136 140 - ENGINE_WRITE(engine, RING_INSTPM, 141 - _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 142 - INSTPM_SYNC_FLUSH)); 143 - if (intel_wait_for_register(engine->uncore, 144 - RING_INSTPM(engine->mmio_base), 145 - INSTPM_SYNC_FLUSH, 0, 146 - 1000)) 147 - drm_err(&dev_priv->drm, 148 - "%s: wait for SyncFlush to complete for TLB invalidation timed out\n", 149 - engine->name); 137 + ENGINE_WRITE_FW(engine, RING_INSTPM, 138 + _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 139 + INSTPM_SYNC_FLUSH)); 140 + if (__intel_wait_for_register_fw(engine->uncore, 141 + RING_INSTPM(engine->mmio_base), 142 + INSTPM_SYNC_FLUSH, 0, 143 + 2000, 0, NULL)) 144 + ENGINE_TRACE(engine, 145 + "wait for SyncFlush to complete for TLB invalidation timed out\n"); 150 146 } 151 147 152 148 static void ring_setup_status_page(struct intel_engine_cs *engine) ··· 151 155 set_hwstam(engine, ~0u); 152 156 153 157 flush_cs_tlb(engine); 154 - } 155 - 156 - static bool stop_ring(struct intel_engine_cs *engine) 157 - { 158 - intel_engine_stop_cs(engine); 159 - 160 - ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL)); 161 - 162 - ENGINE_WRITE(engine, RING_HEAD, 0); 163 - ENGINE_WRITE(engine, RING_TAIL, 0); 164 - 165 - /* The ring must be empty before it is disabled */ 166 - ENGINE_WRITE(engine, RING_CTL, 0); 167 - 168 - return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0; 169 158 } 170 159 171 160 static struct i915_address_space *vm_alias(struct i915_address_space *vm) ··· 170 189 { 171 190 struct i915_address_space *vm = vm_alias(engine->gt->vm); 172 191 173 - if (vm) { 174 - ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G); 175 - ENGINE_WRITE(engine, RING_PP_DIR_BASE, pp_dir(vm)); 192 + if (!vm) 193 + return; 194 + 195 + ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G); 196 + ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm)); 197 + 198 + if (INTEL_GEN(engine->i915) >= 7) { 199 + ENGINE_WRITE_FW(engine, 200 + RING_MODE_GEN7, 201 + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 176 202 } 177 203 } 178 204 ··· 187 199 { 188 200 struct drm_i915_private *dev_priv = engine->i915; 189 201 struct intel_ring *ring = engine->legacy.ring; 190 - int ret = 0; 191 202 192 203 ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n", 193 204 ring->head, ring->tail); 194 - 195 - intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); 196 - 197 - /* WaClearRingBufHeadRegAtInit:ctg,elk */ 198 - if (!stop_ring(engine)) { 199 - /* G45 ring initialization often fails to reset head to zero */ 200 - drm_dbg(&dev_priv->drm, "%s head not reset to zero " 201 - "ctl %08x head %08x tail %08x start %08x\n", 202 - engine->name, 203 - ENGINE_READ(engine, RING_CTL), 204 - ENGINE_READ(engine, RING_HEAD), 205 - ENGINE_READ(engine, RING_TAIL), 206 - 
ENGINE_READ(engine, RING_START)); 207 - 208 - if (!stop_ring(engine)) { 209 - drm_err(&dev_priv->drm, 210 - "failed to set %s head to zero " 211 - "ctl %08x head %08x tail %08x start %08x\n", 212 - engine->name, 213 - ENGINE_READ(engine, RING_CTL), 214 - ENGINE_READ(engine, RING_HEAD), 215 - ENGINE_READ(engine, RING_TAIL), 216 - ENGINE_READ(engine, RING_START)); 217 - ret = -EIO; 218 - goto out; 219 - } 220 - } 221 205 222 206 if (HWS_NEEDS_PHYSICAL(dev_priv)) 223 207 ring_setup_phys_status_page(engine); ··· 207 247 * also enforces ordering), otherwise the hw might lose the new ring 208 248 * register values. 209 249 */ 210 - ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma)); 250 + ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma)); 211 251 212 252 /* Check that the ring offsets point within the ring! */ 213 253 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); ··· 217 257 set_pp_dir(engine); 218 258 219 259 /* First wake the ring up to an empty/idle ring */ 220 - ENGINE_WRITE(engine, RING_HEAD, ring->head); 221 - ENGINE_WRITE(engine, RING_TAIL, ring->head); 260 + ENGINE_WRITE_FW(engine, RING_HEAD, ring->head); 261 + ENGINE_WRITE_FW(engine, RING_TAIL, ring->head); 222 262 ENGINE_POSTING_READ(engine, RING_TAIL); 223 263 224 - ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID); 264 + ENGINE_WRITE_FW(engine, RING_CTL, 265 + RING_CTL_SIZE(ring->size) | RING_VALID); 225 266 226 267 /* If the head is still not zero, the ring is dead */ 227 - if (intel_wait_for_register(engine->uncore, 228 - RING_CTL(engine->mmio_base), 229 - RING_VALID, RING_VALID, 230 - 50)) { 231 - drm_err(&dev_priv->drm, "%s initialization failed " 232 - "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n", 233 - engine->name, 234 - ENGINE_READ(engine, RING_CTL), 235 - ENGINE_READ(engine, RING_CTL) & RING_VALID, 236 - ENGINE_READ(engine, RING_HEAD), ring->head, 237 - ENGINE_READ(engine, RING_TAIL), ring->tail, 238 - ENGINE_READ(engine, RING_START), 239 - i915_ggtt_offset(ring->vma)); 240 - ret = -EIO; 241 - goto out; 268 + if (__intel_wait_for_register_fw(engine->uncore, 269 + RING_CTL(engine->mmio_base), 270 + RING_VALID, RING_VALID, 271 + 5000, 0, NULL)) { 272 + drm_err(&dev_priv->drm, 273 + "%s initialization failed; " 274 + "ctl %08x (valid? 
%d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n", 275 + engine->name, 276 + ENGINE_READ(engine, RING_CTL), 277 + ENGINE_READ(engine, RING_CTL) & RING_VALID, 278 + ENGINE_READ(engine, RING_HEAD), ring->head, 279 + ENGINE_READ(engine, RING_TAIL), ring->tail, 280 + ENGINE_READ(engine, RING_START), 281 + i915_ggtt_offset(ring->vma)); 282 + return -EIO; 242 283 } 243 284 244 285 if (INTEL_GEN(dev_priv) > 2) 245 - ENGINE_WRITE(engine, 246 - RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); 286 + ENGINE_WRITE_FW(engine, 287 + RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); 247 288 248 289 /* Now awake, let it get started */ 249 290 if (ring->tail != ring->head) { 250 - ENGINE_WRITE(engine, RING_TAIL, ring->tail); 291 + ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail); 251 292 ENGINE_POSTING_READ(engine, RING_TAIL); 252 293 } 253 294 254 295 /* Papering over lost _interrupts_ immediately following the restart */ 255 296 intel_engine_signal_breadcrumbs(engine); 256 - out: 257 - intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); 258 - 259 - return ret; 297 + return 0; 260 298 } 261 299 262 300 static void sanitize_hwsp(struct intel_engine_cs *engine) ··· 290 332 clflush_cache_range(engine->status_page.addr, PAGE_SIZE); 291 333 } 292 334 335 + static bool stop_ring(struct intel_engine_cs *engine) 336 + { 337 + /* Empty the ring by skipping to the end */ 338 + ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL)); 339 + ENGINE_POSTING_READ(engine, RING_HEAD); 340 + 341 + /* The ring must be empty before it is disabled */ 342 + ENGINE_WRITE_FW(engine, RING_CTL, 0); 343 + ENGINE_POSTING_READ(engine, RING_CTL); 344 + 345 + /* Then reset the disabled ring */ 346 + ENGINE_WRITE_FW(engine, RING_HEAD, 0); 347 + ENGINE_WRITE_FW(engine, RING_TAIL, 0); 348 + 349 + return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0; 350 + } 351 + 293 352 static void reset_prepare(struct intel_engine_cs *engine) 294 353 { 295 - struct intel_uncore *uncore = engine->uncore; 296 - const u32 base = engine->mmio_base; 297 - 298 354 /* 299 355 * We stop engines, otherwise we might get failed reset and a 300 356 * dead gpu (on elk). 
Also as modern gpu as kbl can suffer ··· 320 348 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) 321 349 * 322 350 * WaMediaResetMainRingCleanup:ctg,elk (presumably) 351 + * WaClearRingBufHeadRegAtInit:ctg,elk 323 352 * 324 353 * FIXME: Wa for more modern gens needs to be validated 325 354 */ 326 355 ENGINE_TRACE(engine, "\n"); 356 + intel_engine_stop_cs(engine); 327 357 328 - if (intel_engine_stop_cs(engine)) 329 - ENGINE_TRACE(engine, "timed out on STOP_RING\n"); 358 + if (!stop_ring(engine)) { 359 + /* G45 ring initialization often fails to reset head to zero */ 360 + drm_dbg(&engine->i915->drm, 361 + "%s head not reset to zero " 362 + "ctl %08x head %08x tail %08x start %08x\n", 363 + engine->name, 364 + ENGINE_READ_FW(engine, RING_CTL), 365 + ENGINE_READ_FW(engine, RING_HEAD), 366 + ENGINE_READ_FW(engine, RING_TAIL), 367 + ENGINE_READ_FW(engine, RING_START)); 368 + } 330 369 331 - intel_uncore_write_fw(uncore, 332 - RING_HEAD(base), 333 - intel_uncore_read_fw(uncore, RING_TAIL(base))); 334 - intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */ 335 - 336 - intel_uncore_write_fw(uncore, RING_HEAD(base), 0); 337 - intel_uncore_write_fw(uncore, RING_TAIL(base), 0); 338 - intel_uncore_posting_read_fw(uncore, RING_TAIL(base)); 339 - 340 - /* The ring must be empty before it is disabled */ 341 - intel_uncore_write_fw(uncore, RING_CTL(base), 0); 342 - 343 - /* Check acts as a post */ 344 - if (intel_uncore_read_fw(uncore, RING_HEAD(base))) 345 - ENGINE_TRACE(engine, "ring head [%x] not parked\n", 346 - intel_uncore_read_fw(uncore, RING_HEAD(base))); 370 + if (!stop_ring(engine)) { 371 + drm_err(&engine->i915->drm, 372 + "failed to set %s head to zero " 373 + "ctl %08x head %08x tail %08x start %08x\n", 374 + engine->name, 375 + ENGINE_READ_FW(engine, RING_CTL), 376 + ENGINE_READ_FW(engine, RING_HEAD), 377 + ENGINE_READ_FW(engine, RING_TAIL), 378 + ENGINE_READ_FW(engine, RING_START)); 379 + } 347 380 } 348 381 349 382 static void reset_rewind(struct intel_engine_cs *engine, bool stalled) ··· 359 382 360 383 rq = NULL; 361 384 spin_lock_irqsave(&engine->active.lock, flags); 385 + rcu_read_lock(); 362 386 list_for_each_entry(pos, &engine->active.requests, sched.link) { 363 - if (!i915_request_completed(pos)) { 387 + if (!__i915_request_is_complete(pos)) { 364 388 rq = pos; 365 389 break; 366 390 } 367 391 } 392 + rcu_read_unlock(); 368 393 369 394 /* 370 395 * The guilty request will get skipped on a hung engine. ··· 642 663 return rq->engine->emit_flush(rq, EMIT_FLUSH); 643 664 } 644 665 645 - static inline int mi_set_context(struct i915_request *rq, 646 - struct intel_context *ce, 647 - u32 flags) 666 + static int mi_set_context(struct i915_request *rq, 667 + struct intel_context *ce, 668 + u32 flags) 648 669 { 649 670 struct intel_engine_cs *engine = rq->engine; 650 671 struct drm_i915_private *i915 = engine->i915;
+1 -1
drivers/gpu/drm/i915/gt/intel_rps.c
··· 43 43 return mask & ~rps->pm_intrmsk_mbz; 44 44 } 45 45 46 - static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) 46 + static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) 47 47 { 48 48 intel_uncore_write_fw(uncore, reg, val); 49 49 }
+2 -2
drivers/gpu/drm/i915/gt/intel_timeline.c
··· 582 582 583 583 rcu_read_lock(); 584 584 cl = rcu_dereference(from->hwsp_cacheline); 585 - if (i915_request_completed(from)) /* confirm cacheline is valid */ 585 + if (i915_request_signaled(from)) /* confirm cacheline is valid */ 586 586 goto unlock; 587 587 if (unlikely(!i915_active_acquire_if_busy(&cl->active))) 588 588 goto unlock; /* seqno wrapped and completed! */ 589 - if (unlikely(i915_request_completed(from))) 589 + if (unlikely(__i915_request_is_complete(from))) 590 590 goto release; 591 591 rcu_read_unlock(); 592 592
+1 -1
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 1304 1304 } 1305 1305 1306 1306 __maybe_unused 1307 - static inline bool is_nonpriv_flags_valid(u32 flags) 1307 + static bool is_nonpriv_flags_valid(u32 flags) 1308 1308 { 1309 1309 /* Check only valid flag bits are set */ 1310 1310 if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
+14 -4
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
··· 704 704 705 705 for_each_engine(engine, gt, id) { 706 706 unsigned int reset_count, reset_engine_count; 707 + unsigned long count; 707 708 IGT_TIMEOUT(end_time); 708 709 709 710 if (active && !intel_engine_can_store_dword(engine)) ··· 722 721 723 722 st_engine_heartbeat_disable(engine); 724 723 set_bit(I915_RESET_ENGINE + id, &gt->reset.flags); 724 + count = 0; 725 725 do { 726 726 if (active) { 727 727 struct i915_request *rq; ··· 772 770 err = -EINVAL; 773 771 break; 774 772 } 773 + 774 + count++; 775 775 } while (time_before(jiffies, end_time)); 776 776 clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags); 777 777 st_engine_heartbeat_enable(engine); 778 + pr_info("%s: Completed %lu %s resets\n", 779 + engine->name, count, active ? "active" : "idle"); 778 780 779 781 if (err) 780 782 break; ··· 1629 1623 prev = rq; 1630 1624 count++; 1631 1625 } while (time_before(jiffies, end_time)); 1632 - pr_info("%s: Completed %d resets\n", engine->name, count); 1626 + pr_info("%s: Completed %d queued resets\n", 1627 + engine->name, count); 1633 1628 1634 1629 *h.batch = MI_BATCH_BUFFER_END; 1635 1630 intel_gt_chipset_flush(engine->gt); ··· 1727 1720 GEM_TRACE("i915_reset_engine(%s:%s) under %s\n", 1728 1721 engine->name, mode, p->name); 1729 1722 1730 - tasklet_disable(t); 1723 + if (t->func) 1724 + tasklet_disable(t); 1731 1725 if (strcmp(p->name, "softirq")) 1732 1726 local_bh_disable(); 1733 1727 p->critical_section_begin(); ··· 1738 1730 p->critical_section_end(); 1739 1731 if (strcmp(p->name, "softirq")) 1740 1732 local_bh_enable(); 1741 - tasklet_enable(t); 1742 - tasklet_hi_schedule(t); 1733 + if (t->func) { 1734 + tasklet_enable(t); 1735 + tasklet_hi_schedule(t); 1736 + } 1743 1737 1744 1738 if (err) 1745 1739 pr_err("i915_reset_engine(%s:%s) failed under %s\n",
+8 -3
drivers/gpu/drm/i915/gt/selftest_reset.c
··· 321 321 goto out_unlock; 322 322 323 323 for_each_engine(engine, gt, id) { 324 - tasklet_disable(&engine->execlists.tasklet); 324 + struct tasklet_struct *t = &engine->execlists.tasklet; 325 + 326 + if (t->func) 327 + tasklet_disable(t); 325 328 intel_engine_pm_get(engine); 326 329 327 330 for (p = igt_atomic_phases; p->name; p++) { ··· 348 345 } 349 346 350 347 intel_engine_pm_put(engine); 351 - tasklet_enable(&engine->execlists.tasklet); 352 - tasklet_hi_schedule(&engine->execlists.tasklet); 348 + if (t->func) { 349 + tasklet_enable(t); 350 + tasklet_hi_schedule(t); 351 + } 353 352 if (err) 354 353 break; 355 354 }
+1 -1
drivers/gpu/drm/i915/gt/shmem_utils.c
··· 33 33 struct file *file; 34 34 void *ptr; 35 35 36 - if (obj->ops == &i915_gem_shmem_ops) { 36 + if (i915_gem_object_is_shmem(obj)) { 37 37 file = obj->base.filp; 38 38 atomic_long_inc(&file->f_count); 39 39 return file;
+26 -5
drivers/gpu/drm/i915/gt/uc/intel_uc.c
··· 15 15 static const struct intel_uc_ops uc_ops_off; 16 16 static const struct intel_uc_ops uc_ops_on; 17 17 18 + static void uc_expand_default_options(struct intel_uc *uc) 19 + { 20 + struct drm_i915_private *i915 = uc_to_gt(uc)->i915; 21 + 22 + if (i915->params.enable_guc != -1) 23 + return; 24 + 25 + /* Don't enable GuC/HuC on pre-Gen12 */ 26 + if (INTEL_GEN(i915) < 12) { 27 + i915->params.enable_guc = 0; 28 + return; 29 + } 30 + 31 + /* Don't enable GuC/HuC on older Gen12 platforms */ 32 + if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) { 33 + i915->params.enable_guc = 0; 34 + return; 35 + } 36 + 37 + /* Default: enable HuC authentication only */ 38 + i915->params.enable_guc = ENABLE_GUC_LOAD_HUC; 39 + } 40 + 18 41 /* Reset GuC providing us with fresh state for both GuC and HuC. 19 42 */ 20 43 static int __intel_uc_reset_hw(struct intel_uc *uc) ··· 75 52 yesno(intel_uc_wants_guc_submission(uc)), 76 53 yesno(intel_uc_wants_huc(uc))); 77 54 78 - if (i915->params.enable_guc == -1) 79 - return; 80 - 81 55 if (i915->params.enable_guc == 0) { 82 56 GEM_BUG_ON(intel_uc_wants_guc(uc)); 83 57 GEM_BUG_ON(intel_uc_wants_guc_submission(uc)); ··· 99 79 "Incompatible option enable_guc=%d - %s\n", 100 80 i915->params.enable_guc, "GuC submission is N/A"); 101 81 102 - if (i915->params.enable_guc & ~(ENABLE_GUC_SUBMISSION | 103 - ENABLE_GUC_LOAD_HUC)) 82 + if (i915->params.enable_guc & ~ENABLE_GUC_MASK) 104 83 drm_info(&i915->drm, 105 84 "Incompatible option enable_guc=%d - %s\n", 106 85 i915->params.enable_guc, "undocumented flag"); ··· 107 88 108 89 void intel_uc_init_early(struct intel_uc *uc) 109 90 { 91 + uc_expand_default_options(uc); 92 + 110 93 intel_guc_init_early(&uc->guc); 111 94 intel_huc_init_early(&uc->huc); 112 95
+1 -6
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
··· 152 152 uc_fw->path = NULL; 153 153 } 154 154 } 155 - 156 - /* We don't want to enable GuC/HuC on pre-Gen11 by default */ 157 - if (i915->params.enable_guc == -1 && p < INTEL_ICELAKE) 158 - uc_fw->path = NULL; 159 155 } 160 156 161 157 static const char *__override_guc_firmware_path(struct drm_i915_private *i915) 162 158 { 163 - if (i915->params.enable_guc & (ENABLE_GUC_SUBMISSION | 164 - ENABLE_GUC_LOAD_HUC)) 159 + if (i915->params.enable_guc & ENABLE_GUC_MASK) 165 160 return i915->params.guc_firmware_path; 166 161 return ""; 167 162 }
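Both GuC hunks replace the open-coded (ENABLE_GUC_SUBMISSION | ENABLE_GUC_LOAD_HUC) pair with ENABLE_GUC_MASK. The mask itself is defined elsewhere in the series and not shown here; given the existing flag values it is presumably just the two modparam bits combined, along the lines of:

/* Assumed definitions in i915_params.h; only ENABLE_GUC_MASK is new. */
#define ENABLE_GUC_SUBMISSION           BIT(0)
#define ENABLE_GUC_LOAD_HUC             BIT(1)
#define ENABLE_GUC_MASK                 GENMASK(1, 0)

With uc_expand_default_options() in place, enable_guc=-1 is resolved during init_early: it becomes 0 on pre-Gen12 and on TGL/RKL, and ENABLE_GUC_LOAD_HUC (HuC authentication only) on other Gen12+ parts, which is why the old pre-Gen11 fallback in intel_uc_fw.c can be dropped.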
+265 -70
drivers/gpu/drm/i915/gvt/cmd_parser.c
··· 38 38 39 39 #include "i915_drv.h" 40 40 #include "gt/intel_gpu_commands.h" 41 + #include "gt/intel_lrc.h" 41 42 #include "gt/intel_ring.h" 43 + #include "gt/intel_gt_requests.h" 42 44 #include "gvt.h" 43 45 #include "i915_pvinfo.h" 44 46 #include "trace.h" 47 + 48 + #include "gem/i915_gem_context.h" 49 + #include "gem/i915_gem_pm.h" 50 + #include "gt/intel_context.h" 45 51 46 52 #define INVALID_OP (~0U) 47 53 ··· 461 455 RING_BUFFER_INSTRUCTION, 462 456 BATCH_BUFFER_INSTRUCTION, 463 457 BATCH_BUFFER_2ND_LEVEL, 458 + RING_BUFFER_CTX, 464 459 }; 465 460 466 461 enum { ··· 503 496 */ 504 497 int saved_buf_addr_type; 505 498 bool is_ctx_wa; 499 + bool is_init_ctx; 506 500 507 501 const struct cmd_info *info; 508 502 ··· 717 709 return *cmd_ptr(s, index); 718 710 } 719 711 712 + static inline bool is_init_ctx(struct parser_exec_state *s) 713 + { 714 + return (s->buf_type == RING_BUFFER_CTX && s->is_init_ctx); 715 + } 716 + 720 717 static void parser_exec_state_dump(struct parser_exec_state *s) 721 718 { 722 719 int cnt = 0; ··· 735 722 736 723 gvt_dbg_cmd(" %s %s ip_gma(%08lx) ", 737 724 s->buf_type == RING_BUFFER_INSTRUCTION ? 738 - "RING_BUFFER" : "BATCH_BUFFER", 725 + "RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ? 726 + "CTX_BUFFER" : "BATCH_BUFFER"), 739 727 s->buf_addr_type == GTT_BUFFER ? 740 728 "GTT" : "PPGTT", s->ip_gma); 741 729 ··· 771 757 if (WARN_ON(s->ring_head == s->ring_tail)) 772 758 return; 773 759 774 - if (s->buf_type == RING_BUFFER_INSTRUCTION) { 760 + if (s->buf_type == RING_BUFFER_INSTRUCTION || 761 + s->buf_type == RING_BUFFER_CTX) { 775 762 unsigned long ring_top = s->ring_start + s->ring_size; 776 763 777 764 if (s->ring_head > s->ring_tail) { ··· 836 821 *addr = val; \ 837 822 } while (0) 838 823 839 - static bool is_shadowed_mmio(unsigned int offset) 840 - { 841 - bool ret = false; 842 - 843 - if ((offset == 0x2168) || /*BB current head register UDW */ 844 - (offset == 0x2140) || /*BB current header register */ 845 - (offset == 0x211c) || /*second BB header register UDW */ 846 - (offset == 0x2114)) { /*second BB header register UDW */ 847 - ret = true; 848 - } 849 - return ret; 850 - } 851 - 852 - static inline bool is_force_nonpriv_mmio(unsigned int offset) 853 - { 854 - return (offset >= 0x24d0 && offset < 0x2500); 855 - } 856 - 857 - static int force_nonpriv_reg_handler(struct parser_exec_state *s, 858 - unsigned int offset, unsigned int index, char *cmd) 859 - { 860 - struct intel_gvt *gvt = s->vgpu->gvt; 861 - unsigned int data; 862 - u32 ring_base; 863 - u32 nopid; 864 - 865 - if (!strcmp(cmd, "lri")) 866 - data = cmd_val(s, index + 1); 867 - else { 868 - gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n", 869 - offset, cmd); 870 - return -EINVAL; 871 - } 872 - 873 - ring_base = s->engine->mmio_base; 874 - nopid = i915_mmio_reg_offset(RING_NOPID(ring_base)); 875 - 876 - if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) && 877 - data != nopid) { 878 - gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n", 879 - offset, data); 880 - patch_value(s, cmd_ptr(s, index), nopid); 881 - return 0; 882 - } 883 - return 0; 884 - } 885 - 886 824 static inline bool is_mocs_mmio(unsigned int offset) 887 825 { 888 826 return ((offset >= 0xc800) && (offset <= 0xcff8)) || 889 827 ((offset >= 0xb020) && (offset <= 0xb0a0)); 890 - } 891 - 892 - static int mocs_cmd_reg_handler(struct parser_exec_state *s, 893 - unsigned int offset, unsigned int index) 894 - { 895 - if (!is_mocs_mmio(offset)) 896 - return -EINVAL; 897 - vgpu_vreg(s->vgpu, offset) = 
cmd_val(s, index + 1); 898 - return 0; 899 828 } 900 829 901 830 static int is_cmd_update_pdps(unsigned int offset, ··· 889 930 struct intel_vgpu *vgpu = s->vgpu; 890 931 struct intel_gvt *gvt = vgpu->gvt; 891 932 u32 ctx_sr_ctl; 933 + u32 *vreg, vreg_old; 892 934 893 935 if (offset + 4 > gvt->device_info.mmio_size) { 894 936 gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", 895 937 cmd, offset); 896 938 return -EFAULT; 939 + } 940 + 941 + if (is_init_ctx(s)) { 942 + struct intel_gvt_mmio_info *mmio_info; 943 + 944 + intel_gvt_mmio_set_cmd_accessible(gvt, offset); 945 + mmio_info = intel_gvt_find_mmio_info(gvt, offset); 946 + if (mmio_info && mmio_info->write) 947 + intel_gvt_mmio_set_cmd_write_patch(gvt, offset); 948 + return 0; 897 949 } 898 950 899 951 if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) { ··· 913 943 return -EBADRQC; 914 944 } 915 945 916 - if (is_shadowed_mmio(offset)) { 917 - gvt_vgpu_err("found access of shadowed MMIO %x\n", offset); 946 + if (!strncmp(cmd, "srm", 3) || 947 + !strncmp(cmd, "lrm", 3)) { 948 + if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) && 949 + offset != 0x21f0) { 950 + gvt_vgpu_err("%s access to register (%x)\n", 951 + cmd, offset); 952 + return -EPERM; 953 + } else 954 + return 0; 955 + } 956 + 957 + if (!strncmp(cmd, "lrr-src", 7) || 958 + !strncmp(cmd, "lrr-dst", 7)) { 959 + gvt_vgpu_err("not allowed cmd %s\n", cmd); 960 + return -EPERM; 961 + } 962 + 963 + if (!strncmp(cmd, "pipe_ctrl", 9)) { 964 + /* TODO: add LRI POST logic here */ 918 965 return 0; 919 966 } 920 967 921 - if (is_mocs_mmio(offset) && 922 - mocs_cmd_reg_handler(s, offset, index)) 923 - return -EINVAL; 924 - 925 - if (is_force_nonpriv_mmio(offset) && 926 - force_nonpriv_reg_handler(s, offset, index, cmd)) 968 + if (strncmp(cmd, "lri", 3)) 927 969 return -EPERM; 970 + 971 + /* below are all lri handlers */ 972 + vreg = &vgpu_vreg(s->vgpu, offset); 973 + if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) { 974 + gvt_vgpu_err("%s access to non-render register (%x)\n", 975 + cmd, offset); 976 + return -EBADRQC; 977 + } 978 + 979 + if (is_cmd_update_pdps(offset, s) && 980 + cmd_pdp_mmio_update_handler(s, offset, index)) 981 + return -EINVAL; 928 982 929 983 if (offset == i915_mmio_reg_offset(DERRMR) || 930 984 offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { ··· 956 962 patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE); 957 963 } 958 964 959 - if (is_cmd_update_pdps(offset, s) && 960 - cmd_pdp_mmio_update_handler(s, offset, index)) 961 - return -EINVAL; 965 + if (is_mocs_mmio(offset)) 966 + *vreg = cmd_val(s, index + 1); 967 + 968 + vreg_old = *vreg; 969 + 970 + if (intel_gvt_mmio_is_cmd_write_patch(gvt, offset)) { 971 + u32 cmdval_new, cmdval; 972 + struct intel_gvt_mmio_info *mmio_info; 973 + 974 + cmdval = cmd_val(s, index + 1); 975 + 976 + mmio_info = intel_gvt_find_mmio_info(gvt, offset); 977 + if (!mmio_info) { 978 + cmdval_new = cmdval; 979 + } else { 980 + u64 ro_mask = mmio_info->ro_mask; 981 + int ret; 982 + 983 + if (likely(!ro_mask)) 984 + ret = mmio_info->write(s->vgpu, offset, 985 + &cmdval, 4); 986 + else { 987 + gvt_vgpu_err("try to write RO reg %x\n", 988 + offset); 989 + ret = -EBADRQC; 990 + } 991 + if (ret) 992 + return ret; 993 + cmdval_new = *vreg; 994 + } 995 + if (cmdval_new != cmdval) 996 + patch_value(s, cmd_ptr(s, index+1), cmdval_new); 997 + } 998 + 999 + /* only patch cmd. 
restore vreg value if changed in mmio write handler*/ 1000 + *vreg = vreg_old; 962 1001 963 1002 /* TODO 964 1003 * In order to let workload with inhibit context to generate ··· 1243 1216 s->buf_type = BATCH_BUFFER_INSTRUCTION; 1244 1217 ret = ip_gma_set(s, s->ret_ip_gma_bb); 1245 1218 s->buf_addr_type = s->saved_buf_addr_type; 1219 + } else if (s->buf_type == RING_BUFFER_CTX) { 1220 + ret = ip_gma_set(s, s->ring_tail); 1246 1221 } else { 1247 1222 s->buf_type = RING_BUFFER_INSTRUCTION; 1248 1223 s->buf_addr_type = GTT_BUFFER; ··· 2793 2764 gma_bottom = rb_start + rb_len; 2794 2765 2795 2766 while (s->ip_gma != gma_tail) { 2796 - if (s->buf_type == RING_BUFFER_INSTRUCTION) { 2767 + if (s->buf_type == RING_BUFFER_INSTRUCTION || 2768 + s->buf_type == RING_BUFFER_CTX) { 2797 2769 if (!(s->ip_gma >= rb_start) || 2798 2770 !(s->ip_gma < gma_bottom)) { 2799 2771 gvt_vgpu_err("ip_gma %lx out of ring scope." ··· 3085 3055 } 3086 3056 3087 3057 return 0; 3058 + } 3059 + 3060 + /* generate dummy contexts by sending empty requests to HW, and let 3061 + * the HW to fill Engine Contexts. This dummy contexts are used for 3062 + * initialization purpose (update reg whitelist), so referred to as 3063 + * init context here 3064 + */ 3065 + void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu) 3066 + { 3067 + struct intel_gvt *gvt = vgpu->gvt; 3068 + struct drm_i915_private *dev_priv = gvt->gt->i915; 3069 + struct intel_engine_cs *engine; 3070 + enum intel_engine_id id; 3071 + const unsigned long start = LRC_STATE_PN * PAGE_SIZE; 3072 + struct i915_request *rq; 3073 + struct intel_vgpu_submission *s = &vgpu->submission; 3074 + struct i915_request *requests[I915_NUM_ENGINES] = {}; 3075 + bool is_ctx_pinned[I915_NUM_ENGINES] = {}; 3076 + int ret; 3077 + 3078 + if (gvt->is_reg_whitelist_updated) 3079 + return; 3080 + 3081 + for_each_engine(engine, &dev_priv->gt, id) { 3082 + ret = intel_context_pin(s->shadow[id]); 3083 + if (ret) { 3084 + gvt_vgpu_err("fail to pin shadow ctx\n"); 3085 + goto out; 3086 + } 3087 + is_ctx_pinned[id] = true; 3088 + 3089 + rq = i915_request_create(s->shadow[id]); 3090 + if (IS_ERR(rq)) { 3091 + gvt_vgpu_err("fail to alloc default request\n"); 3092 + ret = -EIO; 3093 + goto out; 3094 + } 3095 + requests[id] = i915_request_get(rq); 3096 + i915_request_add(rq); 3097 + } 3098 + 3099 + if (intel_gt_wait_for_idle(&dev_priv->gt, 3100 + I915_GEM_IDLE_TIMEOUT) == -ETIME) { 3101 + ret = -EIO; 3102 + goto out; 3103 + } 3104 + 3105 + /* scan init ctx to update cmd accessible list */ 3106 + for_each_engine(engine, &dev_priv->gt, id) { 3107 + int size = engine->context_size - PAGE_SIZE; 3108 + void *vaddr; 3109 + struct parser_exec_state s; 3110 + struct drm_i915_gem_object *obj; 3111 + struct i915_request *rq; 3112 + 3113 + rq = requests[id]; 3114 + GEM_BUG_ON(!i915_request_completed(rq)); 3115 + GEM_BUG_ON(!intel_context_is_pinned(rq->context)); 3116 + obj = rq->context->state->obj; 3117 + 3118 + if (!obj) { 3119 + ret = -EIO; 3120 + goto out; 3121 + } 3122 + 3123 + i915_gem_object_set_cache_coherency(obj, 3124 + I915_CACHE_LLC); 3125 + 3126 + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); 3127 + if (IS_ERR(vaddr)) { 3128 + gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n", 3129 + id, PTR_ERR(vaddr)); 3130 + goto out; 3131 + } 3132 + 3133 + s.buf_type = RING_BUFFER_CTX; 3134 + s.buf_addr_type = GTT_BUFFER; 3135 + s.vgpu = vgpu; 3136 + s.engine = engine; 3137 + s.ring_start = 0; 3138 + s.ring_size = size; 3139 + s.ring_head = 0; 3140 + s.ring_tail = size; 3141 + s.rb_va = 
vaddr + start; 3142 + s.workload = NULL; 3143 + s.is_ctx_wa = false; 3144 + s.is_init_ctx = true; 3145 + 3146 + /* skipping the first RING_CTX_SIZE(0x50) dwords */ 3147 + ret = ip_gma_set(&s, RING_CTX_SIZE); 3148 + if (ret) { 3149 + i915_gem_object_unpin_map(obj); 3150 + goto out; 3151 + } 3152 + 3153 + ret = command_scan(&s, 0, size, 0, size); 3154 + if (ret) 3155 + gvt_err("Scan init ctx error\n"); 3156 + 3157 + i915_gem_object_unpin_map(obj); 3158 + } 3159 + 3160 + out: 3161 + if (!ret) 3162 + gvt->is_reg_whitelist_updated = true; 3163 + 3164 + for (id = 0; id < I915_NUM_ENGINES ; id++) { 3165 + if (requests[id]) 3166 + i915_request_put(requests[id]); 3167 + 3168 + if (is_ctx_pinned[id]) 3169 + intel_context_unpin(s->shadow[id]); 3170 + } 3171 + } 3172 + 3173 + int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload) 3174 + { 3175 + struct intel_vgpu *vgpu = workload->vgpu; 3176 + unsigned long gma_head, gma_tail, gma_start, ctx_size; 3177 + struct parser_exec_state s; 3178 + int ring_id = workload->engine->id; 3179 + struct intel_context *ce = vgpu->submission.shadow[ring_id]; 3180 + int ret; 3181 + 3182 + GEM_BUG_ON(atomic_read(&ce->pin_count) < 0); 3183 + 3184 + ctx_size = workload->engine->context_size - PAGE_SIZE; 3185 + 3186 + /* Only ring contxt is loaded to HW for inhibit context, no need to 3187 + * scan engine context 3188 + */ 3189 + if (is_inhibit_context(ce)) 3190 + return 0; 3191 + 3192 + gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN*PAGE_SIZE; 3193 + gma_head = 0; 3194 + gma_tail = ctx_size; 3195 + 3196 + s.buf_type = RING_BUFFER_CTX; 3197 + s.buf_addr_type = GTT_BUFFER; 3198 + s.vgpu = workload->vgpu; 3199 + s.engine = workload->engine; 3200 + s.ring_start = gma_start; 3201 + s.ring_size = ctx_size; 3202 + s.ring_head = gma_start + gma_head; 3203 + s.ring_tail = gma_start + gma_tail; 3204 + s.rb_va = ce->lrc_reg_state; 3205 + s.workload = workload; 3206 + s.is_ctx_wa = false; 3207 + s.is_init_ctx = false; 3208 + 3209 + /* don't scan the first RING_CTX_SIZE(0x50) dwords, as it's ring 3210 + * context 3211 + */ 3212 + ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE); 3213 + if (ret) 3214 + goto out; 3215 + 3216 + ret = command_scan(&s, gma_head, gma_tail, 3217 + gma_start, ctx_size); 3218 + out: 3219 + if (ret) 3220 + gvt_vgpu_err("scan shadow ctx error\n"); 3221 + 3222 + return ret; 3088 3223 } 3089 3224 3090 3225 static int init_cmd_table(struct intel_gvt *gvt)
+5
drivers/gpu/drm/i915/gvt/cmd_parser.h
··· 40 40 41 41 struct intel_gvt; 42 42 struct intel_shadow_wa_ctx; 43 + struct intel_vgpu; 43 44 struct intel_vgpu_workload; 44 45 45 46 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt); ··· 50 49 int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload); 51 50 52 51 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); 52 + 53 + void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu); 54 + 55 + int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload); 53 56 54 57 #endif
+36 -1
drivers/gpu/drm/i915/gvt/gvt.h
··· 248 248 #define INTEL_GVT_MMIO_HASH_BITS 11 249 249 250 250 struct intel_gvt_mmio { 251 - u8 *mmio_attribute; 251 + u16 *mmio_attribute; 252 252 /* Register contains RO bits */ 253 253 #define F_RO (1 << 0) 254 254 /* Register contains graphics address */ ··· 267 267 * logical context image 268 268 */ 269 269 #define F_SR_IN_CTX (1 << 7) 270 + /* Value of command write of this reg needs to be patched */ 271 + #define F_CMD_WRITE_PATCH (1 << 8) 270 272 271 273 struct gvt_mmio_block *mmio_block; 272 274 unsigned int num_mmio_block; ··· 335 333 u32 *mocs_mmio_offset_list; 336 334 u32 mocs_mmio_offset_list_cnt; 337 335 } engine_mmio_list; 336 + bool is_reg_whitelist_updated; 338 337 339 338 struct dentry *debugfs_root; 340 339 }; ··· 418 415 419 416 #define vgpu_fence_base(vgpu) (vgpu->fence.base) 420 417 #define vgpu_fence_sz(vgpu) (vgpu->fence.size) 418 + 419 + /* ring context size i.e. the first 0x50 dwords*/ 420 + #define RING_CTX_SIZE 320 421 421 422 422 struct intel_vgpu_creation_params { 423 423 __u64 handle; ··· 693 687 } 694 688 695 689 void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu); 690 + /** 691 + * intel_gvt_mmio_set_cmd_write_patch - 692 + * mark an MMIO if its cmd write needs to be 693 + * patched 694 + * @gvt: a GVT device 695 + * @offset: register offset 696 + * 697 + */ 698 + static inline void intel_gvt_mmio_set_cmd_write_patch( 699 + struct intel_gvt *gvt, unsigned int offset) 700 + { 701 + gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH; 702 + } 703 + 704 + /** 705 + * intel_gvt_mmio_is_cmd_write_patch - check if an mmio's cmd access needs to 706 + * be patched 707 + * @gvt: a GVT device 708 + * @offset: register offset 709 + * 710 + * Returns: 711 + * True if GPU commmand write to an MMIO should be patched 712 + */ 713 + static inline bool intel_gvt_mmio_is_cmd_write_patch( 714 + struct intel_gvt *gvt, unsigned int offset) 715 + { 716 + return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH; 717 + } 718 + 696 719 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); 697 720 void intel_gvt_debugfs_init(struct intel_gvt *gvt); 698 721 void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
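Two small consequences of this header change are easy to miss; the comment below spells them out (values taken from the hunks above):

/* F_CMD_WRITE_PATCH is (1 << 8): a ninth flag no longer fits in a u8,
 * which is why mmio_attribute is widened to u16 in the same hunk.
 *
 * RING_CTX_SIZE is 320 bytes, i.e. the 0x50 (80) dwords of per-ring
 * context that precede the engine-context image; the scheduler copy and
 * the new context scanner both skip exactly this prefix. */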
+8 -7
drivers/gpu/drm/i915/gvt/handlers.c
··· 83 83 memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes); 84 84 } 85 85 86 - static struct intel_gvt_mmio_info *find_mmio_info(struct intel_gvt *gvt, 86 + struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, 87 87 unsigned int offset) 88 88 { 89 89 struct intel_gvt_mmio_info *e; ··· 96 96 } 97 97 98 98 static int new_mmio_info(struct intel_gvt *gvt, 99 - u32 offset, u8 flags, u32 size, 99 + u32 offset, u16 flags, u32 size, 100 100 u32 addr_mask, u32 ro_mask, u32 device, 101 101 gvt_mmio_func read, gvt_mmio_func write) 102 102 { ··· 118 118 return -ENOMEM; 119 119 120 120 info->offset = i; 121 - p = find_mmio_info(gvt, info->offset); 121 + p = intel_gvt_find_mmio_info(gvt, info->offset); 122 122 if (p) { 123 123 WARN(1, "dup mmio definition offset %x\n", 124 124 info->offset); ··· 1965 1965 1966 1966 /* RING MODE */ 1967 1967 #define RING_REG(base) _MMIO((base) + 0x29c) 1968 - MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, 1968 + MMIO_RING_DFH(RING_REG, D_ALL, 1969 + F_MODE_MASK | F_CMD_ACCESS | F_CMD_WRITE_PATCH, NULL, 1969 1970 ring_mode_mmio_write); 1970 1971 #undef RING_REG 1971 1972 ··· 2886 2885 MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL); 2887 2886 MMIO_D(_MMIO(0xb110), D_BDW); 2888 2887 2889 - MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, 2890 - NULL, force_nonpriv_write); 2888 + MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS | F_CMD_WRITE_PATCH, 0, 0, 2889 + D_BDW_PLUS, NULL, force_nonpriv_write); 2891 2890 2892 2891 MMIO_D(_MMIO(0x44484), D_BDW_PLUS); 2893 2892 MMIO_D(_MMIO(0x4448c), D_BDW_PLUS); ··· 3627 3626 /* 3628 3627 * Normal tracked MMIOs. 3629 3628 */ 3630 - mmio_info = find_mmio_info(gvt, offset); 3629 + mmio_info = intel_gvt_find_mmio_info(gvt, offset); 3631 3630 if (!mmio_info) { 3632 3631 gvt_dbg_mmio("untracked MMIO %08x len %d\n", offset, bytes); 3633 3632 goto default_rw;
+3
drivers/gpu/drm/i915/gvt/mmio.h
··· 80 80 int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), 81 81 void *data); 82 82 83 + struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt, 84 + unsigned int offset); 85 + 83 86 int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); 84 87 void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr); 85 88 void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
+2
drivers/gpu/drm/i915/gvt/reg.h
··· 133 133 #define RING_GFX_MODE(base) _MMIO((base) + 0x29c) 134 134 #define VF_GUARDBAND _MMIO(0x83a4) 135 135 136 + 137 + #define BCS_TILE_REGISTER_VAL_OFFSET (0x43*4) 136 138 #endif
+18 -4
drivers/gpu/drm/i915/gvt/scheduler.c
··· 137 137 int i; 138 138 bool skip = false; 139 139 int ring_id = workload->engine->id; 140 + int ret; 140 141 141 142 GEM_BUG_ON(!intel_context_is_pinned(ctx)); 142 143 ··· 164 163 COPY_REG(bb_per_ctx_ptr); 165 164 COPY_REG(rcs_indirect_ctx); 166 165 COPY_REG(rcs_indirect_ctx_offset); 167 - } 166 + } else if (workload->engine->id == BCS0) 167 + intel_gvt_hypervisor_read_gpa(vgpu, 168 + workload->ring_context_gpa + 169 + BCS_TILE_REGISTER_VAL_OFFSET, 170 + (void *)shadow_ring_context + 171 + BCS_TILE_REGISTER_VAL_OFFSET, 4); 168 172 #undef COPY_REG 169 173 #undef COPY_REG_MASKED 170 174 175 + /* don't copy Ring Context (the first 0x50 dwords), 176 + * only copy the Engine Context part from guest 177 + */ 171 178 intel_gvt_hypervisor_read_gpa(vgpu, 172 179 workload->ring_context_gpa + 173 - sizeof(*shadow_ring_context), 180 + RING_CTX_SIZE, 174 181 (void *)shadow_ring_context + 175 - sizeof(*shadow_ring_context), 176 - I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); 182 + RING_CTX_SIZE, 183 + I915_GTT_PAGE_SIZE - RING_CTX_SIZE); 177 184 178 185 sr_oa_regs(workload, (u32 *)shadow_ring_context, false); 179 186 ··· 247 238 gpa_base = context_gpa; 248 239 gpa_size = I915_GTT_PAGE_SIZE; 249 240 dst = context_base + (i << I915_GTT_PAGE_SHIFT); 241 + } 242 + ret = intel_gvt_scan_engine_context(workload); 243 + if (ret) { 244 + gvt_vgpu_err("invalid cmd found in guest context pages\n"); 245 + return ret; 250 246 } 251 247 s->last_ctx[ring_id].valid = true; 252 248 return 0;
+3 -1
drivers/gpu/drm/i915/gvt/vgpu.c
··· 500 500 501 501 mutex_lock(&gvt->lock); 502 502 vgpu = __intel_gvt_create_vgpu(gvt, &param); 503 - if (!IS_ERR(vgpu)) 503 + if (!IS_ERR(vgpu)) { 504 504 /* calculate left instance change for types */ 505 505 intel_gvt_update_vgpu_types(gvt); 506 + intel_gvt_update_reg_whitelist(vgpu); 507 + } 506 508 mutex_unlock(&gvt->lock); 507 509 508 510 return vgpu;
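Taken together, the GVT hunks hook the new scanners into two spots; a condensed view of the call flow, using only functions that appear in the diffs above (surrounding code elided):

/* Once, at vGPU creation (vgpu.c): let the HW fill dummy engine contexts,
 * then scan them to build the cmd-accessible / cmd-write-patch lists. */
intel_gvt_update_reg_whitelist(vgpu);

/* Per workload (scheduler.c): scan the guest's engine context before it
 * is shadowed, and reject it if an invalid command is found. */
ret = intel_gvt_scan_engine_context(workload);
if (ret) {
        gvt_vgpu_err("invalid cmd found in guest context pages\n");
        return ret;
}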
+1 -1
drivers/gpu/drm/i915/i915_cmd_parser.c
··· 1143 1143 void *dst, *src; 1144 1144 int ret; 1145 1145 1146 - dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB); 1146 + dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB); 1147 1147 if (IS_ERR(dst)) 1148 1148 return dst; 1149 1149
+1 -143
drivers/gpu/drm/i915/i915_debugfs.c
··· 210 210 spin_unlock(&obj->vma.lock); 211 211 212 212 seq_printf(m, " (pinned x %d)", pin_count); 213 - if (obj->stolen) 213 + if (i915_gem_object_is_stolen(obj)) 214 214 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 215 215 if (i915_gem_object_is_framebuffer(obj)) 216 216 seq_printf(m, " (fb)"); ··· 218 218 engine = i915_gem_object_last_write_engine(obj); 219 219 if (engine) 220 220 seq_printf(m, " (%s)", engine->name); 221 - } 222 - 223 - struct file_stats { 224 - struct i915_address_space *vm; 225 - unsigned long count; 226 - u64 total; 227 - u64 active, inactive; 228 - u64 closed; 229 - }; 230 - 231 - static int per_file_stats(int id, void *ptr, void *data) 232 - { 233 - struct drm_i915_gem_object *obj = ptr; 234 - struct file_stats *stats = data; 235 - struct i915_vma *vma; 236 - 237 - if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount)) 238 - return 0; 239 - 240 - stats->count++; 241 - stats->total += obj->base.size; 242 - 243 - spin_lock(&obj->vma.lock); 244 - if (!stats->vm) { 245 - for_each_ggtt_vma(vma, obj) { 246 - if (!drm_mm_node_allocated(&vma->node)) 247 - continue; 248 - 249 - if (i915_vma_is_active(vma)) 250 - stats->active += vma->node.size; 251 - else 252 - stats->inactive += vma->node.size; 253 - 254 - if (i915_vma_is_closed(vma)) 255 - stats->closed += vma->node.size; 256 - } 257 - } else { 258 - struct rb_node *p = obj->vma.tree.rb_node; 259 - 260 - while (p) { 261 - long cmp; 262 - 263 - vma = rb_entry(p, typeof(*vma), obj_node); 264 - cmp = i915_vma_compare(vma, stats->vm, NULL); 265 - if (cmp == 0) { 266 - if (drm_mm_node_allocated(&vma->node)) { 267 - if (i915_vma_is_active(vma)) 268 - stats->active += vma->node.size; 269 - else 270 - stats->inactive += vma->node.size; 271 - 272 - if (i915_vma_is_closed(vma)) 273 - stats->closed += vma->node.size; 274 - } 275 - break; 276 - } 277 - if (cmp < 0) 278 - p = p->rb_right; 279 - else 280 - p = p->rb_left; 281 - } 282 - } 283 - spin_unlock(&obj->vma.lock); 284 - 285 - i915_gem_object_put(obj); 286 - return 0; 287 - } 288 - 289 - #define print_file_stats(m, name, stats) do { \ 290 - if (stats.count) \ 291 - seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \ 292 - name, \ 293 - stats.count, \ 294 - stats.total, \ 295 - stats.active, \ 296 - stats.inactive, \ 297 - stats.closed); \ 298 - } while (0) 299 - 300 - static void print_context_stats(struct seq_file *m, 301 - struct drm_i915_private *i915) 302 - { 303 - struct file_stats kstats = {}; 304 - struct i915_gem_context *ctx, *cn; 305 - 306 - spin_lock(&i915->gem.contexts.lock); 307 - list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { 308 - struct i915_gem_engines_iter it; 309 - struct intel_context *ce; 310 - 311 - if (!kref_get_unless_zero(&ctx->ref)) 312 - continue; 313 - 314 - spin_unlock(&i915->gem.contexts.lock); 315 - 316 - for_each_gem_engine(ce, 317 - i915_gem_context_lock_engines(ctx), it) { 318 - if (intel_context_pin_if_active(ce)) { 319 - rcu_read_lock(); 320 - if (ce->state) 321 - per_file_stats(0, 322 - ce->state->obj, &kstats); 323 - per_file_stats(0, ce->ring->vma->obj, &kstats); 324 - rcu_read_unlock(); 325 - intel_context_unpin(ce); 326 - } 327 - } 328 - i915_gem_context_unlock_engines(ctx); 329 - 330 - mutex_lock(&ctx->mutex); 331 - if (!IS_ERR_OR_NULL(ctx->file_priv)) { 332 - struct file_stats stats = { 333 - .vm = rcu_access_pointer(ctx->vm), 334 - }; 335 - struct drm_file *file = ctx->file_priv->file; 336 - struct task_struct *task; 337 - char name[80]; 338 - 339 
- rcu_read_lock(); 340 - idr_for_each(&file->object_idr, per_file_stats, &stats); 341 - rcu_read_unlock(); 342 - 343 - rcu_read_lock(); 344 - task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID); 345 - snprintf(name, sizeof(name), "%s", 346 - task ? task->comm : "<unknown>"); 347 - rcu_read_unlock(); 348 - 349 - print_file_stats(m, name, stats); 350 - } 351 - mutex_unlock(&ctx->mutex); 352 - 353 - spin_lock(&i915->gem.contexts.lock); 354 - list_safe_reset_next(ctx, cn, link); 355 - i915_gem_context_put(ctx); 356 - } 357 - spin_unlock(&i915->gem.contexts.lock); 358 - 359 - print_file_stats(m, "[k]contexts", kstats); 360 221 } 361 222 362 223 static int i915_gem_object_info(struct seq_file *m, void *data) ··· 233 372 for_each_memory_region(mr, i915, id) 234 373 seq_printf(m, "%s: total:%pa, available:%pa bytes\n", 235 374 mr->name, &mr->total, &mr->avail); 236 - seq_putc(m, '\n'); 237 - 238 - print_context_stats(m, i915); 239 375 240 376 return 0; 241 377 }
+11 -119
drivers/gpu/drm/i915/i915_gem.c
··· 180 180 } 181 181 182 182 static int 183 - i915_gem_create(struct drm_file *file, 184 - struct intel_memory_region *mr, 185 - u64 *size_p, 186 - u32 *handle_p) 187 - { 188 - struct drm_i915_gem_object *obj; 189 - u32 handle; 190 - u64 size; 191 - int ret; 192 - 193 - GEM_BUG_ON(!is_power_of_2(mr->min_page_size)); 194 - size = round_up(*size_p, mr->min_page_size); 195 - if (size == 0) 196 - return -EINVAL; 197 - 198 - /* For most of the ABI (e.g. mmap) we think in system pages */ 199 - GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE)); 200 - 201 - /* Allocate the new object */ 202 - obj = i915_gem_object_create_region(mr, size, 0); 203 - if (IS_ERR(obj)) 204 - return PTR_ERR(obj); 205 - 206 - ret = drm_gem_handle_create(file, &obj->base, &handle); 207 - /* drop reference from allocate - handle holds it now */ 208 - i915_gem_object_put(obj); 209 - if (ret) 210 - return ret; 211 - 212 - *handle_p = handle; 213 - *size_p = size; 214 - return 0; 215 - } 216 - 217 - int 218 - i915_gem_dumb_create(struct drm_file *file, 219 - struct drm_device *dev, 220 - struct drm_mode_create_dumb *args) 221 - { 222 - enum intel_memory_type mem_type; 223 - int cpp = DIV_ROUND_UP(args->bpp, 8); 224 - u32 format; 225 - 226 - switch (cpp) { 227 - case 1: 228 - format = DRM_FORMAT_C8; 229 - break; 230 - case 2: 231 - format = DRM_FORMAT_RGB565; 232 - break; 233 - case 4: 234 - format = DRM_FORMAT_XRGB8888; 235 - break; 236 - default: 237 - return -EINVAL; 238 - } 239 - 240 - /* have to work out size/pitch and return them */ 241 - args->pitch = ALIGN(args->width * cpp, 64); 242 - 243 - /* align stride to page size so that we can remap */ 244 - if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format, 245 - DRM_FORMAT_MOD_LINEAR)) 246 - args->pitch = ALIGN(args->pitch, 4096); 247 - 248 - if (args->pitch < args->width) 249 - return -EINVAL; 250 - 251 - args->size = mul_u32_u32(args->pitch, args->height); 252 - 253 - mem_type = INTEL_MEMORY_SYSTEM; 254 - if (HAS_LMEM(to_i915(dev))) 255 - mem_type = INTEL_MEMORY_LOCAL; 256 - 257 - return i915_gem_create(file, 258 - intel_memory_region_by_type(to_i915(dev), 259 - mem_type), 260 - &args->size, &args->handle); 261 - } 262 - 263 - /** 264 - * Creates a new mm object and returns a handle to it. 
265 - * @dev: drm device pointer 266 - * @data: ioctl data blob 267 - * @file: drm file pointer 268 - */ 269 - int 270 - i915_gem_create_ioctl(struct drm_device *dev, void *data, 271 - struct drm_file *file) 272 - { 273 - struct drm_i915_private *i915 = to_i915(dev); 274 - struct drm_i915_gem_create *args = data; 275 - 276 - i915_gem_flush_free_objects(i915); 277 - 278 - return i915_gem_create(file, 279 - intel_memory_region_by_type(i915, 280 - INTEL_MEMORY_SYSTEM), 281 - &args->size, &args->handle); 282 - } 283 - 284 - static int 285 183 shmem_pread(struct page *page, int offset, int len, char __user *user_data, 286 184 bool needs_clflush) 287 185 { ··· 957 1059 i915_gem_object_is_tiled(obj) && 958 1060 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 959 1061 if (obj->mm.madv == I915_MADV_WILLNEED) { 960 - GEM_BUG_ON(!obj->mm.quirked); 961 - __i915_gem_object_unpin_pages(obj); 962 - obj->mm.quirked = false; 1062 + GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj)); 1063 + i915_gem_object_clear_tiling_quirk(obj); 1064 + i915_gem_object_make_shrinkable(obj); 963 1065 } 964 1066 if (args->madv == I915_MADV_WILLNEED) { 965 - GEM_BUG_ON(obj->mm.quirked); 966 - __i915_gem_object_pin_pages(obj); 967 - obj->mm.quirked = true; 1067 + GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj)); 1068 + i915_gem_object_make_unshrinkable(obj); 1069 + i915_gem_object_set_tiling_quirk(obj); 968 1070 } 969 1071 } 970 1072 ··· 1175 1277 * the objects as well, see i915_gem_freeze() 1176 1278 */ 1177 1279 1178 - wakeref = intel_runtime_pm_get(&i915->runtime_pm); 1179 - 1180 - i915_gem_shrink(i915, -1UL, NULL, ~0); 1280 + with_intel_runtime_pm(&i915->runtime_pm, wakeref) 1281 + i915_gem_shrink(i915, -1UL, NULL, ~0); 1181 1282 i915_gem_drain_freed_objects(i915); 1182 1283 1183 - list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) { 1184 - i915_gem_object_lock(obj, NULL); 1185 - drm_WARN_ON(&i915->drm, 1186 - i915_gem_object_set_to_cpu_domain(obj, true)); 1187 - i915_gem_object_unlock(obj); 1188 - } 1189 - 1190 - intel_runtime_pm_put(&i915->runtime_pm, wakeref); 1284 + wbinvd_on_all_cpus(); 1285 + list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) 1286 + __start_cpu_write(obj); 1191 1287 1192 1288 return 0; 1193 1289 }
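
Note on the i915_gem_freeze_late() conversion above: with_intel_runtime_pm() is the scoped-wakeref helper from intel_runtime_pm.h, which pairs the get and put around the statement that follows it. Roughly (sketched from memory, the real macro may differ in detail):

	/* Sketch of the scoped-wakeref pattern; see intel_runtime_pm.h. */
	#define with_intel_runtime_pm(rpm, wf) \
		for ((wf) = intel_runtime_pm_get(rpm); (wf); \
		     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		i915_gem_shrink(i915, -1UL, NULL, ~0);	/* put runs when the body exits */

so the explicit intel_runtime_pm_get()/intel_runtime_pm_put() pair that used to bracket the shrink is no longer needed.
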
+8 -1
drivers/gpu/drm/i915/i915_gem.h
··· 38 38 39 39 #define GEM_SHOW_DEBUG() drm_debug_enabled(DRM_UT_DRIVER) 40 40 41 + #ifdef CONFIG_DRM_I915_DEBUG_GEM_ONCE 42 + #define __GEM_BUG(cond) BUG() 43 + #else 44 + #define __GEM_BUG(cond) \ 45 + WARN(1, "%s:%d GEM_BUG_ON(%s)\n", __func__, __LINE__, __stringify(cond)) 46 + #endif 47 + 41 48 #define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \ 42 49 GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \ 43 50 __func__, __LINE__, __stringify(condition)); \ 44 51 GEM_TRACE_DUMP(); \ 45 - BUG(); \ 52 + __GEM_BUG(condition); \ 46 53 } \ 47 54 } while(0) 48 55 #define GEM_WARN_ON(expr) WARN_ON(expr)
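
With this change a GEM assertion is only fatal when CONFIG_DRM_I915_DEBUG_GEM_ONCE is enabled; by default it degrades to a WARN and execution continues. Purely as a hand-written illustration of the expansion (not actual driver code):

	GEM_BUG_ON(vma->pages == NULL);

	/* with DEBUG_GEM=y and DEBUG_GEM_ONCE=n, this expands roughly to: */
	if (unlikely(vma->pages == NULL)) {
		GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n",
			      __func__, __LINE__, "vma->pages == NULL");
		GEM_TRACE_DUMP();
		WARN(1, "%s:%d GEM_BUG_ON(%s)\n",
		     __func__, __LINE__, "vma->pages == NULL");	/* non-fatal */
	}

With DEBUG_GEM_ONCE=y the WARN() becomes BUG(), so the machine stops at the very first failure instead of flooding the log with follow-on errors.
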
+12 -1
drivers/gpu/drm/i915/i915_gem_evict.c
··· 61 61 return drm_mm_scan_add_block(scan, &vma->node); 62 62 } 63 63 64 + static bool defer_evict(struct i915_vma *vma) 65 + { 66 + if (i915_vma_is_active(vma)) 67 + return true; 68 + 69 + if (i915_vma_is_scanout(vma)) 70 + return true; 71 + 72 + return false; 73 + } 74 + 64 75 /** 65 76 * i915_gem_evict_something - Evict vmas to make room for binding a new one 66 77 * @vm: address space to evict from ··· 161 150 * To notice when we complete one full cycle, we record the 162 151 * first active element seen, before moving it to the tail. 163 152 */ 164 - if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) { 153 + if (active != ERR_PTR(-EAGAIN) && defer_evict(vma)) { 165 154 if (!active) 166 155 active = vma; 167 156
+3 -1
drivers/gpu/drm/i915/i915_gpu_error.c
··· 1051 1051 for_each_sgt_daddr(dma, iter, vma->pages) { 1052 1052 void __iomem *s; 1053 1053 1054 - s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE); 1054 + s = io_mapping_map_wc(&mem->iomap, 1055 + dma - mem->region.start, 1056 + PAGE_SIZE); 1055 1057 ret = compress_page(compress, 1056 1058 (void __force *)s, dst, 1057 1059 true);
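
The fix above rebases the absolute DMA address to an offset within the memory region before mapping, since mem->iomap only covers the region itself. With hypothetical numbers, purely for illustration:

	void __iomem *s;
	dma_addr_t dma               = 0x100042000;	/* device address of the page */
	resource_size_t region_start = 0x100000000;	/* mem->region.start */

	/* map an offset into the region's iomap, not an absolute address */
	s = io_mapping_map_wc(&mem->iomap, dma - region_start, PAGE_SIZE);
	/* here: offset 0x42000 */
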
+1 -1
drivers/gpu/drm/i915/i915_mm.c
··· 62 62 { 63 63 struct remap_pfn *r = data; 64 64 65 - if (GEM_WARN_ON(!r->sgt.pfn)) 65 + if (GEM_WARN_ON(!r->sgt.sgp)) 66 66 return -EINVAL; 67 67 68 68 /* Special PTE are not associated with any struct page */
+1
drivers/gpu/drm/i915/i915_params.h
··· 32 32 33 33 #define ENABLE_GUC_SUBMISSION BIT(0) 34 34 #define ENABLE_GUC_LOAD_HUC BIT(1) 35 + #define ENABLE_GUC_MASK GENMASK(1, 0) 35 36 36 37 /* 37 38 * Invoke param, a function-like macro, for each i915 param, with arguments:
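
ENABLE_GUC_MASK is GENMASK(1, 0) == 0x3, i.e. the union of the two defined flag bits, which gives callers a single constant to validate the enable_guc modparam against. A hedged sketch of one way such a mask can be used (illustrative only, not the driver's actual sanitisation code):

	int guc = i915->params.enable_guc;

	if (guc & ~ENABLE_GUC_MASK)	/* reject bits we do not define */
		drm_info(&i915->drm,
			 "Ignoring unknown enable_guc bits 0x%x\n",
			 guc & ~ENABLE_GUC_MASK);
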
+3 -2
drivers/gpu/drm/i915/i915_pci.c
··· 455 455 .has_llc = 1, \ 456 456 .has_rc6 = 1, \ 457 457 .has_rc6p = 1, \ 458 + .has_reset_engine = true, \ 458 459 .has_rps = true, \ 459 460 .dma_mask_size = 40, \ 460 461 .ppgtt_type = INTEL_PPGTT_ALIASING, \ ··· 514 513 .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), 515 514 .has_runtime_pm = 1, 516 515 .has_rc6 = 1, 516 + .has_reset_engine = true, 517 517 .has_rps = true, 518 518 .display.has_gmch = 1, 519 519 .display.has_hotplug = 1, ··· 573 571 .dma_mask_size = 39, \ 574 572 .ppgtt_type = INTEL_PPGTT_FULL, \ 575 573 .ppgtt_size = 48, \ 576 - .has_64bit_reloc = 1, \ 577 - .has_reset_engine = 1 574 + .has_64bit_reloc = 1 578 575 579 576 #define BDW_PLATFORM \ 580 577 GEN8_FEATURES, \
+25 -18
drivers/gpu/drm/i915/i915_request.c
··· 276 276 277 277 bool i915_request_retire(struct i915_request *rq) 278 278 { 279 - if (!i915_request_completed(rq)) 279 + if (!__i915_request_is_complete(rq)) 280 280 return false; 281 281 282 282 RQ_TRACE(rq, "\n"); ··· 342 342 struct i915_request *tmp; 343 343 344 344 RQ_TRACE(rq, "\n"); 345 - 346 - GEM_BUG_ON(!i915_request_completed(rq)); 345 + GEM_BUG_ON(!__i915_request_is_complete(rq)); 347 346 348 347 do { 349 348 tmp = list_first_entry(&tl->requests, typeof(*tmp), link); ··· 551 552 * dropped upon retiring. (Otherwise if resubmit a *retired* 552 553 * request, this would be a horrible use-after-free.) 553 554 */ 554 - if (i915_request_completed(request)) 555 - goto xfer; 555 + if (__i915_request_is_complete(request)) { 556 + list_del_init(&request->sched.link); 557 + goto active; 558 + } 556 559 557 560 if (unlikely(intel_context_is_banned(request->context))) 558 561 i915_request_set_error_once(request, -EIO); ··· 589 588 engine->serial++; 590 589 result = true; 591 590 592 - xfer: 593 - if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) { 594 - list_move_tail(&request->sched.link, &engine->active.requests); 595 - clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags); 596 - } 591 + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); 592 + list_move_tail(&request->sched.link, &engine->active.requests); 593 + active: 594 + clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags); 595 + set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); 597 596 598 597 /* 599 598 * XXX Rollback bonded-execution on __i915_request_unsubmit()? ··· 653 652 i915_request_cancel_breadcrumb(request); 654 653 655 654 /* We've already spun, don't charge on resubmitting. */ 656 - if (request->sched.semaphores && i915_request_started(request)) 655 + if (request->sched.semaphores && __i915_request_has_started(request)) 657 656 request->sched.semaphores = 0; 658 657 659 658 /* ··· 865 864 RCU_INIT_POINTER(rq->timeline, tl); 866 865 RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline); 867 866 rq->hwsp_seqno = tl->hwsp_seqno; 868 - GEM_BUG_ON(i915_request_completed(rq)); 867 + GEM_BUG_ON(__i915_request_is_complete(rq)); 869 868 870 869 rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ 871 870 ··· 971 970 if (i915_request_started(signal)) 972 971 return 0; 973 972 973 + /* 974 + * The caller holds a reference on @signal, but we do not serialise 975 + * against it being retired and removed from the lists. 976 + * 977 + * We do not hold a reference to the request before @signal, and 978 + * so must be very careful to ensure that it is not _recycled_ as 979 + * we follow the link backwards. 980 + */ 974 981 fence = NULL; 975 982 rcu_read_lock(); 976 - spin_lock_irq(&signal->lock); 977 983 do { 978 984 struct list_head *pos = READ_ONCE(signal->link.prev); 979 985 struct i915_request *prev; 980 986 981 987 /* Confirm signal has not been retired, the link is valid */ 982 - if (unlikely(i915_request_started(signal))) 988 + if (unlikely(__i915_request_has_started(signal))) 983 989 break; 984 990 985 991 /* Is signal the earliest request on its timeline? 
*/ ··· 1011 1003 1012 1004 fence = &prev->fence; 1013 1005 } while (0); 1014 - spin_unlock_irq(&signal->lock); 1015 1006 rcu_read_unlock(); 1016 1007 if (!fence) 1017 1008 return 0; ··· 1527 1520 */ 1528 1521 prev = to_request(__i915_active_fence_set(&timeline->last_request, 1529 1522 &rq->fence)); 1530 - if (prev && !i915_request_completed(prev)) { 1523 + if (prev && !__i915_request_is_complete(prev)) { 1531 1524 /* 1532 1525 * The requests are supposed to be kept in order. However, 1533 1526 * we need to be wary in case the timeline->last_request ··· 1904 1897 1905 1898 static const char *run_status(const struct i915_request *rq) 1906 1899 { 1907 - if (i915_request_completed(rq)) 1900 + if (__i915_request_is_complete(rq)) 1908 1901 return "!"; 1909 1902 1910 - if (i915_request_started(rq)) 1903 + if (__i915_request_has_started(rq)) 1911 1904 return "*"; 1912 1905 1913 1906 if (!i915_sw_fence_signaled(&rq->semaphore))
+1 -1
drivers/gpu/drm/i915/i915_scheduler.c
··· 520 520 if (signaler->timeline == rq->timeline) 521 521 continue; 522 522 523 - if (i915_request_completed(signaler)) 523 + if (__i915_request_is_complete(signaler)) 524 524 continue; 525 525 526 526 i915_request_show(m, signaler, prefix, indent + 2);
+15
drivers/gpu/drm/i915/i915_vma.h
··· 363 363 364 364 void i915_vma_parked(struct intel_gt *gt); 365 365 366 + static inline bool i915_vma_is_scanout(const struct i915_vma *vma) 367 + { 368 + return test_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma)); 369 + } 370 + 371 + static inline void i915_vma_mark_scanout(struct i915_vma *vma) 372 + { 373 + set_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma)); 374 + } 375 + 376 + static inline void i915_vma_clear_scanout(struct i915_vma *vma) 377 + { 378 + clear_bit(I915_VMA_SCANOUT_BIT, __i915_vma_flags(vma)); 379 + } 380 + 366 381 #define for_each_until(cond) if (cond) break; else 367 382 368 383 /**
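
These helpers let the display side tag a framebuffer's GGTT binding so that defer_evict() in i915_gem_evict.c (earlier in this diff) treats it like an active vma and only evicts it as a last resort. An illustrative sketch of the intended pin/unpin usage; the exact display call sites may differ:

	/* pin path: keep the scanout buffer out of casual eviction */
	vma = i915_gem_object_pin_to_display_plane(obj, alignment, view, flags);
	if (IS_ERR(vma))
		return PTR_ERR(vma);
	i915_vma_mark_scanout(vma);

	/* unpin path: drop the protection once the fb is no longer displayed */
	i915_vma_clear_scanout(vma);
	i915_vma_unpin(vma);
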
+3
drivers/gpu/drm/i915/i915_vma_types.h
··· 249 249 #define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT)) 250 250 #define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT)) 251 251 252 + #define I915_VMA_SCANOUT_BIT 18 253 + #define I915_VMA_SCANOUT ((int)BIT(I915_VMA_SCANOUT_BIT)) 254 + 252 255 struct i915_active active; 253 256 254 257 #define I915_VMA_PAGES_BIAS 24
+4 -4
drivers/gpu/drm/i915/intel_memory_region.h
··· 57 57 int (*init)(struct intel_memory_region *mem); 58 58 void (*release)(struct intel_memory_region *mem); 59 59 60 - struct drm_i915_gem_object * 61 - (*create_object)(struct intel_memory_region *mem, 62 - resource_size_t size, 63 - unsigned int flags); 60 + int (*init_object)(struct intel_memory_region *mem, 61 + struct drm_i915_gem_object *obj, 62 + resource_size_t size, 63 + unsigned int flags); 64 64 }; 65 65 66 66 struct intel_memory_region {
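
With init_object() replacing create_object(), the region hook no longer allocates the GEM object; the common code allocates it and the region only initialises it, so allocation and error unwinding live in one place. A rough sketch of the caller side under the new contract (illustrative; see i915_gem_object_create_region() for the real code):

	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	err = mem->ops->init_object(mem, obj, size, flags);
	if (err) {
		i915_gem_object_free(obj);
		return ERR_PTR(err);
	}

	return obj;
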
+5 -5
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
··· 38 38 struct list_head *objects) 39 39 { 40 40 /* quirk is only for live tiled objects, use it to declare ownership */ 41 - GEM_BUG_ON(obj->mm.quirked); 42 - obj->mm.quirked = true; 41 + GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj)); 42 + i915_gem_object_set_tiling_quirk(obj); 43 43 list_add(&obj->st_link, objects); 44 44 } 45 45 ··· 85 85 struct i915_vma *vma; 86 86 87 87 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) 88 - if (vma->obj->mm.quirked) 88 + if (i915_gem_object_has_tiling_quirk(vma->obj)) 89 89 i915_vma_unpin(vma); 90 90 } 91 91 ··· 94 94 struct drm_i915_gem_object *obj, *on; 95 95 96 96 list_for_each_entry_safe(obj, on, list, st_link) { 97 - GEM_BUG_ON(!obj->mm.quirked); 98 - obj->mm.quirked = false; 97 + GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj)); 98 + i915_gem_object_clear_tiling_quirk(obj); 99 99 i915_gem_object_put(obj); 100 100
+7 -12
drivers/gpu/drm/i915/selftests/mock_region.c
··· 15 15 .release = i915_gem_object_release_memory_region, 16 16 }; 17 17 18 - static struct drm_i915_gem_object * 19 - mock_object_create(struct intel_memory_region *mem, 20 - resource_size_t size, 21 - unsigned int flags) 18 + static int mock_object_init(struct intel_memory_region *mem, 19 + struct drm_i915_gem_object *obj, 20 + resource_size_t size, 21 + unsigned int flags) 22 22 { 23 23 static struct lock_class_key lock_class; 24 24 struct drm_i915_private *i915 = mem->i915; 25 - struct drm_i915_gem_object *obj; 26 25 27 26 if (size > mem->mm.size) 28 - return ERR_PTR(-E2BIG); 29 - 30 - obj = i915_gem_object_alloc(); 31 - if (!obj) 32 - return ERR_PTR(-ENOMEM); 27 + return -E2BIG; 33 28 34 29 drm_gem_private_object_init(&i915->drm, &obj->base, size); 35 30 i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class); ··· 35 40 36 41 i915_gem_object_init_memory_region(obj, mem, flags); 37 42 38 - return obj; 43 + return 0; 39 44 } 40 45 41 46 static const struct intel_memory_region_ops mock_region_ops = { 42 47 .init = intel_memory_region_init_buddy, 43 48 .release = intel_memory_region_release_buddy, 44 - .create_object = mock_object_create, 49 + .init_object = mock_object_init, 45 50 }; 46 51 47 52 struct intel_memory_region *