Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Rename drm_i915_gem_request to i915_request

We want to de-emphasize the link between the request (dependency,
execution and fence tracking) and GEM, and so rename the struct from
drm_i915_gem_request to i915_request. That is, we may implement the GEM
user interface on top of requests, but they are an abstraction for
tracking execution rather than an implementation detail of GEM. (Since
they are not tied to HW, we keep the i915 prefix as opposed to intel.)

In short, the spatch:
@@

@@
- struct drm_i915_gem_request
+ struct i915_request

A corollary to contracting the type name, we also harmonise on using
'rq' shorthand for local variables where space is of the essence and
repetition makes 'request' unwieldy. For globals and struct members,
'request' is still much preferred for its clarity.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180221095636.6649-1-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

+990 -996
+1 -1
drivers/gpu/drm/i915/Makefile
··· 63 63 i915_gem.o \ 64 64 i915_gem_object.o \ 65 65 i915_gem_render_state.o \ 66 - i915_gem_request.o \ 67 66 i915_gem_shrinker.o \ 68 67 i915_gem_stolen.o \ 69 68 i915_gem_tiling.o \ 70 69 i915_gem_timeline.o \ 71 70 i915_gem_userptr.o \ 72 71 i915_gemfs.o \ 72 + i915_request.o \ 73 73 i915_trace_points.o \ 74 74 i915_vma.o \ 75 75 intel_breadcrumbs.o \
+8 -8
drivers/gpu/drm/i915/gvt/scheduler.c
··· 126 126 return 0; 127 127 } 128 128 129 - static inline bool is_gvt_request(struct drm_i915_gem_request *req) 129 + static inline bool is_gvt_request(struct i915_request *req) 130 130 { 131 131 return i915_gem_context_force_single_submission(req->ctx); 132 132 } ··· 148 148 static int shadow_context_status_change(struct notifier_block *nb, 149 149 unsigned long action, void *data) 150 150 { 151 - struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data; 151 + struct i915_request *req = data; 152 152 struct intel_gvt *gvt = container_of(nb, struct intel_gvt, 153 153 shadow_ctx_notifier_block[req->engine->id]); 154 154 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; ··· 333 333 int ring_id = workload->ring_id; 334 334 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; 335 335 struct intel_engine_cs *engine = dev_priv->engine[ring_id]; 336 - struct drm_i915_gem_request *rq; 336 + struct i915_request *rq; 337 337 struct intel_vgpu *vgpu = workload->vgpu; 338 338 struct intel_vgpu_submission *s = &vgpu->submission; 339 339 struct i915_gem_context *shadow_ctx = s->shadow_ctx; 340 340 int ret; 341 341 342 - rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); 342 + rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx); 343 343 if (IS_ERR(rq)) { 344 344 gvt_vgpu_err("fail to allocate gem request\n"); 345 345 ret = PTR_ERR(rq); ··· 348 348 349 349 gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq); 350 350 351 - workload->req = i915_gem_request_get(rq); 351 + workload->req = i915_request_get(rq); 352 352 ret = copy_workload_to_ring_buffer(workload); 353 353 if (ret) 354 354 goto err_unpin; ··· 582 582 if (!IS_ERR_OR_NULL(workload->req)) { 583 583 gvt_dbg_sched("ring id %d submit workload to i915 %p\n", 584 584 ring_id, workload->req); 585 - i915_add_request(workload->req); 585 + i915_request_add(workload->req); 586 586 workload->dispatched = true; 587 587 } 588 588 ··· 769 769 
workload->status = 0; 770 770 } 771 771 772 - i915_gem_request_put(fetch_and_zero(&workload->req)); 772 + i915_request_put(fetch_and_zero(&workload->req)); 773 773 774 774 if (!workload->status && !(vgpu->resetting_eng & 775 775 ENGINE_MASK(ring_id))) { ··· 886 886 887 887 gvt_dbg_sched("ring id %d wait workload %p\n", 888 888 workload->ring_id, workload); 889 - i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT); 889 + i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT); 890 890 891 891 complete: 892 892 gvt_dbg_sched("will complete workload %p, status: %d\n",
+1 -1
drivers/gpu/drm/i915/gvt/scheduler.h
··· 80 80 struct intel_vgpu_workload { 81 81 struct intel_vgpu *vgpu; 82 82 int ring_id; 83 - struct drm_i915_gem_request *req; 83 + struct i915_request *req; 84 84 /* if this workload has been dispatched to i915? */ 85 85 bool dispatched; 86 86 bool shadowed;
+3 -3
drivers/gpu/drm/i915/i915_debugfs.c
··· 519 519 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 520 520 struct file_stats stats; 521 521 struct drm_i915_file_private *file_priv = file->driver_priv; 522 - struct drm_i915_gem_request *request; 522 + struct i915_request *request; 523 523 struct task_struct *task; 524 524 525 525 mutex_lock(&dev->struct_mutex); ··· 536 536 * Therefore, we need to protect this ->comm access using RCU. 537 537 */ 538 538 request = list_first_entry_or_null(&file_priv->mm.request_list, 539 - struct drm_i915_gem_request, 539 + struct i915_request, 540 540 client_link); 541 541 rcu_read_lock(); 542 542 task = pid_task(request && request->ctx->pid ? ··· 4060 4060 I915_WAIT_LOCKED); 4061 4061 4062 4062 if (val & DROP_RETIRE) 4063 - i915_gem_retire_requests(dev_priv); 4063 + i915_retire_requests(dev_priv); 4064 4064 4065 4065 mutex_unlock(&dev->struct_mutex); 4066 4066 }
+3 -3
drivers/gpu/drm/i915/i915_drv.c
··· 808 808 /* 809 809 * The i915 workqueue is primarily used for batched retirement of 810 810 * requests (and thus managing bo) once the task has been completed 811 - * by the GPU. i915_gem_retire_requests() is called directly when we 811 + * by the GPU. i915_retire_requests() is called directly when we 812 812 * need high-priority retirement, such as waiting for an explicit 813 813 * bo. 814 814 * ··· 1992 1992 add_taint(TAINT_WARN, LOCKDEP_STILL_OK); 1993 1993 error: 1994 1994 i915_gem_set_wedged(i915); 1995 - i915_gem_retire_requests(i915); 1995 + i915_retire_requests(i915); 1996 1996 intel_gpu_reset(i915, ALL_ENGINES); 1997 1997 goto finish; 1998 1998 } ··· 2019 2019 int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags) 2020 2020 { 2021 2021 struct i915_gpu_error *error = &engine->i915->gpu_error; 2022 - struct drm_i915_gem_request *active_request; 2022 + struct i915_request *active_request; 2023 2023 int ret; 2024 2024 2025 2025 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+12 -14
drivers/gpu/drm/i915/i915_drv.h
··· 71 71 #include "i915_gem_fence_reg.h" 72 72 #include "i915_gem_object.h" 73 73 #include "i915_gem_gtt.h" 74 - #include "i915_gem_request.h" 75 74 #include "i915_gem_timeline.h" 76 75 76 + #include "i915_request.h" 77 77 #include "i915_vma.h" 78 78 79 79 #include "intel_gvt.h" ··· 1231 1231 * 1232 1232 * #I915_WEDGED - If reset fails and we can no longer use the GPU, 1233 1233 * we set the #I915_WEDGED bit. Prior to command submission, e.g. 1234 - * i915_gem_request_alloc(), this bit is checked and the sequence 1234 + * i915_request_alloc(), this bit is checked and the sequence 1235 1235 * aborted (with -EIO reported to userspace) if set. 1236 1236 */ 1237 1237 unsigned long flags; ··· 3329 3329 3330 3330 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 3331 3331 void i915_vma_move_to_active(struct i915_vma *vma, 3332 - struct drm_i915_gem_request *req, 3332 + struct i915_request *rq, 3333 3333 unsigned int flags); 3334 3334 int i915_gem_dumb_create(struct drm_file *file_priv, 3335 3335 struct drm_device *dev, ··· 3344 3344 3345 3345 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); 3346 3346 3347 - struct drm_i915_gem_request * 3347 + struct i915_request * 3348 3348 i915_gem_find_active_request(struct intel_engine_cs *engine); 3349 - 3350 - void i915_gem_retire_requests(struct drm_i915_private *dev_priv); 3351 3349 3352 3350 static inline bool i915_reset_backoff(struct i915_gpu_error *error) 3353 3351 { ··· 3378 3380 return READ_ONCE(error->reset_engine_count[engine->id]); 3379 3381 } 3380 3382 3381 - struct drm_i915_gem_request * 3383 + struct i915_request * 3382 3384 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine); 3383 3385 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); 3384 3386 void i915_gem_reset(struct drm_i915_private *dev_priv); ··· 3387 3389 void i915_gem_set_wedged(struct drm_i915_private *dev_priv); 3388 3390 bool i915_gem_unset_wedged(struct drm_i915_private 
*dev_priv); 3389 3391 void i915_gem_reset_engine(struct intel_engine_cs *engine, 3390 - struct drm_i915_gem_request *request); 3392 + struct i915_request *request); 3391 3393 3392 3394 void i915_gem_init_mmio(struct drm_i915_private *i915); 3393 3395 int __must_check i915_gem_init(struct drm_i915_private *dev_priv); ··· 4005 4007 } 4006 4008 4007 4009 static inline bool 4008 - __i915_request_irq_complete(const struct drm_i915_gem_request *req) 4010 + __i915_request_irq_complete(const struct i915_request *rq) 4009 4011 { 4010 - struct intel_engine_cs *engine = req->engine; 4012 + struct intel_engine_cs *engine = rq->engine; 4011 4013 u32 seqno; 4012 4014 4013 4015 /* Note that the engine may have wrapped around the seqno, and ··· 4016 4018 * this by kicking all the waiters before resetting the seqno 4017 4019 * in hardware, and also signal the fence. 4018 4020 */ 4019 - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags)) 4021 + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) 4020 4022 return true; 4021 4023 4022 4024 /* The request was dequeued before we were awoken. We check after ··· 4025 4027 * the request execution are sufficient to ensure that a check 4026 4028 * after reading the value from hw matches this request. 4027 4029 */ 4028 - seqno = i915_gem_request_global_seqno(req); 4030 + seqno = i915_request_global_seqno(rq); 4029 4031 if (!seqno) 4030 4032 return false; 4031 4033 4032 4034 /* Before we do the heavier coherent read of the seqno, 4033 4035 * check the value (hopefully) in the CPU cacheline. 
4034 4036 */ 4035 - if (__i915_gem_request_completed(req, seqno)) 4037 + if (__i915_request_completed(rq, seqno)) 4036 4038 return true; 4037 4039 4038 4040 /* Ensure our read of the seqno is coherent so that we ··· 4081 4083 wake_up_process(b->irq_wait->tsk); 4082 4084 spin_unlock_irq(&b->irq_lock); 4083 4085 4084 - if (__i915_gem_request_completed(req, seqno)) 4086 + if (__i915_request_completed(rq, seqno)) 4085 4087 return true; 4086 4088 } 4087 4089
+43 -45
drivers/gpu/drm/i915/i915_gem.c
··· 353 353 long timeout, 354 354 struct intel_rps_client *rps_client) 355 355 { 356 - struct drm_i915_gem_request *rq; 356 + struct i915_request *rq; 357 357 358 358 BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); 359 359 ··· 366 366 timeout); 367 367 368 368 rq = to_request(fence); 369 - if (i915_gem_request_completed(rq)) 369 + if (i915_request_completed(rq)) 370 370 goto out; 371 371 372 372 /* ··· 385 385 * forcing the clocks too high for the whole system, we only allow 386 386 * each client to waitboost once in a busy period. 387 387 */ 388 - if (rps_client && !i915_gem_request_started(rq)) { 388 + if (rps_client && !i915_request_started(rq)) { 389 389 if (INTEL_GEN(rq->i915) >= 6) 390 390 gen6_rps_boost(rq, rps_client); 391 391 } 392 392 393 - timeout = i915_wait_request(rq, flags, timeout); 393 + timeout = i915_request_wait(rq, flags, timeout); 394 394 395 395 out: 396 - if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq)) 397 - i915_gem_request_retire_upto(rq); 396 + if (flags & I915_WAIT_LOCKED && i915_request_completed(rq)) 397 + i915_request_retire_upto(rq); 398 398 399 399 return timeout; 400 400 } ··· 463 463 464 464 static void __fence_set_priority(struct dma_fence *fence, int prio) 465 465 { 466 - struct drm_i915_gem_request *rq; 466 + struct i915_request *rq; 467 467 struct intel_engine_cs *engine; 468 468 469 469 if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence)) ··· 2856 2856 atomic_inc(&ctx->active_count); 2857 2857 } 2858 2858 2859 - struct drm_i915_gem_request * 2859 + struct i915_request * 2860 2860 i915_gem_find_active_request(struct intel_engine_cs *engine) 2861 2861 { 2862 - struct drm_i915_gem_request *request, *active = NULL; 2862 + struct i915_request *request, *active = NULL; 2863 2863 unsigned long flags; 2864 2864 2865 2865 /* We are called by the error capture and reset at a random ··· 2872 2872 */ 2873 2873 spin_lock_irqsave(&engine->timeline->lock, flags); 2874 2874 list_for_each_entry(request, 
&engine->timeline->requests, link) { 2875 - if (__i915_gem_request_completed(request, 2876 - request->global_seqno)) 2875 + if (__i915_request_completed(request, request->global_seqno)) 2877 2876 continue; 2878 2877 2879 2878 GEM_BUG_ON(request->engine != engine); ··· 2905 2906 * Ensure irq handler finishes, and not run again. 2906 2907 * Also return the active request so that we only search for it once. 2907 2908 */ 2908 - struct drm_i915_gem_request * 2909 + struct i915_request * 2909 2910 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) 2910 2911 { 2911 - struct drm_i915_gem_request *request = NULL; 2912 + struct i915_request *request = NULL; 2912 2913 2913 2914 /* 2914 2915 * During the reset sequence, we must prevent the engine from ··· 2966 2967 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) 2967 2968 { 2968 2969 struct intel_engine_cs *engine; 2969 - struct drm_i915_gem_request *request; 2970 + struct i915_request *request; 2970 2971 enum intel_engine_id id; 2971 2972 int err = 0; 2972 2973 ··· 2985 2986 return err; 2986 2987 } 2987 2988 2988 - static void skip_request(struct drm_i915_gem_request *request) 2989 + static void skip_request(struct i915_request *request) 2989 2990 { 2990 2991 void *vaddr = request->ring->vaddr; 2991 2992 u32 head; ··· 3004 3005 dma_fence_set_error(&request->fence, -EIO); 3005 3006 } 3006 3007 3007 - static void engine_skip_context(struct drm_i915_gem_request *request) 3008 + static void engine_skip_context(struct i915_request *request) 3008 3009 { 3009 3010 struct intel_engine_cs *engine = request->engine; 3010 3011 struct i915_gem_context *hung_ctx = request->ctx; ··· 3028 3029 } 3029 3030 3030 3031 /* Returns the request if it was guilty of the hang */ 3031 - static struct drm_i915_gem_request * 3032 + static struct i915_request * 3032 3033 i915_gem_reset_request(struct intel_engine_cs *engine, 3033 - struct drm_i915_gem_request *request) 3034 + struct i915_request *request) 3034 3035 { 3035 3036 /* 
The guilty request will get skipped on a hung engine. 3036 3037 * ··· 3084 3085 } 3085 3086 3086 3087 void i915_gem_reset_engine(struct intel_engine_cs *engine, 3087 - struct drm_i915_gem_request *request) 3088 + struct i915_request *request) 3088 3089 { 3089 3090 /* 3090 3091 * Make sure this write is visible before we re-enable the interrupt ··· 3112 3113 3113 3114 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3114 3115 3115 - i915_gem_retire_requests(dev_priv); 3116 + i915_retire_requests(dev_priv); 3116 3117 3117 3118 for_each_engine(engine, dev_priv, id) { 3118 3119 struct i915_gem_context *ctx; ··· 3133 3134 * empty request appears sufficient to paper over the glitch. 3134 3135 */ 3135 3136 if (intel_engine_is_idle(engine)) { 3136 - struct drm_i915_gem_request *rq; 3137 + struct i915_request *rq; 3137 3138 3138 - rq = i915_gem_request_alloc(engine, 3139 - dev_priv->kernel_context); 3139 + rq = i915_request_alloc(engine, 3140 + dev_priv->kernel_context); 3140 3141 if (!IS_ERR(rq)) 3141 - __i915_add_request(rq, false); 3142 + __i915_request_add(rq, false); 3142 3143 } 3143 3144 } 3144 3145 ··· 3173 3174 } 3174 3175 } 3175 3176 3176 - static void nop_submit_request(struct drm_i915_gem_request *request) 3177 + static void nop_submit_request(struct i915_request *request) 3177 3178 { 3178 3179 dma_fence_set_error(&request->fence, -EIO); 3179 3180 3180 - i915_gem_request_submit(request); 3181 + i915_request_submit(request); 3181 3182 } 3182 3183 3183 - static void nop_complete_submit_request(struct drm_i915_gem_request *request) 3184 + static void nop_complete_submit_request(struct i915_request *request) 3184 3185 { 3185 3186 unsigned long flags; 3186 3187 3187 3188 dma_fence_set_error(&request->fence, -EIO); 3188 3189 3189 3190 spin_lock_irqsave(&request->engine->timeline->lock, flags); 3190 - __i915_gem_request_submit(request); 3191 + __i915_request_submit(request); 3191 3192 intel_engine_init_global_seqno(request->engine, request->global_seqno); 3192 3193 
spin_unlock_irqrestore(&request->engine->timeline->lock, flags); 3193 3194 } ··· 3280 3281 */ 3281 3282 list_for_each_entry(tl, &i915->gt.timelines, link) { 3282 3283 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3283 - struct drm_i915_gem_request *rq; 3284 + struct i915_request *rq; 3284 3285 3285 3286 rq = i915_gem_active_peek(&tl->engine[i].last_request, 3286 3287 &i915->drm.struct_mutex); ··· 3329 3330 3330 3331 /* Come back later if the device is busy... */ 3331 3332 if (mutex_trylock(&dev->struct_mutex)) { 3332 - i915_gem_retire_requests(dev_priv); 3333 + i915_retire_requests(dev_priv); 3333 3334 mutex_unlock(&dev->struct_mutex); 3334 3335 } 3335 3336 ··· 3683 3684 if (ret) 3684 3685 return ret; 3685 3686 } 3686 - i915_gem_retire_requests(i915); 3687 + i915_retire_requests(i915); 3687 3688 3688 3689 ret = wait_for_engines(i915); 3689 3690 } else { ··· 4223 4224 struct drm_i915_private *dev_priv = to_i915(dev); 4224 4225 struct drm_i915_file_private *file_priv = file->driver_priv; 4225 4226 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; 4226 - struct drm_i915_gem_request *request, *target = NULL; 4227 + struct i915_request *request, *target = NULL; 4227 4228 long ret; 4228 4229 4229 4230 /* ABI: return -EIO if already wedged */ ··· 4243 4244 target = request; 4244 4245 } 4245 4246 if (target) 4246 - i915_gem_request_get(target); 4247 + i915_request_get(target); 4247 4248 spin_unlock(&file_priv->mm.lock); 4248 4249 4249 4250 if (target == NULL) 4250 4251 return 0; 4251 4252 4252 - ret = i915_wait_request(target, 4253 + ret = i915_request_wait(target, 4253 4254 I915_WAIT_INTERRUPTIBLE, 4254 4255 MAX_SCHEDULE_TIMEOUT); 4255 - i915_gem_request_put(target); 4256 + i915_request_put(target); 4256 4257 4257 4258 return ret < 0 ? 
ret : 0; 4258 4259 } ··· 4366 4367 __busy_set_if_active(const struct dma_fence *fence, 4367 4368 unsigned int (*flag)(unsigned int id)) 4368 4369 { 4369 - struct drm_i915_gem_request *rq; 4370 + struct i915_request *rq; 4370 4371 4371 4372 /* We have to check the current hw status of the fence as the uABI 4372 4373 * guarantees forward progress. We could rely on the idle worker ··· 4379 4380 return 0; 4380 4381 4381 4382 /* opencode to_request() in order to avoid const warnings */ 4382 - rq = container_of(fence, struct drm_i915_gem_request, fence); 4383 - if (i915_gem_request_completed(rq)) 4383 + rq = container_of(fence, struct i915_request, fence); 4384 + if (i915_request_completed(rq)) 4384 4385 return 0; 4385 4386 4386 4387 return flag(rq->engine->uabi_id); ··· 4525 4526 } 4526 4527 4527 4528 static void 4528 - frontbuffer_retire(struct i915_gem_active *active, 4529 - struct drm_i915_gem_request *request) 4529 + frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request) 4530 4530 { 4531 4531 struct drm_i915_gem_object *obj = 4532 4532 container_of(active, typeof(*obj), frontbuffer_write); ··· 5159 5161 return PTR_ERR(ctx); 5160 5162 5161 5163 for_each_engine(engine, i915, id) { 5162 - struct drm_i915_gem_request *rq; 5164 + struct i915_request *rq; 5163 5165 5164 - rq = i915_gem_request_alloc(engine, ctx); 5166 + rq = i915_request_alloc(engine, ctx); 5165 5167 if (IS_ERR(rq)) { 5166 5168 err = PTR_ERR(rq); 5167 5169 goto out_ctx; ··· 5171 5173 if (engine->init_context) 5172 5174 err = engine->init_context(rq); 5173 5175 5174 - __i915_add_request(rq, true); 5176 + __i915_request_add(rq, true); 5175 5177 if (err) 5176 5178 goto err_active; 5177 5179 } ··· 5477 5479 if (!dev_priv->luts) 5478 5480 goto err_vmas; 5479 5481 5480 - dev_priv->requests = KMEM_CACHE(drm_i915_gem_request, 5482 + dev_priv->requests = KMEM_CACHE(i915_request, 5481 5483 SLAB_HWCACHE_ALIGN | 5482 5484 SLAB_RECLAIM_ACCOUNT | 5483 5485 SLAB_TYPESAFE_BY_RCU); ··· 5610 5612 
void i915_gem_release(struct drm_device *dev, struct drm_file *file) 5611 5613 { 5612 5614 struct drm_i915_file_private *file_priv = file->driver_priv; 5613 - struct drm_i915_gem_request *request; 5615 + struct i915_request *request; 5614 5616 5615 5617 /* Clean up our request list when the client is going away, so that 5616 5618 * later retire_requests won't dereference our soon-to-be-gone
+1 -1
drivers/gpu/drm/i915/i915_gem_batch_pool.c
··· 119 119 if (!reservation_object_test_signaled_rcu(resv, true)) 120 120 break; 121 121 122 - i915_gem_retire_requests(pool->engine->i915); 122 + i915_retire_requests(pool->engine->i915); 123 123 GEM_BUG_ON(i915_gem_object_is_active(obj)); 124 124 125 125 /*
+9 -9
drivers/gpu/drm/i915/i915_gem_context.c
··· 219 219 * Flush any pending retires to hopefully release some 220 220 * stale contexts and try again. 221 221 */ 222 - i915_gem_retire_requests(dev_priv); 222 + i915_retire_requests(dev_priv); 223 223 ret = ida_simple_get(&dev_priv->contexts.hw_ida, 224 224 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); 225 225 if (ret < 0) ··· 590 590 591 591 lockdep_assert_held(&dev_priv->drm.struct_mutex); 592 592 593 - i915_gem_retire_requests(dev_priv); 593 + i915_retire_requests(dev_priv); 594 594 595 595 for_each_engine(engine, dev_priv, id) { 596 - struct drm_i915_gem_request *req; 596 + struct i915_request *rq; 597 597 598 598 if (engine_has_idle_kernel_context(engine)) 599 599 continue; 600 600 601 - req = i915_gem_request_alloc(engine, dev_priv->kernel_context); 602 - if (IS_ERR(req)) 603 - return PTR_ERR(req); 601 + rq = i915_request_alloc(engine, dev_priv->kernel_context); 602 + if (IS_ERR(rq)) 603 + return PTR_ERR(rq); 604 604 605 605 /* Queue this switch after all other activity */ 606 606 list_for_each_entry(timeline, &dev_priv->gt.timelines, link) { 607 - struct drm_i915_gem_request *prev; 607 + struct i915_request *prev; 608 608 struct intel_timeline *tl; 609 609 610 610 tl = &timeline->engine[engine->id]; 611 611 prev = i915_gem_active_raw(&tl->last_request, 612 612 &dev_priv->drm.struct_mutex); 613 613 if (prev) 614 - i915_sw_fence_await_sw_fence_gfp(&req->submit, 614 + i915_sw_fence_await_sw_fence_gfp(&rq->submit, 615 615 &prev->submit, 616 616 I915_FENCE_GFP); 617 617 } ··· 623 623 * but an extra layer of paranoia before we declare the system 624 624 * idle (on suspend etc) is advisable! 625 625 */ 626 - __i915_add_request(req, true); 626 + __i915_request_add(rq, true); 627 627 } 628 628 629 629 return 0;
+1 -1
drivers/gpu/drm/i915/i915_gem_context.h
··· 276 276 struct drm_file *file); 277 277 void i915_gem_context_close(struct drm_file *file); 278 278 279 - int i915_switch_context(struct drm_i915_gem_request *req); 279 + int i915_switch_context(struct i915_request *rq); 280 280 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); 281 281 282 282 void i915_gem_context_release(struct kref *ctx_ref);
+2 -2
drivers/gpu/drm/i915/i915_gem_evict.c
··· 168 168 * retiring. 169 169 */ 170 170 if (!(flags & PIN_NONBLOCK)) 171 - i915_gem_retire_requests(dev_priv); 171 + i915_retire_requests(dev_priv); 172 172 else 173 173 phases[1] = NULL; 174 174 ··· 293 293 * retiring. 294 294 */ 295 295 if (!(flags & PIN_NONBLOCK)) 296 - i915_gem_retire_requests(vm->i915); 296 + i915_retire_requests(vm->i915); 297 297 298 298 check_color = vm->mm.color_adjust; 299 299 if (check_color) {
+30 -30
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 200 200 struct i915_gem_context *ctx; /** context for building the request */ 201 201 struct i915_address_space *vm; /** GTT and vma for the request */ 202 202 203 - struct drm_i915_gem_request *request; /** our request to build */ 203 + struct i915_request *request; /** our request to build */ 204 204 struct i915_vma *batch; /** identity of the batch obj/vma */ 205 205 206 206 /** actual size of execobj[] as we may extend it for the cmdparser */ ··· 227 227 bool has_fence : 1; 228 228 bool needs_unfenced : 1; 229 229 230 - struct drm_i915_gem_request *rq; 230 + struct i915_request *rq; 231 231 u32 *rq_cmd; 232 232 unsigned int rq_size; 233 233 } reloc_cache; ··· 886 886 i915_gem_object_unpin_map(cache->rq->batch->obj); 887 887 i915_gem_chipset_flush(cache->rq->i915); 888 888 889 - __i915_add_request(cache->rq, true); 889 + __i915_request_add(cache->rq, true); 890 890 cache->rq = NULL; 891 891 } 892 892 ··· 1070 1070 { 1071 1071 struct reloc_cache *cache = &eb->reloc_cache; 1072 1072 struct drm_i915_gem_object *obj; 1073 - struct drm_i915_gem_request *rq; 1073 + struct i915_request *rq; 1074 1074 struct i915_vma *batch; 1075 1075 u32 *cmd; 1076 1076 int err; ··· 1103 1103 if (err) 1104 1104 goto err_unmap; 1105 1105 1106 - rq = i915_gem_request_alloc(eb->engine, eb->ctx); 1106 + rq = i915_request_alloc(eb->engine, eb->ctx); 1107 1107 if (IS_ERR(rq)) { 1108 1108 err = PTR_ERR(rq); 1109 1109 goto err_unpin; 1110 1110 } 1111 1111 1112 - err = i915_gem_request_await_object(rq, vma->obj, true); 1112 + err = i915_request_await_object(rq, vma->obj, true); 1113 1113 if (err) 1114 1114 goto err_request; 1115 1115 ··· 1141 1141 return 0; 1142 1142 1143 1143 err_request: 1144 - i915_add_request(rq); 1144 + i915_request_add(rq); 1145 1145 err_unpin: 1146 1146 i915_vma_unpin(batch); 1147 1147 err_unmap: ··· 1727 1727 } 1728 1728 1729 1729 static void eb_export_fence(struct i915_vma *vma, 1730 - struct drm_i915_gem_request *req, 1730 + struct i915_request *rq, 1731 1731 
unsigned int flags) 1732 1732 { 1733 1733 struct reservation_object *resv = vma->resv; ··· 1739 1739 */ 1740 1740 reservation_object_lock(resv, NULL); 1741 1741 if (flags & EXEC_OBJECT_WRITE) 1742 - reservation_object_add_excl_fence(resv, &req->fence); 1742 + reservation_object_add_excl_fence(resv, &rq->fence); 1743 1743 else if (reservation_object_reserve_shared(resv) == 0) 1744 - reservation_object_add_shared_fence(resv, &req->fence); 1744 + reservation_object_add_shared_fence(resv, &rq->fence); 1745 1745 reservation_object_unlock(resv); 1746 1746 } 1747 1747 ··· 1757 1757 struct drm_i915_gem_object *obj = vma->obj; 1758 1758 1759 1759 if (flags & EXEC_OBJECT_CAPTURE) { 1760 - struct i915_gem_capture_list *capture; 1760 + struct i915_capture_list *capture; 1761 1761 1762 1762 capture = kmalloc(sizeof(*capture), GFP_KERNEL); 1763 1763 if (unlikely(!capture)) ··· 1788 1788 if (flags & EXEC_OBJECT_ASYNC) 1789 1789 continue; 1790 1790 1791 - err = i915_gem_request_await_object 1791 + err = i915_request_await_object 1792 1792 (eb->request, obj, flags & EXEC_OBJECT_WRITE); 1793 1793 if (err) 1794 1794 return err; ··· 1840 1840 } 1841 1841 1842 1842 void i915_vma_move_to_active(struct i915_vma *vma, 1843 - struct drm_i915_gem_request *req, 1843 + struct i915_request *rq, 1844 1844 unsigned int flags) 1845 1845 { 1846 1846 struct drm_i915_gem_object *obj = vma->obj; 1847 - const unsigned int idx = req->engine->id; 1847 + const unsigned int idx = rq->engine->id; 1848 1848 1849 - lockdep_assert_held(&req->i915->drm.struct_mutex); 1849 + lockdep_assert_held(&rq->i915->drm.struct_mutex); 1850 1850 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 1851 1851 1852 1852 /* ··· 1860 1860 if (!i915_vma_is_active(vma)) 1861 1861 obj->active_count++; 1862 1862 i915_vma_set_active(vma, idx); 1863 - i915_gem_active_set(&vma->last_read[idx], req); 1863 + i915_gem_active_set(&vma->last_read[idx], rq); 1864 1864 list_move_tail(&vma->vm_link, &vma->vm->active_list); 1865 1865 1866 1866 
obj->write_domain = 0; ··· 1868 1868 obj->write_domain = I915_GEM_DOMAIN_RENDER; 1869 1869 1870 1870 if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) 1871 - i915_gem_active_set(&obj->frontbuffer_write, req); 1871 + i915_gem_active_set(&obj->frontbuffer_write, rq); 1872 1872 1873 1873 obj->read_domains = 0; 1874 1874 } 1875 1875 obj->read_domains |= I915_GEM_GPU_DOMAINS; 1876 1876 1877 1877 if (flags & EXEC_OBJECT_NEEDS_FENCE) 1878 - i915_gem_active_set(&vma->last_fence, req); 1878 + i915_gem_active_set(&vma->last_fence, rq); 1879 1879 } 1880 1880 1881 - static int i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req) 1881 + static int i915_reset_gen7_sol_offsets(struct i915_request *rq) 1882 1882 { 1883 1883 u32 *cs; 1884 1884 int i; 1885 1885 1886 - if (!IS_GEN7(req->i915) || req->engine->id != RCS) { 1886 + if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) { 1887 1887 DRM_DEBUG("sol reset is gen7/rcs only\n"); 1888 1888 return -EINVAL; 1889 1889 } 1890 1890 1891 - cs = intel_ring_begin(req, 4 * 2 + 2); 1891 + cs = intel_ring_begin(rq, 4 * 2 + 2); 1892 1892 if (IS_ERR(cs)) 1893 1893 return PTR_ERR(cs); 1894 1894 ··· 1898 1898 *cs++ = 0; 1899 1899 } 1900 1900 *cs++ = MI_NOOP; 1901 - intel_ring_advance(req, cs); 1901 + intel_ring_advance(rq, cs); 1902 1902 1903 1903 return 0; 1904 1904 } ··· 1944 1944 } 1945 1945 1946 1946 static void 1947 - add_to_client(struct drm_i915_gem_request *req, struct drm_file *file) 1947 + add_to_client(struct i915_request *rq, struct drm_file *file) 1948 1948 { 1949 - req->file_priv = file->driver_priv; 1950 - list_add_tail(&req->client_link, &req->file_priv->mm.request_list); 1949 + rq->file_priv = file->driver_priv; 1950 + list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list); 1951 1951 } 1952 1952 1953 1953 static int eb_submit(struct i915_execbuffer *eb) ··· 2151 2151 if (!fence) 2152 2152 return -EINVAL; 2153 2153 2154 - err = i915_gem_request_await_dma_fence(eb->request, fence); 2154 + err = 
i915_request_await_dma_fence(eb->request, fence); 2155 2155 dma_fence_put(fence); 2156 2156 if (err < 0) 2157 2157 return err; ··· 2365 2365 GEM_BUG_ON(eb.reloc_cache.rq); 2366 2366 2367 2367 /* Allocate a request for this batch buffer nice and early. */ 2368 - eb.request = i915_gem_request_alloc(eb.engine, eb.ctx); 2368 + eb.request = i915_request_alloc(eb.engine, eb.ctx); 2369 2369 if (IS_ERR(eb.request)) { 2370 2370 err = PTR_ERR(eb.request); 2371 2371 goto err_batch_unpin; 2372 2372 } 2373 2373 2374 2374 if (in_fence) { 2375 - err = i915_gem_request_await_dma_fence(eb.request, in_fence); 2375 + err = i915_request_await_dma_fence(eb.request, in_fence); 2376 2376 if (err < 0) 2377 2377 goto err_request; 2378 2378 } ··· 2400 2400 */ 2401 2401 eb.request->batch = eb.batch; 2402 2402 2403 - trace_i915_gem_request_queue(eb.request, eb.batch_flags); 2403 + trace_i915_request_queue(eb.request, eb.batch_flags); 2404 2404 err = eb_submit(&eb); 2405 2405 err_request: 2406 - __i915_add_request(eb.request, err == 0); 2406 + __i915_request_add(eb.request, err == 0); 2407 2407 add_to_client(eb.request, file); 2408 2408 2409 2409 if (fences)
+19 -19
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 765 765 } 766 766 767 767 /* Broadwell Page Directory Pointer Descriptors */ 768 - static int gen8_write_pdp(struct drm_i915_gem_request *req, 768 + static int gen8_write_pdp(struct i915_request *rq, 769 769 unsigned entry, 770 770 dma_addr_t addr) 771 771 { 772 - struct intel_engine_cs *engine = req->engine; 772 + struct intel_engine_cs *engine = rq->engine; 773 773 u32 *cs; 774 774 775 775 BUG_ON(entry >= 4); 776 776 777 - cs = intel_ring_begin(req, 6); 777 + cs = intel_ring_begin(rq, 6); 778 778 if (IS_ERR(cs)) 779 779 return PTR_ERR(cs); 780 780 ··· 784 784 *cs++ = MI_LOAD_REGISTER_IMM(1); 785 785 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry)); 786 786 *cs++ = lower_32_bits(addr); 787 - intel_ring_advance(req, cs); 787 + intel_ring_advance(rq, cs); 788 788 789 789 return 0; 790 790 } 791 791 792 792 static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt, 793 - struct drm_i915_gem_request *req) 793 + struct i915_request *rq) 794 794 { 795 795 int i, ret; 796 796 797 797 for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) { 798 798 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 799 799 800 - ret = gen8_write_pdp(req, i, pd_daddr); 800 + ret = gen8_write_pdp(rq, i, pd_daddr); 801 801 if (ret) 802 802 return ret; 803 803 } ··· 806 806 } 807 807 808 808 static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt, 809 - struct drm_i915_gem_request *req) 809 + struct i915_request *rq) 810 810 { 811 - return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4)); 811 + return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4)); 812 812 } 813 813 814 814 /* PDE TLBs are a pain to invalidate on GEN8+. 
When we modify ··· 1732 1732 } 1733 1733 1734 1734 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, 1735 - struct drm_i915_gem_request *req) 1735 + struct i915_request *rq) 1736 1736 { 1737 - struct intel_engine_cs *engine = req->engine; 1737 + struct intel_engine_cs *engine = rq->engine; 1738 1738 u32 *cs; 1739 1739 1740 1740 /* NB: TLBs must be flushed and invalidated before a switch */ 1741 - cs = intel_ring_begin(req, 6); 1741 + cs = intel_ring_begin(rq, 6); 1742 1742 if (IS_ERR(cs)) 1743 1743 return PTR_ERR(cs); 1744 1744 ··· 1748 1748 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); 1749 1749 *cs++ = get_pd_offset(ppgtt); 1750 1750 *cs++ = MI_NOOP; 1751 - intel_ring_advance(req, cs); 1751 + intel_ring_advance(rq, cs); 1752 1752 1753 1753 return 0; 1754 1754 } 1755 1755 1756 1756 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, 1757 - struct drm_i915_gem_request *req) 1757 + struct i915_request *rq) 1758 1758 { 1759 - struct intel_engine_cs *engine = req->engine; 1759 + struct intel_engine_cs *engine = rq->engine; 1760 1760 u32 *cs; 1761 1761 1762 1762 /* NB: TLBs must be flushed and invalidated before a switch */ 1763 - cs = intel_ring_begin(req, 6); 1763 + cs = intel_ring_begin(rq, 6); 1764 1764 if (IS_ERR(cs)) 1765 1765 return PTR_ERR(cs); 1766 1766 ··· 1770 1770 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); 1771 1771 *cs++ = get_pd_offset(ppgtt); 1772 1772 *cs++ = MI_NOOP; 1773 - intel_ring_advance(req, cs); 1773 + intel_ring_advance(rq, cs); 1774 1774 1775 1775 return 0; 1776 1776 } 1777 1777 1778 1778 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, 1779 - struct drm_i915_gem_request *req) 1779 + struct i915_request *rq) 1780 1780 { 1781 - struct intel_engine_cs *engine = req->engine; 1782 - struct drm_i915_private *dev_priv = req->i915; 1781 + struct intel_engine_cs *engine = rq->engine; 1782 + struct drm_i915_private *dev_priv = rq->i915; 1783 1783 1784 1784 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); 1785 
1785 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
+3 -2
drivers/gpu/drm/i915/i915_gem_gtt.h
··· 39 39 #include <linux/pagevec.h> 40 40 41 41 #include "i915_gem_timeline.h" 42 - #include "i915_gem_request.h" 42 + 43 + #include "i915_request.h" 43 44 #include "i915_selftest.h" 44 45 45 46 #define I915_GTT_PAGE_SIZE_4K BIT(12) ··· 399 398 gen6_pte_t __iomem *pd_addr; 400 399 401 400 int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, 402 - struct drm_i915_gem_request *req); 401 + struct i915_request *rq); 403 402 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); 404 403 }; 405 404
+1 -1
drivers/gpu/drm/i915/i915_gem_object.h
··· 33 33 34 34 #include <drm/i915_drm.h> 35 35 36 - #include "i915_gem_request.h" 36 + #include "i915_request.h" 37 37 #include "i915_selftest.h" 38 38 39 39 struct drm_i915_gem_object;
+1 -1
drivers/gpu/drm/i915/i915_gem_render_state.c
··· 177 177 178 178 #undef OUT_BATCH 179 179 180 - int i915_gem_render_state_emit(struct drm_i915_gem_request *rq) 180 + int i915_gem_render_state_emit(struct i915_request *rq) 181 181 { 182 182 struct intel_engine_cs *engine = rq->engine; 183 183 struct intel_render_state so = {}; /* keep the compiler happy */
+2 -2
drivers/gpu/drm/i915/i915_gem_render_state.h
··· 24 24 #ifndef _I915_GEM_RENDER_STATE_H_ 25 25 #define _I915_GEM_RENDER_STATE_H_ 26 26 27 - struct drm_i915_gem_request; 27 + struct i915_request; 28 28 29 - int i915_gem_render_state_emit(struct drm_i915_gem_request *rq); 29 + int i915_gem_render_state_emit(struct i915_request *rq); 30 30 31 31 #endif /* _I915_GEM_RENDER_STATE_H_ */
+196 -182
drivers/gpu/drm/i915/i915_gem_request.c drivers/gpu/drm/i915/i915_request.c
··· 37 37 38 38 static const char *i915_fence_get_timeline_name(struct dma_fence *fence) 39 39 { 40 - /* The timeline struct (as part of the ppgtt underneath a context) 40 + /* 41 + * The timeline struct (as part of the ppgtt underneath a context) 41 42 * may be freed when the request is no longer in use by the GPU. 42 43 * We could extend the life of a context to beyond that of all 43 44 * fences, possibly keeping the hw resource around indefinitely, ··· 54 53 55 54 static bool i915_fence_signaled(struct dma_fence *fence) 56 55 { 57 - return i915_gem_request_completed(to_request(fence)); 56 + return i915_request_completed(to_request(fence)); 58 57 } 59 58 60 59 static bool i915_fence_enable_signaling(struct dma_fence *fence) ··· 70 69 bool interruptible, 71 70 signed long timeout) 72 71 { 73 - return i915_wait_request(to_request(fence), interruptible, timeout); 72 + return i915_request_wait(to_request(fence), interruptible, timeout); 74 73 } 75 74 76 75 static void i915_fence_release(struct dma_fence *fence) 77 76 { 78 - struct drm_i915_gem_request *req = to_request(fence); 77 + struct i915_request *rq = to_request(fence); 79 78 80 - /* The request is put onto a RCU freelist (i.e. the address 79 + /* 80 + * The request is put onto a RCU freelist (i.e. the address 81 81 * is immediately reused), mark the fences as being freed now. 82 82 * Otherwise the debugobjects for the fences are only marked as 83 83 * freed when the slab cache itself is freed, and so we would get 84 84 * caught trying to reuse dead objects. 
85 85 */ 86 - i915_sw_fence_fini(&req->submit); 86 + i915_sw_fence_fini(&rq->submit); 87 87 88 - kmem_cache_free(req->i915->requests, req); 88 + kmem_cache_free(rq->i915->requests, rq); 89 89 } 90 90 91 91 const struct dma_fence_ops i915_fence_ops = { ··· 99 97 }; 100 98 101 99 static inline void 102 - i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) 100 + i915_request_remove_from_client(struct i915_request *request) 103 101 { 104 102 struct drm_i915_file_private *file_priv; 105 103 ··· 240 238 241 239 int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno) 242 240 { 243 - struct drm_i915_private *dev_priv = to_i915(dev); 241 + struct drm_i915_private *i915 = to_i915(dev); 244 242 245 - lockdep_assert_held(&dev_priv->drm.struct_mutex); 243 + lockdep_assert_held(&i915->drm.struct_mutex); 246 244 247 245 if (seqno == 0) 248 246 return -EINVAL; 249 247 250 - /* HWS page needs to be set less than what we 251 - * will inject to ring 252 - */ 253 - return reset_all_global_seqno(dev_priv, seqno - 1); 248 + /* HWS page needs to be set less than what we will inject to ring */ 249 + return reset_all_global_seqno(i915, seqno - 1); 254 250 } 255 251 256 252 static void mark_busy(struct drm_i915_private *i915) ··· 331 331 } 332 332 333 333 void i915_gem_retire_noop(struct i915_gem_active *active, 334 - struct drm_i915_gem_request *request) 334 + struct i915_request *request) 335 335 { 336 336 /* Space left intentionally blank */ 337 337 } 338 338 339 - static void advance_ring(struct drm_i915_gem_request *request) 339 + static void advance_ring(struct i915_request *request) 340 340 { 341 341 unsigned int tail; 342 342 343 - /* We know the GPU must have read the request to have 343 + /* 344 + * We know the GPU must have read the request to have 344 345 * sent us the seqno + interrupt, so use the position 345 346 * of tail of the request to update the last known position 346 347 * of the GPU head. ··· 350 349 * completion order. 
351 350 */ 352 351 if (list_is_last(&request->ring_link, &request->ring->request_list)) { 353 - /* We may race here with execlists resubmitting this request 352 + /* 353 + * We may race here with execlists resubmitting this request 354 354 * as we retire it. The resubmission will move the ring->tail 355 355 * forwards (to request->wa_tail). We either read the 356 356 * current value that was written to hw, or the value that ··· 367 365 request->ring->head = tail; 368 366 } 369 367 370 - static void free_capture_list(struct drm_i915_gem_request *request) 368 + static void free_capture_list(struct i915_request *request) 371 369 { 372 - struct i915_gem_capture_list *capture; 370 + struct i915_capture_list *capture; 373 371 374 372 capture = request->capture_list; 375 373 while (capture) { 376 - struct i915_gem_capture_list *next = capture->next; 374 + struct i915_capture_list *next = capture->next; 377 375 378 376 kfree(capture); 379 377 capture = next; 380 378 } 381 379 } 382 380 383 - static void i915_gem_request_retire(struct drm_i915_gem_request *request) 381 + static void i915_request_retire(struct i915_request *request) 384 382 { 385 383 struct intel_engine_cs *engine = request->engine; 386 384 struct i915_gem_active *active, *next; 387 385 388 386 lockdep_assert_held(&request->i915->drm.struct_mutex); 389 387 GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit)); 390 - GEM_BUG_ON(!i915_gem_request_completed(request)); 388 + GEM_BUG_ON(!i915_request_completed(request)); 391 389 GEM_BUG_ON(!request->i915->gt.active_requests); 392 390 393 - trace_i915_gem_request_retire(request); 391 + trace_i915_request_retire(request); 394 392 395 393 spin_lock_irq(&engine->timeline->lock); 396 394 list_del_init(&request->link); ··· 401 399 402 400 free_capture_list(request); 403 401 404 - /* Walk through the active list, calling retire on each. This allows 402 + /* 403 + * Walk through the active list, calling retire on each. 
This allows 405 404 * objects to track their GPU activity and mark themselves as idle 406 405 * when their *last* active request is completed (updating state 407 406 * tracking lists for eviction, active references for GEM, etc). ··· 412 409 * the node after the callback). 413 410 */ 414 411 list_for_each_entry_safe(active, next, &request->active_list, link) { 415 - /* In microbenchmarks or focusing upon time inside the kernel, 412 + /* 413 + * In microbenchmarks or focusing upon time inside the kernel, 416 414 * we may spend an inordinate amount of time simply handling 417 415 * the retirement of requests and processing their callbacks. 418 416 * Of which, this loop itself is particularly hot due to the ··· 430 426 active->retire(active, request); 431 427 } 432 428 433 - i915_gem_request_remove_from_client(request); 429 + i915_request_remove_from_client(request); 434 430 435 431 /* Retirement decays the ban score as it is a sign of ctx progress */ 436 432 atomic_dec_if_positive(&request->ctx->ban_score); 437 433 438 - /* The backing object for the context is done after switching to the 434 + /* 435 + * The backing object for the context is done after switching to the 439 436 * *next* context. Therefore we cannot retire the previous context until 440 437 * the next context has already started running. However, since we 441 - * cannot take the required locks at i915_gem_request_submit() we 438 + * cannot take the required locks at i915_request_submit() we 442 439 * defer the unpinning of the active context to now, retirement of 443 440 * the subsequent request. 
444 441 */ ··· 459 454 spin_unlock_irq(&request->lock); 460 455 461 456 i915_priotree_fini(request->i915, &request->priotree); 462 - i915_gem_request_put(request); 457 + i915_request_put(request); 463 458 } 464 459 465 - void i915_gem_request_retire_upto(struct drm_i915_gem_request *req) 460 + void i915_request_retire_upto(struct i915_request *rq) 466 461 { 467 - struct intel_engine_cs *engine = req->engine; 468 - struct drm_i915_gem_request *tmp; 462 + struct intel_engine_cs *engine = rq->engine; 463 + struct i915_request *tmp; 469 464 470 - lockdep_assert_held(&req->i915->drm.struct_mutex); 471 - GEM_BUG_ON(!i915_gem_request_completed(req)); 465 + lockdep_assert_held(&rq->i915->drm.struct_mutex); 466 + GEM_BUG_ON(!i915_request_completed(rq)); 472 467 473 - if (list_empty(&req->link)) 468 + if (list_empty(&rq->link)) 474 469 return; 475 470 476 471 do { 477 472 tmp = list_first_entry(&engine->timeline->requests, 478 473 typeof(*tmp), link); 479 474 480 - i915_gem_request_retire(tmp); 481 - } while (tmp != req); 475 + i915_request_retire(tmp); 476 + } while (tmp != rq); 482 477 } 483 478 484 479 static u32 timeline_get_seqno(struct intel_timeline *tl) ··· 486 481 return ++tl->seqno; 487 482 } 488 483 489 - void __i915_gem_request_submit(struct drm_i915_gem_request *request) 484 + void __i915_request_submit(struct i915_request *request) 490 485 { 491 486 struct intel_engine_cs *engine = request->engine; 492 487 struct intel_timeline *timeline; ··· 518 513 list_move_tail(&request->link, &timeline->requests); 519 514 spin_unlock(&request->timeline->lock); 520 515 521 - trace_i915_gem_request_execute(request); 516 + trace_i915_request_execute(request); 522 517 523 518 wake_up_all(&request->execute); 524 519 } 525 520 526 - void i915_gem_request_submit(struct drm_i915_gem_request *request) 521 + void i915_request_submit(struct i915_request *request) 527 522 { 528 523 struct intel_engine_cs *engine = request->engine; 529 524 unsigned long flags; ··· 531 526 /* Will be 
called from irq-context when using foreign fences. */ 532 527 spin_lock_irqsave(&engine->timeline->lock, flags); 533 528 534 - __i915_gem_request_submit(request); 529 + __i915_request_submit(request); 535 530 536 531 spin_unlock_irqrestore(&engine->timeline->lock, flags); 537 532 } 538 533 539 - void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request) 534 + void __i915_request_unsubmit(struct i915_request *request) 540 535 { 541 536 struct intel_engine_cs *engine = request->engine; 542 537 struct intel_timeline *timeline; ··· 544 539 GEM_BUG_ON(!irqs_disabled()); 545 540 lockdep_assert_held(&engine->timeline->lock); 546 541 547 - /* Only unwind in reverse order, required so that the per-context list 542 + /* 543 + * Only unwind in reverse order, required so that the per-context list 548 544 * is kept in seqno/ring order. 549 545 */ 550 546 GEM_BUG_ON(!request->global_seqno); ··· 569 563 list_move(&request->link, &timeline->requests); 570 564 spin_unlock(&timeline->lock); 571 565 572 - /* We don't need to wake_up any waiters on request->execute, they 566 + /* 567 + * We don't need to wake_up any waiters on request->execute, they 573 568 * will get woken by any other event or us re-adding this request 574 - * to the engine timeline (__i915_gem_request_submit()). The waiters 569 + * to the engine timeline (__i915_request_submit()). The waiters 575 570 * should be quite adapt at finding that the request now has a new 576 571 * global_seqno to the one they went to sleep on. 577 572 */ 578 573 } 579 574 580 - void i915_gem_request_unsubmit(struct drm_i915_gem_request *request) 575 + void i915_request_unsubmit(struct i915_request *request) 581 576 { 582 577 struct intel_engine_cs *engine = request->engine; 583 578 unsigned long flags; ··· 586 579 /* Will be called from irq-context when using foreign fences. 
*/ 587 580 spin_lock_irqsave(&engine->timeline->lock, flags); 588 581 589 - __i915_gem_request_unsubmit(request); 582 + __i915_request_unsubmit(request); 590 583 591 584 spin_unlock_irqrestore(&engine->timeline->lock, flags); 592 585 } ··· 594 587 static int __i915_sw_fence_call 595 588 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) 596 589 { 597 - struct drm_i915_gem_request *request = 590 + struct i915_request *request = 598 591 container_of(fence, typeof(*request), submit); 599 592 600 593 switch (state) { 601 594 case FENCE_COMPLETE: 602 - trace_i915_gem_request_submit(request); 595 + trace_i915_request_submit(request); 603 596 /* 604 - * We need to serialize use of the submit_request() callback with its 605 - * hotplugging performed during an emergency i915_gem_set_wedged(). 606 - * We use the RCU mechanism to mark the critical section in order to 607 - * force i915_gem_set_wedged() to wait until the submit_request() is 608 - * completed before proceeding. 597 + * We need to serialize use of the submit_request() callback 598 + * with its hotplugging performed during an emergency 599 + * i915_gem_set_wedged(). We use the RCU mechanism to mark the 600 + * critical section in order to force i915_gem_set_wedged() to 601 + * wait until the submit_request() is completed before 602 + * proceeding. 609 603 */ 610 604 rcu_read_lock(); 611 605 request->engine->submit_request(request); ··· 614 606 break; 615 607 616 608 case FENCE_FREE: 617 - i915_gem_request_put(request); 609 + i915_request_put(request); 618 610 break; 619 611 } 620 612 ··· 622 614 } 623 615 624 616 /** 625 - * i915_gem_request_alloc - allocate a request structure 617 + * i915_request_alloc - allocate a request structure 626 618 * 627 619 * @engine: engine that we wish to issue the request on. 628 620 * @ctx: context that the request will be associated with. ··· 630 622 * Returns a pointer to the allocated request if successful, 631 623 * or an error code if not. 
632 624 */ 633 - struct drm_i915_gem_request * 634 - i915_gem_request_alloc(struct intel_engine_cs *engine, 635 - struct i915_gem_context *ctx) 625 + struct i915_request * 626 + i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) 636 627 { 637 - struct drm_i915_private *dev_priv = engine->i915; 638 - struct drm_i915_gem_request *req; 628 + struct drm_i915_private *i915 = engine->i915; 629 + struct i915_request *rq; 639 630 struct intel_ring *ring; 640 631 int ret; 641 632 642 - lockdep_assert_held(&dev_priv->drm.struct_mutex); 633 + lockdep_assert_held(&i915->drm.struct_mutex); 643 634 644 635 /* 645 636 * Preempt contexts are reserved for exclusive use to inject a 646 637 * preemption context switch. They are never to be used for any trivial 647 638 * request! 648 639 */ 649 - GEM_BUG_ON(ctx == dev_priv->preempt_context); 640 + GEM_BUG_ON(ctx == i915->preempt_context); 650 641 651 - /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report 642 + /* 643 + * ABI: Before userspace accesses the GPU (e.g. execbuffer), report 652 644 * EIO if the GPU is already wedged. 653 645 */ 654 - if (i915_terminally_wedged(&dev_priv->gpu_error)) 646 + if (i915_terminally_wedged(&i915->gpu_error)) 655 647 return ERR_PTR(-EIO); 656 648 657 - /* Pinning the contexts may generate requests in order to acquire 649 + /* 650 + * Pinning the contexts may generate requests in order to acquire 658 651 * GGTT space, so do this first before we reserve a seqno for 659 652 * ourselves. 660 653 */ ··· 673 664 goto err_unreserve; 674 665 675 666 /* Move the oldest request to the slab-cache (if not in use!) 
*/ 676 - req = list_first_entry_or_null(&engine->timeline->requests, 677 - typeof(*req), link); 678 - if (req && i915_gem_request_completed(req)) 679 - i915_gem_request_retire(req); 667 + rq = list_first_entry_or_null(&engine->timeline->requests, 668 + typeof(*rq), link); 669 + if (rq && i915_request_completed(rq)) 670 + i915_request_retire(rq); 680 671 681 - /* Beware: Dragons be flying overhead. 672 + /* 673 + * Beware: Dragons be flying overhead. 682 674 * 683 675 * We use RCU to look up requests in flight. The lookups may 684 676 * race with the request being allocated from the slab freelist. ··· 707 697 * 708 698 * Do not use kmem_cache_zalloc() here! 709 699 */ 710 - req = kmem_cache_alloc(dev_priv->requests, 711 - GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); 712 - if (unlikely(!req)) { 700 + rq = kmem_cache_alloc(i915->requests, 701 + GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); 702 + if (unlikely(!rq)) { 713 703 /* Ratelimit ourselves to prevent oom from malicious clients */ 714 - ret = i915_gem_wait_for_idle(dev_priv, 704 + ret = i915_gem_wait_for_idle(i915, 715 705 I915_WAIT_LOCKED | 716 706 I915_WAIT_INTERRUPTIBLE); 717 707 if (ret) ··· 725 715 * Having already penalized the client to stall, we spend 726 716 * a little extra time to re-optimise page allocation. 
727 717 */ 728 - kmem_cache_shrink(dev_priv->requests); 718 + kmem_cache_shrink(i915->requests); 729 719 rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */ 730 720 731 - req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL); 732 - if (!req) { 721 + rq = kmem_cache_alloc(i915->requests, GFP_KERNEL); 722 + if (!rq) { 733 723 ret = -ENOMEM; 734 724 goto err_unreserve; 735 725 } 736 726 } 737 727 738 - req->timeline = i915_gem_context_lookup_timeline(ctx, engine); 739 - GEM_BUG_ON(req->timeline == engine->timeline); 728 + rq->timeline = i915_gem_context_lookup_timeline(ctx, engine); 729 + GEM_BUG_ON(rq->timeline == engine->timeline); 740 730 741 - spin_lock_init(&req->lock); 742 - dma_fence_init(&req->fence, 731 + spin_lock_init(&rq->lock); 732 + dma_fence_init(&rq->fence, 743 733 &i915_fence_ops, 744 - &req->lock, 745 - req->timeline->fence_context, 746 - timeline_get_seqno(req->timeline)); 734 + &rq->lock, 735 + rq->timeline->fence_context, 736 + timeline_get_seqno(rq->timeline)); 747 737 748 738 /* We bump the ref for the fence chain */ 749 - i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify); 750 - init_waitqueue_head(&req->execute); 739 + i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify); 740 + init_waitqueue_head(&rq->execute); 751 741 752 - i915_priotree_init(&req->priotree); 742 + i915_priotree_init(&rq->priotree); 753 743 754 - INIT_LIST_HEAD(&req->active_list); 755 - req->i915 = dev_priv; 756 - req->engine = engine; 757 - req->ctx = ctx; 758 - req->ring = ring; 744 + INIT_LIST_HEAD(&rq->active_list); 745 + rq->i915 = i915; 746 + rq->engine = engine; 747 + rq->ctx = ctx; 748 + rq->ring = ring; 759 749 760 750 /* No zalloc, must clear what we need by hand */ 761 - req->global_seqno = 0; 762 - req->signaling.wait.seqno = 0; 763 - req->file_priv = NULL; 764 - req->batch = NULL; 765 - req->capture_list = NULL; 766 - req->waitboost = false; 751 + rq->global_seqno = 0; 752 + rq->signaling.wait.seqno = 0; 753 + rq->file_priv = 
NULL; 754 + rq->batch = NULL; 755 + rq->capture_list = NULL; 756 + rq->waitboost = false; 767 757 768 758 /* 769 759 * Reserve space in the ring buffer for all the commands required to 770 760 * eventually emit this request. This is to guarantee that the 771 - * i915_add_request() call can't fail. Note that the reserve may need 761 + * i915_request_add() call can't fail. Note that the reserve may need 772 762 * to be redone if the request is not actually submitted straight 773 763 * away, e.g. because a GPU scheduler has deferred it. 774 764 */ 775 - req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; 776 - GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz); 765 + rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; 766 + GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz); 777 767 778 768 /* 779 769 * Record the position of the start of the request so that ··· 781 771 * GPU processing the request, we never over-estimate the 782 772 * position of the head. 783 773 */ 784 - req->head = req->ring->emit; 774 + rq->head = rq->ring->emit; 785 775 786 776 /* Unconditionally invalidate GPU caches and TLBs. 
*/ 787 - ret = engine->emit_flush(req, EMIT_INVALIDATE); 777 + ret = engine->emit_flush(rq, EMIT_INVALIDATE); 788 778 if (ret) 789 779 goto err_unwind; 790 780 791 - ret = engine->request_alloc(req); 781 + ret = engine->request_alloc(rq); 792 782 if (ret) 793 783 goto err_unwind; 794 784 795 785 /* Check that we didn't interrupt ourselves with a new request */ 796 - GEM_BUG_ON(req->timeline->seqno != req->fence.seqno); 797 - return req; 786 + GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno); 787 + return rq; 798 788 799 789 err_unwind: 800 - req->ring->emit = req->head; 790 + rq->ring->emit = rq->head; 801 791 802 792 /* Make sure we didn't add ourselves to external state before freeing */ 803 - GEM_BUG_ON(!list_empty(&req->active_list)); 804 - GEM_BUG_ON(!list_empty(&req->priotree.signalers_list)); 805 - GEM_BUG_ON(!list_empty(&req->priotree.waiters_list)); 793 + GEM_BUG_ON(!list_empty(&rq->active_list)); 794 + GEM_BUG_ON(!list_empty(&rq->priotree.signalers_list)); 795 + GEM_BUG_ON(!list_empty(&rq->priotree.waiters_list)); 806 796 807 - kmem_cache_free(dev_priv->requests, req); 797 + kmem_cache_free(i915->requests, rq); 808 798 err_unreserve: 809 799 unreserve_engine(engine); 810 800 err_unpin: ··· 813 803 } 814 804 815 805 static int 816 - i915_gem_request_await_request(struct drm_i915_gem_request *to, 817 - struct drm_i915_gem_request *from) 806 + i915_request_await_request(struct i915_request *to, struct i915_request *from) 818 807 { 819 808 int ret; 820 809 821 810 GEM_BUG_ON(to == from); 822 811 GEM_BUG_ON(to->timeline == from->timeline); 823 812 824 - if (i915_gem_request_completed(from)) 813 + if (i915_request_completed(from)) 825 814 return 0; 826 815 827 816 if (to->engine->schedule) { ··· 843 834 844 835 GEM_BUG_ON(!from->engine->semaphore.signal); 845 836 846 - seqno = i915_gem_request_global_seqno(from); 837 + seqno = i915_request_global_seqno(from); 847 838 if (!seqno) 848 839 goto await_dma_fence; 849 840 ··· 867 858 } 868 859 869 860 int 870 - 
i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req, 871 - struct dma_fence *fence) 861 + i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) 872 862 { 873 863 struct dma_fence **child = &fence; 874 864 unsigned int nchild = 1; 875 865 int ret; 876 866 877 - /* Note that if the fence-array was created in signal-on-any mode, 867 + /* 868 + * Note that if the fence-array was created in signal-on-any mode, 878 869 * we should *not* decompose it into its individual fences. However, 879 870 * we don't currently store which mode the fence-array is operating 880 871 * in. Fortunately, the only user of signal-on-any is private to ··· 896 887 897 888 /* 898 889 * Requests on the same timeline are explicitly ordered, along 899 - * with their dependencies, by i915_add_request() which ensures 890 + * with their dependencies, by i915_request_add() which ensures 900 891 * that requests are submitted in-order through each ring. 901 892 */ 902 - if (fence->context == req->fence.context) 893 + if (fence->context == rq->fence.context) 903 894 continue; 904 895 905 896 /* Squash repeated waits to the same timelines */ 906 - if (fence->context != req->i915->mm.unordered_timeline && 907 - intel_timeline_sync_is_later(req->timeline, fence)) 897 + if (fence->context != rq->i915->mm.unordered_timeline && 898 + intel_timeline_sync_is_later(rq->timeline, fence)) 908 899 continue; 909 900 910 901 if (dma_fence_is_i915(fence)) 911 - ret = i915_gem_request_await_request(req, 912 - to_request(fence)); 902 + ret = i915_request_await_request(rq, to_request(fence)); 913 903 else 914 - ret = i915_sw_fence_await_dma_fence(&req->submit, fence, 904 + ret = i915_sw_fence_await_dma_fence(&rq->submit, fence, 915 905 I915_FENCE_TIMEOUT, 916 906 I915_FENCE_GFP); 917 907 if (ret < 0) 918 908 return ret; 919 909 920 910 /* Record the latest fence used against each timeline */ 921 - if (fence->context != req->i915->mm.unordered_timeline) 922 - 
intel_timeline_sync_set(req->timeline, fence); 911 + if (fence->context != rq->i915->mm.unordered_timeline) 912 + intel_timeline_sync_set(rq->timeline, fence); 923 913 } while (--nchild); 924 914 925 915 return 0; 926 916 } 927 917 928 918 /** 929 - * i915_gem_request_await_object - set this request to (async) wait upon a bo 919 + * i915_request_await_object - set this request to (async) wait upon a bo 930 920 * @to: request we are wishing to use 931 921 * @obj: object which may be in use on another ring. 932 922 * @write: whether the wait is on behalf of a writer ··· 945 937 * Returns 0 if successful, else propagates up the lower layer error. 946 938 */ 947 939 int 948 - i915_gem_request_await_object(struct drm_i915_gem_request *to, 949 - struct drm_i915_gem_object *obj, 950 - bool write) 940 + i915_request_await_object(struct i915_request *to, 941 + struct drm_i915_gem_object *obj, 942 + bool write) 951 943 { 952 944 struct dma_fence *excl; 953 945 int ret = 0; ··· 962 954 return ret; 963 955 964 956 for (i = 0; i < count; i++) { 965 - ret = i915_gem_request_await_dma_fence(to, shared[i]); 957 + ret = i915_request_await_dma_fence(to, shared[i]); 966 958 if (ret) 967 959 break; 968 960 ··· 978 970 979 971 if (excl) { 980 972 if (ret == 0) 981 - ret = i915_gem_request_await_dma_fence(to, excl); 973 + ret = i915_request_await_dma_fence(to, excl); 982 974 983 975 dma_fence_put(excl); 984 976 } ··· 991 983 * request is not being tracked for completion but the work itself is 992 984 * going to happen on the hardware. This would be a Bad Thing(tm). 
993 985 */ 994 - void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches) 986 + void __i915_request_add(struct i915_request *request, bool flush_caches) 995 987 { 996 988 struct intel_engine_cs *engine = request->engine; 997 989 struct intel_ring *ring = request->ring; 998 990 struct intel_timeline *timeline = request->timeline; 999 - struct drm_i915_gem_request *prev; 991 + struct i915_request *prev; 1000 992 u32 *cs; 1001 993 int err; 1002 994 1003 995 lockdep_assert_held(&request->i915->drm.struct_mutex); 1004 - trace_i915_gem_request_add(request); 996 + trace_i915_request_add(request); 1005 997 1006 998 /* 1007 999 * Make sure that no request gazumped us - if it was allocated after 1008 - * our i915_gem_request_alloc() and called __i915_add_request() before 1000 + * our i915_request_alloc() and called __i915_request_add() before 1009 1001 * us, the timeline will hold its seqno which is later than ours. 1010 1002 */ 1011 1003 GEM_BUG_ON(timeline->seqno != request->fence.seqno); ··· 1050 1042 1051 1043 prev = i915_gem_active_raw(&timeline->last_request, 1052 1044 &request->i915->drm.struct_mutex); 1053 - if (prev && !i915_gem_request_completed(prev)) { 1045 + if (prev && !i915_request_completed(prev)) { 1054 1046 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, 1055 1047 &request->submitq); 1056 1048 if (engine->schedule) ··· 1105 1097 * work on behalf of others -- but instead we should benefit from 1106 1098 * improved resource management. (Well, that's the theory at least.) 1107 1099 */ 1108 - if (prev && i915_gem_request_completed(prev)) 1109 - i915_gem_request_retire_upto(prev); 1100 + if (prev && i915_request_completed(prev)) 1101 + i915_request_retire_upto(prev); 1110 1102 } 1111 1103 1112 1104 static unsigned long local_clock_us(unsigned int *cpu) 1113 1105 { 1114 1106 unsigned long t; 1115 1107 1116 - /* Cheaply and approximately convert from nanoseconds to microseconds. 
1108 + /* 1109 + * Cheaply and approximately convert from nanoseconds to microseconds. 1117 1110 * The result and subsequent calculations are also defined in the same 1118 1111 * approximate microseconds units. The principal source of timing 1119 1112 * error here is from the simple truncation. ··· 1142 1133 return this_cpu != cpu; 1143 1134 } 1144 1135 1145 - static bool __i915_spin_request(const struct drm_i915_gem_request *req, 1136 + static bool __i915_spin_request(const struct i915_request *rq, 1146 1137 u32 seqno, int state, unsigned long timeout_us) 1147 1138 { 1148 - struct intel_engine_cs *engine = req->engine; 1139 + struct intel_engine_cs *engine = rq->engine; 1149 1140 unsigned int irq, cpu; 1150 1141 1151 1142 GEM_BUG_ON(!seqno); ··· 1164 1155 if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1)) 1165 1156 return false; 1166 1157 1167 - /* When waiting for high frequency requests, e.g. during synchronous 1158 + /* 1159 + * When waiting for high frequency requests, e.g. during synchronous 1168 1160 * rendering split between the CPU and GPU, the finite amount of time 1169 1161 * required to set up the irq and wait upon it limits the response 1170 1162 * rate. By busywaiting on the request completion for a short while we ··· 1179 1169 timeout_us += local_clock_us(&cpu); 1180 1170 do { 1181 1171 if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno)) 1182 - return seqno == i915_gem_request_global_seqno(req); 1172 + return seqno == i915_request_global_seqno(rq); 1183 1173 1184 - /* Seqno are meant to be ordered *before* the interrupt. If 1174 + /* 1175 + * Seqno are meant to be ordered *before* the interrupt. If 1185 1176 * we see an interrupt without a corresponding seqno advance, 1186 1177 * assume we won't see one in the near future but require 1187 1178 * the engine->seqno_barrier() to fixup coherency. 
··· 1202 1191 return false; 1203 1192 } 1204 1193 1205 - static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request) 1194 + static bool __i915_wait_request_check_and_reset(struct i915_request *request) 1206 1195 { 1207 1196 if (likely(!i915_reset_handoff(&request->i915->gpu_error))) 1208 1197 return false; ··· 1214 1203 1215 1204 /** 1216 1205 * i915_wait_request - wait until execution of request has finished 1217 - * @req: the request to wait upon 1206 + * @rq: the request to wait upon 1218 1207 * @flags: how to wait 1219 1208 * @timeout: how long to wait in jiffies 1220 1209 * ··· 1231 1220 * May return -EINTR is called with I915_WAIT_INTERRUPTIBLE and a signal is 1232 1221 * pending before the request completes. 1233 1222 */ 1234 - long i915_wait_request(struct drm_i915_gem_request *req, 1223 + long i915_request_wait(struct i915_request *rq, 1235 1224 unsigned int flags, 1236 1225 long timeout) 1237 1226 { 1238 1227 const int state = flags & I915_WAIT_INTERRUPTIBLE ? 
1239 1228 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; 1240 - wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue; 1229 + wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue; 1241 1230 DEFINE_WAIT_FUNC(reset, default_wake_function); 1242 1231 DEFINE_WAIT_FUNC(exec, default_wake_function); 1243 1232 struct intel_wait wait; ··· 1245 1234 might_sleep(); 1246 1235 #if IS_ENABLED(CONFIG_LOCKDEP) 1247 1236 GEM_BUG_ON(debug_locks && 1248 - !!lockdep_is_held(&req->i915->drm.struct_mutex) != 1237 + !!lockdep_is_held(&rq->i915->drm.struct_mutex) != 1249 1238 !!(flags & I915_WAIT_LOCKED)); 1250 1239 #endif 1251 1240 GEM_BUG_ON(timeout < 0); 1252 1241 1253 - if (i915_gem_request_completed(req)) 1242 + if (i915_request_completed(rq)) 1254 1243 return timeout; 1255 1244 1256 1245 if (!timeout) 1257 1246 return -ETIME; 1258 1247 1259 - trace_i915_gem_request_wait_begin(req, flags); 1248 + trace_i915_request_wait_begin(rq, flags); 1260 1249 1261 - add_wait_queue(&req->execute, &exec); 1250 + add_wait_queue(&rq->execute, &exec); 1262 1251 if (flags & I915_WAIT_LOCKED) 1263 1252 add_wait_queue(errq, &reset); 1264 1253 1265 - intel_wait_init(&wait, req); 1254 + intel_wait_init(&wait, rq); 1266 1255 1267 1256 restart: 1268 1257 do { 1269 1258 set_current_state(state); 1270 - if (intel_wait_update_request(&wait, req)) 1259 + if (intel_wait_update_request(&wait, rq)) 1271 1260 break; 1272 1261 1273 1262 if (flags & I915_WAIT_LOCKED && 1274 - __i915_wait_request_check_and_reset(req)) 1263 + __i915_wait_request_check_and_reset(rq)) 1275 1264 continue; 1276 1265 1277 1266 if (signal_pending_state(state, current)) { ··· 1288 1277 } while (1); 1289 1278 1290 1279 GEM_BUG_ON(!intel_wait_has_seqno(&wait)); 1291 - GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit)); 1280 + GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); 1292 1281 1293 1282 /* Optimistic short spin before touching IRQs */ 1294 - if (__i915_spin_request(req, wait.seqno, state, 5)) 1283 + if (__i915_spin_request(rq, 
wait.seqno, state, 5)) 1295 1284 goto complete; 1296 1285 1297 1286 set_current_state(state); 1298 - if (intel_engine_add_wait(req->engine, &wait)) 1299 - /* In order to check that we haven't missed the interrupt 1287 + if (intel_engine_add_wait(rq->engine, &wait)) 1288 + /* 1289 + * In order to check that we haven't missed the interrupt 1300 1290 * as we enabled it, we need to kick ourselves to do a 1301 1291 * coherent check on the seqno before we sleep. 1302 1292 */ 1303 1293 goto wakeup; 1304 1294 1305 1295 if (flags & I915_WAIT_LOCKED) 1306 - __i915_wait_request_check_and_reset(req); 1296 + __i915_wait_request_check_and_reset(rq); 1307 1297 1308 1298 for (;;) { 1309 1299 if (signal_pending_state(state, current)) { ··· 1320 1308 timeout = io_schedule_timeout(timeout); 1321 1309 1322 1310 if (intel_wait_complete(&wait) && 1323 - intel_wait_check_request(&wait, req)) 1311 + intel_wait_check_request(&wait, rq)) 1324 1312 break; 1325 1313 1326 1314 set_current_state(state); 1327 1315 1328 1316 wakeup: 1329 - /* Carefully check if the request is complete, giving time 1317 + /* 1318 + * Carefully check if the request is complete, giving time 1330 1319 * for the seqno to be visible following the interrupt. 1331 1320 * We also have to check in case we are kicked by the GPU 1332 1321 * reset in order to drop the struct_mutex. 1333 1322 */ 1334 - if (__i915_request_irq_complete(req)) 1323 + if (__i915_request_irq_complete(rq)) 1335 1324 break; 1336 1325 1337 - /* If the GPU is hung, and we hold the lock, reset the GPU 1326 + /* 1327 + * If the GPU is hung, and we hold the lock, reset the GPU 1338 1328 * and then check for completion. On a full reset, the engine's 1339 1329 * HW seqno will be advanced passed us and we are complete. 1340 1330 * If we do a partial reset, we have to wait for the GPU to ··· 1347 1333 * itself, or indirectly by recovering the GPU). 
1348 1334 */ 1349 1335 if (flags & I915_WAIT_LOCKED && 1350 - __i915_wait_request_check_and_reset(req)) 1336 + __i915_wait_request_check_and_reset(rq)) 1351 1337 continue; 1352 1338 1353 1339 /* Only spin if we know the GPU is processing this request */ 1354 - if (__i915_spin_request(req, wait.seqno, state, 2)) 1340 + if (__i915_spin_request(rq, wait.seqno, state, 2)) 1355 1341 break; 1356 1342 1357 - if (!intel_wait_check_request(&wait, req)) { 1358 - intel_engine_remove_wait(req->engine, &wait); 1343 + if (!intel_wait_check_request(&wait, rq)) { 1344 + intel_engine_remove_wait(rq->engine, &wait); 1359 1345 goto restart; 1360 1346 } 1361 1347 } 1362 1348 1363 - intel_engine_remove_wait(req->engine, &wait); 1349 + intel_engine_remove_wait(rq->engine, &wait); 1364 1350 complete: 1365 1351 __set_current_state(TASK_RUNNING); 1366 1352 if (flags & I915_WAIT_LOCKED) 1367 1353 remove_wait_queue(errq, &reset); 1368 - remove_wait_queue(&req->execute, &exec); 1369 - trace_i915_gem_request_wait_end(req); 1354 + remove_wait_queue(&rq->execute, &exec); 1355 + trace_i915_request_wait_end(rq); 1370 1356 1371 1357 return timeout; 1372 1358 } 1373 1359 1374 1360 static void engine_retire_requests(struct intel_engine_cs *engine) 1375 1361 { 1376 - struct drm_i915_gem_request *request, *next; 1362 + struct i915_request *request, *next; 1377 1363 u32 seqno = intel_engine_get_seqno(engine); 1378 1364 LIST_HEAD(retire); 1379 1365 ··· 1388 1374 spin_unlock_irq(&engine->timeline->lock); 1389 1375 1390 1376 list_for_each_entry_safe(request, next, &retire, link) 1391 - i915_gem_request_retire(request); 1377 + i915_request_retire(request); 1392 1378 } 1393 1379 1394 - void i915_gem_retire_requests(struct drm_i915_private *dev_priv) 1380 + void i915_retire_requests(struct drm_i915_private *i915) 1395 1381 { 1396 1382 struct intel_engine_cs *engine; 1397 1383 enum intel_engine_id id; 1398 1384 1399 - lockdep_assert_held(&dev_priv->drm.struct_mutex); 1385 + 
lockdep_assert_held(&i915->drm.struct_mutex); 1400 1386 1401 - if (!dev_priv->gt.active_requests) 1387 + if (!i915->gt.active_requests) 1402 1388 return; 1403 1389 1404 - for_each_engine(engine, dev_priv, id) 1390 + for_each_engine(engine, i915, id) 1405 1391 engine_retire_requests(engine); 1406 1392 } 1407 1393 1408 1394 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1409 1395 #include "selftests/mock_request.c" 1410 - #include "selftests/i915_gem_request.c" 1396 + #include "selftests/i915_request.c" 1411 1397 #endif
+114 -106
drivers/gpu/drm/i915/i915_gem_request.h drivers/gpu/drm/i915/i915_request.h
··· 1 1 /* 2 - * Copyright © 2008-2015 Intel Corporation 2 + * Copyright © 2008-2018 Intel Corporation 3 3 * 4 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 5 * copy of this software and associated documentation files (the "Software"), ··· 22 22 * 23 23 */ 24 24 25 - #ifndef I915_GEM_REQUEST_H 26 - #define I915_GEM_REQUEST_H 25 + #ifndef I915_REQUEST_H 26 + #define I915_REQUEST_H 27 27 28 28 #include <linux/dma-fence.h> 29 29 ··· 34 34 35 35 struct drm_file; 36 36 struct drm_i915_gem_object; 37 - struct drm_i915_gem_request; 37 + struct i915_request; 38 38 39 39 struct intel_wait { 40 40 struct rb_node node; 41 41 struct task_struct *tsk; 42 - struct drm_i915_gem_request *request; 42 + struct i915_request *request; 43 43 u32 seqno; 44 44 }; 45 45 ··· 57 57 #define I915_DEPENDENCY_ALLOC BIT(0) 58 58 }; 59 59 60 - /* Requests exist in a complex web of interdependencies. Each request 60 + /* 61 + * "People assume that time is a strict progression of cause to effect, but 62 + * actually, from a nonlinear, non-subjective viewpoint, it's more like a big 63 + * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015 64 + * 65 + * Requests exist in a complex web of interdependencies. Each request 61 66 * has to wait for some other request to complete before it is ready to be run 62 67 * (e.g. we have to wait until the pixels have been rendering into a texture 63 68 * before we can copy from it). We track the readiness of a request in terms ··· 86 81 I915_PRIORITY_INVALID = INT_MIN 87 82 }; 88 83 89 - struct i915_gem_capture_list { 90 - struct i915_gem_capture_list *next; 84 + struct i915_capture_list { 85 + struct i915_capture_list *next; 91 86 struct i915_vma *vma; 92 87 }; 93 88 ··· 111 106 * 112 107 * The requests are reference counted. 
113 108 */ 114 - struct drm_i915_gem_request { 109 + struct i915_request { 115 110 struct dma_fence fence; 116 111 spinlock_t lock; 117 112 ··· 125 120 * it persists while any request is linked to it. Requests themselves 126 121 * are also refcounted, so the request will only be freed when the last 127 122 * reference to it is dismissed, and the code in 128 - * i915_gem_request_free() will then decrement the refcount on the 123 + * i915_request_free() will then decrement the refcount on the 129 124 * context. 130 125 */ 131 126 struct i915_gem_context *ctx; ··· 134 129 struct intel_timeline *timeline; 135 130 struct intel_signal_node signaling; 136 131 137 - /* Fences for the various phases in the request's lifetime. 132 + /* 133 + * Fences for the various phases in the request's lifetime. 138 134 * 139 135 * The submit fence is used to await upon all of the request's 140 136 * dependencies. When it is signaled, the request is ready to run. ··· 145 139 wait_queue_entry_t submitq; 146 140 wait_queue_head_t execute; 147 141 148 - /* A list of everyone we wait upon, and everyone who waits upon us. 142 + /* 143 + * A list of everyone we wait upon, and everyone who waits upon us. 149 144 * Even though we will not be submitted to the hardware before the 150 145 * submit fence is signaled (it waits for all external events as well 151 146 * as our own requests), the scheduler still needs to know the ··· 157 150 struct i915_priotree priotree; 158 151 struct i915_dependency dep; 159 152 160 - /** GEM sequence number associated with this request on the 153 + /** 154 + * GEM sequence number associated with this request on the 161 155 * global execution timeline. It is zero when the request is not 162 156 * on the HW queue (i.e. not on the engine timeline list). 163 157 * Its value is guarded by the timeline spinlock. ··· 188 180 * error state dump only). 
189 181 */ 190 182 struct i915_vma *batch; 191 - /** Additional buffers requested by userspace to be captured upon 183 + /** 184 + * Additional buffers requested by userspace to be captured upon 192 185 * a GPU hang. The vma/obj on this list are protected by their 193 186 * active reference - all objects on this list must also be 194 187 * on the active_list (of their final request). 195 188 */ 196 - struct i915_gem_capture_list *capture_list; 189 + struct i915_capture_list *capture_list; 197 190 struct list_head active_list; 198 191 199 192 /** Time at which this request was emitted, in jiffies. */ ··· 222 213 return fence->ops == &i915_fence_ops; 223 214 } 224 215 225 - struct drm_i915_gem_request * __must_check 226 - i915_gem_request_alloc(struct intel_engine_cs *engine, 227 - struct i915_gem_context *ctx); 228 - void i915_gem_request_retire_upto(struct drm_i915_gem_request *req); 216 + struct i915_request * __must_check 217 + i915_request_alloc(struct intel_engine_cs *engine, 218 + struct i915_gem_context *ctx); 219 + void i915_request_retire_upto(struct i915_request *rq); 229 220 230 - static inline struct drm_i915_gem_request * 221 + static inline struct i915_request * 231 222 to_request(struct dma_fence *fence) 232 223 { 233 224 /* We assume that NULL fence/request are interoperable */ 234 - BUILD_BUG_ON(offsetof(struct drm_i915_gem_request, fence) != 0); 225 + BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0); 235 226 GEM_BUG_ON(fence && !dma_fence_is_i915(fence)); 236 - return container_of(fence, struct drm_i915_gem_request, fence); 227 + return container_of(fence, struct i915_request, fence); 237 228 } 238 229 239 - static inline struct drm_i915_gem_request * 240 - i915_gem_request_get(struct drm_i915_gem_request *req) 230 + static inline struct i915_request * 231 + i915_request_get(struct i915_request *rq) 241 232 { 242 - return to_request(dma_fence_get(&req->fence)); 233 + return to_request(dma_fence_get(&rq->fence)); 243 234 } 244 235 245 - 
static inline struct drm_i915_gem_request * 246 - i915_gem_request_get_rcu(struct drm_i915_gem_request *req) 236 + static inline struct i915_request * 237 + i915_request_get_rcu(struct i915_request *rq) 247 238 { 248 - return to_request(dma_fence_get_rcu(&req->fence)); 239 + return to_request(dma_fence_get_rcu(&rq->fence)); 249 240 } 250 241 251 242 static inline void 252 - i915_gem_request_put(struct drm_i915_gem_request *req) 243 + i915_request_put(struct i915_request *rq) 253 244 { 254 - dma_fence_put(&req->fence); 245 + dma_fence_put(&rq->fence); 255 246 } 256 247 257 248 /** 258 - * i915_gem_request_global_seqno - report the current global seqno 249 + * i915_request_global_seqno - report the current global seqno 259 250 * @request - the request 260 251 * 261 252 * A request is assigned a global seqno only when it is on the hardware ··· 273 264 * after the read, it is indeed complete). 274 265 */ 275 266 static u32 276 - i915_gem_request_global_seqno(const struct drm_i915_gem_request *request) 267 + i915_request_global_seqno(const struct i915_request *request) 277 268 { 278 269 return READ_ONCE(request->global_seqno); 279 270 } 280 271 281 - int 282 - i915_gem_request_await_object(struct drm_i915_gem_request *to, 272 + int i915_request_await_object(struct i915_request *to, 283 273 struct drm_i915_gem_object *obj, 284 274 bool write); 285 - int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req, 286 - struct dma_fence *fence); 275 + int i915_request_await_dma_fence(struct i915_request *rq, 276 + struct dma_fence *fence); 287 277 288 - void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches); 289 - #define i915_add_request(req) \ 290 - __i915_add_request(req, false) 278 + void __i915_request_add(struct i915_request *rq, bool flush_caches); 279 + #define i915_request_add(rq) \ 280 + __i915_request_add(rq, false) 291 281 292 - void __i915_gem_request_submit(struct drm_i915_gem_request *request); 293 - void 
i915_gem_request_submit(struct drm_i915_gem_request *request); 282 + void __i915_request_submit(struct i915_request *request); 283 + void i915_request_submit(struct i915_request *request); 294 284 295 - void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request); 296 - void i915_gem_request_unsubmit(struct drm_i915_gem_request *request); 285 + void __i915_request_unsubmit(struct i915_request *request); 286 + void i915_request_unsubmit(struct i915_request *request); 297 287 298 - struct intel_rps_client; 299 - #define NO_WAITBOOST ERR_PTR(-1) 300 - #define IS_RPS_CLIENT(p) (!IS_ERR(p)) 301 - #define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p)) 302 - 303 - long i915_wait_request(struct drm_i915_gem_request *req, 288 + long i915_request_wait(struct i915_request *rq, 304 289 unsigned int flags, 305 290 long timeout) 306 291 __attribute__((nonnull(1))); ··· 313 310 } 314 311 315 312 static inline bool 316 - __i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno) 313 + __i915_request_completed(const struct i915_request *rq, u32 seqno) 317 314 { 318 315 GEM_BUG_ON(!seqno); 319 - return i915_seqno_passed(intel_engine_get_seqno(req->engine), seqno) && 320 - seqno == i915_gem_request_global_seqno(req); 316 + return i915_seqno_passed(intel_engine_get_seqno(rq->engine), seqno) && 317 + seqno == i915_request_global_seqno(rq); 321 318 } 322 319 323 - static inline bool 324 - i915_gem_request_completed(const struct drm_i915_gem_request *req) 320 + static inline bool i915_request_completed(const struct i915_request *rq) 325 321 { 326 322 u32 seqno; 327 323 328 - seqno = i915_gem_request_global_seqno(req); 324 + seqno = i915_request_global_seqno(rq); 329 325 if (!seqno) 330 326 return false; 331 327 332 - return __i915_gem_request_completed(req, seqno); 328 + return __i915_request_completed(rq, seqno); 333 329 } 334 330 335 - static inline bool 336 - i915_gem_request_started(const struct drm_i915_gem_request *req) 331 + static inline bool 
i915_request_started(const struct i915_request *rq) 337 332 { 338 333 u32 seqno; 339 334 340 - seqno = i915_gem_request_global_seqno(req); 335 + seqno = i915_request_global_seqno(rq); 341 336 if (!seqno) 342 337 return false; 343 338 344 - return i915_seqno_passed(intel_engine_get_seqno(req->engine), 339 + return i915_seqno_passed(intel_engine_get_seqno(rq->engine), 345 340 seqno - 1); 346 341 } 347 342 348 343 static inline bool i915_priotree_signaled(const struct i915_priotree *pt) 349 344 { 350 - const struct drm_i915_gem_request *rq = 351 - container_of(pt, const struct drm_i915_gem_request, priotree); 345 + const struct i915_request *rq = 346 + container_of(pt, const struct i915_request, priotree); 352 347 353 - return i915_gem_request_completed(rq); 348 + return i915_request_completed(rq); 354 349 } 355 350 356 - /* We treat requests as fences. This is not be to confused with our 351 + void i915_retire_requests(struct drm_i915_private *i915); 352 + 353 + /* 354 + * We treat requests as fences. This is not be to confused with our 357 355 * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync. 
358 356 * We use the fences to synchronize access from the CPU with activity on the 359 357 * GPU, for example, we should not rewrite an object's PTE whilst the GPU ··· 384 380 struct i915_gem_active; 385 381 386 382 typedef void (*i915_gem_retire_fn)(struct i915_gem_active *, 387 - struct drm_i915_gem_request *); 383 + struct i915_request *); 388 384 389 385 struct i915_gem_active { 390 - struct drm_i915_gem_request __rcu *request; 386 + struct i915_request __rcu *request; 391 387 struct list_head link; 392 388 i915_gem_retire_fn retire; 393 389 }; 394 390 395 391 void i915_gem_retire_noop(struct i915_gem_active *, 396 - struct drm_i915_gem_request *request); 392 + struct i915_request *request); 397 393 398 394 /** 399 395 * init_request_active - prepares the activity tracker for use ··· 425 421 */ 426 422 static inline void 427 423 i915_gem_active_set(struct i915_gem_active *active, 428 - struct drm_i915_gem_request *request) 424 + struct i915_request *request) 429 425 { 430 426 list_move(&active->link, &request->active_list); 431 427 rcu_assign_pointer(active->request, request); ··· 450 446 active->retire = fn ?: i915_gem_retire_noop; 451 447 } 452 448 453 - static inline struct drm_i915_gem_request * 449 + static inline struct i915_request * 454 450 __i915_gem_active_peek(const struct i915_gem_active *active) 455 451 { 456 - /* Inside the error capture (running with the driver in an unknown 452 + /* 453 + * Inside the error capture (running with the driver in an unknown 457 454 * state), we want to bend the rules slightly (a lot). 458 455 * 459 456 * Work is in progress to make it safer, in the meantime this keeps ··· 471 466 * It does not obtain a reference on the request for the caller, so the caller 472 467 * must hold struct_mutex. 
473 468 */ 474 - static inline struct drm_i915_gem_request * 469 + static inline struct i915_request * 475 470 i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex) 476 471 { 477 472 return rcu_dereference_protected(active->request, ··· 486 481 * still active, or NULL. It does not obtain a reference on the request 487 482 * for the caller, so the caller must hold struct_mutex. 488 483 */ 489 - static inline struct drm_i915_gem_request * 484 + static inline struct i915_request * 490 485 i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex) 491 486 { 492 - struct drm_i915_gem_request *request; 487 + struct i915_request *request; 493 488 494 489 request = i915_gem_active_raw(active, mutex); 495 - if (!request || i915_gem_request_completed(request)) 490 + if (!request || i915_request_completed(request)) 496 491 return NULL; 497 492 498 493 return request; ··· 505 500 * i915_gem_active_get() returns a reference to the active request, or NULL 506 501 * if the active tracker is idle. The caller must hold struct_mutex. 507 502 */ 508 - static inline struct drm_i915_gem_request * 503 + static inline struct i915_request * 509 504 i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex) 510 505 { 511 - return i915_gem_request_get(i915_gem_active_peek(active, mutex)); 506 + return i915_request_get(i915_gem_active_peek(active, mutex)); 512 507 } 513 508 514 509 /** ··· 519 514 * if the active tracker is idle. The caller must hold the RCU read lock, but 520 515 * the returned pointer is safe to use outside of RCU. 521 516 */ 522 - static inline struct drm_i915_gem_request * 517 + static inline struct i915_request * 523 518 __i915_gem_active_get_rcu(const struct i915_gem_active *active) 524 519 { 525 - /* Performing a lockless retrieval of the active request is super 520 + /* 521 + * Performing a lockless retrieval of the active request is super 526 522 * tricky. 
SLAB_TYPESAFE_BY_RCU merely guarantees that the backing 527 523 * slab of request objects will not be freed whilst we hold the 528 524 * RCU read lock. It does not guarantee that the request itself ··· 531 525 * 532 526 * Thread A Thread B 533 527 * 534 - * req = active.request 535 - * retire(req) -> free(req); 536 - * (req is now first on the slab freelist) 528 + * rq = active.request 529 + * retire(rq) -> free(rq); 530 + * (rq is now first on the slab freelist) 537 531 * active.request = NULL 538 532 * 539 - * req = new submission on a new object 540 - * ref(req) 533 + * rq = new submission on a new object 534 + * ref(rq) 541 535 * 542 536 * To prevent the request from being reused whilst the caller 543 537 * uses it, we take a reference like normal. Whilst acquiring ··· 566 560 * 567 561 * It is then imperative that we do not zero the request on 568 562 * reallocation, so that we can chase the dangling pointers! 569 - * See i915_gem_request_alloc(). 563 + * See i915_request_alloc(). 570 564 */ 571 565 do { 572 - struct drm_i915_gem_request *request; 566 + struct i915_request *request; 573 567 574 568 request = rcu_dereference(active->request); 575 - if (!request || i915_gem_request_completed(request)) 569 + if (!request || i915_request_completed(request)) 576 570 return NULL; 577 571 578 - /* An especially silly compiler could decide to recompute the 579 - * result of i915_gem_request_completed, more specifically 572 + /* 573 + * An especially silly compiler could decide to recompute the 574 + * result of i915_request_completed, more specifically 580 575 * re-emit the load for request->fence.seqno. A race would catch 581 576 * a later seqno value, which could flip the result from true to 582 577 * false. Which means part of the instructions below might not 583 578 * be executed, while later on instructions are executed. 
Due to 584 579 * barriers within the refcounting the inconsistency can't reach 585 - * past the call to i915_gem_request_get_rcu, but not executing 586 - * that while still executing i915_gem_request_put() creates 580 + * past the call to i915_request_get_rcu, but not executing 581 + * that while still executing i915_request_put() creates 587 582 * havoc enough. Prevent this with a compiler barrier. 588 583 */ 589 584 barrier(); 590 585 591 - request = i915_gem_request_get_rcu(request); 586 + request = i915_request_get_rcu(request); 592 587 593 - /* What stops the following rcu_access_pointer() from occurring 594 - * before the above i915_gem_request_get_rcu()? If we were 588 + /* 589 + * What stops the following rcu_access_pointer() from occurring 590 + * before the above i915_request_get_rcu()? If we were 595 591 * to read the value before pausing to get the reference to 596 592 * the request, we may not notice a change in the active 597 593 * tracker. ··· 607 599 * compiler. 608 600 * 609 601 * The atomic operation at the heart of 610 - * i915_gem_request_get_rcu(), see dma_fence_get_rcu(), is 602 + * i915_request_get_rcu(), see dma_fence_get_rcu(), is 611 603 * atomic_inc_not_zero() which is only a full memory barrier 612 - * when successful. That is, if i915_gem_request_get_rcu() 604 + * when successful. That is, if i915_request_get_rcu() 613 605 * returns the request (and so with the reference counted 614 606 * incremented) then the following read for rcu_access_pointer() 615 607 * must occur after the atomic operation and so confirm ··· 621 613 if (!request || request == rcu_access_pointer(active->request)) 622 614 return rcu_pointer_handoff(request); 623 615 624 - i915_gem_request_put(request); 616 + i915_request_put(request); 625 617 } while (1); 626 618 } 627 619 ··· 633 625 * or NULL if the active tracker is idle. The reference is obtained under RCU, 634 626 * so no locking is required by the caller. 
635 627 * 636 - * The reference should be freed with i915_gem_request_put(). 628 + * The reference should be freed with i915_request_put(). 637 629 */ 638 - static inline struct drm_i915_gem_request * 630 + static inline struct i915_request * 639 631 i915_gem_active_get_unlocked(const struct i915_gem_active *active) 640 632 { 641 - struct drm_i915_gem_request *request; 633 + struct i915_request *request; 642 634 643 635 rcu_read_lock(); 644 636 request = __i915_gem_active_get_rcu(active); ··· 678 670 * can then wait upon the request, and afterwards release our reference, 679 671 * free of any locking. 680 672 * 681 - * This function wraps i915_wait_request(), see it for the full details on 673 + * This function wraps i915_request_wait(), see it for the full details on 682 674 * the arguments. 683 675 * 684 676 * Returns 0 if successful, or a negative error code. ··· 686 678 static inline int 687 679 i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags) 688 680 { 689 - struct drm_i915_gem_request *request; 681 + struct i915_request *request; 690 682 long ret = 0; 691 683 692 684 request = i915_gem_active_get_unlocked(active); 693 685 if (request) { 694 - ret = i915_wait_request(request, flags, MAX_SCHEDULE_TIMEOUT); 695 - i915_gem_request_put(request); 686 + ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT); 687 + i915_request_put(request); 696 688 } 697 689 698 690 return ret < 0 ? 
ret : 0; ··· 711 703 i915_gem_active_retire(struct i915_gem_active *active, 712 704 struct mutex *mutex) 713 705 { 714 - struct drm_i915_gem_request *request; 706 + struct i915_request *request; 715 707 long ret; 716 708 717 709 request = i915_gem_active_raw(active, mutex); 718 710 if (!request) 719 711 return 0; 720 712 721 - ret = i915_wait_request(request, 713 + ret = i915_request_wait(request, 722 714 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, 723 715 MAX_SCHEDULE_TIMEOUT); 724 716 if (ret < 0) ··· 735 727 #define for_each_active(mask, idx) \ 736 728 for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx)) 737 729 738 - #endif /* I915_GEM_REQUEST_H */ 730 + #endif /* I915_REQUEST_H */
+2 -2
drivers/gpu/drm/i915/i915_gem_shrinker.c
··· 175 175 i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED); 176 176 177 177 trace_i915_gem_shrink(i915, target, flags); 178 - i915_gem_retire_requests(i915); 178 + i915_retire_requests(i915); 179 179 180 180 /* 181 181 * Unbinding of objects will require HW access; Let us not wake the ··· 267 267 if (flags & I915_SHRINK_BOUND) 268 268 intel_runtime_pm_put(i915); 269 269 270 - i915_gem_retire_requests(i915); 270 + i915_retire_requests(i915); 271 271 272 272 shrinker_unlock(i915, unlock); 273 273
+2 -2
drivers/gpu/drm/i915/i915_gem_timeline.h
··· 27 27 28 28 #include <linux/list.h> 29 29 30 - #include "i915_utils.h" 31 - #include "i915_gem_request.h" 30 + #include "i915_request.h" 32 31 #include "i915_syncmap.h" 32 + #include "i915_utils.h" 33 33 34 34 struct i915_gem_timeline; 35 35
+9 -9
drivers/gpu/drm/i915/i915_gpu_error.c
··· 991 991 static inline uint32_t 992 992 __active_get_seqno(struct i915_gem_active *active) 993 993 { 994 - struct drm_i915_gem_request *request; 994 + struct i915_request *request; 995 995 996 996 request = __i915_gem_active_peek(active); 997 997 return request ? request->global_seqno : 0; ··· 1000 1000 static inline int 1001 1001 __active_get_engine_id(struct i915_gem_active *active) 1002 1002 { 1003 - struct drm_i915_gem_request *request; 1003 + struct i915_request *request; 1004 1004 1005 1005 request = __i915_gem_active_peek(active); 1006 1006 return request ? request->engine->id : -1; ··· 1293 1293 } 1294 1294 } 1295 1295 1296 - static void record_request(struct drm_i915_gem_request *request, 1296 + static void record_request(struct i915_request *request, 1297 1297 struct drm_i915_error_request *erq) 1298 1298 { 1299 1299 erq->context = request->ctx->hw_id; ··· 1310 1310 } 1311 1311 1312 1312 static void engine_record_requests(struct intel_engine_cs *engine, 1313 - struct drm_i915_gem_request *first, 1313 + struct i915_request *first, 1314 1314 struct drm_i915_error_engine *ee) 1315 1315 { 1316 - struct drm_i915_gem_request *request; 1316 + struct i915_request *request; 1317 1317 int count; 1318 1318 1319 1319 count = 0; ··· 1363 1363 unsigned int n; 1364 1364 1365 1365 for (n = 0; n < execlists_num_ports(execlists); n++) { 1366 - struct drm_i915_gem_request *rq = port_request(&execlists->port[n]); 1366 + struct i915_request *rq = port_request(&execlists->port[n]); 1367 1367 1368 1368 if (!rq) 1369 1369 break; ··· 1398 1398 e->active = atomic_read(&ctx->active_count); 1399 1399 } 1400 1400 1401 - static void request_record_user_bo(struct drm_i915_gem_request *request, 1401 + static void request_record_user_bo(struct i915_request *request, 1402 1402 struct drm_i915_error_engine *ee) 1403 1403 { 1404 - struct i915_gem_capture_list *c; 1404 + struct i915_capture_list *c; 1405 1405 struct drm_i915_error_object **bo; 1406 1406 long count; 1407 1407 ··· 1454 1454 
for (i = 0; i < I915_NUM_ENGINES; i++) { 1455 1455 struct intel_engine_cs *engine = dev_priv->engine[i]; 1456 1456 struct drm_i915_error_engine *ee = &error->engine[i]; 1457 - struct drm_i915_gem_request *request; 1457 + struct i915_request *request; 1458 1458 1459 1459 ee->engine_id = -1; 1460 1460
+4 -4
drivers/gpu/drm/i915/i915_irq.c
··· 1071 1071 1072 1072 static void notify_ring(struct intel_engine_cs *engine) 1073 1073 { 1074 - struct drm_i915_gem_request *rq = NULL; 1074 + struct i915_request *rq = NULL; 1075 1075 struct intel_wait *wait; 1076 1076 1077 1077 if (!engine->breadcrumbs.irq_armed) ··· 1098 1098 */ 1099 1099 if (i915_seqno_passed(intel_engine_get_seqno(engine), 1100 1100 wait->seqno)) { 1101 - struct drm_i915_gem_request *waiter = wait->request; 1101 + struct i915_request *waiter = wait->request; 1102 1102 1103 1103 wakeup = true; 1104 1104 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1105 1105 &waiter->fence.flags) && 1106 1106 intel_wait_check_request(wait, waiter)) 1107 - rq = i915_gem_request_get(waiter); 1107 + rq = i915_request_get(waiter); 1108 1108 } 1109 1109 1110 1110 if (wakeup) ··· 1117 1117 1118 1118 if (rq) { 1119 1119 dma_fence_signal(&rq->fence); 1120 - i915_gem_request_put(rq); 1120 + i915_request_put(rq); 1121 1121 } 1122 1122 1123 1123 trace_intel_engine_notify(engine, wait);
+14 -14
drivers/gpu/drm/i915/i915_perf.c
··· 1630 1630 * Same as gen8_update_reg_state_unlocked only through the batchbuffer. This 1631 1631 * is only used by the kernel context. 1632 1632 */ 1633 - static int gen8_emit_oa_config(struct drm_i915_gem_request *req, 1633 + static int gen8_emit_oa_config(struct i915_request *rq, 1634 1634 const struct i915_oa_config *oa_config) 1635 1635 { 1636 - struct drm_i915_private *dev_priv = req->i915; 1636 + struct drm_i915_private *dev_priv = rq->i915; 1637 1637 /* The MMIO offsets for Flex EU registers aren't contiguous */ 1638 1638 u32 flex_mmio[] = { 1639 1639 i915_mmio_reg_offset(EU_PERF_CNTL0), ··· 1647 1647 u32 *cs; 1648 1648 int i; 1649 1649 1650 - cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4); 1650 + cs = intel_ring_begin(rq, ARRAY_SIZE(flex_mmio) * 2 + 4); 1651 1651 if (IS_ERR(cs)) 1652 1652 return PTR_ERR(cs); 1653 1653 ··· 1685 1685 } 1686 1686 1687 1687 *cs++ = MI_NOOP; 1688 - intel_ring_advance(req, cs); 1688 + intel_ring_advance(rq, cs); 1689 1689 1690 1690 return 0; 1691 1691 } ··· 1695 1695 { 1696 1696 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 1697 1697 struct i915_gem_timeline *timeline; 1698 - struct drm_i915_gem_request *req; 1698 + struct i915_request *rq; 1699 1699 int ret; 1700 1700 1701 1701 lockdep_assert_held(&dev_priv->drm.struct_mutex); 1702 1702 1703 - i915_gem_retire_requests(dev_priv); 1703 + i915_retire_requests(dev_priv); 1704 1704 1705 - req = i915_gem_request_alloc(engine, dev_priv->kernel_context); 1706 - if (IS_ERR(req)) 1707 - return PTR_ERR(req); 1705 + rq = i915_request_alloc(engine, dev_priv->kernel_context); 1706 + if (IS_ERR(rq)) 1707 + return PTR_ERR(rq); 1708 1708 1709 - ret = gen8_emit_oa_config(req, oa_config); 1709 + ret = gen8_emit_oa_config(rq, oa_config); 1710 1710 if (ret) { 1711 - i915_add_request(req); 1711 + i915_request_add(rq); 1712 1712 return ret; 1713 1713 } 1714 1714 1715 1715 /* Queue this switch after all other activity */ 1716 1716 list_for_each_entry(timeline, 
&dev_priv->gt.timelines, link) { 1717 - struct drm_i915_gem_request *prev; 1717 + struct i915_request *prev; 1718 1718 struct intel_timeline *tl; 1719 1719 1720 1720 tl = &timeline->engine[engine->id]; 1721 1721 prev = i915_gem_active_raw(&tl->last_request, 1722 1722 &dev_priv->drm.struct_mutex); 1723 1723 if (prev) 1724 - i915_sw_fence_await_sw_fence_gfp(&req->submit, 1724 + i915_sw_fence_await_sw_fence_gfp(&rq->submit, 1725 1725 &prev->submit, 1726 1726 GFP_KERNEL); 1727 1727 } 1728 1728 1729 - i915_add_request(req); 1729 + i915_request_add(rq); 1730 1730 1731 1731 return 0; 1732 1732 }
+63 -65
drivers/gpu/drm/i915/i915_trace.h
··· 586 586 ); 587 587 588 588 TRACE_EVENT(i915_gem_ring_sync_to, 589 - TP_PROTO(struct drm_i915_gem_request *to, 590 - struct drm_i915_gem_request *from), 589 + TP_PROTO(struct i915_request *to, struct i915_request *from), 591 590 TP_ARGS(to, from), 592 591 593 592 TP_STRUCT__entry( ··· 609 610 __entry->seqno) 610 611 ); 611 612 612 - TRACE_EVENT(i915_gem_request_queue, 613 - TP_PROTO(struct drm_i915_gem_request *req, u32 flags), 614 - TP_ARGS(req, flags), 613 + TRACE_EVENT(i915_request_queue, 614 + TP_PROTO(struct i915_request *rq, u32 flags), 615 + TP_ARGS(rq, flags), 615 616 616 617 TP_STRUCT__entry( 617 618 __field(u32, dev) ··· 623 624 ), 624 625 625 626 TP_fast_assign( 626 - __entry->dev = req->i915->drm.primary->index; 627 - __entry->hw_id = req->ctx->hw_id; 628 - __entry->ring = req->engine->id; 629 - __entry->ctx = req->fence.context; 630 - __entry->seqno = req->fence.seqno; 627 + __entry->dev = rq->i915->drm.primary->index; 628 + __entry->hw_id = rq->ctx->hw_id; 629 + __entry->ring = rq->engine->id; 630 + __entry->ctx = rq->fence.context; 631 + __entry->seqno = rq->fence.seqno; 631 632 __entry->flags = flags; 632 633 ), 633 634 ··· 636 637 __entry->seqno, __entry->flags) 637 638 ); 638 639 639 - DECLARE_EVENT_CLASS(i915_gem_request, 640 - TP_PROTO(struct drm_i915_gem_request *req), 641 - TP_ARGS(req), 640 + DECLARE_EVENT_CLASS(i915_request, 641 + TP_PROTO(struct i915_request *rq), 642 + TP_ARGS(rq), 642 643 643 644 TP_STRUCT__entry( 644 645 __field(u32, dev) ··· 650 651 ), 651 652 652 653 TP_fast_assign( 653 - __entry->dev = req->i915->drm.primary->index; 654 - __entry->hw_id = req->ctx->hw_id; 655 - __entry->ring = req->engine->id; 656 - __entry->ctx = req->fence.context; 657 - __entry->seqno = req->fence.seqno; 658 - __entry->global = req->global_seqno; 654 + __entry->dev = rq->i915->drm.primary->index; 655 + __entry->hw_id = rq->ctx->hw_id; 656 + __entry->ring = rq->engine->id; 657 + __entry->ctx = rq->fence.context; 658 + __entry->seqno = 
rq->fence.seqno; 659 + __entry->global = rq->global_seqno; 659 660 ), 660 661 661 662 TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u", ··· 663 664 __entry->seqno, __entry->global) 664 665 ); 665 666 666 - DEFINE_EVENT(i915_gem_request, i915_gem_request_add, 667 - TP_PROTO(struct drm_i915_gem_request *req), 668 - TP_ARGS(req) 667 + DEFINE_EVENT(i915_request, i915_request_add, 668 + TP_PROTO(struct i915_request *rq), 669 + TP_ARGS(rq) 669 670 ); 670 671 671 672 #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) 672 - DEFINE_EVENT(i915_gem_request, i915_gem_request_submit, 673 - TP_PROTO(struct drm_i915_gem_request *req), 674 - TP_ARGS(req) 673 + DEFINE_EVENT(i915_request, i915_request_submit, 674 + TP_PROTO(struct i915_request *rq), 675 + TP_ARGS(rq) 675 676 ); 676 677 677 - DEFINE_EVENT(i915_gem_request, i915_gem_request_execute, 678 - TP_PROTO(struct drm_i915_gem_request *req), 679 - TP_ARGS(req) 678 + DEFINE_EVENT(i915_request, i915_request_execute, 679 + TP_PROTO(struct i915_request *rq), 680 + TP_ARGS(rq) 680 681 ); 681 682 682 - DECLARE_EVENT_CLASS(i915_gem_request_hw, 683 - TP_PROTO(struct drm_i915_gem_request *req, 684 - unsigned int port), 685 - TP_ARGS(req, port), 683 + DECLARE_EVENT_CLASS(i915_request_hw, 684 + TP_PROTO(struct i915_request *rq, unsigned int port), 685 + TP_ARGS(rq, port), 686 686 687 687 TP_STRUCT__entry( 688 688 __field(u32, dev) ··· 694 696 ), 695 697 696 698 TP_fast_assign( 697 - __entry->dev = req->i915->drm.primary->index; 698 - __entry->hw_id = req->ctx->hw_id; 699 - __entry->ring = req->engine->id; 700 - __entry->ctx = req->fence.context; 701 - __entry->seqno = req->fence.seqno; 702 - __entry->global_seqno = req->global_seqno; 703 - __entry->port = port; 704 - ), 699 + __entry->dev = rq->i915->drm.primary->index; 700 + __entry->hw_id = rq->ctx->hw_id; 701 + __entry->ring = rq->engine->id; 702 + __entry->ctx = rq->fence.context; 703 + __entry->seqno = rq->fence.seqno; 704 + __entry->global_seqno = rq->global_seqno; 
705 + __entry->port = port; 706 + ), 705 707 706 708 TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u", 707 709 __entry->dev, __entry->hw_id, __entry->ring, ··· 709 711 __entry->global_seqno, __entry->port) 710 712 ); 711 713 712 - DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in, 713 - TP_PROTO(struct drm_i915_gem_request *req, unsigned int port), 714 - TP_ARGS(req, port) 714 + DEFINE_EVENT(i915_request_hw, i915_request_in, 715 + TP_PROTO(struct i915_request *rq, unsigned int port), 716 + TP_ARGS(rq, port) 715 717 ); 716 718 717 - DEFINE_EVENT(i915_gem_request, i915_gem_request_out, 718 - TP_PROTO(struct drm_i915_gem_request *req), 719 - TP_ARGS(req) 719 + DEFINE_EVENT(i915_request, i915_request_out, 720 + TP_PROTO(struct i915_request *rq), 721 + TP_ARGS(rq) 720 722 ); 721 723 #else 722 724 #if !defined(TRACE_HEADER_MULTI_READ) 723 725 static inline void 724 - trace_i915_gem_request_submit(struct drm_i915_gem_request *req) 726 + trace_i915_request_submit(struct i915_request *rq) 725 727 { 726 728 } 727 729 728 730 static inline void 729 - trace_i915_gem_request_execute(struct drm_i915_gem_request *req) 731 + trace_i915_request_execute(struct i915_request *rq) 730 732 { 731 733 } 732 734 733 735 static inline void 734 - trace_i915_gem_request_in(struct drm_i915_gem_request *req, unsigned int port) 736 + trace_i915_request_in(struct i915_request *rq, unsigned int port) 735 737 { 736 738 } 737 739 738 740 static inline void 739 - trace_i915_gem_request_out(struct drm_i915_gem_request *req) 741 + trace_i915_request_out(struct i915_request *rq) 740 742 { 741 743 } 742 744 #endif ··· 765 767 __entry->waiters) 766 768 ); 767 769 768 - DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, 769 - TP_PROTO(struct drm_i915_gem_request *req), 770 - TP_ARGS(req) 770 + DEFINE_EVENT(i915_request, i915_request_retire, 771 + TP_PROTO(struct i915_request *rq), 772 + TP_ARGS(rq) 771 773 ); 772 774 773 - TRACE_EVENT(i915_gem_request_wait_begin, 774 - 
TP_PROTO(struct drm_i915_gem_request *req, unsigned int flags), 775 - TP_ARGS(req, flags), 775 + TRACE_EVENT(i915_request_wait_begin, 776 + TP_PROTO(struct i915_request *rq, unsigned int flags), 777 + TP_ARGS(rq, flags), 776 778 777 779 TP_STRUCT__entry( 778 780 __field(u32, dev) ··· 791 793 * less desirable. 792 794 */ 793 795 TP_fast_assign( 794 - __entry->dev = req->i915->drm.primary->index; 795 - __entry->hw_id = req->ctx->hw_id; 796 - __entry->ring = req->engine->id; 797 - __entry->ctx = req->fence.context; 798 - __entry->seqno = req->fence.seqno; 799 - __entry->global = req->global_seqno; 796 + __entry->dev = rq->i915->drm.primary->index; 797 + __entry->hw_id = rq->ctx->hw_id; 798 + __entry->ring = rq->engine->id; 799 + __entry->ctx = rq->fence.context; 800 + __entry->seqno = rq->fence.seqno; 801 + __entry->global = rq->global_seqno; 800 802 __entry->flags = flags; 801 803 ), 802 804 ··· 806 808 !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags) 807 809 ); 808 810 809 - DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, 810 - TP_PROTO(struct drm_i915_gem_request *req), 811 - TP_ARGS(req) 811 + DEFINE_EVENT(i915_request, i915_request_wait_end, 812 + TP_PROTO(struct i915_request *rq), 813 + TP_ARGS(rq) 812 814 ); 813 815 814 816 TRACE_EVENT(i915_flip_request,
+1 -2
drivers/gpu/drm/i915/i915_vma.c
··· 31 31 #include <drm/drm_gem.h> 32 32 33 33 static void 34 - i915_vma_retire(struct i915_gem_active *active, 35 - struct drm_i915_gem_request *rq) 34 + i915_vma_retire(struct i915_gem_active *active, struct i915_request *rq) 36 35 { 37 36 const unsigned int idx = rq->engine->id; 38 37 struct i915_vma *vma =
+1 -1
drivers/gpu/drm/i915/i915_vma.h
··· 32 32 #include "i915_gem_gtt.h" 33 33 #include "i915_gem_fence_reg.h" 34 34 #include "i915_gem_object.h" 35 - #include "i915_gem_request.h" 36 35 36 + #include "i915_request.h" 37 37 38 38 enum i915_cache_level; 39 39
+15 -16
drivers/gpu/drm/i915/intel_breadcrumbs.c
··· 588 588 spin_unlock_irq(&b->rb_lock); 589 589 } 590 590 591 - static bool signal_complete(const struct drm_i915_gem_request *request) 591 + static bool signal_complete(const struct i915_request *request) 592 592 { 593 593 if (!request) 594 594 return false; ··· 600 600 return __i915_request_irq_complete(request); 601 601 } 602 602 603 - static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) 603 + static struct i915_request *to_signaler(struct rb_node *rb) 604 604 { 605 - return rb_entry(rb, struct drm_i915_gem_request, signaling.node); 605 + return rb_entry(rb, struct i915_request, signaling.node); 606 606 } 607 607 608 608 static void signaler_set_rtpriority(void) ··· 613 613 } 614 614 615 615 static void __intel_engine_remove_signal(struct intel_engine_cs *engine, 616 - struct drm_i915_gem_request *request) 616 + struct i915_request *request) 617 617 { 618 618 struct intel_breadcrumbs *b = &engine->breadcrumbs; 619 619 ··· 644 644 } 645 645 } 646 646 647 - static struct drm_i915_gem_request * 647 + static struct i915_request * 648 648 get_first_signal_rcu(struct intel_breadcrumbs *b) 649 649 { 650 650 /* ··· 654 654 * the required memory barriers. 
655 655 */ 656 656 do { 657 - struct drm_i915_gem_request *request; 657 + struct i915_request *request; 658 658 659 659 request = rcu_dereference(b->first_signal); 660 660 if (request) 661 - request = i915_gem_request_get_rcu(request); 661 + request = i915_request_get_rcu(request); 662 662 663 663 barrier(); 664 664 665 665 if (!request || request == rcu_access_pointer(b->first_signal)) 666 666 return rcu_pointer_handoff(request); 667 667 668 - i915_gem_request_put(request); 668 + i915_request_put(request); 669 669 } while (1); 670 670 } 671 671 ··· 673 673 { 674 674 struct intel_engine_cs *engine = arg; 675 675 struct intel_breadcrumbs *b = &engine->breadcrumbs; 676 - struct drm_i915_gem_request *request; 676 + struct i915_request *request; 677 677 678 678 /* Install ourselves with high priority to reduce signalling latency */ 679 679 signaler_set_rtpriority(); ··· 699 699 &request->fence.flags)) { 700 700 local_bh_disable(); 701 701 dma_fence_signal(&request->fence); 702 - GEM_BUG_ON(!i915_gem_request_completed(request)); 702 + GEM_BUG_ON(!i915_request_completed(request)); 703 703 local_bh_enable(); /* kick start the tasklets */ 704 704 } 705 705 ··· 718 718 */ 719 719 do_schedule = need_resched(); 720 720 } 721 - i915_gem_request_put(request); 721 + i915_request_put(request); 722 722 723 723 if (unlikely(do_schedule)) { 724 724 if (kthread_should_park()) ··· 735 735 return 0; 736 736 } 737 737 738 - void intel_engine_enable_signaling(struct drm_i915_gem_request *request, 739 - bool wakeup) 738 + void intel_engine_enable_signaling(struct i915_request *request, bool wakeup) 740 739 { 741 740 struct intel_engine_cs *engine = request->engine; 742 741 struct intel_breadcrumbs *b = &engine->breadcrumbs; ··· 752 753 GEM_BUG_ON(!irqs_disabled()); 753 754 lockdep_assert_held(&request->lock); 754 755 755 - seqno = i915_gem_request_global_seqno(request); 756 + seqno = i915_request_global_seqno(request); 756 757 if (!seqno) 757 758 return; 758 759 ··· 773 774 */ 774 775 
wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait); 775 776 776 - if (!__i915_gem_request_completed(request, seqno)) { 777 + if (!__i915_request_completed(request, seqno)) { 777 778 struct rb_node *parent, **p; 778 779 bool first; 779 780 ··· 810 811 wake_up_process(b->signaler); 811 812 } 812 813 813 - void intel_engine_cancel_signaling(struct drm_i915_gem_request *request) 814 + void intel_engine_cancel_signaling(struct i915_request *request) 814 815 { 815 816 GEM_BUG_ON(!irqs_disabled()); 816 817 lockdep_assert_held(&request->lock);
+4 -4
drivers/gpu/drm/i915/intel_display.c
··· 12584 12584 struct wait_queue_entry wait; 12585 12585 12586 12586 struct drm_crtc *crtc; 12587 - struct drm_i915_gem_request *request; 12587 + struct i915_request *request; 12588 12588 }; 12589 12589 12590 12590 static int do_rps_boost(struct wait_queue_entry *_wait, 12591 12591 unsigned mode, int sync, void *key) 12592 12592 { 12593 12593 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 12594 - struct drm_i915_gem_request *rq = wait->request; 12594 + struct i915_request *rq = wait->request; 12595 12595 12596 12596 /* 12597 12597 * If we missed the vblank, but the request is already running it 12598 12598 * is reasonable to assume that it will complete before the next 12599 12599 * vblank without our intervention, so leave RPS alone. 12600 12600 */ 12601 - if (!i915_gem_request_started(rq)) 12601 + if (!i915_request_started(rq)) 12602 12602 gen6_rps_boost(rq, NULL); 12603 - i915_gem_request_put(rq); 12603 + i915_request_put(rq); 12604 12604 12605 12605 drm_crtc_vblank_put(wait->crtc); 12606 12606
+1 -2
drivers/gpu/drm/i915/intel_drv.h
··· 1894 1894 void gen6_rps_busy(struct drm_i915_private *dev_priv); 1895 1895 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); 1896 1896 void gen6_rps_idle(struct drm_i915_private *dev_priv); 1897 - void gen6_rps_boost(struct drm_i915_gem_request *rq, 1898 - struct intel_rps_client *rps); 1897 + void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps); 1899 1898 void g4x_wm_get_hw_state(struct drm_device *dev); 1900 1899 void vlv_wm_get_hw_state(struct drm_device *dev); 1901 1900 void ilk_wm_get_hw_state(struct drm_device *dev);
+13 -13
drivers/gpu/drm/i915/intel_engine_cs.c
··· 1426 1426 return 0; 1427 1427 } 1428 1428 1429 - int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) 1429 + int intel_ring_workarounds_emit(struct i915_request *rq) 1430 1430 { 1431 - struct i915_workarounds *w = &req->i915->workarounds; 1431 + struct i915_workarounds *w = &rq->i915->workarounds; 1432 1432 u32 *cs; 1433 1433 int ret, i; 1434 1434 1435 1435 if (w->count == 0) 1436 1436 return 0; 1437 1437 1438 - ret = req->engine->emit_flush(req, EMIT_BARRIER); 1438 + ret = rq->engine->emit_flush(rq, EMIT_BARRIER); 1439 1439 if (ret) 1440 1440 return ret; 1441 1441 1442 - cs = intel_ring_begin(req, (w->count * 2 + 2)); 1442 + cs = intel_ring_begin(rq, w->count * 2 + 2); 1443 1443 if (IS_ERR(cs)) 1444 1444 return PTR_ERR(cs); 1445 1445 ··· 1450 1450 } 1451 1451 *cs++ = MI_NOOP; 1452 1452 1453 - intel_ring_advance(req, cs); 1453 + intel_ring_advance(rq, cs); 1454 1454 1455 - ret = req->engine->emit_flush(req, EMIT_BARRIER); 1455 + ret = rq->engine->emit_flush(rq, EMIT_BARRIER); 1456 1456 if (ret) 1457 1457 return ret; 1458 1458 ··· 1552 1552 { 1553 1553 const struct i915_gem_context * const kernel_context = 1554 1554 engine->i915->kernel_context; 1555 - struct drm_i915_gem_request *rq; 1555 + struct i915_request *rq; 1556 1556 1557 1557 lockdep_assert_held(&engine->i915->drm.struct_mutex); 1558 1558 ··· 1664 1664 } 1665 1665 1666 1666 static void print_request(struct drm_printer *m, 1667 - struct drm_i915_gem_request *rq, 1667 + struct i915_request *rq, 1668 1668 const char *prefix) 1669 1669 { 1670 1670 drm_printf(m, "%s%x%s [%x:%x] prio=%d @ %dms: %s\n", prefix, 1671 1671 rq->global_seqno, 1672 - i915_gem_request_completed(rq) ? "!" : "", 1672 + i915_request_completed(rq) ? "!" 
: "", 1673 1673 rq->ctx->hw_id, rq->fence.seqno, 1674 1674 rq->priotree.priority, 1675 1675 jiffies_to_msecs(jiffies - rq->emitted_jiffies), ··· 1803 1803 1804 1804 rcu_read_lock(); 1805 1805 for (idx = 0; idx < execlists_num_ports(execlists); idx++) { 1806 - struct drm_i915_gem_request *rq; 1806 + struct i915_request *rq; 1807 1807 unsigned int count; 1808 1808 1809 1809 rq = port_unpack(&execlists->port[idx], &count); ··· 1837 1837 struct intel_breadcrumbs * const b = &engine->breadcrumbs; 1838 1838 const struct intel_engine_execlists * const execlists = &engine->execlists; 1839 1839 struct i915_gpu_error * const error = &engine->i915->gpu_error; 1840 - struct drm_i915_gem_request *rq; 1840 + struct i915_request *rq; 1841 1841 struct rb_node *rb; 1842 1842 1843 1843 if (header) { ··· 1866 1866 drm_printf(m, "\tRequests:\n"); 1867 1867 1868 1868 rq = list_first_entry(&engine->timeline->requests, 1869 - struct drm_i915_gem_request, link); 1869 + struct i915_request, link); 1870 1870 if (&rq->link != &engine->timeline->requests) 1871 1871 print_request(m, rq, "\t\tfirst "); 1872 1872 1873 1873 rq = list_last_entry(&engine->timeline->requests, 1874 - struct drm_i915_gem_request, link); 1874 + struct i915_request, link); 1875 1875 if (&rq->link != &engine->timeline->requests) 1876 1876 print_request(m, rq, "\t\tlast "); 1877 1877
+12 -15
drivers/gpu/drm/i915/intel_guc_submission.c
··· 496 496 GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED); 497 497 } 498 498 499 - static void guc_add_request(struct intel_guc *guc, 500 - struct drm_i915_gem_request *rq) 499 + static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) 501 500 { 502 501 struct intel_guc_client *client = guc->execbuf_client; 503 502 struct intel_engine_cs *engine = rq->engine; ··· 647 648 unsigned int n; 648 649 649 650 for (n = 0; n < execlists_num_ports(execlists); n++) { 650 - struct drm_i915_gem_request *rq; 651 + struct i915_request *rq; 651 652 unsigned int count; 652 653 653 654 rq = port_unpack(&port[n], &count); ··· 661 662 } 662 663 } 663 664 664 - static void port_assign(struct execlist_port *port, 665 - struct drm_i915_gem_request *rq) 665 + static void port_assign(struct execlist_port *port, struct i915_request *rq) 666 666 { 667 667 GEM_BUG_ON(port_isset(port)); 668 668 669 - port_set(port, i915_gem_request_get(rq)); 669 + port_set(port, i915_request_get(rq)); 670 670 } 671 671 672 672 static void guc_dequeue(struct intel_engine_cs *engine) 673 673 { 674 674 struct intel_engine_execlists * const execlists = &engine->execlists; 675 675 struct execlist_port *port = execlists->port; 676 - struct drm_i915_gem_request *last = NULL; 676 + struct i915_request *last = NULL; 677 677 const struct execlist_port * const last_port = 678 678 &execlists->port[execlists->port_mask]; 679 679 bool submit = false; ··· 708 710 709 711 do { 710 712 struct i915_priolist *p = rb_entry(rb, typeof(*p), node); 711 - struct drm_i915_gem_request *rq, *rn; 713 + struct i915_request *rq, *rn; 712 714 713 715 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { 714 716 if (last && rq->ctx != last->ctx) { ··· 725 727 726 728 INIT_LIST_HEAD(&rq->priotree.link); 727 729 728 - __i915_gem_request_submit(rq); 729 - trace_i915_gem_request_in(rq, 730 - port_index(port, execlists)); 730 + __i915_request_submit(rq); 731 + trace_i915_request_in(rq, port_index(port, 
execlists)); 731 732 last = rq; 732 733 submit = true; 733 734 } ··· 759 762 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; 760 763 struct intel_engine_execlists * const execlists = &engine->execlists; 761 764 struct execlist_port *port = execlists->port; 762 - struct drm_i915_gem_request *rq; 765 + struct i915_request *rq; 763 766 764 767 rq = port_request(&port[0]); 765 - while (rq && i915_gem_request_completed(rq)) { 766 - trace_i915_gem_request_out(rq); 767 - i915_gem_request_put(rq); 768 + while (rq && i915_request_completed(rq)) { 769 + trace_i915_request_out(rq); 770 + i915_request_put(rq); 768 771 769 772 execlists_port_complete(execlists, port); 770 773
+56 -59
drivers/gpu/drm/i915/intel_lrc.c
··· 267 267 return ptr_pack_bits(p, first, 1); 268 268 } 269 269 270 - static void unwind_wa_tail(struct drm_i915_gem_request *rq) 270 + static void unwind_wa_tail(struct i915_request *rq) 271 271 { 272 272 rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); 273 273 assert_ring_tail_valid(rq->ring, rq->tail); ··· 275 275 276 276 static void __unwind_incomplete_requests(struct intel_engine_cs *engine) 277 277 { 278 - struct drm_i915_gem_request *rq, *rn; 278 + struct i915_request *rq, *rn; 279 279 struct i915_priolist *uninitialized_var(p); 280 280 int last_prio = I915_PRIORITY_INVALID; 281 281 ··· 284 284 list_for_each_entry_safe_reverse(rq, rn, 285 285 &engine->timeline->requests, 286 286 link) { 287 - if (i915_gem_request_completed(rq)) 287 + if (i915_request_completed(rq)) 288 288 return; 289 289 290 - __i915_gem_request_unsubmit(rq); 290 + __i915_request_unsubmit(rq); 291 291 unwind_wa_tail(rq); 292 292 293 293 GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID); ··· 316 316 } 317 317 318 318 static inline void 319 - execlists_context_status_change(struct drm_i915_gem_request *rq, 320 - unsigned long status) 319 + execlists_context_status_change(struct i915_request *rq, unsigned long status) 321 320 { 322 321 /* 323 322 * Only used when GVT-g is enabled now. 
When GVT-g is disabled, ··· 330 331 } 331 332 332 333 static inline void 333 - execlists_context_schedule_in(struct drm_i915_gem_request *rq) 334 + execlists_context_schedule_in(struct i915_request *rq) 334 335 { 335 336 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); 336 337 intel_engine_context_in(rq->engine); 337 338 } 338 339 339 340 static inline void 340 - execlists_context_schedule_out(struct drm_i915_gem_request *rq) 341 + execlists_context_schedule_out(struct i915_request *rq) 341 342 { 342 343 intel_engine_context_out(rq->engine); 343 344 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); ··· 352 353 ASSIGN_CTX_PDP(ppgtt, reg_state, 0); 353 354 } 354 355 355 - static u64 execlists_update_context(struct drm_i915_gem_request *rq) 356 + static u64 execlists_update_context(struct i915_request *rq) 356 357 { 357 358 struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; 358 359 struct i915_hw_ppgtt *ppgtt = ··· 384 385 unsigned int n; 385 386 386 387 for (n = execlists_num_ports(&engine->execlists); n--; ) { 387 - struct drm_i915_gem_request *rq; 388 + struct i915_request *rq; 388 389 unsigned int count; 389 390 u64 desc; 390 391 ··· 429 430 return true; 430 431 } 431 432 432 - static void port_assign(struct execlist_port *port, 433 - struct drm_i915_gem_request *rq) 433 + static void port_assign(struct execlist_port *port, struct i915_request *rq) 434 434 { 435 435 GEM_BUG_ON(rq == port_request(port)); 436 436 437 437 if (port_isset(port)) 438 - i915_gem_request_put(port_request(port)); 438 + i915_request_put(port_request(port)); 439 439 440 - port_set(port, port_pack(i915_gem_request_get(rq), port_count(port))); 440 + port_set(port, port_pack(i915_request_get(rq), port_count(port))); 441 441 } 442 442 443 443 static void inject_preempt_context(struct intel_engine_cs *engine) ··· 474 476 struct execlist_port *port = execlists->port; 475 477 const struct execlist_port * const last_port = 476 478 
&execlists->port[execlists->port_mask]; 477 - struct drm_i915_gem_request *last = port_request(port); 479 + struct i915_request *last = port_request(port); 478 480 struct rb_node *rb; 479 481 bool submit = false; 480 482 ··· 563 565 564 566 /* WaIdleLiteRestore:bdw,skl 565 567 * Apply the wa NOOPs to prevent 566 - * ring:HEAD == req:TAIL as we resubmit the 568 + * ring:HEAD == rq:TAIL as we resubmit the 567 569 * request. See gen8_emit_breadcrumb() for 568 570 * where we prepare the padding after the 569 571 * end of the request. ··· 574 576 575 577 do { 576 578 struct i915_priolist *p = rb_entry(rb, typeof(*p), node); 577 - struct drm_i915_gem_request *rq, *rn; 579 + struct i915_request *rq, *rn; 578 580 579 581 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { 580 582 /* ··· 624 626 } 625 627 626 628 INIT_LIST_HEAD(&rq->priotree.link); 627 - __i915_gem_request_submit(rq); 628 - trace_i915_gem_request_in(rq, port_index(port, execlists)); 629 + __i915_request_submit(rq); 630 + trace_i915_request_in(rq, port_index(port, execlists)); 629 631 last = rq; 630 632 submit = true; 631 633 } ··· 663 665 unsigned int num_ports = execlists_num_ports(execlists); 664 666 665 667 while (num_ports-- && port_isset(port)) { 666 - struct drm_i915_gem_request *rq = port_request(port); 668 + struct i915_request *rq = port_request(port); 667 669 668 670 GEM_BUG_ON(!execlists->active); 669 671 intel_engine_context_out(rq->engine); 670 672 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED); 671 - i915_gem_request_put(rq); 673 + i915_request_put(rq); 672 674 673 675 memset(port, 0, sizeof(*port)); 674 676 port++; ··· 678 680 static void execlists_cancel_requests(struct intel_engine_cs *engine) 679 681 { 680 682 struct intel_engine_execlists * const execlists = &engine->execlists; 681 - struct drm_i915_gem_request *rq, *rn; 683 + struct i915_request *rq, *rn; 682 684 struct rb_node *rb; 683 685 unsigned long flags; 684 686 ··· 690 692 /* Mark all executing 
requests as skipped. */ 691 693 list_for_each_entry(rq, &engine->timeline->requests, link) { 692 694 GEM_BUG_ON(!rq->global_seqno); 693 - if (!i915_gem_request_completed(rq)) 695 + if (!i915_request_completed(rq)) 694 696 dma_fence_set_error(&rq->fence, -EIO); 695 697 } 696 698 ··· 703 705 INIT_LIST_HEAD(&rq->priotree.link); 704 706 705 707 dma_fence_set_error(&rq->fence, -EIO); 706 - __i915_gem_request_submit(rq); 708 + __i915_request_submit(rq); 707 709 } 708 710 709 711 rb = rb_next(rb); ··· 804 806 tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?"); 805 807 806 808 while (head != tail) { 807 - struct drm_i915_gem_request *rq; 809 + struct i915_request *rq; 808 810 unsigned int status; 809 811 unsigned int count; 810 812 ··· 883 885 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); 884 886 GEM_BUG_ON(port_isset(&port[1]) && 885 887 !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); 886 - GEM_BUG_ON(!i915_gem_request_completed(rq)); 888 + GEM_BUG_ON(!i915_request_completed(rq)); 887 889 execlists_context_schedule_out(rq); 888 - trace_i915_gem_request_out(rq); 889 - i915_gem_request_put(rq); 890 + trace_i915_request_out(rq); 891 + i915_request_put(rq); 890 892 891 893 execlists_port_complete(execlists, port); 892 894 } else { ··· 926 928 tasklet_hi_schedule(&engine->execlists.tasklet); 927 929 } 928 930 929 - static void execlists_submit_request(struct drm_i915_gem_request *request) 931 + static void execlists_submit_request(struct i915_request *request) 930 932 { 931 933 struct intel_engine_cs *engine = request->engine; 932 934 unsigned long flags; ··· 942 944 spin_unlock_irqrestore(&engine->timeline->lock, flags); 943 945 } 944 946 945 - static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt) 947 + static struct i915_request *pt_to_request(struct i915_priotree *pt) 946 948 { 947 - return container_of(pt, struct drm_i915_gem_request, priotree); 949 + return container_of(pt, struct 
i915_request, priotree); 948 950 } 949 951 950 952 static struct intel_engine_cs * ··· 962 964 return engine; 963 965 } 964 966 965 - static void execlists_schedule(struct drm_i915_gem_request *request, int prio) 967 + static void execlists_schedule(struct i915_request *request, int prio) 966 968 { 967 969 struct intel_engine_cs *engine; 968 970 struct i915_dependency *dep, *p; ··· 971 973 972 974 GEM_BUG_ON(prio == I915_PRIORITY_INVALID); 973 975 974 - if (i915_gem_request_completed(request)) 976 + if (i915_request_completed(request)) 975 977 return; 976 978 977 979 if (prio <= READ_ONCE(request->priotree.priority)) ··· 1156 1158 i915_gem_context_put(ctx); 1157 1159 } 1158 1160 1159 - static int execlists_request_alloc(struct drm_i915_gem_request *request) 1161 + static int execlists_request_alloc(struct i915_request *request) 1160 1162 { 1161 1163 struct intel_engine_cs *engine = request->engine; 1162 1164 struct intel_context *ce = &request->ctx->engine[engine->id]; ··· 1588 1590 } 1589 1591 1590 1592 static void reset_common_ring(struct intel_engine_cs *engine, 1591 - struct drm_i915_gem_request *request) 1593 + struct i915_request *request) 1592 1594 { 1593 1595 struct intel_engine_execlists * const execlists = &engine->execlists; 1594 1596 struct intel_context *ce; ··· 1656 1658 unwind_wa_tail(request); 1657 1659 } 1658 1660 1659 - static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1661 + static int intel_logical_ring_emit_pdps(struct i915_request *rq) 1660 1662 { 1661 - struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; 1662 - struct intel_engine_cs *engine = req->engine; 1663 + struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; 1664 + struct intel_engine_cs *engine = rq->engine; 1663 1665 const int num_lri_cmds = GEN8_3LVL_PDPES * 2; 1664 1666 u32 *cs; 1665 1667 int i; 1666 1668 1667 - cs = intel_ring_begin(req, num_lri_cmds * 2 + 2); 1669 + cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2); 1668 1670 if (IS_ERR(cs)) 1669 1671 return 
PTR_ERR(cs); 1670 1672 ··· 1679 1681 } 1680 1682 1681 1683 *cs++ = MI_NOOP; 1682 - intel_ring_advance(req, cs); 1684 + intel_ring_advance(rq, cs); 1683 1685 1684 1686 return 0; 1685 1687 } 1686 1688 1687 - static int gen8_emit_bb_start(struct drm_i915_gem_request *req, 1689 + static int gen8_emit_bb_start(struct i915_request *rq, 1688 1690 u64 offset, u32 len, 1689 1691 const unsigned int flags) 1690 1692 { ··· 1697 1699 * it is unsafe in case of lite-restore (because the ctx is 1698 1700 * not idle). PML4 is allocated during ppgtt init so this is 1699 1701 * not needed in 48-bit.*/ 1700 - if (req->ctx->ppgtt && 1701 - (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings) && 1702 - !i915_vm_is_48bit(&req->ctx->ppgtt->base) && 1703 - !intel_vgpu_active(req->i915)) { 1704 - ret = intel_logical_ring_emit_pdps(req); 1702 + if (rq->ctx->ppgtt && 1703 + (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) && 1704 + !i915_vm_is_48bit(&rq->ctx->ppgtt->base) && 1705 + !intel_vgpu_active(rq->i915)) { 1706 + ret = intel_logical_ring_emit_pdps(rq); 1705 1707 if (ret) 1706 1708 return ret; 1707 1709 1708 - req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); 1710 + rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine); 1709 1711 } 1710 1712 1711 - cs = intel_ring_begin(req, 4); 1713 + cs = intel_ring_begin(rq, 4); 1712 1714 if (IS_ERR(cs)) 1713 1715 return PTR_ERR(cs); 1714 1716 ··· 1737 1739 (flags & I915_DISPATCH_RS ? 
MI_BATCH_RESOURCE_STREAMER : 0); 1738 1740 *cs++ = lower_32_bits(offset); 1739 1741 *cs++ = upper_32_bits(offset); 1740 - intel_ring_advance(req, cs); 1742 + intel_ring_advance(rq, cs); 1741 1743 1742 1744 return 0; 1743 1745 } ··· 1756 1758 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1757 1759 } 1758 1760 1759 - static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode) 1761 + static int gen8_emit_flush(struct i915_request *request, u32 mode) 1760 1762 { 1761 1763 u32 cmd, *cs; 1762 1764 ··· 1788 1790 return 0; 1789 1791 } 1790 1792 1791 - static int gen8_emit_flush_render(struct drm_i915_gem_request *request, 1793 + static int gen8_emit_flush_render(struct i915_request *request, 1792 1794 u32 mode) 1793 1795 { 1794 1796 struct intel_engine_cs *engine = request->engine; ··· 1863 1865 * used as a workaround for not being allowed to do lite 1864 1866 * restore with HEAD==TAIL (WaIdleLiteRestore). 1865 1867 */ 1866 - static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs) 1868 + static void gen8_emit_wa_tail(struct i915_request *request, u32 *cs) 1867 1869 { 1868 1870 /* Ensure there's always at least one preemption point per-request. */ 1869 1871 *cs++ = MI_ARB_CHECK; ··· 1871 1873 request->wa_tail = intel_ring_offset(request, cs); 1872 1874 } 1873 1875 1874 - static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs) 1876 + static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs) 1875 1877 { 1876 1878 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */ 1877 1879 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); ··· 1887 1889 } 1888 1890 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS; 1889 1891 1890 - static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request, 1891 - u32 *cs) 1892 + static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs) 1892 1893 { 1893 1894 /* We're using qword write, seqno should be aligned to 8 bytes. 
*/ 1894 1895 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); ··· 1903 1906 } 1904 1907 static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS; 1905 1908 1906 - static int gen8_init_rcs_context(struct drm_i915_gem_request *req) 1909 + static int gen8_init_rcs_context(struct i915_request *rq) 1907 1910 { 1908 1911 int ret; 1909 1912 1910 - ret = intel_ring_workarounds_emit(req); 1913 + ret = intel_ring_workarounds_emit(rq); 1911 1914 if (ret) 1912 1915 return ret; 1913 1916 1914 - ret = intel_rcs_context_init_mocs(req); 1917 + ret = intel_rcs_context_init_mocs(rq); 1915 1918 /* 1916 1919 * Failing to program the MOCS is non-fatal.The system will not 1917 1920 * run at peak performance. So generate an error and carry on. ··· 1919 1922 if (ret) 1920 1923 DRM_ERROR("MOCS failed to program: expect performance issues.\n"); 1921 1924 1922 - return i915_gem_render_state_emit(req); 1925 + return i915_gem_render_state_emit(rq); 1923 1926 } 1924 1927 1925 1928 /**
+14 -14
drivers/gpu/drm/i915/intel_mocs.c
··· 265 265 266 266 /** 267 267 * emit_mocs_control_table() - emit the mocs control table 268 - * @req: Request to set up the MOCS table for. 268 + * @rq: Request to set up the MOCS table for. 269 269 * @table: The values to program into the control regs. 270 270 * 271 271 * This function simply emits a MI_LOAD_REGISTER_IMM command for the ··· 273 273 * 274 274 * Return: 0 on success, otherwise the error status. 275 275 */ 276 - static int emit_mocs_control_table(struct drm_i915_gem_request *req, 276 + static int emit_mocs_control_table(struct i915_request *rq, 277 277 const struct drm_i915_mocs_table *table) 278 278 { 279 - enum intel_engine_id engine = req->engine->id; 279 + enum intel_engine_id engine = rq->engine->id; 280 280 unsigned int index; 281 281 u32 *cs; 282 282 283 283 if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) 284 284 return -ENODEV; 285 285 286 - cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); 286 + cs = intel_ring_begin(rq, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); 287 287 if (IS_ERR(cs)) 288 288 return PTR_ERR(cs); 289 289 ··· 308 308 } 309 309 310 310 *cs++ = MI_NOOP; 311 - intel_ring_advance(req, cs); 311 + intel_ring_advance(rq, cs); 312 312 313 313 return 0; 314 314 } ··· 323 323 324 324 /** 325 325 * emit_mocs_l3cc_table() - emit the mocs control table 326 - * @req: Request to set up the MOCS table for. 326 + * @rq: Request to set up the MOCS table for. 327 327 * @table: The values to program into the control regs. 328 328 * 329 329 * This function simply emits a MI_LOAD_REGISTER_IMM command for the ··· 332 332 * 333 333 * Return: 0 on success, otherwise the error status. 
334 334 */ 335 - static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, 335 + static int emit_mocs_l3cc_table(struct i915_request *rq, 336 336 const struct drm_i915_mocs_table *table) 337 337 { 338 338 unsigned int i; ··· 341 341 if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) 342 342 return -ENODEV; 343 343 344 - cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); 344 + cs = intel_ring_begin(rq, 2 + GEN9_NUM_MOCS_ENTRIES); 345 345 if (IS_ERR(cs)) 346 346 return PTR_ERR(cs); 347 347 ··· 370 370 } 371 371 372 372 *cs++ = MI_NOOP; 373 - intel_ring_advance(req, cs); 373 + intel_ring_advance(rq, cs); 374 374 375 375 return 0; 376 376 } ··· 417 417 418 418 /** 419 419 * intel_rcs_context_init_mocs() - program the MOCS register. 420 - * @req: Request to set up the MOCS tables for. 420 + * @rq: Request to set up the MOCS tables for. 421 421 * 422 422 * This function will emit a batch buffer with the values required for 423 423 * programming the MOCS register values for all the currently supported ··· 431 431 * 432 432 * Return: 0 on success, otherwise the error status. 433 433 */ 434 - int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req) 434 + int intel_rcs_context_init_mocs(struct i915_request *rq) 435 435 { 436 436 struct drm_i915_mocs_table t; 437 437 int ret; 438 438 439 - if (get_mocs_settings(req->i915, &t)) { 439 + if (get_mocs_settings(rq->i915, &t)) { 440 440 /* Program the RCS control registers */ 441 - ret = emit_mocs_control_table(req, &t); 441 + ret = emit_mocs_control_table(rq, &t); 442 442 if (ret) 443 443 return ret; 444 444 445 445 /* Now program the l3cc registers */ 446 - ret = emit_mocs_l3cc_table(req, &t); 446 + ret = emit_mocs_l3cc_table(rq, &t); 447 447 if (ret) 448 448 return ret; 449 449 }
+1 -1
drivers/gpu/drm/i915/intel_mocs.h
··· 52 52 #include <drm/drmP.h> 53 53 #include "i915_drv.h" 54 54 55 - int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req); 55 + int intel_rcs_context_init_mocs(struct i915_request *rq); 56 56 void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv); 57 57 int intel_mocs_init_engine(struct intel_engine_cs *engine); 58 58
+41 -41
drivers/gpu/drm/i915/intel_overlay.c
··· 234 234 } 235 235 236 236 static void intel_overlay_submit_request(struct intel_overlay *overlay, 237 - struct drm_i915_gem_request *req, 237 + struct i915_request *rq, 238 238 i915_gem_retire_fn retire) 239 239 { 240 240 GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip, 241 241 &overlay->i915->drm.struct_mutex)); 242 242 i915_gem_active_set_retire_fn(&overlay->last_flip, retire, 243 243 &overlay->i915->drm.struct_mutex); 244 - i915_gem_active_set(&overlay->last_flip, req); 245 - i915_add_request(req); 244 + i915_gem_active_set(&overlay->last_flip, rq); 245 + i915_request_add(rq); 246 246 } 247 247 248 248 static int intel_overlay_do_wait_request(struct intel_overlay *overlay, 249 - struct drm_i915_gem_request *req, 249 + struct i915_request *rq, 250 250 i915_gem_retire_fn retire) 251 251 { 252 - intel_overlay_submit_request(overlay, req, retire); 252 + intel_overlay_submit_request(overlay, rq, retire); 253 253 return i915_gem_active_retire(&overlay->last_flip, 254 254 &overlay->i915->drm.struct_mutex); 255 255 } 256 256 257 - static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay) 257 + static struct i915_request *alloc_request(struct intel_overlay *overlay) 258 258 { 259 259 struct drm_i915_private *dev_priv = overlay->i915; 260 260 struct intel_engine_cs *engine = dev_priv->engine[RCS]; 261 261 262 - return i915_gem_request_alloc(engine, dev_priv->kernel_context); 262 + return i915_request_alloc(engine, dev_priv->kernel_context); 263 263 } 264 264 265 265 /* overlay needs to be disable in OCMD reg */ 266 266 static int intel_overlay_on(struct intel_overlay *overlay) 267 267 { 268 268 struct drm_i915_private *dev_priv = overlay->i915; 269 - struct drm_i915_gem_request *req; 269 + struct i915_request *rq; 270 270 u32 *cs; 271 271 272 272 WARN_ON(overlay->active); 273 273 274 - req = alloc_request(overlay); 275 - if (IS_ERR(req)) 276 - return PTR_ERR(req); 274 + rq = alloc_request(overlay); 275 + if (IS_ERR(rq)) 276 + return 
PTR_ERR(rq); 277 277 278 - cs = intel_ring_begin(req, 4); 278 + cs = intel_ring_begin(rq, 4); 279 279 if (IS_ERR(cs)) { 280 - i915_add_request(req); 280 + i915_request_add(rq); 281 281 return PTR_ERR(cs); 282 282 } 283 283 ··· 290 290 *cs++ = overlay->flip_addr | OFC_UPDATE; 291 291 *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; 292 292 *cs++ = MI_NOOP; 293 - intel_ring_advance(req, cs); 293 + intel_ring_advance(rq, cs); 294 294 295 - return intel_overlay_do_wait_request(overlay, req, NULL); 295 + return intel_overlay_do_wait_request(overlay, rq, NULL); 296 296 } 297 297 298 298 static void intel_overlay_flip_prepare(struct intel_overlay *overlay, ··· 322 322 bool load_polyphase_filter) 323 323 { 324 324 struct drm_i915_private *dev_priv = overlay->i915; 325 - struct drm_i915_gem_request *req; 325 + struct i915_request *rq; 326 326 u32 flip_addr = overlay->flip_addr; 327 327 u32 tmp, *cs; 328 328 ··· 336 336 if (tmp & (1 << 17)) 337 337 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); 338 338 339 - req = alloc_request(overlay); 340 - if (IS_ERR(req)) 341 - return PTR_ERR(req); 339 + rq = alloc_request(overlay); 340 + if (IS_ERR(rq)) 341 + return PTR_ERR(rq); 342 342 343 - cs = intel_ring_begin(req, 2); 343 + cs = intel_ring_begin(rq, 2); 344 344 if (IS_ERR(cs)) { 345 - i915_add_request(req); 345 + i915_request_add(rq); 346 346 return PTR_ERR(cs); 347 347 } 348 348 349 349 *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE; 350 350 *cs++ = flip_addr; 351 - intel_ring_advance(req, cs); 351 + intel_ring_advance(rq, cs); 352 352 353 353 intel_overlay_flip_prepare(overlay, vma); 354 354 355 - intel_overlay_submit_request(overlay, req, NULL); 355 + intel_overlay_submit_request(overlay, rq, NULL); 356 356 357 357 return 0; 358 358 } ··· 373 373 } 374 374 375 375 static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active, 376 - struct drm_i915_gem_request *req) 376 + struct i915_request *rq) 377 377 { 378 378 struct intel_overlay *overlay = 379 379 
container_of(active, typeof(*overlay), last_flip); ··· 382 382 } 383 383 384 384 static void intel_overlay_off_tail(struct i915_gem_active *active, 385 - struct drm_i915_gem_request *req) 385 + struct i915_request *rq) 386 386 { 387 387 struct intel_overlay *overlay = 388 388 container_of(active, typeof(*overlay), last_flip); ··· 401 401 /* overlay needs to be disabled in OCMD reg */ 402 402 static int intel_overlay_off(struct intel_overlay *overlay) 403 403 { 404 - struct drm_i915_gem_request *req; 404 + struct i915_request *rq; 405 405 u32 *cs, flip_addr = overlay->flip_addr; 406 406 407 407 WARN_ON(!overlay->active); ··· 412 412 * of the hw. Do it in both cases */ 413 413 flip_addr |= OFC_UPDATE; 414 414 415 - req = alloc_request(overlay); 416 - if (IS_ERR(req)) 417 - return PTR_ERR(req); 415 + rq = alloc_request(overlay); 416 + if (IS_ERR(rq)) 417 + return PTR_ERR(rq); 418 418 419 - cs = intel_ring_begin(req, 6); 419 + cs = intel_ring_begin(rq, 6); 420 420 if (IS_ERR(cs)) { 421 - i915_add_request(req); 421 + i915_request_add(rq); 422 422 return PTR_ERR(cs); 423 423 } 424 424 ··· 432 432 *cs++ = flip_addr; 433 433 *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; 434 434 435 - intel_ring_advance(req, cs); 435 + intel_ring_advance(rq, cs); 436 436 437 437 intel_overlay_flip_prepare(overlay, NULL); 438 438 439 - return intel_overlay_do_wait_request(overlay, req, 439 + return intel_overlay_do_wait_request(overlay, rq, 440 440 intel_overlay_off_tail); 441 441 } 442 442 ··· 468 468 469 469 if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { 470 470 /* synchronous slowpath */ 471 - struct drm_i915_gem_request *req; 471 + struct i915_request *rq; 472 472 473 - req = alloc_request(overlay); 474 - if (IS_ERR(req)) 475 - return PTR_ERR(req); 473 + rq = alloc_request(overlay); 474 + if (IS_ERR(rq)) 475 + return PTR_ERR(rq); 476 476 477 - cs = intel_ring_begin(req, 2); 477 + cs = intel_ring_begin(rq, 2); 478 478 if (IS_ERR(cs)) { 479 - 
i915_add_request(req); 479 + i915_request_add(rq); 480 480 return PTR_ERR(cs); 481 481 } 482 482 483 483 *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP; 484 484 *cs++ = MI_NOOP; 485 - intel_ring_advance(req, cs); 485 + intel_ring_advance(rq, cs); 486 486 487 - ret = intel_overlay_do_wait_request(overlay, req, 487 + ret = intel_overlay_do_wait_request(overlay, rq, 488 488 intel_overlay_release_old_vid_tail); 489 489 if (ret) 490 490 return ret;
+2 -2
drivers/gpu/drm/i915/intel_pm.c
··· 6360 6360 mutex_unlock(&dev_priv->pcu_lock); 6361 6361 } 6362 6362 6363 - void gen6_rps_boost(struct drm_i915_gem_request *rq, 6363 + void gen6_rps_boost(struct i915_request *rq, 6364 6364 struct intel_rps_client *rps_client) 6365 6365 { 6366 6366 struct intel_rps *rps = &rq->i915->gt_pm.rps; ··· 6376 6376 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) 6377 6377 return; 6378 6378 6379 - /* Serializes with i915_gem_request_retire() */ 6379 + /* Serializes with i915_request_retire() */ 6380 6380 boost = false; 6381 6381 spin_lock_irqsave(&rq->lock, flags); 6382 6382 if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
+96 -100
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 66 66 } 67 67 68 68 static int 69 - gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 69 + gen2_render_ring_flush(struct i915_request *rq, u32 mode) 70 70 { 71 71 u32 cmd, *cs; 72 72 ··· 75 75 if (mode & EMIT_INVALIDATE) 76 76 cmd |= MI_READ_FLUSH; 77 77 78 - cs = intel_ring_begin(req, 2); 78 + cs = intel_ring_begin(rq, 2); 79 79 if (IS_ERR(cs)) 80 80 return PTR_ERR(cs); 81 81 82 82 *cs++ = cmd; 83 83 *cs++ = MI_NOOP; 84 - intel_ring_advance(req, cs); 84 + intel_ring_advance(rq, cs); 85 85 86 86 return 0; 87 87 } 88 88 89 89 static int 90 - gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 90 + gen4_render_ring_flush(struct i915_request *rq, u32 mode) 91 91 { 92 92 u32 cmd, *cs; 93 93 ··· 122 122 cmd = MI_FLUSH; 123 123 if (mode & EMIT_INVALIDATE) { 124 124 cmd |= MI_EXE_FLUSH; 125 - if (IS_G4X(req->i915) || IS_GEN5(req->i915)) 125 + if (IS_G4X(rq->i915) || IS_GEN5(rq->i915)) 126 126 cmd |= MI_INVALIDATE_ISP; 127 127 } 128 128 129 - cs = intel_ring_begin(req, 2); 129 + cs = intel_ring_begin(rq, 2); 130 130 if (IS_ERR(cs)) 131 131 return PTR_ERR(cs); 132 132 133 133 *cs++ = cmd; 134 134 *cs++ = MI_NOOP; 135 - intel_ring_advance(req, cs); 135 + intel_ring_advance(rq, cs); 136 136 137 137 return 0; 138 138 } ··· 175 175 * really our business. That leaves only stall at scoreboard. 
176 176 */ 177 177 static int 178 - intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) 178 + intel_emit_post_sync_nonzero_flush(struct i915_request *rq) 179 179 { 180 180 u32 scratch_addr = 181 - i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; 181 + i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; 182 182 u32 *cs; 183 183 184 - cs = intel_ring_begin(req, 6); 184 + cs = intel_ring_begin(rq, 6); 185 185 if (IS_ERR(cs)) 186 186 return PTR_ERR(cs); 187 187 ··· 191 191 *cs++ = 0; /* low dword */ 192 192 *cs++ = 0; /* high dword */ 193 193 *cs++ = MI_NOOP; 194 - intel_ring_advance(req, cs); 194 + intel_ring_advance(rq, cs); 195 195 196 - cs = intel_ring_begin(req, 6); 196 + cs = intel_ring_begin(rq, 6); 197 197 if (IS_ERR(cs)) 198 198 return PTR_ERR(cs); 199 199 ··· 203 203 *cs++ = 0; 204 204 *cs++ = 0; 205 205 *cs++ = MI_NOOP; 206 - intel_ring_advance(req, cs); 206 + intel_ring_advance(rq, cs); 207 207 208 208 return 0; 209 209 } 210 210 211 211 static int 212 - gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 212 + gen6_render_ring_flush(struct i915_request *rq, u32 mode) 213 213 { 214 214 u32 scratch_addr = 215 - i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; 215 + i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; 216 216 u32 *cs, flags = 0; 217 217 int ret; 218 218 219 219 /* Force SNB workarounds for PIPE_CONTROL flushes */ 220 - ret = intel_emit_post_sync_nonzero_flush(req); 220 + ret = intel_emit_post_sync_nonzero_flush(rq); 221 221 if (ret) 222 222 return ret; 223 223 ··· 247 247 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; 248 248 } 249 249 250 - cs = intel_ring_begin(req, 4); 250 + cs = intel_ring_begin(rq, 4); 251 251 if (IS_ERR(cs)) 252 252 return PTR_ERR(cs); 253 253 ··· 255 255 *cs++ = flags; 256 256 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; 257 257 *cs++ = 0; 258 - intel_ring_advance(req, cs); 258 + intel_ring_advance(rq, cs); 259 259 260 260 return 0; 
261 261 } 262 262 263 263 static int 264 - gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) 264 + gen7_render_ring_cs_stall_wa(struct i915_request *rq) 265 265 { 266 266 u32 *cs; 267 267 268 - cs = intel_ring_begin(req, 4); 268 + cs = intel_ring_begin(rq, 4); 269 269 if (IS_ERR(cs)) 270 270 return PTR_ERR(cs); 271 271 ··· 273 273 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; 274 274 *cs++ = 0; 275 275 *cs++ = 0; 276 - intel_ring_advance(req, cs); 276 + intel_ring_advance(rq, cs); 277 277 278 278 return 0; 279 279 } 280 280 281 281 static int 282 - gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode) 282 + gen7_render_ring_flush(struct i915_request *rq, u32 mode) 283 283 { 284 284 u32 scratch_addr = 285 - i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES; 285 + i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES; 286 286 u32 *cs, flags = 0; 287 287 288 288 /* ··· 324 324 /* Workaround: we must issue a pipe_control with CS-stall bit 325 325 * set before a pipe_control command that has the state cache 326 326 * invalidate bit set. 
*/ 327 - gen7_render_ring_cs_stall_wa(req); 327 + gen7_render_ring_cs_stall_wa(rq); 328 328 } 329 329 330 - cs = intel_ring_begin(req, 4); 330 + cs = intel_ring_begin(rq, 4); 331 331 if (IS_ERR(cs)) 332 332 return PTR_ERR(cs); 333 333 ··· 335 335 *cs++ = flags; 336 336 *cs++ = scratch_addr; 337 337 *cs++ = 0; 338 - intel_ring_advance(req, cs); 338 + intel_ring_advance(rq, cs); 339 339 340 340 return 0; 341 341 } ··· 531 531 } 532 532 533 533 static void reset_ring_common(struct intel_engine_cs *engine, 534 - struct drm_i915_gem_request *request) 534 + struct i915_request *request) 535 535 { 536 536 /* 537 537 * RC6 must be prevented until the reset is complete and the engine ··· 595 595 } 596 596 } 597 597 598 - static int intel_rcs_ctx_init(struct drm_i915_gem_request *req) 598 + static int intel_rcs_ctx_init(struct i915_request *rq) 599 599 { 600 600 int ret; 601 601 602 - ret = intel_ring_workarounds_emit(req); 602 + ret = intel_ring_workarounds_emit(rq); 603 603 if (ret != 0) 604 604 return ret; 605 605 606 - ret = i915_gem_render_state_emit(req); 606 + ret = i915_gem_render_state_emit(rq); 607 607 if (ret) 608 608 return ret; 609 609 ··· 661 661 return init_workarounds_ring(engine); 662 662 } 663 663 664 - static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs) 664 + static u32 *gen6_signal(struct i915_request *rq, u32 *cs) 665 665 { 666 - struct drm_i915_private *dev_priv = req->i915; 666 + struct drm_i915_private *dev_priv = rq->i915; 667 667 struct intel_engine_cs *engine; 668 668 enum intel_engine_id id; 669 669 int num_rings = 0; ··· 674 674 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK)) 675 675 continue; 676 676 677 - mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id]; 677 + mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id]; 678 678 if (i915_mmio_reg_valid(mbox_reg)) { 679 679 *cs++ = MI_LOAD_REGISTER_IMM(1); 680 680 *cs++ = i915_mmio_reg_offset(mbox_reg); 681 - *cs++ = req->global_seqno; 681 + *cs++ = rq->global_seqno; 
682 682 num_rings++; 683 683 } 684 684 } ··· 690 690 691 691 static void cancel_requests(struct intel_engine_cs *engine) 692 692 { 693 - struct drm_i915_gem_request *request; 693 + struct i915_request *request; 694 694 unsigned long flags; 695 695 696 696 spin_lock_irqsave(&engine->timeline->lock, flags); ··· 698 698 /* Mark all submitted requests as skipped. */ 699 699 list_for_each_entry(request, &engine->timeline->requests, link) { 700 700 GEM_BUG_ON(!request->global_seqno); 701 - if (!i915_gem_request_completed(request)) 701 + if (!i915_request_completed(request)) 702 702 dma_fence_set_error(&request->fence, -EIO); 703 703 } 704 704 /* Remaining _unready_ requests will be nop'ed when submitted */ ··· 706 706 spin_unlock_irqrestore(&engine->timeline->lock, flags); 707 707 } 708 708 709 - static void i9xx_submit_request(struct drm_i915_gem_request *request) 709 + static void i9xx_submit_request(struct i915_request *request) 710 710 { 711 711 struct drm_i915_private *dev_priv = request->i915; 712 712 713 - i915_gem_request_submit(request); 713 + i915_request_submit(request); 714 714 715 715 I915_WRITE_TAIL(request->engine, 716 716 intel_ring_set_tail(request->ring, request->tail)); 717 717 } 718 718 719 - static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) 719 + static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) 720 720 { 721 721 *cs++ = MI_STORE_DWORD_INDEX; 722 722 *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT; 723 - *cs++ = req->global_seqno; 723 + *cs++ = rq->global_seqno; 724 724 *cs++ = MI_USER_INTERRUPT; 725 725 726 - req->tail = intel_ring_offset(req, cs); 727 - assert_ring_tail_valid(req->ring, req->tail); 726 + rq->tail = intel_ring_offset(rq, cs); 727 + assert_ring_tail_valid(rq->ring, rq->tail); 728 728 } 729 729 730 730 static const int i9xx_emit_breadcrumb_sz = 4; 731 731 732 - static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) 732 + static void 
gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs) 733 733 { 734 - return i9xx_emit_breadcrumb(req, 735 - req->engine->semaphore.signal(req, cs)); 734 + return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs)); 736 735 } 737 736 738 737 static int 739 - gen6_ring_sync_to(struct drm_i915_gem_request *req, 740 - struct drm_i915_gem_request *signal) 738 + gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal) 741 739 { 742 740 u32 dw1 = MI_SEMAPHORE_MBOX | 743 741 MI_SEMAPHORE_COMPARE | 744 742 MI_SEMAPHORE_REGISTER; 745 - u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id]; 743 + u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id]; 746 744 u32 *cs; 747 745 748 746 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID); 749 747 750 - cs = intel_ring_begin(req, 4); 748 + cs = intel_ring_begin(rq, 4); 751 749 if (IS_ERR(cs)) 752 750 return PTR_ERR(cs); 753 751 ··· 757 759 *cs++ = signal->global_seqno - 1; 758 760 *cs++ = 0; 759 761 *cs++ = MI_NOOP; 760 - intel_ring_advance(req, cs); 762 + intel_ring_advance(rq, cs); 761 763 762 764 return 0; 763 765 } ··· 856 858 } 857 859 858 860 static int 859 - bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode) 861 + bsd_ring_flush(struct i915_request *rq, u32 mode) 860 862 { 861 863 u32 *cs; 862 864 863 - cs = intel_ring_begin(req, 2); 865 + cs = intel_ring_begin(rq, 2); 864 866 if (IS_ERR(cs)) 865 867 return PTR_ERR(cs); 866 868 867 869 *cs++ = MI_FLUSH; 868 870 *cs++ = MI_NOOP; 869 - intel_ring_advance(req, cs); 871 + intel_ring_advance(rq, cs); 870 872 return 0; 871 873 } 872 874 ··· 909 911 } 910 912 911 913 static int 912 - i965_emit_bb_start(struct drm_i915_gem_request *req, 914 + i965_emit_bb_start(struct i915_request *rq, 913 915 u64 offset, u32 length, 914 916 unsigned int dispatch_flags) 915 917 { 916 918 u32 *cs; 917 919 918 - cs = intel_ring_begin(req, 2); 920 + cs = intel_ring_begin(rq, 2); 919 921 if (IS_ERR(cs)) 920 922 return PTR_ERR(cs); 
921 923 922 924 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags & 923 925 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965); 924 926 *cs++ = offset; 925 - intel_ring_advance(req, cs); 927 + intel_ring_advance(rq, cs); 926 928 927 929 return 0; 928 930 } ··· 932 934 #define I830_TLB_ENTRIES (2) 933 935 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) 934 936 static int 935 - i830_emit_bb_start(struct drm_i915_gem_request *req, 937 + i830_emit_bb_start(struct i915_request *rq, 936 938 u64 offset, u32 len, 937 939 unsigned int dispatch_flags) 938 940 { 939 - u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch); 941 + u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch); 940 942 941 - cs = intel_ring_begin(req, 6); 943 + cs = intel_ring_begin(rq, 6); 942 944 if (IS_ERR(cs)) 943 945 return PTR_ERR(cs); 944 946 ··· 949 951 *cs++ = cs_offset; 950 952 *cs++ = 0xdeadbeef; 951 953 *cs++ = MI_NOOP; 952 - intel_ring_advance(req, cs); 954 + intel_ring_advance(rq, cs); 953 955 954 956 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) { 955 957 if (len > I830_BATCH_LIMIT) 956 958 return -ENOSPC; 957 959 958 - cs = intel_ring_begin(req, 6 + 2); 960 + cs = intel_ring_begin(rq, 6 + 2); 959 961 if (IS_ERR(cs)) 960 962 return PTR_ERR(cs); 961 963 ··· 972 974 973 975 *cs++ = MI_FLUSH; 974 976 *cs++ = MI_NOOP; 975 - intel_ring_advance(req, cs); 977 + intel_ring_advance(rq, cs); 976 978 977 979 /* ... and execute it. */ 978 980 offset = cs_offset; 979 981 } 980 982 981 - cs = intel_ring_begin(req, 2); 983 + cs = intel_ring_begin(rq, 2); 982 984 if (IS_ERR(cs)) 983 985 return PTR_ERR(cs); 984 986 985 987 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; 986 988 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 
0 : 987 989 MI_BATCH_NON_SECURE); 988 - intel_ring_advance(req, cs); 990 + intel_ring_advance(rq, cs); 989 991 990 992 return 0; 991 993 } 992 994 993 995 static int 994 - i915_emit_bb_start(struct drm_i915_gem_request *req, 996 + i915_emit_bb_start(struct i915_request *rq, 995 997 u64 offset, u32 len, 996 998 unsigned int dispatch_flags) 997 999 { 998 1000 u32 *cs; 999 1001 1000 - cs = intel_ring_begin(req, 2); 1002 + cs = intel_ring_begin(rq, 2); 1001 1003 if (IS_ERR(cs)) 1002 1004 return PTR_ERR(cs); 1003 1005 1004 1006 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; 1005 1007 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : 1006 1008 MI_BATCH_NON_SECURE); 1007 - intel_ring_advance(req, cs); 1009 + intel_ring_advance(rq, cs); 1008 1010 1009 1011 return 0; 1010 1012 } ··· 1375 1377 intel_ring_reset(engine->buffer, 0); 1376 1378 } 1377 1379 1378 - static inline int mi_set_context(struct drm_i915_gem_request *rq, u32 flags) 1380 + static inline int mi_set_context(struct i915_request *rq, u32 flags) 1379 1381 { 1380 1382 struct drm_i915_private *i915 = rq->i915; 1381 1383 struct intel_engine_cs *engine = rq->engine; ··· 1461 1463 return 0; 1462 1464 } 1463 1465 1464 - static int remap_l3(struct drm_i915_gem_request *rq, int slice) 1466 + static int remap_l3(struct i915_request *rq, int slice) 1465 1467 { 1466 1468 u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice]; 1467 1469 int i; ··· 1489 1491 return 0; 1490 1492 } 1491 1493 1492 - static int switch_context(struct drm_i915_gem_request *rq) 1494 + static int switch_context(struct i915_request *rq) 1493 1495 { 1494 1496 struct intel_engine_cs *engine = rq->engine; 1495 1497 struct i915_gem_context *to_ctx = rq->ctx; ··· 1559 1561 return ret; 1560 1562 } 1561 1563 1562 - static int ring_request_alloc(struct drm_i915_gem_request *request) 1564 + static int ring_request_alloc(struct i915_request *request) 1563 1565 { 1564 1566 int ret; 1565 1567 ··· 1585 1587 1586 1588 static noinline int 
wait_for_space(struct intel_ring *ring, unsigned int bytes) 1587 1589 { 1588 - struct drm_i915_gem_request *target; 1590 + struct i915_request *target; 1589 1591 long timeout; 1590 1592 1591 1593 lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex); ··· 1603 1605 if (WARN_ON(&target->ring_link == &ring->request_list)) 1604 1606 return -ENOSPC; 1605 1607 1606 - timeout = i915_wait_request(target, 1608 + timeout = i915_request_wait(target, 1607 1609 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, 1608 1610 MAX_SCHEDULE_TIMEOUT); 1609 1611 if (timeout < 0) 1610 1612 return timeout; 1611 1613 1612 - i915_gem_request_retire_upto(target); 1614 + i915_request_retire_upto(target); 1613 1615 1614 1616 intel_ring_update_space(ring); 1615 1617 GEM_BUG_ON(ring->space < bytes); ··· 1632 1634 return 0; 1633 1635 } 1634 1636 1635 - u32 *intel_ring_begin(struct drm_i915_gem_request *req, 1636 - unsigned int num_dwords) 1637 + u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) 1637 1638 { 1638 - struct intel_ring *ring = req->ring; 1639 + struct intel_ring *ring = rq->ring; 1639 1640 const unsigned int remain_usable = ring->effective_size - ring->emit; 1640 1641 const unsigned int bytes = num_dwords * sizeof(u32); 1641 1642 unsigned int need_wrap = 0; ··· 1644 1647 /* Packets must be qword aligned. */ 1645 1648 GEM_BUG_ON(num_dwords & 1); 1646 1649 1647 - total_bytes = bytes + req->reserved_space; 1650 + total_bytes = bytes + rq->reserved_space; 1648 1651 GEM_BUG_ON(total_bytes > ring->effective_size); 1649 1652 1650 1653 if (unlikely(total_bytes > remain_usable)) { ··· 1665 1668 * wrap and only need to effectively wait for the 1666 1669 * reserved size from the start of ringbuffer. 
1667 1670 */ 1668 - total_bytes = req->reserved_space + remain_actual; 1671 + total_bytes = rq->reserved_space + remain_actual; 1669 1672 } 1670 1673 } 1671 1674 ··· 1679 1682 * overallocation and the assumption is that then we never need 1680 1683 * to wait (which has the risk of failing with EINTR). 1681 1684 * 1682 - * See also i915_gem_request_alloc() and i915_add_request(). 1685 + * See also i915_request_alloc() and i915_request_add(). 1683 1686 */ 1684 - GEM_BUG_ON(!req->reserved_space); 1687 + GEM_BUG_ON(!rq->reserved_space); 1685 1688 1686 1689 ret = wait_for_space(ring, total_bytes); 1687 1690 if (unlikely(ret)) ··· 1710 1713 } 1711 1714 1712 1715 /* Align the ring tail to a cacheline boundary */ 1713 - int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 1716 + int intel_ring_cacheline_align(struct i915_request *rq) 1714 1717 { 1715 - int num_dwords = 1716 - (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 1718 + int num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32); 1717 1719 u32 *cs; 1718 1720 1719 1721 if (num_dwords == 0) 1720 1722 return 0; 1721 1723 1722 - num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; 1723 - cs = intel_ring_begin(req, num_dwords); 1724 + num_dwords = CACHELINE_BYTES / sizeof(u32) - num_dwords; 1725 + cs = intel_ring_begin(rq, num_dwords); 1724 1726 if (IS_ERR(cs)) 1725 1727 return PTR_ERR(cs); 1726 1728 1727 1729 while (num_dwords--) 1728 1730 *cs++ = MI_NOOP; 1729 1731 1730 - intel_ring_advance(req, cs); 1732 + intel_ring_advance(rq, cs); 1731 1733 1732 1734 return 0; 1733 1735 } 1734 1736 1735 - static void gen6_bsd_submit_request(struct drm_i915_gem_request *request) 1737 + static void gen6_bsd_submit_request(struct i915_request *request) 1736 1738 { 1737 1739 struct drm_i915_private *dev_priv = request->i915; 1738 1740 ··· 1768 1772 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1769 1773 } 1770 1774 1771 - static int gen6_bsd_ring_flush(struct drm_i915_gem_request 
*req, u32 mode) 1775 + static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) 1772 1776 { 1773 1777 u32 cmd, *cs; 1774 1778 1775 - cs = intel_ring_begin(req, 4); 1779 + cs = intel_ring_begin(rq, 4); 1776 1780 if (IS_ERR(cs)) 1777 1781 return PTR_ERR(cs); 1778 1782 ··· 1798 1802 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; 1799 1803 *cs++ = 0; 1800 1804 *cs++ = MI_NOOP; 1801 - intel_ring_advance(req, cs); 1805 + intel_ring_advance(rq, cs); 1802 1806 return 0; 1803 1807 } 1804 1808 1805 1809 static int 1806 - hsw_emit_bb_start(struct drm_i915_gem_request *req, 1810 + hsw_emit_bb_start(struct i915_request *rq, 1807 1811 u64 offset, u32 len, 1808 1812 unsigned int dispatch_flags) 1809 1813 { 1810 1814 u32 *cs; 1811 1815 1812 - cs = intel_ring_begin(req, 2); 1816 + cs = intel_ring_begin(rq, 2); 1813 1817 if (IS_ERR(cs)) 1814 1818 return PTR_ERR(cs); 1815 1819 ··· 1819 1823 MI_BATCH_RESOURCE_STREAMER : 0); 1820 1824 /* bit0-7 is the length on GEN6+ */ 1821 1825 *cs++ = offset; 1822 - intel_ring_advance(req, cs); 1826 + intel_ring_advance(rq, cs); 1823 1827 1824 1828 return 0; 1825 1829 } 1826 1830 1827 1831 static int 1828 - gen6_emit_bb_start(struct drm_i915_gem_request *req, 1832 + gen6_emit_bb_start(struct i915_request *rq, 1829 1833 u64 offset, u32 len, 1830 1834 unsigned int dispatch_flags) 1831 1835 { 1832 1836 u32 *cs; 1833 1837 1834 - cs = intel_ring_begin(req, 2); 1838 + cs = intel_ring_begin(rq, 2); 1835 1839 if (IS_ERR(cs)) 1836 1840 return PTR_ERR(cs); 1837 1841 ··· 1839 1843 0 : MI_BATCH_NON_SECURE_I965); 1840 1844 /* bit0-7 is the length on GEN6+ */ 1841 1845 *cs++ = offset; 1842 - intel_ring_advance(req, cs); 1846 + intel_ring_advance(rq, cs); 1843 1847 1844 1848 return 0; 1845 1849 } 1846 1850 1847 1851 /* Blitter support (SandyBridge+) */ 1848 1852 1849 - static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode) 1853 + static int gen6_ring_flush(struct i915_request *rq, u32 mode) 1850 1854 { 1851 1855 u32 cmd, *cs; 
1852 1856 1853 - cs = intel_ring_begin(req, 4); 1857 + cs = intel_ring_begin(rq, 4); 1854 1858 if (IS_ERR(cs)) 1855 1859 return PTR_ERR(cs); 1856 1860 ··· 1875 1879 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; 1876 1880 *cs++ = 0; 1877 1881 *cs++ = MI_NOOP; 1878 - intel_ring_advance(req, cs); 1882 + intel_ring_advance(rq, cs); 1879 1883 1880 1884 return 0; 1881 1885 }
+36 -42
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 3 3 #define _INTEL_RINGBUFFER_H_ 4 4 5 5 #include <linux/hashtable.h> 6 + 6 7 #include "i915_gem_batch_pool.h" 7 - #include "i915_gem_request.h" 8 8 #include "i915_gem_timeline.h" 9 + 9 10 #include "i915_pmu.h" 11 + #include "i915_request.h" 10 12 #include "i915_selftest.h" 11 13 12 14 struct drm_printer; ··· 117 115 unsigned long action_timestamp; 118 116 int deadlock; 119 117 struct intel_instdone instdone; 120 - struct drm_i915_gem_request *active_request; 118 + struct i915_request *active_request; 121 119 bool stalled; 122 120 }; 123 121 ··· 158 156 struct i915_vma *vma; 159 157 }; 160 158 161 - struct drm_i915_gem_request; 159 + struct i915_request; 162 160 163 161 /* 164 162 * Engine IDs definitions. ··· 220 218 /** 221 219 * @request_count: combined request and submission count 222 220 */ 223 - struct drm_i915_gem_request *request_count; 221 + struct i915_request *request_count; 224 222 #define EXECLIST_COUNT_BITS 2 225 223 #define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) 226 224 #define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) ··· 341 339 struct rb_root waiters; /* sorted by retirement, priority */ 342 340 struct rb_root signals; /* sorted by retirement */ 343 341 struct task_struct *signaler; /* used for fence signalling */ 344 - struct drm_i915_gem_request __rcu *first_signal; 342 + struct i915_request __rcu *first_signal; 345 343 struct timer_list fake_irq; /* used after a missed interrupt */ 346 344 struct timer_list hangcheck; /* detect missed interrupts */ 347 345 ··· 393 391 394 392 int (*init_hw)(struct intel_engine_cs *engine); 395 393 void (*reset_hw)(struct intel_engine_cs *engine, 396 - struct drm_i915_gem_request *req); 394 + struct i915_request *rq); 397 395 398 396 void (*park)(struct intel_engine_cs *engine); 399 397 void (*unpark)(struct intel_engine_cs *engine); ··· 404 402 struct i915_gem_context *ctx); 405 403 void (*context_unpin)(struct intel_engine_cs *engine, 406 404 
struct i915_gem_context *ctx); 407 - int (*request_alloc)(struct drm_i915_gem_request *req); 408 - int (*init_context)(struct drm_i915_gem_request *req); 405 + int (*request_alloc)(struct i915_request *rq); 406 + int (*init_context)(struct i915_request *rq); 409 407 410 - int (*emit_flush)(struct drm_i915_gem_request *request, 411 - u32 mode); 408 + int (*emit_flush)(struct i915_request *request, u32 mode); 412 409 #define EMIT_INVALIDATE BIT(0) 413 410 #define EMIT_FLUSH BIT(1) 414 411 #define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH) 415 - int (*emit_bb_start)(struct drm_i915_gem_request *req, 412 + int (*emit_bb_start)(struct i915_request *rq, 416 413 u64 offset, u32 length, 417 414 unsigned int dispatch_flags); 418 415 #define I915_DISPATCH_SECURE BIT(0) 419 416 #define I915_DISPATCH_PINNED BIT(1) 420 417 #define I915_DISPATCH_RS BIT(2) 421 - void (*emit_breadcrumb)(struct drm_i915_gem_request *req, 422 - u32 *cs); 418 + void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs); 423 419 int emit_breadcrumb_sz; 424 420 425 421 /* Pass the request to the hardware queue (e.g. directly into ··· 426 426 * This is called from an atomic context with irqs disabled; must 427 427 * be irq safe. 428 428 */ 429 - void (*submit_request)(struct drm_i915_gem_request *req); 429 + void (*submit_request)(struct i915_request *rq); 430 430 431 431 /* Call when the priority on a request has changed and it and its 432 432 * dependencies may need rescheduling. Note the request itself may ··· 434 434 * 435 435 * Called under the struct_mutex. 436 436 */ 437 - void (*schedule)(struct drm_i915_gem_request *request, 438 - int priority); 437 + void (*schedule)(struct i915_request *request, int priority); 439 438 440 439 /* 441 440 * Cancel all requests on the hardware, or queued for execution. 
··· 502 503 } mbox; 503 504 504 505 /* AKA wait() */ 505 - int (*sync_to)(struct drm_i915_gem_request *req, 506 - struct drm_i915_gem_request *signal); 507 - u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs); 506 + int (*sync_to)(struct i915_request *rq, 507 + struct i915_request *signal); 508 + u32 *(*signal)(struct i915_request *rq, u32 *cs); 508 509 } semaphore; 509 510 510 511 struct intel_engine_execlists execlists; ··· 725 726 726 727 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv); 727 728 728 - int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req); 729 + int __must_check intel_ring_cacheline_align(struct i915_request *rq); 729 730 730 731 int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes); 731 - u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, 732 - unsigned int n); 732 + u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n); 733 733 734 - static inline void 735 - intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs) 734 + static inline void intel_ring_advance(struct i915_request *rq, u32 *cs) 736 735 { 737 736 /* Dummy function. 738 737 * ··· 740 743 * reserved for the command packet (i.e. the value passed to 741 744 * intel_ring_begin()). 742 745 */ 743 - GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs); 746 + GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs); 744 747 } 745 748 746 - static inline u32 747 - intel_ring_wrap(const struct intel_ring *ring, u32 pos) 749 + static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) 748 750 { 749 751 return pos & (ring->size - 1); 750 752 } 751 753 752 - static inline u32 753 - intel_ring_offset(const struct drm_i915_gem_request *req, void *addr) 754 + static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr) 754 755 { 755 756 /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. 
*/ 756 - u32 offset = addr - req->ring->vaddr; 757 - GEM_BUG_ON(offset > req->ring->size); 758 - return intel_ring_wrap(req->ring, offset); 757 + u32 offset = addr - rq->ring->vaddr; 758 + GEM_BUG_ON(offset > rq->ring->size); 759 + return intel_ring_wrap(rq->ring, offset); 759 760 } 760 761 761 762 static inline void ··· 791 796 { 792 797 /* Whilst writes to the tail are strictly order, there is no 793 798 * serialisation between readers and the writers. The tail may be 794 - * read by i915_gem_request_retire() just as it is being updated 799 + * read by i915_request_retire() just as it is being updated 795 800 * by execlists, as although the breadcrumb is complete, the context 796 801 * switch hasn't been seen. 797 802 */ ··· 833 838 } 834 839 835 840 int init_workarounds_ring(struct intel_engine_cs *engine); 836 - int intel_ring_workarounds_emit(struct drm_i915_gem_request *req); 841 + int intel_ring_workarounds_emit(struct i915_request *rq); 837 842 838 843 void intel_engine_get_instdone(struct intel_engine_cs *engine, 839 844 struct intel_instdone *instdone); ··· 861 866 int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); 862 867 863 868 static inline void intel_wait_init(struct intel_wait *wait, 864 - struct drm_i915_gem_request *rq) 869 + struct i915_request *rq) 865 870 { 866 871 wait->tsk = current; 867 872 wait->request = rq; ··· 887 892 888 893 static inline bool 889 894 intel_wait_update_request(struct intel_wait *wait, 890 - const struct drm_i915_gem_request *rq) 895 + const struct i915_request *rq) 891 896 { 892 - return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq)); 897 + return intel_wait_update_seqno(wait, i915_request_global_seqno(rq)); 893 898 } 894 899 895 900 static inline bool ··· 900 905 901 906 static inline bool 902 907 intel_wait_check_request(const struct intel_wait *wait, 903 - const struct drm_i915_gem_request *rq) 908 + const struct i915_request *rq) 904 909 { 905 - return intel_wait_check_seqno(wait, 
i915_gem_request_global_seqno(rq)); 910 + return intel_wait_check_seqno(wait, i915_request_global_seqno(rq)); 906 911 } 907 912 908 913 static inline bool intel_wait_complete(const struct intel_wait *wait) ··· 914 919 struct intel_wait *wait); 915 920 void intel_engine_remove_wait(struct intel_engine_cs *engine, 916 921 struct intel_wait *wait); 917 - void intel_engine_enable_signaling(struct drm_i915_gem_request *request, 918 - bool wakeup); 919 - void intel_engine_cancel_signaling(struct drm_i915_gem_request *request); 922 + void intel_engine_enable_signaling(struct i915_request *request, bool wakeup); 923 + void intel_engine_cancel_signaling(struct i915_request *request); 920 924 921 925 static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine) 922 926 {
+3 -3
drivers/gpu/drm/i915/selftests/huge_pages.c
··· 964 964 u32 dword, 965 965 u32 value) 966 966 { 967 - struct drm_i915_gem_request *rq; 967 + struct i915_request *rq; 968 968 struct i915_vma *batch; 969 969 int flags = 0; 970 970 int err; ··· 975 975 if (err) 976 976 return err; 977 977 978 - rq = i915_gem_request_alloc(engine, ctx); 978 + rq = i915_request_alloc(engine, ctx); 979 979 if (IS_ERR(rq)) 980 980 return PTR_ERR(rq); 981 981 ··· 1003 1003 reservation_object_unlock(vma->resv); 1004 1004 1005 1005 err_request: 1006 - __i915_add_request(rq, err == 0); 1006 + __i915_request_add(rq, err == 0); 1007 1007 1008 1008 return err; 1009 1009 }
+4 -4
drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
··· 178 178 u32 v) 179 179 { 180 180 struct drm_i915_private *i915 = to_i915(obj->base.dev); 181 - struct drm_i915_gem_request *rq; 181 + struct i915_request *rq; 182 182 struct i915_vma *vma; 183 183 u32 *cs; 184 184 int err; ··· 191 191 if (IS_ERR(vma)) 192 192 return PTR_ERR(vma); 193 193 194 - rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context); 194 + rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context); 195 195 if (IS_ERR(rq)) { 196 196 i915_vma_unpin(vma); 197 197 return PTR_ERR(rq); ··· 199 199 200 200 cs = intel_ring_begin(rq, 4); 201 201 if (IS_ERR(cs)) { 202 - __i915_add_request(rq, false); 202 + __i915_request_add(rq, false); 203 203 i915_vma_unpin(vma); 204 204 return PTR_ERR(cs); 205 205 } ··· 229 229 reservation_object_add_excl_fence(obj->resv, &rq->fence); 230 230 reservation_object_unlock(obj->resv); 231 231 232 - __i915_add_request(rq, true); 232 + __i915_request_add(rq, true); 233 233 234 234 return 0; 235 235 }
+4 -4
drivers/gpu/drm/i915/selftests/i915_gem_context.c
··· 114 114 struct drm_i915_private *i915 = to_i915(obj->base.dev); 115 115 struct i915_address_space *vm = 116 116 ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; 117 - struct drm_i915_gem_request *rq; 117 + struct i915_request *rq; 118 118 struct i915_vma *vma; 119 119 struct i915_vma *batch; 120 120 unsigned int flags; ··· 152 152 goto err_vma; 153 153 } 154 154 155 - rq = i915_gem_request_alloc(engine, ctx); 155 + rq = i915_request_alloc(engine, ctx); 156 156 if (IS_ERR(rq)) { 157 157 err = PTR_ERR(rq); 158 158 goto err_batch; ··· 180 180 reservation_object_add_excl_fence(obj->resv, &rq->fence); 181 181 reservation_object_unlock(obj->resv); 182 182 183 - __i915_add_request(rq, true); 183 + __i915_request_add(rq, true); 184 184 185 185 return 0; 186 186 187 187 err_request: 188 - __i915_add_request(rq, false); 188 + __i915_request_add(rq, false); 189 189 err_batch: 190 190 i915_vma_unpin(batch); 191 191 err_vma:
+3 -3
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
··· 407 407 mutex_lock(&i915->drm.struct_mutex); 408 408 onstack_fence_init(&fence); 409 409 do { 410 - struct drm_i915_gem_request *rq; 410 + struct i915_request *rq; 411 411 struct i915_gem_context *ctx; 412 412 413 413 ctx = live_context(i915, file); ··· 416 416 417 417 /* We will need some GGTT space for the rq's context */ 418 418 igt_evict_ctl.fail_if_busy = true; 419 - rq = i915_gem_request_alloc(engine, ctx); 419 + rq = i915_request_alloc(engine, ctx); 420 420 igt_evict_ctl.fail_if_busy = false; 421 421 422 422 if (IS_ERR(rq)) { ··· 437 437 if (err < 0) 438 438 break; 439 439 440 - i915_add_request(rq); 440 + i915_request_add(rq); 441 441 count++; 442 442 err = 0; 443 443 } while(1);
+3 -3
drivers/gpu/drm/i915/selftests/i915_gem_object.c
··· 436 436 static int make_obj_busy(struct drm_i915_gem_object *obj) 437 437 { 438 438 struct drm_i915_private *i915 = to_i915(obj->base.dev); 439 - struct drm_i915_gem_request *rq; 439 + struct i915_request *rq; 440 440 struct i915_vma *vma; 441 441 int err; 442 442 ··· 448 448 if (err) 449 449 return err; 450 450 451 - rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context); 451 + rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context); 452 452 if (IS_ERR(rq)) { 453 453 i915_vma_unpin(vma); 454 454 return PTR_ERR(rq); 455 455 } 456 456 457 457 i915_vma_move_to_active(vma, rq, 0); 458 - i915_add_request(rq); 458 + i915_request_add(rq); 459 459 460 460 i915_gem_object_set_active_reference(obj); 461 461 i915_vma_unpin(vma);
+61 -64
drivers/gpu/drm/i915/selftests/i915_gem_request.c drivers/gpu/drm/i915/selftests/i915_request.c
··· 32 32 static int igt_add_request(void *arg) 33 33 { 34 34 struct drm_i915_private *i915 = arg; 35 - struct drm_i915_gem_request *request; 35 + struct i915_request *request; 36 36 int err = -ENOMEM; 37 37 38 38 /* Basic preliminary test to create a request and let it loose! */ ··· 44 44 if (!request) 45 45 goto out_unlock; 46 46 47 - i915_add_request(request); 47 + i915_request_add(request); 48 48 49 49 err = 0; 50 50 out_unlock: ··· 56 56 { 57 57 const long T = HZ / 4; 58 58 struct drm_i915_private *i915 = arg; 59 - struct drm_i915_gem_request *request; 59 + struct i915_request *request; 60 60 int err = -EINVAL; 61 61 62 62 /* Submit a request, then wait upon it */ ··· 68 68 goto out_unlock; 69 69 } 70 70 71 - if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) { 71 + if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) { 72 72 pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n"); 73 73 goto out_unlock; 74 74 } 75 75 76 - if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) { 76 + if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) { 77 77 pr_err("request wait succeeded (expected timeout before submit!)\n"); 78 78 goto out_unlock; 79 79 } 80 80 81 - if (i915_gem_request_completed(request)) { 81 + if (i915_request_completed(request)) { 82 82 pr_err("request completed before submit!!\n"); 83 83 goto out_unlock; 84 84 } 85 85 86 - i915_add_request(request); 86 + i915_request_add(request); 87 87 88 - if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) { 88 + if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) { 89 89 pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n"); 90 90 goto out_unlock; 91 91 } 92 92 93 - if (i915_gem_request_completed(request)) { 93 + if (i915_request_completed(request)) { 94 94 pr_err("request completed immediately!\n"); 95 95 goto out_unlock; 96 96 } 97 97 98 - if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != 
-ETIME) { 98 + if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) { 99 99 pr_err("request wait succeeded (expected timeout!)\n"); 100 100 goto out_unlock; 101 101 } 102 102 103 - if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) { 103 + if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) { 104 104 pr_err("request wait timed out!\n"); 105 105 goto out_unlock; 106 106 } 107 107 108 - if (!i915_gem_request_completed(request)) { 108 + if (!i915_request_completed(request)) { 109 109 pr_err("request not complete after waiting!\n"); 110 110 goto out_unlock; 111 111 } 112 112 113 - if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) { 113 + if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) { 114 114 pr_err("request wait timed out when already complete!\n"); 115 115 goto out_unlock; 116 116 } ··· 126 126 { 127 127 const long T = HZ / 4; 128 128 struct drm_i915_private *i915 = arg; 129 - struct drm_i915_gem_request *request; 129 + struct i915_request *request; 130 130 int err = -EINVAL; 131 131 132 132 /* Submit a request, treat it as a fence and wait upon it */ ··· 145 145 } 146 146 147 147 mutex_lock(&i915->drm.struct_mutex); 148 - i915_add_request(request); 148 + i915_request_add(request); 149 149 mutex_unlock(&i915->drm.struct_mutex); 150 150 151 151 if (dma_fence_is_signaled(&request->fence)) { ··· 185 185 static int igt_request_rewind(void *arg) 186 186 { 187 187 struct drm_i915_private *i915 = arg; 188 - struct drm_i915_gem_request *request, *vip; 188 + struct i915_request *request, *vip; 189 189 struct i915_gem_context *ctx[2]; 190 190 int err = -EINVAL; 191 191 ··· 197 197 goto err_context_0; 198 198 } 199 199 200 - i915_gem_request_get(request); 201 - i915_add_request(request); 200 + i915_request_get(request); 201 + i915_request_add(request); 202 202 203 203 ctx[1] = mock_context(i915, "B"); 204 204 vip = mock_request(i915->engine[RCS], ctx[1], 0); ··· 210 210 /* Simulate preemption by manual 
reordering */ 211 211 if (!mock_cancel_request(request)) { 212 212 pr_err("failed to cancel request (already executed)!\n"); 213 - i915_add_request(vip); 213 + i915_request_add(vip); 214 214 goto err_context_1; 215 215 } 216 - i915_gem_request_get(vip); 217 - i915_add_request(vip); 216 + i915_request_get(vip); 217 + i915_request_add(vip); 218 218 rcu_read_lock(); 219 219 request->engine->submit_request(request); 220 220 rcu_read_unlock(); 221 221 222 222 mutex_unlock(&i915->drm.struct_mutex); 223 223 224 - if (i915_wait_request(vip, 0, HZ) == -ETIME) { 224 + if (i915_request_wait(vip, 0, HZ) == -ETIME) { 225 225 pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n", 226 226 vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS])); 227 227 goto err; 228 228 } 229 229 230 - if (i915_gem_request_completed(request)) { 230 + if (i915_request_completed(request)) { 231 231 pr_err("low priority request already completed\n"); 232 232 goto err; 233 233 } 234 234 235 235 err = 0; 236 236 err: 237 - i915_gem_request_put(vip); 237 + i915_request_put(vip); 238 238 mutex_lock(&i915->drm.struct_mutex); 239 239 err_context_1: 240 240 mock_context_close(ctx[1]); 241 - i915_gem_request_put(request); 241 + i915_request_put(request); 242 242 err_context_0: 243 243 mock_context_close(ctx[0]); 244 244 mock_device_flush(i915); ··· 246 246 return err; 247 247 } 248 248 249 - int i915_gem_request_mock_selftests(void) 249 + int i915_request_mock_selftests(void) 250 250 { 251 251 static const struct i915_subtest tests[] = { 252 252 SUBTEST(igt_add_request), ··· 303 303 { 304 304 struct drm_i915_private *i915 = t->i915; 305 305 306 - i915_gem_retire_requests(i915); 306 + i915_retire_requests(i915); 307 307 308 308 if (wait_for(intel_engines_are_idle(i915), 10)) { 309 309 pr_err("%s(%s): GPU not idle\n", t->func, t->name); ··· 343 343 344 344 for_each_engine(engine, i915, id) { 345 345 IGT_TIMEOUT(end_time); 346 - struct drm_i915_gem_request *request; 346 + 
struct i915_request *request; 347 347 unsigned long n, prime; 348 348 ktime_t times[2] = {}; 349 349 ··· 355 355 times[1] = ktime_get_raw(); 356 356 357 357 for (n = 0; n < prime; n++) { 358 - request = i915_gem_request_alloc(engine, 359 - i915->kernel_context); 358 + request = i915_request_alloc(engine, 359 + i915->kernel_context); 360 360 if (IS_ERR(request)) { 361 361 err = PTR_ERR(request); 362 362 goto out_unlock; ··· 375 375 * for latency. 376 376 */ 377 377 378 - i915_add_request(request); 378 + i915_request_add(request); 379 379 } 380 - i915_wait_request(request, 380 + i915_request_wait(request, 381 381 I915_WAIT_LOCKED, 382 382 MAX_SCHEDULE_TIMEOUT); 383 383 ··· 447 447 return ERR_PTR(err); 448 448 } 449 449 450 - static struct drm_i915_gem_request * 450 + static struct i915_request * 451 451 empty_request(struct intel_engine_cs *engine, 452 452 struct i915_vma *batch) 453 453 { 454 - struct drm_i915_gem_request *request; 454 + struct i915_request *request; 455 455 int err; 456 456 457 - request = i915_gem_request_alloc(engine, 458 - engine->i915->kernel_context); 457 + request = i915_request_alloc(engine, engine->i915->kernel_context); 459 458 if (IS_ERR(request)) 460 459 return request; 461 460 ··· 466 467 goto out_request; 467 468 468 469 out_request: 469 - __i915_add_request(request, err == 0); 470 + __i915_request_add(request, err == 0); 470 471 return err ? 
ERR_PTR(err) : request; 471 472 } 472 473 ··· 494 495 495 496 for_each_engine(engine, i915, id) { 496 497 IGT_TIMEOUT(end_time); 497 - struct drm_i915_gem_request *request; 498 + struct i915_request *request; 498 499 unsigned long n, prime; 499 500 ktime_t times[2] = {}; 500 501 ··· 508 509 err = PTR_ERR(request); 509 510 goto out_batch; 510 511 } 511 - i915_wait_request(request, 512 + i915_request_wait(request, 512 513 I915_WAIT_LOCKED, 513 514 MAX_SCHEDULE_TIMEOUT); 514 515 ··· 522 523 goto out_batch; 523 524 } 524 525 } 525 - i915_wait_request(request, 526 + i915_request_wait(request, 526 527 I915_WAIT_LOCKED, 527 528 MAX_SCHEDULE_TIMEOUT); 528 529 ··· 632 633 { 633 634 struct drm_i915_private *i915 = arg; 634 635 struct intel_engine_cs *engine; 635 - struct drm_i915_gem_request *request[I915_NUM_ENGINES]; 636 + struct i915_request *request[I915_NUM_ENGINES]; 636 637 struct i915_vma *batch; 637 638 struct live_test t; 638 639 unsigned int id; ··· 657 658 } 658 659 659 660 for_each_engine(engine, i915, id) { 660 - request[id] = i915_gem_request_alloc(engine, 661 - i915->kernel_context); 661 + request[id] = i915_request_alloc(engine, i915->kernel_context); 662 662 if (IS_ERR(request[id])) { 663 663 err = PTR_ERR(request[id]); 664 664 pr_err("%s: Request allocation failed with err=%d\n", ··· 678 680 } 679 681 680 682 i915_vma_move_to_active(batch, request[id], 0); 681 - i915_gem_request_get(request[id]); 682 - i915_add_request(request[id]); 683 + i915_request_get(request[id]); 684 + i915_request_add(request[id]); 683 685 } 684 686 685 687 for_each_engine(engine, i915, id) { 686 - if (i915_gem_request_completed(request[id])) { 688 + if (i915_request_completed(request[id])) { 687 689 pr_err("%s(%s): request completed too early!\n", 688 690 __func__, engine->name); 689 691 err = -EINVAL; ··· 700 702 for_each_engine(engine, i915, id) { 701 703 long timeout; 702 704 703 - timeout = i915_wait_request(request[id], 705 + timeout = i915_request_wait(request[id], 704 706 
I915_WAIT_LOCKED, 705 707 MAX_SCHEDULE_TIMEOUT); 706 708 if (timeout < 0) { ··· 710 712 goto out_request; 711 713 } 712 714 713 - GEM_BUG_ON(!i915_gem_request_completed(request[id])); 714 - i915_gem_request_put(request[id]); 715 + GEM_BUG_ON(!i915_request_completed(request[id])); 716 + i915_request_put(request[id]); 715 717 request[id] = NULL; 716 718 } 717 719 ··· 720 722 out_request: 721 723 for_each_engine(engine, i915, id) 722 724 if (request[id]) 723 - i915_gem_request_put(request[id]); 725 + i915_request_put(request[id]); 724 726 i915_vma_unpin(batch); 725 727 i915_vma_put(batch); 726 728 out_unlock: ··· 731 733 static int live_sequential_engines(void *arg) 732 734 { 733 735 struct drm_i915_private *i915 = arg; 734 - struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {}; 735 - struct drm_i915_gem_request *prev = NULL; 736 + struct i915_request *request[I915_NUM_ENGINES] = {}; 737 + struct i915_request *prev = NULL; 736 738 struct intel_engine_cs *engine; 737 739 struct live_test t; 738 740 unsigned int id; ··· 761 763 goto out_unlock; 762 764 } 763 765 764 - request[id] = i915_gem_request_alloc(engine, 765 - i915->kernel_context); 766 + request[id] = i915_request_alloc(engine, i915->kernel_context); 766 767 if (IS_ERR(request[id])) { 767 768 err = PTR_ERR(request[id]); 768 769 pr_err("%s: Request allocation failed for %s with err=%d\n", ··· 770 773 } 771 774 772 775 if (prev) { 773 - err = i915_gem_request_await_dma_fence(request[id], 774 - &prev->fence); 776 + err = i915_request_await_dma_fence(request[id], 777 + &prev->fence); 775 778 if (err) { 776 - i915_add_request(request[id]); 779 + i915_request_add(request[id]); 777 780 pr_err("%s: Request await failed for %s with err=%d\n", 778 781 __func__, engine->name, err); 779 782 goto out_request; ··· 791 794 i915_gem_object_set_active_reference(batch->obj); 792 795 i915_vma_get(batch); 793 796 794 - i915_gem_request_get(request[id]); 795 - i915_add_request(request[id]); 797 + 
i915_request_get(request[id]); 798 + i915_request_add(request[id]); 796 799 797 800 prev = request[id]; 798 801 } ··· 800 803 for_each_engine(engine, i915, id) { 801 804 long timeout; 802 805 803 - if (i915_gem_request_completed(request[id])) { 806 + if (i915_request_completed(request[id])) { 804 807 pr_err("%s(%s): request completed too early!\n", 805 808 __func__, engine->name); 806 809 err = -EINVAL; ··· 814 817 goto out_request; 815 818 } 816 819 817 - timeout = i915_wait_request(request[id], 820 + timeout = i915_request_wait(request[id], 818 821 I915_WAIT_LOCKED, 819 822 MAX_SCHEDULE_TIMEOUT); 820 823 if (timeout < 0) { ··· 824 827 goto out_request; 825 828 } 826 829 827 - GEM_BUG_ON(!i915_gem_request_completed(request[id])); 830 + GEM_BUG_ON(!i915_request_completed(request[id])); 828 831 } 829 832 830 833 err = end_live_test(&t); ··· 846 849 } 847 850 848 851 i915_vma_put(request[id]->batch); 849 - i915_gem_request_put(request[id]); 852 + i915_request_put(request[id]); 850 853 } 851 854 out_unlock: 852 855 mutex_unlock(&i915->drm.struct_mutex); 853 856 return err; 854 857 } 855 858 856 - int i915_gem_request_live_selftests(struct drm_i915_private *i915) 859 + int i915_request_live_selftests(struct drm_i915_private *i915) 857 860 { 858 861 static const struct i915_subtest tests[] = { 859 862 SUBTEST(live_nop_request),
+1 -1
drivers/gpu/drm/i915/selftests/i915_live_selftests.h
··· 11 11 */ 12 12 selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */ 13 13 selftest(uncore, intel_uncore_live_selftests) 14 - selftest(requests, i915_gem_request_live_selftests) 14 + selftest(requests, i915_request_live_selftests) 15 15 selftest(objects, i915_gem_object_live_selftests) 16 16 selftest(dmabuf, i915_gem_dmabuf_live_selftests) 17 17 selftest(coherency, i915_gem_coherency_live_selftests)
+1 -1
drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
··· 16 16 selftest(uncore, intel_uncore_mock_selftests) 17 17 selftest(breadcrumbs, intel_breadcrumbs_mock_selftests) 18 18 selftest(timelines, i915_gem_timeline_mock_selftests) 19 - selftest(requests, i915_gem_request_mock_selftests) 19 + selftest(requests, i915_request_mock_selftests) 20 20 selftest(objects, i915_gem_object_mock_selftests) 21 21 selftest(dmabuf, i915_gem_dmabuf_mock_selftests) 22 22 selftest(vma, i915_vma_mock_selftests)
+58 -59
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
··· 92 92 } 93 93 94 94 static u64 hws_address(const struct i915_vma *hws, 95 - const struct drm_i915_gem_request *rq) 95 + const struct i915_request *rq) 96 96 { 97 97 return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context); 98 98 } 99 99 100 100 static int emit_recurse_batch(struct hang *h, 101 - struct drm_i915_gem_request *rq) 101 + struct i915_request *rq) 102 102 { 103 103 struct drm_i915_private *i915 = h->i915; 104 104 struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base; ··· 204 204 return err; 205 205 } 206 206 207 - static struct drm_i915_gem_request * 207 + static struct i915_request * 208 208 hang_create_request(struct hang *h, struct intel_engine_cs *engine) 209 209 { 210 - struct drm_i915_gem_request *rq; 210 + struct i915_request *rq; 211 211 int err; 212 212 213 213 if (i915_gem_object_is_active(h->obj)) { ··· 232 232 h->batch = vaddr; 233 233 } 234 234 235 - rq = i915_gem_request_alloc(engine, h->ctx); 235 + rq = i915_request_alloc(engine, h->ctx); 236 236 if (IS_ERR(rq)) 237 237 return rq; 238 238 239 239 err = emit_recurse_batch(h, rq); 240 240 if (err) { 241 - __i915_add_request(rq, false); 241 + __i915_request_add(rq, false); 242 242 return ERR_PTR(err); 243 243 } 244 244 245 245 return rq; 246 246 } 247 247 248 - static u32 hws_seqno(const struct hang *h, 249 - const struct drm_i915_gem_request *rq) 248 + static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) 250 249 { 251 250 return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]); 252 251 } ··· 318 319 flush_test(h->i915, I915_WAIT_LOCKED); 319 320 } 320 321 321 - static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq) 322 + static bool wait_for_hang(struct hang *h, struct i915_request *rq) 322 323 { 323 324 return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq), 324 325 rq->fence.seqno), ··· 331 332 static int igt_hang_sanitycheck(void *arg) 332 333 { 333 334 struct drm_i915_private 
*i915 = arg; 334 - struct drm_i915_gem_request *rq; 335 + struct i915_request *rq; 335 336 struct intel_engine_cs *engine; 336 337 enum intel_engine_id id; 337 338 struct hang h; ··· 358 359 goto fini; 359 360 } 360 361 361 - i915_gem_request_get(rq); 362 + i915_request_get(rq); 362 363 363 364 *h.batch = MI_BATCH_BUFFER_END; 364 365 i915_gem_chipset_flush(i915); 365 366 366 - __i915_add_request(rq, true); 367 + __i915_request_add(rq, true); 367 368 368 - timeout = i915_wait_request(rq, 369 + timeout = i915_request_wait(rq, 369 370 I915_WAIT_LOCKED, 370 371 MAX_SCHEDULE_TIMEOUT); 371 - i915_gem_request_put(rq); 372 + i915_request_put(rq); 372 373 373 374 if (timeout < 0) { 374 375 err = timeout; ··· 484 485 set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); 485 486 do { 486 487 if (active) { 487 - struct drm_i915_gem_request *rq; 488 + struct i915_request *rq; 488 489 489 490 mutex_lock(&i915->drm.struct_mutex); 490 491 rq = hang_create_request(&h, engine); ··· 494 495 break; 495 496 } 496 497 497 - i915_gem_request_get(rq); 498 - __i915_add_request(rq, true); 498 + i915_request_get(rq); 499 + __i915_request_add(rq, true); 499 500 mutex_unlock(&i915->drm.struct_mutex); 500 501 501 502 if (!wait_for_hang(&h, rq)) { ··· 506 507 intel_engine_dump(engine, &p, 507 508 "%s\n", engine->name); 508 509 509 - i915_gem_request_put(rq); 510 + i915_request_put(rq); 510 511 err = -EIO; 511 512 break; 512 513 } 513 514 514 - i915_gem_request_put(rq); 515 + i915_request_put(rq); 515 516 } 516 517 517 518 engine->hangcheck.stalled = true; ··· 576 577 static int active_engine(void *data) 577 578 { 578 579 struct intel_engine_cs *engine = data; 579 - struct drm_i915_gem_request *rq[2] = {}; 580 + struct i915_request *rq[2] = {}; 580 581 struct i915_gem_context *ctx[2]; 581 582 struct drm_file *file; 582 583 unsigned long count = 0; ··· 605 606 606 607 while (!kthread_should_stop()) { 607 608 unsigned int idx = count++ & 1; 608 - struct drm_i915_gem_request *old = rq[idx]; 609 - 
struct drm_i915_gem_request *new; 609 + struct i915_request *old = rq[idx]; 610 + struct i915_request *new; 610 611 611 612 mutex_lock(&engine->i915->drm.struct_mutex); 612 - new = i915_gem_request_alloc(engine, ctx[idx]); 613 + new = i915_request_alloc(engine, ctx[idx]); 613 614 if (IS_ERR(new)) { 614 615 mutex_unlock(&engine->i915->drm.struct_mutex); 615 616 err = PTR_ERR(new); 616 617 break; 617 618 } 618 619 619 - rq[idx] = i915_gem_request_get(new); 620 - i915_add_request(new); 620 + rq[idx] = i915_request_get(new); 621 + i915_request_add(new); 621 622 mutex_unlock(&engine->i915->drm.struct_mutex); 622 623 623 624 if (old) { 624 - i915_wait_request(old, 0, MAX_SCHEDULE_TIMEOUT); 625 - i915_gem_request_put(old); 625 + i915_request_wait(old, 0, MAX_SCHEDULE_TIMEOUT); 626 + i915_request_put(old); 626 627 } 627 628 } 628 629 629 630 for (count = 0; count < ARRAY_SIZE(rq); count++) 630 - i915_gem_request_put(rq[count]); 631 + i915_request_put(rq[count]); 631 632 632 633 err_file: 633 634 mock_file_free(engine->i915, file); ··· 691 692 set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); 692 693 do { 693 694 if (active) { 694 - struct drm_i915_gem_request *rq; 695 + struct i915_request *rq; 695 696 696 697 mutex_lock(&i915->drm.struct_mutex); 697 698 rq = hang_create_request(&h, engine); ··· 701 702 break; 702 703 } 703 704 704 - i915_gem_request_get(rq); 705 - __i915_add_request(rq, true); 705 + i915_request_get(rq); 706 + __i915_request_add(rq, true); 706 707 mutex_unlock(&i915->drm.struct_mutex); 707 708 708 709 if (!wait_for_hang(&h, rq)) { ··· 713 714 intel_engine_dump(engine, &p, 714 715 "%s\n", engine->name); 715 716 716 - i915_gem_request_put(rq); 717 + i915_request_put(rq); 717 718 err = -EIO; 718 719 break; 719 720 } 720 721 721 - i915_gem_request_put(rq); 722 + i915_request_put(rq); 722 723 } 723 724 724 725 engine->hangcheck.stalled = true; ··· 813 814 return __igt_reset_engine_others(arg, true); 814 815 } 815 816 816 - static u32 
fake_hangcheck(struct drm_i915_gem_request *rq) 817 + static u32 fake_hangcheck(struct i915_request *rq) 817 818 { 818 819 u32 reset_count; 819 820 ··· 831 832 static int igt_wait_reset(void *arg) 832 833 { 833 834 struct drm_i915_private *i915 = arg; 834 - struct drm_i915_gem_request *rq; 835 + struct i915_request *rq; 835 836 unsigned int reset_count; 836 837 struct hang h; 837 838 long timeout; ··· 855 856 goto fini; 856 857 } 857 858 858 - i915_gem_request_get(rq); 859 - __i915_add_request(rq, true); 859 + i915_request_get(rq); 860 + __i915_request_add(rq, true); 860 861 861 862 if (!wait_for_hang(&h, rq)) { 862 863 struct drm_printer p = drm_info_printer(i915->drm.dev); ··· 874 875 875 876 reset_count = fake_hangcheck(rq); 876 877 877 - timeout = i915_wait_request(rq, I915_WAIT_LOCKED, 10); 878 + timeout = i915_request_wait(rq, I915_WAIT_LOCKED, 10); 878 879 if (timeout < 0) { 879 880 pr_err("i915_wait_request failed on a stuck request: err=%ld\n", 880 881 timeout); ··· 890 891 } 891 892 892 893 out_rq: 893 - i915_gem_request_put(rq); 894 + i915_request_put(rq); 894 895 fini: 895 896 hang_fini(&h); 896 897 unlock: ··· 921 922 goto unlock; 922 923 923 924 for_each_engine(engine, i915, id) { 924 - struct drm_i915_gem_request *prev; 925 + struct i915_request *prev; 925 926 IGT_TIMEOUT(end_time); 926 927 unsigned int count; 927 928 ··· 934 935 goto fini; 935 936 } 936 937 937 - i915_gem_request_get(prev); 938 - __i915_add_request(prev, true); 938 + i915_request_get(prev); 939 + __i915_request_add(prev, true); 939 940 940 941 count = 0; 941 942 do { 942 - struct drm_i915_gem_request *rq; 943 + struct i915_request *rq; 943 944 unsigned int reset_count; 944 945 945 946 rq = hang_create_request(&h, engine); ··· 948 949 goto fini; 949 950 } 950 951 951 - i915_gem_request_get(rq); 952 - __i915_add_request(rq, true); 952 + i915_request_get(rq); 953 + __i915_request_add(rq, true); 953 954 954 955 if (!wait_for_hang(&h, prev)) { 955 956 struct drm_printer p = 
drm_info_printer(i915->drm.dev); ··· 959 960 intel_engine_dump(prev->engine, &p, 960 961 "%s\n", prev->engine->name); 961 962 962 - i915_gem_request_put(rq); 963 - i915_gem_request_put(prev); 963 + i915_request_put(rq); 964 + i915_request_put(prev); 964 965 965 966 i915_reset(i915, 0); 966 967 i915_gem_set_wedged(i915); ··· 979 980 if (prev->fence.error != -EIO) { 980 981 pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", 981 982 prev->fence.error); 982 - i915_gem_request_put(rq); 983 - i915_gem_request_put(prev); 983 + i915_request_put(rq); 984 + i915_request_put(prev); 984 985 err = -EINVAL; 985 986 goto fini; 986 987 } ··· 988 989 if (rq->fence.error) { 989 990 pr_err("Fence error status not zero [%d] after unrelated reset\n", 990 991 rq->fence.error); 991 - i915_gem_request_put(rq); 992 - i915_gem_request_put(prev); 992 + i915_request_put(rq); 993 + i915_request_put(prev); 993 994 err = -EINVAL; 994 995 goto fini; 995 996 } 996 997 997 998 if (i915_reset_count(&i915->gpu_error) == reset_count) { 998 999 pr_err("No GPU reset recorded!\n"); 999 - i915_gem_request_put(rq); 1000 - i915_gem_request_put(prev); 1000 + i915_request_put(rq); 1001 + i915_request_put(prev); 1001 1002 err = -EINVAL; 1002 1003 goto fini; 1003 1004 } 1004 1005 1005 - i915_gem_request_put(prev); 1006 + i915_request_put(prev); 1006 1007 prev = rq; 1007 1008 count++; 1008 1009 } while (time_before(jiffies, end_time)); ··· 1011 1012 *h.batch = MI_BATCH_BUFFER_END; 1012 1013 i915_gem_chipset_flush(i915); 1013 1014 1014 - i915_gem_request_put(prev); 1015 + i915_request_put(prev); 1015 1016 1016 1017 err = flush_test(i915, I915_WAIT_LOCKED); 1017 1018 if (err) ··· 1035 1036 struct drm_i915_private *i915 = arg; 1036 1037 struct intel_engine_cs *engine = i915->engine[RCS]; 1037 1038 struct hang h; 1038 - struct drm_i915_gem_request *rq; 1039 + struct i915_request *rq; 1039 1040 struct i915_gpu_state *error; 1040 1041 int err; 1041 1042 ··· 1059 1060 goto err_fini; 1060 1061 } 
1061 1062 1062 - i915_gem_request_get(rq); 1063 - __i915_add_request(rq, true); 1063 + i915_request_get(rq); 1064 + __i915_request_add(rq, true); 1064 1065 1065 1066 if (!wait_for_hang(&h, rq)) { 1066 1067 struct drm_printer p = drm_info_printer(i915->drm.dev); ··· 1097 1098 } 1098 1099 1099 1100 err_request: 1100 - i915_gem_request_put(rq); 1101 + i915_request_put(rq); 1101 1102 err_fini: 1102 1103 hang_fini(&h); 1103 1104 err_unlock:
+5 -5
drivers/gpu/drm/i915/selftests/mock_engine.c
··· 81 81 i915_gem_context_put(ctx); 82 82 } 83 83 84 - static int mock_request_alloc(struct drm_i915_gem_request *request) 84 + static int mock_request_alloc(struct i915_request *request) 85 85 { 86 86 struct mock_request *mock = container_of(request, typeof(*mock), base); 87 87 ··· 91 91 return 0; 92 92 } 93 93 94 - static int mock_emit_flush(struct drm_i915_gem_request *request, 94 + static int mock_emit_flush(struct i915_request *request, 95 95 unsigned int flags) 96 96 { 97 97 return 0; 98 98 } 99 99 100 - static void mock_emit_breadcrumb(struct drm_i915_gem_request *request, 100 + static void mock_emit_breadcrumb(struct i915_request *request, 101 101 u32 *flags) 102 102 { 103 103 } 104 104 105 - static void mock_submit_request(struct drm_i915_gem_request *request) 105 + static void mock_submit_request(struct i915_request *request) 106 106 { 107 107 struct mock_request *mock = container_of(request, typeof(*mock), base); 108 108 struct mock_engine *engine = 109 109 container_of(request->engine, typeof(*engine), base); 110 110 111 - i915_gem_request_submit(request); 111 + i915_request_submit(request); 112 112 GEM_BUG_ON(!request->global_seqno); 113 113 114 114 spin_lock_irq(&engine->hw_lock);
+1 -1
drivers/gpu/drm/i915/selftests/mock_gem_device.c
··· 43 43 for_each_engine(engine, i915, id) 44 44 mock_engine_flush(engine); 45 45 46 - i915_gem_retire_requests(i915); 46 + i915_retire_requests(i915); 47 47 } 48 48 49 49 static void mock_device_release(struct drm_device *dev)
+5 -5
drivers/gpu/drm/i915/selftests/mock_request.c
··· 25 25 #include "mock_engine.h" 26 26 #include "mock_request.h" 27 27 28 - struct drm_i915_gem_request * 28 + struct i915_request * 29 29 mock_request(struct intel_engine_cs *engine, 30 30 struct i915_gem_context *context, 31 31 unsigned long delay) 32 32 { 33 - struct drm_i915_gem_request *request; 33 + struct i915_request *request; 34 34 struct mock_request *mock; 35 35 36 36 /* NB the i915->requests slab cache is enlarged to fit mock_request */ 37 - request = i915_gem_request_alloc(engine, context); 37 + request = i915_request_alloc(engine, context); 38 38 if (IS_ERR(request)) 39 39 return NULL; 40 40 ··· 44 44 return &mock->base; 45 45 } 46 46 47 - bool mock_cancel_request(struct drm_i915_gem_request *request) 47 + bool mock_cancel_request(struct i915_request *request) 48 48 { 49 49 struct mock_request *mock = container_of(request, typeof(*mock), base); 50 50 struct mock_engine *engine = ··· 57 57 spin_unlock_irq(&engine->hw_lock); 58 58 59 59 if (was_queued) 60 - i915_gem_request_unsubmit(request); 60 + i915_request_unsubmit(request); 61 61 62 62 return was_queued; 63 63 }
+4 -4
drivers/gpu/drm/i915/selftests/mock_request.h
··· 27 27 28 28 #include <linux/list.h> 29 29 30 - #include "../i915_gem_request.h" 30 + #include "../i915_request.h" 31 31 32 32 struct mock_request { 33 - struct drm_i915_gem_request base; 33 + struct i915_request base; 34 34 35 35 struct list_head link; 36 36 unsigned long delay; 37 37 }; 38 38 39 - struct drm_i915_gem_request * 39 + struct i915_request * 40 40 mock_request(struct intel_engine_cs *engine, 41 41 struct i915_gem_context *context, 42 42 unsigned long delay); 43 43 44 - bool mock_cancel_request(struct drm_i915_gem_request *request); 44 + bool mock_cancel_request(struct i915_request *request); 45 45 46 46 #endif /* !__MOCK_REQUEST__ */