
drm/i915: Rename struct intel_ringbuffer to struct intel_ring

The state stored in this struct is not only the information about the
buffer object, but also the ring used to communicate with the hardware.
Using "buffer" here is overly specific and, for me at least, conflates it
with the notion of buffer objects themselves.

s/struct intel_ringbuffer/struct intel_ring/
s/enum intel_ring_hangcheck/enum intel_engine_hangcheck/
s/describe_ctx_ringbuf()/describe_ctx_ring()/
s/intel_ring_get_active_head()/intel_engine_get_active_head()/
s/intel_ring_sync_index()/intel_engine_sync_index()/
s/intel_ring_init_seqno()/intel_engine_init_seqno()/
s/ring_stuck()/engine_stuck()/
s/intel_cleanup_engine()/intel_engine_cleanup()/
s/intel_stop_engine()/intel_engine_stop()/
s/intel_pin_and_map_ringbuffer_obj()/intel_pin_and_map_ring()/
s/intel_unpin_ringbuffer_obj()/intel_unpin_ring()/
s/intel_engine_create_ringbuffer()/intel_engine_create_ring()/
s/intel_ring_flush_all_caches()/intel_engine_flush_all_caches()/
s/intel_ring_invalidate_all_caches()/intel_engine_invalidate_all_caches()/
s/intel_ringbuffer_free()/intel_ring_free()/
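
As a quick illustration (not part of the patch itself), here is a schematic
before/after fragment of a typical call site, using only names that appear
in the substitution list above and in the diff below; the surrounding
request setup and error handling are elided, so the fragment is not meant
to compile on its own:

    /* before: the ring type is named after the buffer object */
    struct intel_ringbuffer *ring = req->ring;

    ret = intel_ring_begin(req, 2);
    if (ret)
            return ret;
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);
    ret = intel_ring_flush_all_caches(req);

    /* after: the ring itself is an intel_ring, and engine-wide
     * operations move to the intel_engine_* prefix */
    struct intel_ring *ring = req->ring;

    ret = intel_ring_begin(req, 2);
    if (ret)
            return ret;
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);
    ret = intel_engine_flush_all_caches(req);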

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-15-git-send-email-chris@chris-wilson.co.uk
Link: http://patchwork.freedesktop.org/patch/msgid/1470174640-18242-4-git-send-email-chris@chris-wilson.co.uk

17 files changed, 157 insertions(+), 159 deletions(-)
+5 -6
drivers/gpu/drm/i915/i915_debugfs.c
··· 1419 1419 intel_runtime_pm_get(dev_priv); 1420 1420 1421 1421 for_each_engine_id(engine, dev_priv, id) { 1422 - acthd[id] = intel_ring_get_active_head(engine); 1422 + acthd[id] = intel_engine_get_active_head(engine); 1423 1423 seqno[id] = intel_engine_get_seqno(engine); 1424 1424 } 1425 1425 ··· 2036 2036 return 0; 2037 2037 } 2038 2038 2039 - static void describe_ctx_ringbuf(struct seq_file *m, 2040 - struct intel_ringbuffer *ringbuf) 2039 + static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) 2041 2040 { 2042 2041 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)", 2043 - ringbuf->space, ringbuf->head, ringbuf->tail, 2044 - ringbuf->last_retired_head); 2042 + ring->space, ring->head, ring->tail, 2043 + ring->last_retired_head); 2045 2044 } 2046 2045 2047 2046 static int i915_context_status(struct seq_file *m, void *unused) ··· 2085 2086 if (ce->state) 2086 2087 describe_obj(m, ce->state); 2087 2088 if (ce->ring) 2088 - describe_ctx_ringbuf(m, ce->ring); 2089 + describe_ctx_ring(m, ce->ring); 2089 2090 seq_putc(m, '\n'); 2090 2091 } 2091 2092
+2 -2
drivers/gpu/drm/i915/i915_drv.h
··· 518 518 bool waiting; 519 519 int num_waiters; 520 520 int hangcheck_score; 521 - enum intel_ring_hangcheck_action hangcheck_action; 521 + enum intel_engine_hangcheck_action hangcheck_action; 522 522 int num_requests; 523 523 524 524 /* our own tracking of ring head and tail */ ··· 894 894 895 895 struct intel_context { 896 896 struct drm_i915_gem_object *state; 897 - struct intel_ringbuffer *ring; 897 + struct intel_ring *ring; 898 898 struct i915_vma *lrc_vma; 899 899 uint32_t *lrc_reg_state; 900 900 u64 lrc_desc;
+8 -8
drivers/gpu/drm/i915/i915_gem.c
··· 2486 2486 2487 2487 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) 2488 2488 { 2489 - struct intel_ringbuffer *buffer; 2489 + struct intel_ring *ring; 2490 2490 2491 2491 while (!list_empty(&engine->active_list)) { 2492 2492 struct drm_i915_gem_object *obj; ··· 2502 2502 * (lockless) lookup doesn't try and wait upon the request as we 2503 2503 * reset it. 2504 2504 */ 2505 - intel_ring_init_seqno(engine, engine->last_submitted_seqno); 2505 + intel_engine_init_seqno(engine, engine->last_submitted_seqno); 2506 2506 2507 2507 /* 2508 2508 * Clear the execlists queue up before freeing the requests, as those ··· 2541 2541 * upon reset is less than when we start. Do one more pass over 2542 2542 * all the ringbuffers to reset last_retired_head. 2543 2543 */ 2544 - list_for_each_entry(buffer, &engine->buffers, link) { 2545 - buffer->last_retired_head = buffer->tail; 2546 - intel_ring_update_space(buffer); 2544 + list_for_each_entry(ring, &engine->buffers, link) { 2545 + ring->last_retired_head = ring->tail; 2546 + intel_ring_update_space(ring); 2547 2547 } 2548 2548 2549 2549 engine->i915->gt.active_engines &= ~intel_engine_flag(engine); ··· 2870 2870 2871 2871 i915_gem_object_retire_request(obj, from_req); 2872 2872 } else { 2873 - int idx = intel_ring_sync_index(from, to); 2873 + int idx = intel_engine_sync_index(from, to); 2874 2874 u32 seqno = i915_gem_request_get_seqno(from_req); 2875 2875 2876 2876 WARN_ON(!to_req); ··· 4570 4570 4571 4571 if (!i915.enable_execlists) { 4572 4572 dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission; 4573 - dev_priv->gt.cleanup_engine = intel_cleanup_engine; 4574 - dev_priv->gt.stop_engine = intel_stop_engine; 4573 + dev_priv->gt.cleanup_engine = intel_engine_cleanup; 4574 + dev_priv->gt.stop_engine = intel_engine_stop; 4575 4575 } else { 4576 4576 dev_priv->gt.execbuf_submit = intel_execlists_submission; 4577 4577 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+3 -3
drivers/gpu/drm/i915/i915_gem_context.c
··· 174 174 175 175 WARN_ON(ce->pin_count); 176 176 if (ce->ring) 177 - intel_ringbuffer_free(ce->ring); 177 + intel_ring_free(ce->ring); 178 178 179 179 i915_gem_object_put(ce->state); 180 180 } ··· 552 552 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) 553 553 { 554 554 struct drm_i915_private *dev_priv = req->i915; 555 - struct intel_ringbuffer *ring = req->ring; 555 + struct intel_ring *ring = req->ring; 556 556 struct intel_engine_cs *engine = req->engine; 557 557 u32 flags = hw_flags | MI_MM_SPACE_GTT; 558 558 const int num_rings = ··· 655 655 static int remap_l3(struct drm_i915_gem_request *req, int slice) 656 656 { 657 657 u32 *remap_info = req->i915->l3_parity.remap_info[slice]; 658 - struct intel_ringbuffer *ring = req->ring; 658 + struct intel_ring *ring = req->ring; 659 659 int i, ret; 660 660 661 661 if (!remap_info)
+3 -3
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 1001 1001 /* Unconditionally invalidate gpu caches and ensure that we do flush 1002 1002 * any residual writes from the previous batch. 1003 1003 */ 1004 - return intel_ring_invalidate_all_caches(req); 1004 + return intel_engine_invalidate_all_caches(req); 1005 1005 } 1006 1006 1007 1007 static bool ··· 1173 1173 static int 1174 1174 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req) 1175 1175 { 1176 - struct intel_ringbuffer *ring = req->ring; 1176 + struct intel_ring *ring = req->ring; 1177 1177 int ret, i; 1178 1178 1179 1179 if (!IS_GEN7(req->i915) || req->engine->id != RCS) { ··· 1303 1303 1304 1304 if (params->engine->id == RCS && 1305 1305 instp_mode != dev_priv->relative_constants_mode) { 1306 - struct intel_ringbuffer *ring = params->request->ring; 1306 + struct intel_ring *ring = params->request->ring; 1307 1307 1308 1308 ret = intel_ring_begin(params->request, 4); 1309 1309 if (ret)
+3 -3
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 669 669 unsigned entry, 670 670 dma_addr_t addr) 671 671 { 672 - struct intel_ringbuffer *ring = req->ring; 672 + struct intel_ring *ring = req->ring; 673 673 struct intel_engine_cs *engine = req->engine; 674 674 int ret; 675 675 ··· 1661 1661 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, 1662 1662 struct drm_i915_gem_request *req) 1663 1663 { 1664 - struct intel_ringbuffer *ring = req->ring; 1664 + struct intel_ring *ring = req->ring; 1665 1665 struct intel_engine_cs *engine = req->engine; 1666 1666 int ret; 1667 1667 ··· 1688 1688 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, 1689 1689 struct drm_i915_gem_request *req) 1690 1690 { 1691 - struct intel_ringbuffer *ring = req->ring; 1691 + struct intel_ring *ring = req->ring; 1692 1692 struct intel_engine_cs *engine = req->engine; 1693 1693 int ret; 1694 1694
+3 -3
drivers/gpu/drm/i915/i915_gem_request.c
··· 244 244 245 245 /* Finally reset hw state */ 246 246 for_each_engine(engine, dev_priv) 247 - intel_ring_init_seqno(engine, seqno); 247 + intel_engine_init_seqno(engine, seqno); 248 248 249 249 return 0; 250 250 } ··· 423 423 bool flush_caches) 424 424 { 425 425 struct intel_engine_cs *engine; 426 - struct intel_ringbuffer *ring; 426 + struct intel_ring *ring; 427 427 u32 request_start; 428 428 u32 reserved_tail; 429 429 int ret; ··· 454 454 if (i915.enable_execlists) 455 455 ret = logical_ring_flush_all_caches(request); 456 456 else 457 - ret = intel_ring_flush_all_caches(request); 457 + ret = intel_engine_flush_all_caches(request); 458 458 /* Not allowed to fail! */ 459 459 WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret); 460 460 }
+1 -1
drivers/gpu/drm/i915/i915_gem_request.h
··· 61 61 */ 62 62 struct i915_gem_context *ctx; 63 63 struct intel_engine_cs *engine; 64 - struct intel_ringbuffer *ring; 64 + struct intel_ring *ring; 65 65 struct intel_signal_node signaling; 66 66 67 67 /** GEM sequence number associated with the previous request,
+4 -4
drivers/gpu/drm/i915/i915_gpu_error.c
··· 221 221 } 222 222 } 223 223 224 - static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a) 224 + static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a) 225 225 { 226 226 switch (a) { 227 227 case HANGCHECK_IDLE: ··· 879 879 signal_offset = 880 880 (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4; 881 881 tmp = error->semaphore_obj->pages[0]; 882 - idx = intel_ring_sync_index(engine, to); 882 + idx = intel_engine_sync_index(engine, to); 883 883 884 884 ee->semaphore_mboxes[idx] = tmp[signal_offset]; 885 885 ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx]; ··· 981 981 982 982 ee->waiting = intel_engine_has_waiter(engine); 983 983 ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); 984 - ee->acthd = intel_ring_get_active_head(engine); 984 + ee->acthd = intel_engine_get_active_head(engine); 985 985 ee->seqno = intel_engine_get_seqno(engine); 986 986 ee->last_seqno = engine->last_submitted_seqno; 987 987 ee->start = I915_READ_START(engine); ··· 1097 1097 request = i915_gem_find_active_request(engine); 1098 1098 if (request) { 1099 1099 struct i915_address_space *vm; 1100 - struct intel_ringbuffer *ring; 1100 + struct intel_ring *ring; 1101 1101 1102 1102 vm = request->ctx->ppgtt ? 1103 1103 &request->ctx->ppgtt->base : &ggtt->base;
+7 -7
drivers/gpu/drm/i915/i915_irq.c
··· 2993 2993 return stuck; 2994 2994 } 2995 2995 2996 - static enum intel_ring_hangcheck_action 2996 + static enum intel_engine_hangcheck_action 2997 2997 head_stuck(struct intel_engine_cs *engine, u64 acthd) 2998 2998 { 2999 2999 if (acthd != engine->hangcheck.acthd) { ··· 3011 3011 return HANGCHECK_HUNG; 3012 3012 } 3013 3013 3014 - static enum intel_ring_hangcheck_action 3015 - ring_stuck(struct intel_engine_cs *engine, u64 acthd) 3014 + static enum intel_engine_hangcheck_action 3015 + engine_stuck(struct intel_engine_cs *engine, u64 acthd) 3016 3016 { 3017 3017 struct drm_i915_private *dev_priv = engine->i915; 3018 - enum intel_ring_hangcheck_action ha; 3018 + enum intel_engine_hangcheck_action ha; 3019 3019 u32 tmp; 3020 3020 3021 3021 ha = head_stuck(engine, acthd); ··· 3124 3124 if (engine->irq_seqno_barrier) 3125 3125 engine->irq_seqno_barrier(engine); 3126 3126 3127 - acthd = intel_ring_get_active_head(engine); 3127 + acthd = intel_engine_get_active_head(engine); 3128 3128 seqno = intel_engine_get_seqno(engine); 3129 3129 3130 3130 /* Reset stuck interrupts between batch advances */ ··· 3154 3154 * being repeatedly kicked and so responsible 3155 3155 * for stalling the machine. 3156 3156 */ 3157 - engine->hangcheck.action = ring_stuck(engine, 3158 - acthd); 3157 + engine->hangcheck.action = 3158 + engine_stuck(engine, acthd); 3159 3159 3160 3160 switch (engine->hangcheck.action) { 3161 3161 case HANGCHECK_IDLE:
+5 -5
drivers/gpu/drm/i915/intel_display.c
··· 11115 11115 struct drm_i915_gem_request *req, 11116 11116 uint32_t flags) 11117 11117 { 11118 - struct intel_ringbuffer *ring = req->ring; 11118 + struct intel_ring *ring = req->ring; 11119 11119 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11120 11120 u32 flip_mask; 11121 11121 int ret; ··· 11149 11149 struct drm_i915_gem_request *req, 11150 11150 uint32_t flags) 11151 11151 { 11152 - struct intel_ringbuffer *ring = req->ring; 11152 + struct intel_ring *ring = req->ring; 11153 11153 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11154 11154 u32 flip_mask; 11155 11155 int ret; ··· 11180 11180 struct drm_i915_gem_request *req, 11181 11181 uint32_t flags) 11182 11182 { 11183 - struct intel_ringbuffer *ring = req->ring; 11183 + struct intel_ring *ring = req->ring; 11184 11184 struct drm_i915_private *dev_priv = to_i915(dev); 11185 11185 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11186 11186 uint32_t pf, pipesrc; ··· 11218 11218 struct drm_i915_gem_request *req, 11219 11219 uint32_t flags) 11220 11220 { 11221 - struct intel_ringbuffer *ring = req->ring; 11221 + struct intel_ring *ring = req->ring; 11222 11222 struct drm_i915_private *dev_priv = to_i915(dev); 11223 11223 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11224 11224 uint32_t pf, pipesrc; ··· 11253 11253 struct drm_i915_gem_request *req, 11254 11254 uint32_t flags) 11255 11255 { 11256 - struct intel_ringbuffer *ring = req->ring; 11256 + struct intel_ring *ring = req->ring; 11257 11257 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11258 11258 uint32_t plane_bit = 0; 11259 11259 int len, ret;
+1 -1
drivers/gpu/drm/i915/intel_engine_cs.c
··· 155 155 if (i915.enable_execlists) 156 156 intel_logical_ring_cleanup(&dev_priv->engine[i]); 157 157 else 158 - intel_cleanup_engine(&dev_priv->engine[i]); 158 + intel_engine_cleanup(&dev_priv->engine[i]); 159 159 } 160 160 161 161 return ret;
+17 -17
drivers/gpu/drm/i915/intel_lrc.c
··· 767 767 static int 768 768 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) 769 769 { 770 - struct intel_ringbuffer *ring = request->ring; 770 + struct intel_ring *ring = request->ring; 771 771 struct intel_engine_cs *engine = request->engine; 772 772 773 773 intel_ring_advance(ring); ··· 818 818 struct drm_device *dev = params->dev; 819 819 struct intel_engine_cs *engine = params->engine; 820 820 struct drm_i915_private *dev_priv = to_i915(dev); 821 - struct intel_ringbuffer *ring = params->request->ring; 821 + struct intel_ring *ring = params->request->ring; 822 822 u64 exec_start; 823 823 int instp_mode; 824 824 u32 instp_mask; ··· 973 973 974 974 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 975 975 976 - ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ring); 976 + ret = intel_pin_and_map_ring(dev_priv, ce->ring); 977 977 if (ret) 978 978 goto unpin_map; 979 979 ··· 1011 1011 if (--ce->pin_count) 1012 1012 return; 1013 1013 1014 - intel_unpin_ringbuffer_obj(ce->ring); 1014 + intel_unpin_ring(ce->ring); 1015 1015 1016 1016 i915_gem_object_unpin_map(ce->state); 1017 1017 i915_gem_object_ggtt_unpin(ce->state); ··· 1027 1027 { 1028 1028 int ret, i; 1029 1029 struct intel_engine_cs *engine = req->engine; 1030 - struct intel_ringbuffer *ring = req->ring; 1030 + struct intel_ring *ring = req->ring; 1031 1031 struct i915_workarounds *w = &req->i915->workarounds; 1032 1032 1033 1033 if (w->count == 0) ··· 1550 1550 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) 1551 1551 { 1552 1552 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; 1553 - struct intel_ringbuffer *ring = req->ring; 1553 + struct intel_ring *ring = req->ring; 1554 1554 struct intel_engine_cs *engine = req->engine; 1555 1555 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; 1556 1556 int i, ret; ··· 1578 1578 static int gen8_emit_bb_start(struct drm_i915_gem_request *req, 1579 1579 u64 offset, unsigned dispatch_flags) 1580 1580 { 1581 - struct intel_ringbuffer *ring = req->ring; 1581 + struct intel_ring *ring = req->ring; 1582 1582 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE); 1583 1583 int ret; 1584 1584 ··· 1635 1635 u32 invalidate_domains, 1636 1636 u32 unused) 1637 1637 { 1638 - struct intel_ringbuffer *ring = request->ring; 1639 - uint32_t cmd; 1638 + struct intel_ring *ring = request->ring; 1639 + u32 cmd; 1640 1640 int ret; 1641 1641 1642 1642 ret = intel_ring_begin(request, 4); ··· 1673 1673 u32 invalidate_domains, 1674 1674 u32 flush_domains) 1675 1675 { 1676 - struct intel_ringbuffer *ring = request->ring; 1676 + struct intel_ring *ring = request->ring; 1677 1677 struct intel_engine_cs *engine = request->engine; 1678 1678 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 1679 1679 bool vf_flush_wa = false, dc_flush_wa = false; ··· 1787 1787 1788 1788 static int gen8_emit_request(struct drm_i915_gem_request *request) 1789 1789 { 1790 - struct intel_ringbuffer *ring = request->ring; 1790 + struct intel_ring *ring = request->ring; 1791 1791 int ret; 1792 1792 1793 1793 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS); ··· 1810 1810 1811 1811 static int gen8_emit_request_render(struct drm_i915_gem_request *request) 1812 1812 { 1813 - struct intel_ringbuffer *ring = request->ring; 1813 + struct intel_ring *ring = request->ring; 1814 1814 int ret; 1815 1815 1816 1816 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS); ··· 2162 2162 populate_lr_context(struct i915_gem_context *ctx, 2163 2163 struct drm_i915_gem_object *ctx_obj, 2164 
2164 struct intel_engine_cs *engine, 2165 - struct intel_ringbuffer *ringbuf) 2165 + struct intel_ring *ring) 2166 2166 { 2167 2167 struct drm_i915_private *dev_priv = ctx->i915; 2168 2168 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; ··· 2215 2215 RING_START(engine->mmio_base), 0); 2216 2216 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, 2217 2217 RING_CTL(engine->mmio_base), 2218 - ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); 2218 + ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); 2219 2219 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, 2220 2220 RING_BBADDR_UDW(engine->mmio_base), 0); 2221 2221 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, ··· 2343 2343 struct drm_i915_gem_object *ctx_obj; 2344 2344 struct intel_context *ce = &ctx->engine[engine->id]; 2345 2345 uint32_t context_size; 2346 - struct intel_ringbuffer *ring; 2346 + struct intel_ring *ring; 2347 2347 int ret; 2348 2348 2349 2349 WARN_ON(ce->state); ··· 2359 2359 return PTR_ERR(ctx_obj); 2360 2360 } 2361 2361 2362 - ring = intel_engine_create_ringbuffer(engine, ctx->ring_size); 2362 + ring = intel_engine_create_ring(engine, ctx->ring_size); 2363 2363 if (IS_ERR(ring)) { 2364 2364 ret = PTR_ERR(ring); 2365 2365 goto error_deref_obj; ··· 2378 2378 return 0; 2379 2379 2380 2380 error_ring_free: 2381 - intel_ringbuffer_free(ring); 2381 + intel_ring_free(ring); 2382 2382 error_deref_obj: 2383 2383 i915_gem_object_put(ctx_obj); 2384 2384 ce->ring = NULL;
+2 -2
drivers/gpu/drm/i915/intel_mocs.c
··· 276 276 static int emit_mocs_control_table(struct drm_i915_gem_request *req, 277 277 const struct drm_i915_mocs_table *table) 278 278 { 279 - struct intel_ringbuffer *ring = req->ring; 279 + struct intel_ring *ring = req->ring; 280 280 enum intel_engine_id engine = req->engine->id; 281 281 unsigned int index; 282 282 int ret; ··· 336 336 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req, 337 337 const struct drm_i915_mocs_table *table) 338 338 { 339 - struct intel_ringbuffer *ring = req->ring; 339 + struct intel_ring *ring = req->ring; 340 340 unsigned int i; 341 341 int ret; 342 342
+4 -4
drivers/gpu/drm/i915/intel_overlay.c
··· 235 235 struct drm_i915_private *dev_priv = overlay->i915; 236 236 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 237 237 struct drm_i915_gem_request *req; 238 - struct intel_ringbuffer *ring; 238 + struct intel_ring *ring; 239 239 int ret; 240 240 241 241 WARN_ON(overlay->active); ··· 270 270 struct drm_i915_private *dev_priv = overlay->i915; 271 271 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 272 272 struct drm_i915_gem_request *req; 273 - struct intel_ringbuffer *ring; 273 + struct intel_ring *ring; 274 274 u32 flip_addr = overlay->flip_addr; 275 275 u32 tmp; 276 276 int ret; ··· 340 340 struct drm_i915_private *dev_priv = overlay->i915; 341 341 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 342 342 struct drm_i915_gem_request *req; 343 - struct intel_ringbuffer *ring; 343 + struct intel_ring *ring; 344 344 u32 flip_addr = overlay->flip_addr; 345 345 int ret; 346 346 ··· 426 426 if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) { 427 427 /* synchronous slowpath */ 428 428 struct drm_i915_gem_request *req; 429 - struct intel_ringbuffer *ring; 429 + struct intel_ring *ring; 430 430 431 431 req = i915_gem_request_alloc(engine, NULL); 432 432 if (IS_ERR(req))
+64 -64
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 47 47 return space - I915_RING_FREE_SPACE; 48 48 } 49 49 50 - void intel_ring_update_space(struct intel_ringbuffer *ringbuf) 50 + void intel_ring_update_space(struct intel_ring *ringbuf) 51 51 { 52 52 if (ringbuf->last_retired_head != -1) { 53 53 ringbuf->head = ringbuf->last_retired_head; ··· 60 60 61 61 static void __intel_engine_submit(struct intel_engine_cs *engine) 62 62 { 63 - struct intel_ringbuffer *ringbuf = engine->buffer; 64 - ringbuf->tail &= ringbuf->size - 1; 65 - engine->write_tail(engine, ringbuf->tail); 63 + struct intel_ring *ring = engine->buffer; 64 + 65 + ring->tail &= ring->size - 1; 66 + engine->write_tail(engine, ring->tail); 66 67 } 67 68 68 69 static int ··· 71 70 u32 invalidate_domains, 72 71 u32 flush_domains) 73 72 { 74 - struct intel_ringbuffer *ring = req->ring; 73 + struct intel_ring *ring = req->ring; 75 74 u32 cmd; 76 75 int ret; 77 76 ··· 98 97 u32 invalidate_domains, 99 98 u32 flush_domains) 100 99 { 101 - struct intel_ringbuffer *ring = req->ring; 100 + struct intel_ring *ring = req->ring; 102 101 u32 cmd; 103 102 int ret; 104 103 ··· 188 187 static int 189 188 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) 190 189 { 191 - struct intel_ringbuffer *ring = req->ring; 190 + struct intel_ring *ring = req->ring; 192 191 u32 scratch_addr = 193 192 req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 194 193 int ret; ··· 225 224 gen6_render_ring_flush(struct drm_i915_gem_request *req, 226 225 u32 invalidate_domains, u32 flush_domains) 227 226 { 228 - struct intel_ringbuffer *ring = req->ring; 227 + struct intel_ring *ring = req->ring; 229 228 u32 scratch_addr = 230 229 req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 231 230 u32 flags = 0; ··· 278 277 static int 279 278 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) 280 279 { 281 - struct intel_ringbuffer *ring = req->ring; 280 + struct intel_ring *ring = req->ring; 282 281 int ret; 283 282 284 283 ret = intel_ring_begin(req, 4); ··· 300 299 gen7_render_ring_flush(struct drm_i915_gem_request *req, 301 300 u32 invalidate_domains, u32 flush_domains) 302 301 { 303 - struct intel_ringbuffer *ring = req->ring; 302 + struct intel_ring *ring = req->ring; 304 303 u32 scratch_addr = 305 304 req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 306 305 u32 flags = 0; ··· 365 364 gen8_emit_pipe_control(struct drm_i915_gem_request *req, 366 365 u32 flags, u32 scratch_addr) 367 366 { 368 - struct intel_ringbuffer *ring = req->ring; 367 + struct intel_ring *ring = req->ring; 369 368 int ret; 370 369 371 370 ret = intel_ring_begin(req, 6); ··· 428 427 I915_WRITE_TAIL(engine, value); 429 428 } 430 429 431 - u64 intel_ring_get_active_head(struct intel_engine_cs *engine) 430 + u64 intel_engine_get_active_head(struct intel_engine_cs *engine) 432 431 { 433 432 struct drm_i915_private *dev_priv = engine->i915; 434 433 u64 acthd; ··· 554 553 static int init_ring_common(struct intel_engine_cs *engine) 555 554 { 556 555 struct drm_i915_private *dev_priv = engine->i915; 557 - struct intel_ringbuffer *ringbuf = engine->buffer; 558 - struct drm_i915_gem_object *obj = ringbuf->obj; 556 + struct intel_ring *ring = engine->buffer; 557 + struct drm_i915_gem_object *obj = ring->obj; 559 558 int ret = 0; 560 559 561 560 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); ··· 605 604 (void)I915_READ_HEAD(engine); 606 605 607 606 I915_WRITE_CTL(engine, 608 - ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) 607 + ((ring->size - PAGE_SIZE) & RING_NR_PAGES) 609 608 | RING_VALID); 610 609 611 610 /* 
If the head is still not zero, the ring is dead */ ··· 624 623 goto out; 625 624 } 626 625 627 - ringbuf->last_retired_head = -1; 628 - ringbuf->head = I915_READ_HEAD(engine); 629 - ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR; 630 - intel_ring_update_space(ringbuf); 626 + ring->last_retired_head = -1; 627 + ring->head = I915_READ_HEAD(engine); 628 + ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR; 629 + intel_ring_update_space(ring); 631 630 632 631 intel_engine_init_hangcheck(engine); 633 632 ··· 681 680 682 681 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) 683 682 { 684 - struct intel_ringbuffer *ring = req->ring; 683 + struct intel_ring *ring = req->ring; 685 684 struct i915_workarounds *w = &req->i915->workarounds; 686 685 int ret, i; 687 686 ··· 689 688 return 0; 690 689 691 690 req->engine->gpu_caches_dirty = true; 692 - ret = intel_ring_flush_all_caches(req); 691 + ret = intel_engine_flush_all_caches(req); 693 692 if (ret) 694 693 return ret; 695 694 ··· 707 706 intel_ring_advance(ring); 708 707 709 708 req->engine->gpu_caches_dirty = true; 710 - ret = intel_ring_flush_all_caches(req); 709 + ret = intel_engine_flush_all_caches(req); 711 710 if (ret) 712 711 return ret; 713 712 ··· 1339 1338 unsigned int num_dwords) 1340 1339 { 1341 1340 #define MBOX_UPDATE_DWORDS 8 1342 - struct intel_ringbuffer *signaller = signaller_req->ring; 1341 + struct intel_ring *signaller = signaller_req->ring; 1343 1342 struct drm_i915_private *dev_priv = signaller_req->i915; 1344 1343 struct intel_engine_cs *waiter; 1345 1344 enum intel_engine_id id; ··· 1381 1380 unsigned int num_dwords) 1382 1381 { 1383 1382 #define MBOX_UPDATE_DWORDS 6 1384 - struct intel_ringbuffer *signaller = signaller_req->ring; 1383 + struct intel_ring *signaller = signaller_req->ring; 1385 1384 struct drm_i915_private *dev_priv = signaller_req->i915; 1386 1385 struct intel_engine_cs *waiter; 1387 1386 enum intel_engine_id id; ··· 1420 1419 static int gen6_signal(struct drm_i915_gem_request *signaller_req, 1421 1420 unsigned int num_dwords) 1422 1421 { 1423 - struct intel_ringbuffer *signaller = signaller_req->ring; 1422 + struct intel_ring *signaller = signaller_req->ring; 1424 1423 struct drm_i915_private *dev_priv = signaller_req->i915; 1425 1424 struct intel_engine_cs *useless; 1426 1425 enum intel_engine_id id; ··· 1465 1464 gen6_add_request(struct drm_i915_gem_request *req) 1466 1465 { 1467 1466 struct intel_engine_cs *engine = req->engine; 1468 - struct intel_ringbuffer *ring = req->ring; 1467 + struct intel_ring *ring = req->ring; 1469 1468 int ret; 1470 1469 1471 1470 if (engine->semaphore.signal) ··· 1489 1488 gen8_render_add_request(struct drm_i915_gem_request *req) 1490 1489 { 1491 1490 struct intel_engine_cs *engine = req->engine; 1492 - struct intel_ringbuffer *ring = req->ring; 1491 + struct intel_ring *ring = req->ring; 1493 1492 int ret; 1494 1493 1495 1494 if (engine->semaphore.signal) ··· 1534 1533 struct intel_engine_cs *signaller, 1535 1534 u32 seqno) 1536 1535 { 1537 - struct intel_ringbuffer *waiter = waiter_req->ring; 1536 + struct intel_ring *waiter = waiter_req->ring; 1538 1537 struct drm_i915_private *dev_priv = waiter_req->i915; 1539 1538 u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id); 1540 1539 struct i915_hw_ppgtt *ppgtt; ··· 1568 1567 struct intel_engine_cs *signaller, 1569 1568 u32 seqno) 1570 1569 { 1571 - struct intel_ringbuffer *waiter = waiter_req->ring; 1570 + struct intel_ring *waiter = waiter_req->ring; 1572 1571 u32 dw1 = MI_SEMAPHORE_MBOX 
| 1573 1572 MI_SEMAPHORE_COMPARE | 1574 1573 MI_SEMAPHORE_REGISTER; ··· 1702 1701 u32 invalidate_domains, 1703 1702 u32 flush_domains) 1704 1703 { 1705 - struct intel_ringbuffer *ring = req->ring; 1704 + struct intel_ring *ring = req->ring; 1706 1705 int ret; 1707 1706 1708 1707 ret = intel_ring_begin(req, 2); ··· 1718 1717 static int 1719 1718 i9xx_add_request(struct drm_i915_gem_request *req) 1720 1719 { 1721 - struct intel_ringbuffer *ring = req->ring; 1720 + struct intel_ring *ring = req->ring; 1722 1721 int ret; 1723 1722 1724 1723 ret = intel_ring_begin(req, 4); ··· 1796 1795 u64 offset, u32 length, 1797 1796 unsigned dispatch_flags) 1798 1797 { 1799 - struct intel_ringbuffer *ring = req->ring; 1798 + struct intel_ring *ring = req->ring; 1800 1799 int ret; 1801 1800 1802 1801 ret = intel_ring_begin(req, 2); ··· 1823 1822 u64 offset, u32 len, 1824 1823 unsigned dispatch_flags) 1825 1824 { 1826 - struct intel_ringbuffer *ring = req->ring; 1825 + struct intel_ring *ring = req->ring; 1827 1826 u32 cs_offset = req->engine->scratch.gtt_offset; 1828 1827 int ret; 1829 1828 ··· 1885 1884 u64 offset, u32 len, 1886 1885 unsigned dispatch_flags) 1887 1886 { 1888 - struct intel_ringbuffer *ring = req->ring; 1887 + struct intel_ring *ring = req->ring; 1889 1888 int ret; 1890 1889 1891 1890 ret = intel_ring_begin(req, 2); ··· 1993 1992 return 0; 1994 1993 } 1995 1994 1996 - void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 1995 + void intel_unpin_ring(struct intel_ring *ringbuf) 1997 1996 { 1998 1997 GEM_BUG_ON(!ringbuf->vma); 1999 1998 GEM_BUG_ON(!ringbuf->vaddr); ··· 2008 2007 ringbuf->vma = NULL; 2009 2008 } 2010 2009 2011 - int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, 2012 - struct intel_ringbuffer *ringbuf) 2010 + int intel_pin_and_map_ring(struct drm_i915_private *dev_priv, 2011 + struct intel_ring *ringbuf) 2013 2012 { 2014 2013 struct drm_i915_gem_object *obj = ringbuf->obj; 2015 2014 /* Ring wraparound at offset 0 sometimes hangs. No idea why. 
*/ ··· 2061 2060 return ret; 2062 2061 } 2063 2062 2064 - static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 2063 + static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf) 2065 2064 { 2066 2065 i915_gem_object_put(ringbuf->obj); 2067 2066 ringbuf->obj = NULL; 2068 2067 } 2069 2068 2070 2069 static int intel_alloc_ringbuffer_obj(struct drm_device *dev, 2071 - struct intel_ringbuffer *ringbuf) 2070 + struct intel_ring *ringbuf) 2072 2071 { 2073 2072 struct drm_i915_gem_object *obj; 2074 2073 ··· 2088 2087 return 0; 2089 2088 } 2090 2089 2091 - struct intel_ringbuffer * 2092 - intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) 2090 + struct intel_ring * 2091 + intel_engine_create_ring(struct intel_engine_cs *engine, int size) 2093 2092 { 2094 - struct intel_ringbuffer *ring; 2093 + struct intel_ring *ring; 2095 2094 int ret; 2096 2095 2097 2096 ring = kzalloc(sizeof(*ring), GFP_KERNEL); ··· 2129 2128 } 2130 2129 2131 2130 void 2132 - intel_ringbuffer_free(struct intel_ringbuffer *ring) 2131 + intel_ring_free(struct intel_ring *ring) 2133 2132 { 2134 2133 intel_destroy_ringbuffer_obj(ring); 2135 2134 list_del(&ring->link); ··· 2190 2189 static int intel_init_ring_buffer(struct intel_engine_cs *engine) 2191 2190 { 2192 2191 struct drm_i915_private *dev_priv = engine->i915; 2193 - struct intel_ringbuffer *ringbuf; 2192 + struct intel_ring *ringbuf; 2194 2193 int ret; 2195 2194 2196 2195 WARN_ON(engine->buffer); ··· 2215 2214 if (ret) 2216 2215 goto error; 2217 2216 2218 - ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE); 2217 + ringbuf = intel_engine_create_ring(engine, 32 * PAGE_SIZE); 2219 2218 if (IS_ERR(ringbuf)) { 2220 2219 ret = PTR_ERR(ringbuf); 2221 2220 goto error; ··· 2233 2232 goto error; 2234 2233 } 2235 2234 2236 - ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf); 2235 + ret = intel_pin_and_map_ring(dev_priv, ringbuf); 2237 2236 if (ret) { 2238 2237 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", 2239 2238 engine->name, ret); ··· 2244 2243 return 0; 2245 2244 2246 2245 error: 2247 - intel_cleanup_engine(engine); 2246 + intel_engine_cleanup(engine); 2248 2247 return ret; 2249 2248 } 2250 2249 2251 - void intel_cleanup_engine(struct intel_engine_cs *engine) 2250 + void intel_engine_cleanup(struct intel_engine_cs *engine) 2252 2251 { 2253 2252 struct drm_i915_private *dev_priv; 2254 2253 ··· 2258 2257 dev_priv = engine->i915; 2259 2258 2260 2259 if (engine->buffer) { 2261 - intel_stop_engine(engine); 2260 + intel_engine_stop(engine); 2262 2261 WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); 2263 2262 2264 - intel_unpin_ringbuffer_obj(engine->buffer); 2265 - intel_ringbuffer_free(engine->buffer); 2263 + intel_unpin_ring(engine->buffer); 2264 + intel_ring_free(engine->buffer); 2266 2265 engine->buffer = NULL; 2267 2266 } 2268 2267 ··· 2325 2324 2326 2325 static int wait_for_space(struct drm_i915_gem_request *req, int bytes) 2327 2326 { 2328 - struct intel_ringbuffer *ring = req->ring; 2327 + struct intel_ring *ring = req->ring; 2329 2328 struct intel_engine_cs *engine = req->engine; 2330 2329 struct drm_i915_gem_request *target; 2331 2330 ··· 2370 2369 2371 2370 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) 2372 2371 { 2373 - struct intel_ringbuffer *ring = req->ring; 2372 + struct intel_ring *ring = req->ring; 2374 2373 int remain_actual = ring->size - ring->tail; 2375 2374 int remain_usable = ring->effective_size - ring->tail; 2376 2375 int bytes 
= num_dwords * sizeof(u32); ··· 2427 2426 /* Align the ring tail to a cacheline boundary */ 2428 2427 int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 2429 2428 { 2430 - struct intel_ringbuffer *ring = req->ring; 2429 + struct intel_ring *ring = req->ring; 2431 2430 int num_dwords = 2432 2431 (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 2433 2432 int ret; ··· 2448 2447 return 0; 2449 2448 } 2450 2449 2451 - void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) 2450 + void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno) 2452 2451 { 2453 2452 struct drm_i915_private *dev_priv = engine->i915; 2454 2453 ··· 2534 2533 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, 2535 2534 u32 invalidate, u32 flush) 2536 2535 { 2537 - struct intel_ringbuffer *ring = req->ring; 2536 + struct intel_ring *ring = req->ring; 2538 2537 uint32_t cmd; 2539 2538 int ret; 2540 2539 ··· 2580 2579 u64 offset, u32 len, 2581 2580 unsigned dispatch_flags) 2582 2581 { 2583 - struct intel_ringbuffer *ring = req->ring; 2582 + struct intel_ring *ring = req->ring; 2584 2583 bool ppgtt = USES_PPGTT(req->i915) && 2585 2584 !(dispatch_flags & I915_DISPATCH_SECURE); 2586 2585 int ret; ··· 2606 2605 u64 offset, u32 len, 2607 2606 unsigned dispatch_flags) 2608 2607 { 2609 - struct intel_ringbuffer *ring = req->ring; 2608 + struct intel_ring *ring = req->ring; 2610 2609 int ret; 2611 2610 2612 2611 ret = intel_ring_begin(req, 2); ··· 2631 2630 u64 offset, u32 len, 2632 2631 unsigned dispatch_flags) 2633 2632 { 2634 - struct intel_ringbuffer *ring = req->ring; 2633 + struct intel_ring *ring = req->ring; 2635 2634 int ret; 2636 2635 2637 2636 ret = intel_ring_begin(req, 2); ··· 2654 2653 static int gen6_ring_flush(struct drm_i915_gem_request *req, 2655 2654 u32 invalidate, u32 flush) 2656 2655 { 2657 - struct intel_ringbuffer *ring = req->ring; 2656 + struct intel_ring *ring = req->ring; 2658 2657 uint32_t cmd; 2659 2658 int ret; 2660 2659 ··· 2971 2970 } 2972 2971 2973 2972 int 2974 - intel_ring_flush_all_caches(struct drm_i915_gem_request *req) 2973 + intel_engine_flush_all_caches(struct drm_i915_gem_request *req) 2975 2974 { 2976 2975 struct intel_engine_cs *engine = req->engine; 2977 2976 int ret; ··· 2990 2989 } 2991 2990 2992 2991 int 2993 - intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req) 2992 + intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req) 2994 2993 { 2995 2994 struct intel_engine_cs *engine = req->engine; 2996 2995 uint32_t flush_domains; ··· 3010 3009 return 0; 3011 3010 } 3012 3011 3013 - void 3014 - intel_stop_engine(struct intel_engine_cs *engine) 3012 + void intel_engine_stop(struct intel_engine_cs *engine) 3015 3013 { 3016 3014 int ret; 3017 3015
+25 -26
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 62 62 (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ 63 63 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id)) 64 64 65 - enum intel_ring_hangcheck_action { 65 + enum intel_engine_hangcheck_action { 66 66 HANGCHECK_IDLE = 0, 67 67 HANGCHECK_WAIT, 68 68 HANGCHECK_ACTIVE, ··· 72 72 73 73 #define HANGCHECK_SCORE_RING_HUNG 31 74 74 75 - struct intel_ring_hangcheck { 75 + struct intel_engine_hangcheck { 76 76 u64 acthd; 77 77 unsigned long user_interrupts; 78 78 u32 seqno; 79 79 int score; 80 - enum intel_ring_hangcheck_action action; 80 + enum intel_engine_hangcheck_action action; 81 81 int deadlock; 82 82 u32 instdone[I915_NUM_INSTDONE_REG]; 83 83 }; 84 84 85 - struct intel_ringbuffer { 85 + struct intel_ring { 86 86 struct drm_i915_gem_object *obj; 87 87 void *vaddr; 88 88 struct i915_vma *vma; ··· 149 149 u64 fence_context; 150 150 u32 mmio_base; 151 151 unsigned int irq_shift; 152 - struct intel_ringbuffer *buffer; 152 + struct intel_ring *buffer; 153 153 struct list_head buffers; 154 154 155 155 /* Rather than have every client wait upon all user interrupts, ··· 329 329 330 330 struct i915_gem_context *last_context; 331 331 332 - struct intel_ring_hangcheck hangcheck; 332 + struct intel_engine_hangcheck hangcheck; 333 333 334 334 struct { 335 335 struct drm_i915_gem_object *obj; ··· 376 376 } 377 377 378 378 static inline u32 379 - intel_ring_sync_index(struct intel_engine_cs *engine, 380 - struct intel_engine_cs *other) 379 + intel_engine_sync_index(struct intel_engine_cs *engine, 380 + struct intel_engine_cs *other) 381 381 { 382 382 int idx; 383 383 ··· 439 439 #define I915_GEM_HWS_SCRATCH_INDEX 0x40 440 440 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) 441 441 442 - struct intel_ringbuffer * 443 - intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size); 444 - int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, 445 - struct intel_ringbuffer *ringbuf); 446 - void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); 447 - void intel_ringbuffer_free(struct intel_ringbuffer *ring); 442 + struct intel_ring * 443 + intel_engine_create_ring(struct intel_engine_cs *engine, int size); 444 + int intel_pin_and_map_ring(struct drm_i915_private *dev_priv, 445 + struct intel_ring *ring); 446 + void intel_unpin_ring(struct intel_ring *ring); 447 + void intel_ring_free(struct intel_ring *ring); 448 448 449 - void intel_stop_engine(struct intel_engine_cs *engine); 450 - void intel_cleanup_engine(struct intel_engine_cs *engine); 449 + void intel_engine_stop(struct intel_engine_cs *engine); 450 + void intel_engine_cleanup(struct intel_engine_cs *engine); 451 451 452 452 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request); 453 453 454 454 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n); 455 455 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req); 456 456 457 - static inline void intel_ring_emit(struct intel_ringbuffer *ring, u32 data) 457 + static inline void intel_ring_emit(struct intel_ring *ring, u32 data) 458 458 { 459 459 *(uint32_t *)(ring->vaddr + ring->tail) = data; 460 460 ring->tail += 4; 461 461 } 462 462 463 - static inline void intel_ring_emit_reg(struct intel_ringbuffer *ring, 464 - i915_reg_t reg) 463 + static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg) 465 464 { 466 465 intel_ring_emit(ring, i915_mmio_reg_offset(reg)); 467 466 } 468 467 469 - static inline void intel_ring_advance(struct 
intel_ringbuffer *ring) 468 + static inline void intel_ring_advance(struct intel_ring *ring) 470 469 { 471 470 ring->tail &= ring->size - 1; 472 471 } 473 472 474 473 int __intel_ring_space(int head, int tail, int size); 475 - void intel_ring_update_space(struct intel_ringbuffer *ringbuf); 474 + void intel_ring_update_space(struct intel_ring *ringbuf); 476 475 477 476 int __must_check intel_engine_idle(struct intel_engine_cs *engine); 478 - void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno); 479 - int intel_ring_flush_all_caches(struct drm_i915_gem_request *req); 480 - int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req); 477 + void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno); 478 + int intel_engine_flush_all_caches(struct drm_i915_gem_request *req); 479 + int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req); 481 480 482 481 int intel_init_pipe_control(struct intel_engine_cs *engine, int size); 483 482 void intel_fini_pipe_control(struct intel_engine_cs *engine); ··· 490 491 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); 491 492 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine); 492 493 493 - u64 intel_ring_get_active_head(struct intel_engine_cs *engine); 494 + u64 intel_engine_get_active_head(struct intel_engine_cs *engine); 494 495 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) 495 496 { 496 497 return intel_read_status_page(engine, I915_GEM_HWS_INDEX); ··· 498 499 499 500 int init_workarounds_ring(struct intel_engine_cs *engine); 500 501 501 - static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) 502 + static inline u32 intel_ring_get_tail(struct intel_ring *ringbuf) 502 503 { 503 504 return ringbuf->tail; 504 505 }