Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Embed the ring->private within the struct intel_ring_buffer

We now have more devices using ring->private than not, and they all want
the same structure. Worse, I would like to use a scratch page from
outside of intel_ringbuffer.c and so for convenience would like to reuse
ring->private. Embed the object into the struct intel_ringbuffer so that
we can keep the code clean.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Authored by Chris Wilson; committed by Daniel Vetter.
0d1aacac a52690e4

+35 -72
+1 -1
drivers/gpu/drm/i915/i915_gpu_error.c
··· 641 641 if (WARN_ON(ring->id != RCS)) 642 642 return NULL; 643 643 644 - obj = ring->private; 644 + obj = ring->scratch.obj; 645 645 if (acthd >= i915_gem_obj_ggtt_offset(obj) && 646 646 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) 647 647 return i915_error_object_create(dev_priv, obj);
+29 -70
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 33 33 #include "i915_trace.h" 34 34 #include "intel_drv.h" 35 35 36 - /* 37 - * 965+ support PIPE_CONTROL commands, which provide finer grained control 38 - * over cache flushing. 39 - */ 40 - struct pipe_control { 41 - struct drm_i915_gem_object *obj; 42 - volatile u32 *cpu_page; 43 - u32 gtt_offset; 44 - }; 45 - 46 36 static inline int ring_space(struct intel_ring_buffer *ring) 47 37 { 48 38 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); ··· 165 175 static int 166 176 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) 167 177 { 168 - struct pipe_control *pc = ring->private; 169 - u32 scratch_addr = pc->gtt_offset + 128; 178 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 170 179 int ret; 171 180 172 181 ··· 202 213 u32 invalidate_domains, u32 flush_domains) 203 214 { 204 215 u32 flags = 0; 205 - struct pipe_control *pc = ring->private; 206 - u32 scratch_addr = pc->gtt_offset + 128; 216 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 207 217 int ret; 208 218 209 219 /* Force SNB workarounds for PIPE_CONTROL flushes */ ··· 294 306 u32 invalidate_domains, u32 flush_domains) 295 307 { 296 308 u32 flags = 0; 297 - struct pipe_control *pc = ring->private; 298 - u32 scratch_addr = pc->gtt_offset + 128; 309 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 299 310 int ret; 300 311 301 312 /* ··· 468 481 static int 469 482 init_pipe_control(struct intel_ring_buffer *ring) 470 483 { 471 - struct pipe_control *pc; 472 - struct drm_i915_gem_object *obj; 473 484 int ret; 474 485 475 - if (ring->private) 486 + if (ring->scratch.obj) 476 487 return 0; 477 488 478 - pc = kmalloc(sizeof(*pc), GFP_KERNEL); 479 - if (!pc) 480 - return -ENOMEM; 481 - 482 - obj = i915_gem_alloc_object(ring->dev, 4096); 483 - if (obj == NULL) { 489 + ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); 490 + if (ring->scratch.obj == NULL) { 484 491 DRM_ERROR("Failed to allocate seqno page\n"); 485 492 ret = -ENOMEM; 486 493 goto err; 
487 494 } 488 495 489 - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 496 + i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); 490 497 491 - ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); 498 + ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false); 492 499 if (ret) 493 500 goto err_unref; 494 501 495 - pc->gtt_offset = i915_gem_obj_ggtt_offset(obj); 496 - pc->cpu_page = kmap(sg_page(obj->pages->sgl)); 497 - if (pc->cpu_page == NULL) { 502 + ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj); 503 + ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl)); 504 + if (ring->scratch.cpu_page == NULL) { 498 505 ret = -ENOMEM; 499 506 goto err_unpin; 500 507 } 501 508 502 509 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 503 - ring->name, pc->gtt_offset); 504 - 505 - pc->obj = obj; 506 - ring->private = pc; 510 + ring->name, ring->scratch.gtt_offset); 507 511 return 0; 508 512 509 513 err_unpin: 510 - i915_gem_object_unpin(obj); 514 + i915_gem_object_unpin(ring->scratch.obj); 511 515 err_unref: 512 - drm_gem_object_unreference(&obj->base); 516 + drm_gem_object_unreference(&ring->scratch.obj->base); 513 517 err: 514 - kfree(pc); 515 518 return ret; 516 - } 517 - 518 - static void 519 - cleanup_pipe_control(struct intel_ring_buffer *ring) 520 - { 521 - struct pipe_control *pc = ring->private; 522 - struct drm_i915_gem_object *obj; 523 - 524 - obj = pc->obj; 525 - 526 - kunmap(sg_page(obj->pages->sgl)); 527 - i915_gem_object_unpin(obj); 528 - drm_gem_object_unreference(&obj->base); 529 - 530 - kfree(pc); 531 519 } 532 520 533 521 static int init_render_ring(struct intel_ring_buffer *ring) ··· 569 607 { 570 608 struct drm_device *dev = ring->dev; 571 609 572 - if (!ring->private) 610 + if (ring->scratch.obj == NULL) 573 611 return; 574 612 575 - if (HAS_BROKEN_CS_TLB(dev)) 576 - drm_gem_object_unreference(to_gem_object(ring->private)); 613 + if (INTEL_INFO(dev)->gen >= 5) { 614 + 
kunmap(sg_page(ring->scratch.obj->pages->sgl)); 615 + i915_gem_object_unpin(ring->scratch.obj); 616 + } 577 617 578 - if (INTEL_INFO(dev)->gen >= 5) 579 - cleanup_pipe_control(ring); 580 - 581 - ring->private = NULL; 618 + drm_gem_object_unreference(&ring->scratch.obj->base); 619 + ring->scratch.obj = NULL; 582 620 } 583 621 584 622 static void ··· 704 742 static int 705 743 pc_render_add_request(struct intel_ring_buffer *ring) 706 744 { 707 - struct pipe_control *pc = ring->private; 708 - u32 scratch_addr = pc->gtt_offset + 128; 745 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 709 746 int ret; 710 747 711 748 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently ··· 722 761 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 723 762 PIPE_CONTROL_WRITE_FLUSH | 724 763 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 725 - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 764 + intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 726 765 intel_ring_emit(ring, ring->outstanding_lazy_request); 727 766 intel_ring_emit(ring, 0); 728 767 PIPE_CONTROL_FLUSH(ring, scratch_addr); ··· 741 780 PIPE_CONTROL_WRITE_FLUSH | 742 781 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 743 782 PIPE_CONTROL_NOTIFY); 744 - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 783 + intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 745 784 intel_ring_emit(ring, ring->outstanding_lazy_request); 746 785 intel_ring_emit(ring, 0); 747 786 intel_ring_advance(ring); ··· 775 814 static u32 776 815 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 777 816 { 778 - struct pipe_control *pc = ring->private; 779 - return pc->cpu_page[0]; 817 + return ring->scratch.cpu_page[0]; 780 818 } 781 819 782 820 static void 783 821 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) 784 822 { 785 - struct pipe_control *pc = ring->private; 786 - pc->cpu_page[0] = seqno; 823 + 
ring->scratch.cpu_page[0] = seqno; 787 824 } 788 825 789 826 static bool ··· 1100 1141 intel_ring_emit(ring, MI_NOOP); 1101 1142 intel_ring_advance(ring); 1102 1143 } else { 1103 - struct drm_i915_gem_object *obj = ring->private; 1104 - u32 cs_offset = i915_gem_obj_ggtt_offset(obj); 1144 + u32 cs_offset = ring->scratch.gtt_offset; 1105 1145 1106 1146 if (len > I830_BATCH_LIMIT) 1107 1147 return -ENOSPC; ··· 1793 1835 return ret; 1794 1836 } 1795 1837 1796 - ring->private = obj; 1838 + ring->scratch.obj = obj; 1839 + ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); 1797 1840 } 1798 1841 1799 1842 return intel_init_ring_buffer(dev, ring);
+5 -1
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 155 155 156 156 struct intel_ring_hangcheck hangcheck; 157 157 158 - void *private; 158 + struct { 159 + struct drm_i915_gem_object *obj; 160 + u32 gtt_offset; 161 + volatile u32 *cpu_page; 162 + } scratch; 159 163 }; 160 164 161 165 static inline bool