Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.16
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao<haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64

static inline int __ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}

static inline int ring_space(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
}

static bool intel_ring_stopped(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}

void __intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	if (intel_ring_stopped(ring))
		return;
	ring->write_tail(ring, ringbuf->tail);
}

static int
gen2_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;


	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
{
	int ret;

	if (!ring->fbc_dirty)
		return 0;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;
	/* WaFbcNukeOn3DBlt:ivb/hsw */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, value);
	intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
	intel_ring_advance(ring);

	ring->fbc_dirty = false;
	return 0;
}

static int
gen7_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(ring);
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	if (!invalidate_domains && flush_domains)
		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

	return 0;
}

static int
gen8_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
	}

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;

}

static void ring_write_tail(struct intel_engine_cs *ring,
			    u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u64 acthd;

	if (INTEL_INFO(ring->dev)->gen >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
					 RING_ACTHD_UDW(ring->mmio_base));
	else if (INTEL_INFO(ring->dev)->gen >= 4)
		acthd = I915_READ(RING_ACTHD(ring->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static bool stop_ring(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);

	if (!IS_GEN2(ring->dev)) {
		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
		if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
			DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
			return false;
		}
	}

	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	if (!IS_GEN2(ring->dev)) {
		(void)I915_READ_CTL(ring);
		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
}

static int init_ring_common(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret = 0;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(ring)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		if (!stop_ring(ring)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(ring);
	else
		ring_setup_phys_status_page(ring);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
	I915_WRITE_CTL(ring,
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
			  ring->name,
			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
		ret = -EIO;
		goto out;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ringbuf->space = ring_space(ring);
		ringbuf->last_retired_head = -1;
	}

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

static int
init_pipe_control(struct intel_engine_cs *ring)
{
	int ret;

	if (ring->scratch.obj)
		return 0;

	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
	if (ring->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
	if (ret)
		goto err_unref;

	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
	if (ring->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 ring->name, ring->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
	drm_gem_object_unreference(&ring->scratch.obj->base);
err:
	return ret;
}

static int init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_DPF(dev))
		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));

	return ret;
}

static void render_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;

	if (ring->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
		kunmap(sg_page(ring->scratch.obj->pages->sgl));
		i915_gem_object_ggtt_unpin(ring->scratch.obj);
	}

	drm_gem_object_unreference(&ring->scratch.obj->base);
	ring->scratch.obj = NULL;
}

static int gen6_signal(struct intel_engine_cs *signaller,
		       unsigned int num_dwords)
{
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *useless;
	int i, ret;

	/* NB: In order to be able to do semaphore MBOX updates for varying
	 * number of rings, it's easiest if we round up each individual update
	 * to a multiple of 2 (since ring updates must always be a multiple of
	 * 2) even though the actual update only requires 3 dwords.
	 */
#define MBOX_UPDATE_DWORDS 4
	if (i915_semaphore_is_enabled(dev))
		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
	else
		return intel_ring_begin(signaller, num_dwords);

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;
#undef MBOX_UPDATE_DWORDS

	for_each_ring(useless, dev_priv, i) {
		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
		if (mbox_reg != GEN6_NOSYNC) {
			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit(signaller, mbox_reg);
			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
			intel_ring_emit(signaller, MI_NOOP);
		} else {
			intel_ring_emit(signaller, MI_NOOP);
			intel_ring_emit(signaller, MI_NOOP);
			intel_ring_emit(signaller, MI_NOOP);
			intel_ring_emit(signaller, MI_NOOP);
		}
	}

	return 0;
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_engine_cs *ring)
{
	int ret;

	ret = ring->semaphore.signal(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_engine_cs *waiter,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter, dw1 | wait_mbox);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {										\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL);					\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
	intel_ring_emit(ring__, 0);							\
	intel_ring_emit(ring__, 0);							\
} while (0)

static int
pc_render_add_request(struct intel_engine_cs *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	__intel_ring_advance(ring);

	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (!lazy_coherency) {
		struct drm_i915_private *dev_priv = ring->dev->dev_private;
		POSTING_READ(RING_ACTHD(ring->mmio_base));
	}

	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	ring->scratch.cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0)
		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0)
		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
		u32 reg = RING_INSTPM(ring->mmio_base);

		/* ring should be idle before issuing a sync flush*/
		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  ring->name);
	}
}

static int
bsd_ring_flush(struct intel_engine_cs *ring,
	       u32     invalidate_domains,
	       u32     flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static bool
gen6_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_PARITY_ERROR(dev)));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen6_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
		else
			I915_WRITE_IMR(ring, ~0);
		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
hsw_vebox_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
hsw_vebox_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
gen8_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
		} else {
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen8_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		} else {
			I915_WRITE_IMR(ring, ~0);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 length,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 len,
			 unsigned flags)
{
	int ret;

	if (flags & I915_DISPATCH_PINNED) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
		intel_ring_emit(ring, offset + len - 8);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);
	} else {
		u32 cs_offset = ring->scratch.gtt_offset;

		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(ring, 9+3);
		if (ret)
			return ret;
		/* Blit the batch (which has now all relocs applied) to the stable batch
		 * scratch bo area (so that the CS never stumbles over its tlb
		 * invalidation bug) ... */
		intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
				XY_SRC_COPY_BLT_WRITE_ALPHA |
				XY_SRC_COPY_BLT_WRITE_RGB);
		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);
		intel_ring_emit(ring, MI_FLUSH);

		/* ... and execute it. */
		intel_ring_emit(ring, MI_BATCH_BUFFER);
		intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
		intel_ring_emit(ring, cs_offset + len - 8);
		intel_ring_advance(ring);
	}

	return 0;
}

static int
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 len,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(sg_page(obj->pages->sgl));
	i915_gem_object_ggtt_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *obj;

	if ((obj = ring->status_page.obj) == NULL) {
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate status page\n");
			return -ENOMEM;
		}

		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
		if (ret)
			goto err_unref;

		ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
		if (ret) {
err_unref:
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->status_page.obj = obj;
	}

	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;
}

static int init_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!dev_priv->status_page_dmah) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
		if (!dev_priv->status_page_dmah)
			return -ENOMEM;
	}

	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

static int allocate_ring_buffer(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_object *obj;
	int ret;

	if (intel_ring_initialized(ring))
		return 0;

	obj = NULL;
	if (!HAS_LLC(dev))
		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, ringbuf->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto err_unpin;

	ringbuf->virtual_start =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   ringbuf->size);
	if (ringbuf->virtual_start == NULL) {
		ret = -EINVAL;
		goto err_unpin;
	}

	ringbuf->obj = obj;
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	return ret;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (ringbuf == NULL) {
		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
		if (!ringbuf)
			return -ENOMEM;
		ring->buffer = ringbuf;
	}

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	ringbuf->size = 32 * PAGE_SIZE;
	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));

	init_waitqueue_head(&ring->irq_queue);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			goto error;
	} else {
		BUG_ON(ring->id != RCS);
		ret = init_phys_status_page(ring);
		if (ret)
			goto error;
	}

	ret = allocate_ring_buffer(ring);
	if (ret) {
		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
		goto error;
	}

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ringbuf->effective_size = ringbuf->size;
	if (IS_I830(dev) || IS_845G(dev))
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		goto error;

	ret = ring->init(ring);
	if (ret)
		goto error;

	return 0;

error:
	kfree(ringbuf);
	ring->buffer = NULL;
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
	struct intel_ringbuffer *ringbuf = ring->buffer;

	if (!intel_ring_initialized(ring))
		return;

	intel_stop_ring_buffer(ring);
	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);

	iounmap(ringbuf->virtual_start);

	i915_gem_object_ggtt_unpin(ringbuf->obj);
	drm_gem_object_unreference(&ringbuf->obj->base);
	ringbuf->obj = NULL;
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);

	i915_cmd_parser_fini_ring(ring);

	kfree(ringbuf);
	ring->buffer = NULL;
}

static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = ring_space(ring);
		if (ringbuf->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = ring_space(ring);
	return 0;
}

static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	/* force the tail write in case we have been skipping them */
	__intel_ring_advance(ring);

	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	trace_i915_ring_wait_begin(ring);
	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = ring_space(ring);
		if (ringbuf->space >= n) {
			ret = 0;
			break;
		}

		if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
		    dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);

		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);
	trace_i915_ring_wait_end(ring);
	return ret;
}

static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
{
	uint32_t __iomem *virt;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = ring_wait_for_space(ring, rem);
		if (ret)
			return ret;
	}

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = ring_space(ring);

	return 0;
}

int intel_ring_idle(struct intel_engine_cs *ring)
{
	u32 seqno;
	int ret;

	/* We need to add any requests required to flush the objects and ring */
	if (ring->outstanding_lazy_seqno) {
		ret = i915_add_request(ring, NULL);
		if (ret)
			return ret;
	}

	/* Wait upon the last request to be completed */
	if (list_empty(&ring->request_list))
		return 0;

	seqno = list_entry(ring->request_list.prev,
			   struct drm_i915_gem_request,
			   list)->seqno;

	return i915_wait_seqno(ring, seqno);
}

static int
intel_ring_alloc_seqno(struct intel_engine_cs *ring)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

static int __intel_ring_prepare(struct intel_engine_cs *ring,
				int bytes)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = ring_wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

int intel_ring_begin(struct intel_engine_cs *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = intel_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	ring->buffer->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct intel_engine_cs *ring)
{
	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	int ret;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	ret = intel_ring_begin(ring, num_dwords);
	if (ret)
		return ret;

	while (num_dwords--)
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return 0;
}

void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	BUG_ON(ring->outstanding_lazy_seqno);

	if (INTEL_INFO(ring->dev)->gen >= 6) {
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
		if (HAS_VEBOX(ring->dev))
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
	}

	ring->set_seqno(ring, seqno);
	ring->hangcheck.seqno = seqno;
}

static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
				     u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}

static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
			       u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 *   "If ENABLED, all TLBs will be invalidated once the flush
	 *    operation is complete. This bit is only valid when the
	 *    Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);
	return 0;
}

static int
gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
			      u64 offset, u32 len,
			      unsigned flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
		!(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
			     u64 offset, u32 len,
			     unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
			      u64 offset, u32 len,
			      unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct intel_engine_cs *ring,
			   u32 invalidate, u32 flush)
{
	struct drm_device *dev = ring->dev;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 *   "If ENABLED, all TLBs will be invalidated once the flush
	 *    operation is complete. This bit is only valid when the
	 *    Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
			MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);

	if (IS_GEN7(dev) && !invalidate && flush)
		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);

	return 0;
}

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			ring->flush = gen6_render_ring_flush;
		if (INTEL_INFO(dev)->gen >= 8) {
			ring->flush = gen8_render_ring_flush;
			ring->irq_get = gen8_ring_get_irq;
			ring->irq_put = gen8_ring_put_irq;
		} else {
			ring->irq_get = gen6_ring_get_irq;
			ring->irq_put = gen6_ring_put_irq;
		}
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		ring->semaphore.sync_to = gen6_ring_sync;
		ring->semaphore.signal = gen6_signal;
		/*
		 * The current semaphore is only applied on pre-gen8 platform.
		 * And there is no VCS2 ring on the pre-gen8 platform. So the
		 * semaphore between RCS and VCS2 is initialized as INVALID.
		 * Gen8 will initialize the sema between VCS2 and RCS later.
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			ring->flush = gen6_render_ring_flush;
		if (INTEL_INFO(dev)->gen >= 8) {
			ring->flush = gen8_render_ring_flush;
			ring->irq_get = gen8_ring_get_irq;
			ring->irq_put = gen8_ring_put_irq;
		} else {
			ring->irq_get = gen6_ring_get_irq;
			ring->irq_put = gen6_ring_put_irq;
		}
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		ring->semaphore.sync_to = gen6_ring_sync;
		ring->semaphore.signal = gen6_signal;
		/*
		 * Semaphores are only used on pre-gen8 platforms, and there
		 * is no VCS2 ring on pre-gen8. The semaphore between RCS and
		 * VCS2 is therefore initialized as INVALID; gen8 will
		 * initialize the semaphore between VCS2 and RCS later.
		 */
		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
		ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
		ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
		ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->set_seqno = pc_render_set_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	if (IS_HASWELL(dev))
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (IS_GEN8(dev))
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	/* Workaround batchbuffer to combat CS tlb bug. */
	if (HAS_BROKEN_CS_TLB(dev)) {
		struct drm_i915_gem_object *obj;
		int ret;

		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return -ENOMEM;
		}

		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->scratch.obj = obj;
		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	return intel_init_ring_buffer(dev, ring);
}
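/*
 * How the mbox tables above are meant to be consumed (a rough sketch,
 * inferred from the gen6_ring_sync()/gen6_signal() paths rather than
 * spelled out here): to make one ring wait on another, the request code
 * is assumed to call something along the lines of
 *
 *	ret = waiter->semaphore.sync_to(waiter, signaller, seqno);
 *
 * The naming suggests each wait[] entry pairs with the remote ring's
 * matching signal[] mailbox, e.g. the render ring's wait[VCS]
 * (MI_SEMAPHORE_SYNC_RV) against the video ring's signal[RCS]
 * (GEN6_RVSYNC).
 */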
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (ringbuf == NULL) {
		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
		if (!ringbuf)
			return -ENOMEM;
		ring->buffer = ringbuf;
	}

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		ret = -ENODEV;
		goto err_ringbuf;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ringbuf->size = size;
	ringbuf->effective_size = ringbuf->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;

	ringbuf->virtual_start = ioremap_wc(start, size);
	if (ringbuf->virtual_start == NULL) {
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		ret = -ENOMEM;
		goto err_ringbuf;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_status_page(ring);
		if (ret)
			goto err_vstart;
	}

	return 0;

err_vstart:
	iounmap(ringbuf->virtual_start);
err_ringbuf:
	kfree(ringbuf);
	ring->buffer = NULL;
	return ret;
}
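/*
 * Worked example of the effective_size adjustment above (the 128KiB ring
 * size is hypothetical, only to make the arithmetic concrete): on i830/845
 * a legacy ring of
 *
 *	size = 128 * 1024;				// 131072 bytes
 *	effective_size = size - 2 * CACHELINE_BYTES;	// 131072 - 128 = 130944
 *
 * is accounted as 128 bytes smaller than it really is, so the last two
 * cachelines of the ring are never filled with commands on those chipsets.
 */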
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->mmio_base = GEN6_BSD_RING_BASE;

		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_bsd_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (INTEL_INFO(dev)->gen >= 8) {
			ring->irq_enable_mask =
				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
			ring->irq_get = gen8_ring_get_irq;
			ring->irq_put = gen8_ring_put_irq;
			ring->dispatch_execbuffer =
				gen8_ring_dispatch_execbuffer;
		} else {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen6_ring_get_irq;
			ring->irq_put = gen6_ring_put_irq;
			ring->dispatch_execbuffer =
				gen6_ring_dispatch_execbuffer;
		}
		ring->semaphore.sync_to = gen6_ring_sync;
		ring->semaphore.signal = gen6_signal;
		/*
		 * Semaphores are only used on pre-gen8 platforms, and there
		 * is no VCS2 ring on pre-gen8. The semaphore between VCS and
		 * VCS2 is therefore initialized as INVALID; gen8 will
		 * initialize the semaphore between VCS2 and VCS later.
		 */
		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
		ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
		ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
		ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}
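/*
 * Hedged usage sketch (the caller and the HAS_BSD() gating shown here are
 * assumptions about the driver's setup code, not something defined in this
 * file): the per-engine init functions are expected to be called once at
 * driver load, gated on the hardware actually having that engine, roughly
 *
 *	if (HAS_BSD(dev)) {
 *		ret = intel_init_bsd_ring_buffer(dev);
 *		if (ret)
 *			goto cleanup;
 *	}
 */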
/**
 * Initialize the second BSD ring for Broadwell GT3.
 * Note that this ring only exists on Broadwell GT3.
 */
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	if (INTEL_INFO(dev)->gen != 8) {
		DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
		return -EINVAL;
	}

	ring->name = "bsd2 ring";
	ring->id = VCS2;

	ring->write_tail = ring_write_tail;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->flush = gen6_bsd_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	ring->irq_get = gen8_ring_get_irq;
	ring->irq_put = gen8_ring_put_irq;
	ring->dispatch_execbuffer =
		gen8_ring_dispatch_execbuffer;
	ring->semaphore.sync_to = gen6_ring_sync;
	ring->semaphore.signal = gen6_signal;
	/*
	 * Semaphores are only used on pre-gen8 platforms, and there is no
	 * bsd2 ring on pre-gen8. The semaphore registers between VCS2 and
	 * the other rings are therefore initialized as INVALID; gen8 will
	 * initialize the semaphores between VCS2 and the other rings later.
	 */
	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;

	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}
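/*
 * The gen8 irq_enable_mask values above and below all follow one pattern:
 * the same user-interrupt bit shifted into that engine's field of the GT
 * interrupt registers, as the *_IRQ_SHIFT names suggest. Illustrative only:
 *
 *	VCS1: GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT
 *	VCS2: GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT
 *	BCS:  GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT
 *	VECS: GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
 */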
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	} else {
		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	}
	ring->semaphore.sync_to = gen6_ring_sync;
	ring->semaphore.signal = gen6_signal;
	/*
	 * Semaphores are only used on pre-gen8 platforms, and there is no
	 * VCS2 ring on pre-gen8. The semaphore between BCS and VCS2 is
	 * therefore initialized as INVALID; gen8 will initialize the
	 * semaphore between BCS and VCS2 later.
	 */
	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
	ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;

	ring->mmio_base = VEBOX_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;

	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	} else {
		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		ring->irq_get = hsw_vebox_get_irq;
		ring->irq_put = hsw_vebox_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	}
	ring->semaphore.sync_to = gen6_ring_sync;
	ring->semaphore.signal = gen6_signal;
	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
	ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
	ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int
intel_ring_flush_all_caches(struct intel_engine_cs *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}

int
intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
{
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}
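/*
 * Hedged usage sketch (the caller shown is an assumption, not part of this
 * file): the two helpers above are meant to bracket batch submission,
 * roughly
 *
 *	ret = intel_ring_invalidate_all_caches(ring);	// before the batch reads
 *	...						// emit and run the batch
 *	ring->gpu_caches_dirty = true;
 *	ret = intel_ring_flush_all_caches(ring);	// before others read results
 *
 * Note the asymmetry visible in the code above: flush_all_caches() is a
 * no-op unless gpu_caches_dirty is set, while invalidate_all_caches()
 * always invalidates and additionally flushes when the caches are dirty.
 */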
void
intel_stop_ring_buffer(struct intel_engine_cs *ring)
{
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	stop_ring(ring);
}