Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.17
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao<haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64

static inline int __ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}

static inline int ring_space(struct intel_ringbuffer *ringbuf)
{
	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
}

static bool intel_ring_stopped(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}

void __intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	if (intel_ring_stopped(ring))
		return;
	ring->write_tail(ring, ringbuf->tail);
}

static int
gen2_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;


	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
{
	int ret;

	if (!ring->fbc_dirty)
		return 0;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;
	/* WaFbcNukeOn3DBlt:ivb/hsw */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, value);
	intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
	intel_ring_emit(ring, MSG_FBC_REND_STATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
	intel_ring_advance(ring);

	ring->fbc_dirty = false;
	return 0;
}

static int
gen7_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(ring);
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	if (!invalidate_domains && flush_domains)
		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

	return 0;
}

static int
gen8_emit_pipe_control(struct intel_engine_cs *ring,
		       u32 flags, u32 scratch_addr)
{
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(ring,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(ring, flags, scratch_addr);
}

static void ring_write_tail(struct intel_engine_cs *ring,
			    u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u64 acthd;

	if (INTEL_INFO(ring->dev)->gen >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
					 RING_ACTHD_UDW(ring->mmio_base));
	else if (INTEL_INFO(ring->dev)->gen >= 4)
		acthd = I915_READ(RING_ACTHD(ring->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static bool stop_ring(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);

	if (!IS_GEN2(ring->dev)) {
		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
		if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
			DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
			return false;
		}
	}

	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	if (!IS_GEN2(ring->dev)) {
		(void)I915_READ_CTL(ring);
		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
}

static int init_ring_common(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret = 0;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(ring)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		if (!stop_ring(ring)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(ring);
	else
		ring_setup_phys_status_page(ring);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(ring);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
	I915_WRITE_CTL(ring,
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
			  ring->name,
			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
		ret = -EIO;
		goto out;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ringbuf->space = ring_space(ringbuf);
		ringbuf->last_retired_head = -1;
	}

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

static int
init_pipe_control(struct intel_engine_cs *ring)
{
	int ret;

	if (ring->scratch.obj)
		return 0;

	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
	if (ring->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
	if (ret)
		goto err_unref;

	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
	if (ring->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 ring->name, ring->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
	drm_gem_object_unreference(&ring->scratch.obj->base);
err:
	return ret;
}

static int init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
	 */
	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_DPF(dev))
		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));

	return ret;
}

static void render_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->semaphore_obj) {
		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
		dev_priv->semaphore_obj = NULL;
	}

	if (ring->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
		kunmap(sg_page(ring->scratch.obj->pages->sgl));
		i915_gem_object_ggtt_unpin(ring->scratch.obj);
	}

	drm_gem_object_unreference(&ring->scratch.obj->base);
	ring->scratch.obj = NULL;
}

static int gen8_rcs_signal(struct intel_engine_cs *signaller,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_QW_WRITE |
					   PIPE_CONTROL_FLUSH_ENABLE);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		intel_ring_emit(signaller, 0);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen8_xcs_signal(struct intel_engine_cs *signaller,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
					   MI_FLUSH_DW_OP_STOREDW);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
					   MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen6_signal(struct intel_engine_cs *signaller,
		       unsigned int num_dwords)
{
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *useless;
	int i, ret, num_rings;

#define MBOX_UPDATE_DWORDS 3
	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;

	for_each_ring(useless, dev_priv, i) {
		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
		if (mbox_reg != GEN6_NOSYNC) {
			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit(signaller, mbox_reg);
			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
		}
	}

	/* If num_dwords was rounded, make sure the tail pointer is correct */
	if (num_rings % 2 == 0)
		intel_ring_emit(signaller, MI_NOOP);

	return 0;
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_engine_cs *ring)
{
	int ret;

	if (ring->semaphore.signal)
		ret = ring->semaphore.signal(ring, 4);
	else
		ret = intel_ring_begin(ring, 4);

	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync(struct intel_engine_cs *waiter,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
	int ret;

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
				MI_SEMAPHORE_GLOBAL_GTT |
				MI_SEMAPHORE_POLL |
				MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter,
			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_emit(waiter,
			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_advance(waiter);
	return 0;
}

static int
gen6_ring_sync(struct intel_engine_cs *waiter,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter, dw1 | wait_mbox);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {										\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL);					\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);		\
	intel_ring_emit(ring__, 0);						\
	intel_ring_emit(ring__, 0);						\
} while (0)

static int
pc_render_add_request(struct intel_engine_cs *ring)
{
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, 0);
	__intel_ring_advance(ring);

	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (!lazy_coherency) {
		struct drm_i915_private *dev_priv = ring->dev->dev_private;
		POSTING_READ(RING_ACTHD(ring->mmio_base));
	}

	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	ring->scratch.cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0)
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0)
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
		u32 reg = RING_INSTPM(ring->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  ring->name);
	}
}

static int
bsd_ring_flush(struct intel_engine_cs *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_engine_cs *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static bool
gen6_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_PARITY_ERROR(dev)));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

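/*
 * Note on usage (a sketch only, not code from this file): the *_get_irq /
 * *_put_irq hooks above and below are refcounted. The first get unmasks the
 * engine's user-interrupt bit in IMR, the last put masks it again, so a
 * waiter elsewhere in the driver typically brackets its wait like:
 *
 *	if (ring->irq_get(ring)) {
 *		... sleep on ring->irq_queue until the seqno is reached ...
 *		ring->irq_put(ring);
 *	}
 *
 * which keeps the interrupt enabled only while somebody is actually waiting.
 */
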
static void
gen6_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
		else
			I915_WRITE_IMR(ring, ~0);
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
hsw_vebox_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
hsw_vebox_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
gen8_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
		} else {
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen8_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		} else {
			I915_WRITE_IMR(ring, ~0);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
i965_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 length,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 len,
			 unsigned flags)
{
	u32 cs_offset = ring->scratch.gtt_offset;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* Evict the invalid PTE TLBs */
	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
	intel_ring_emit(ring, cs_offset);
	intel_ring_emit(ring, 0xdeadbeef);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	if ((flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		ret = intel_ring_begin(ring, 6 + 2);
		if (ret)
			return ret;

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);

		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		/* ... and execute it. */
		offset = cs_offset;
	}

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_emit(ring, offset + len - 8);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
i915_dispatch_execbuffer(struct intel_engine_cs *ring,
			 u64 offset, u32 len,
			 unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(sg_page(obj->pages->sgl));
	i915_gem_object_ggtt_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *obj;

	if ((obj = ring->status_page.obj) == NULL) {
		unsigned flags;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate status page\n");
			return -ENOMEM;
		}

		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
		if (ret)
			goto err_unref;

		flags = 0;
		if (!HAS_LLC(ring->dev))
			/* On g33, we cannot place HWS above 256MiB, so
			 * restrict its pinning to the low mappable arena.
			 * Though this restriction is not documented for
			 * gen4, gen5, or byt, they also behave similarly
			 * and hang if the HWS is placed at the top of the
			 * GTT. To generalise, it appears that all !llc
			 * platforms have issues with us placing the HWS
			 * above the mappable region (even though we never
			 * actually map it).
			 */
			flags |= PIN_MAPPABLE;
		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
		if (ret) {
err_unref:
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->status_page.obj = obj;
	}

	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;
}

static int init_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!dev_priv->status_page_dmah) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
		if (!dev_priv->status_page_dmah)
			return -ENOMEM;
	}

	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
	if (!ringbuf->obj)
		return;

	iounmap(ringbuf->virtual_start);
	i915_gem_object_ggtt_unpin(ringbuf->obj);
	drm_gem_object_unreference(&ringbuf->obj->base);
	ringbuf->obj = NULL;
}

static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
				      struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	int ret;

	if (ringbuf->obj)
		return 0;

	obj = NULL;
	if (!HAS_LLC(dev))
		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, ringbuf->size);
	if (obj == NULL)
		return -ENOMEM;

	/* mark ring buffers as read-only from GPU side by default */
	obj->gt_ro = 1;

	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto err_unpin;

	ringbuf->virtual_start =
		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
			   ringbuf->size);
	if (ringbuf->virtual_start == NULL) {
		ret = -EINVAL;
		goto err_unpin;
	}

	ringbuf->obj = obj;
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	return ret;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (ringbuf == NULL) {
		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
		if (!ringbuf)
			return -ENOMEM;
		ring->buffer = ringbuf;
	}

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	ringbuf->size = 32 * PAGE_SIZE;
	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));

	init_waitqueue_head(&ring->irq_queue);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			goto error;
	} else {
		BUG_ON(ring->id != RCS);
		ret = init_phys_status_page(ring);
		if (ret)
			goto error;
	}

	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
		goto error;
	}

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ringbuf->effective_size = ringbuf->size;
	if (IS_I830(dev) || IS_845G(dev))
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		goto error;

	ret = ring->init(ring);
	if (ret)
		goto error;

	return 0;

error:
	kfree(ringbuf);
	ring->buffer = NULL;
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
	struct intel_ringbuffer *ringbuf = ring->buffer;

	if (!intel_ring_initialized(ring))
		return;

	intel_stop_ring_buffer(ring);
	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);

	intel_destroy_ringbuffer_obj(ringbuf);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);

	i915_cmd_parser_fini_ring(ring);

	kfree(ringbuf);
	ring->buffer = NULL;
}

static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = ring_space(ringbuf);
		if (ringbuf->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
			seqno = request->seqno;
			break;
		}
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = i915_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	i915_gem_retire_requests_ring(ring);
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = ring_space(ringbuf);
	return 0;
}

static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	/* force the tail write in case we have been skipping them */
	__intel_ring_advance(ring);

	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;

	trace_i915_ring_wait_begin(ring);
	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = ring_space(ringbuf);
		if (ringbuf->space >= n) {
			ret = 0;
			break;
		}

		if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
		    dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);

		if (dev_priv->mm.interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
					   dev_priv->mm.interruptible);
		if (ret)
			break;

		if (time_after(jiffies, end)) {
			ret = -EBUSY;
			break;
		}
	} while (1);
	trace_i915_ring_wait_end(ring);
	return ret;
}

static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
{
	uint32_t __iomem *virt;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int rem = ringbuf->size - ringbuf->tail;

	if (ringbuf->space < rem) {
		int ret = ring_wait_for_space(ring, rem);
		if (ret)
			return ret;
	}

	virt = ringbuf->virtual_start + ringbuf->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = ring_space(ringbuf);

	return 0;
}

int intel_ring_idle(struct intel_engine_cs *ring)
{
	u32 seqno;
	int ret;

	/* We need to add any requests required to flush the objects and ring */
	if (ring->outstanding_lazy_seqno) {
		ret = i915_add_request(ring, NULL);
		if (ret)
			return ret;
	}

	/* Wait upon the last request to be completed */
	if (list_empty(&ring->request_list))
		return 0;

	seqno = list_entry(ring->request_list.prev,
			   struct drm_i915_gem_request,
			   list)->seqno;

	return i915_wait_seqno(ring, seqno);
}

static int
intel_ring_alloc_seqno(struct intel_engine_cs *ring)
{
	if (ring->outstanding_lazy_seqno)
		return 0;

	if (ring->preallocated_lazy_request == NULL) {
		struct drm_i915_gem_request *request;

		request = kmalloc(sizeof(*request), GFP_KERNEL);
		if (request == NULL)
			return -ENOMEM;

		ring->preallocated_lazy_request = request;
	}

	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}

static int __intel_ring_prepare(struct intel_engine_cs *ring,
				int bytes)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ringbuf->space < bytes)) {
		ret = ring_wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

int intel_ring_begin(struct intel_engine_cs *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int ret;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	/* Preallocate the olr before touching the ring */
	ret = intel_ring_alloc_seqno(ring);
	if (ret)
		return ret;

	ring->buffer->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct intel_engine_cs *ring)
{
	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	int ret;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	ret = intel_ring_begin(ring, num_dwords);
	if (ret)
		return ret;

	while (num_dwords--)
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return 0;
}

void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	BUG_ON(ring->outstanding_lazy_seqno);

	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
		if (HAS_VEBOX(dev))
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
	}

	ring->set_seqno(ring, seqno);
	ring->hangcheck.seqno = seqno;
}

static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
				     u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}

static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,
			       u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);
	return 0;
}

static int
gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
			      u64 offset, u32 len,
			      unsigned flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
		!(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
			     u64 offset, u32 len,
			     unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
			      u64 offset, u32 len,
			      unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct intel_engine_cs *ring,
			   u32 invalidate, u32 flush)
{
	struct drm_device *dev = ring->dev;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
			MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);

	if (IS_GEN7(dev) && !invalidate && flush)
		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);

	return 0;
}

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	struct drm_i915_gem_object *obj;
	int ret;

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 8) {
		if (i915_semaphore_is_enabled(dev)) {
			obj = i915_gem_alloc_object(dev, 4096);
			if (obj == NULL) {
				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
				i915.semaphores = 0;
			} else {
				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
				ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
				if (ret != 0) {
					drm_gem_object_unreference(&obj->base);
					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
					i915.semaphores = 0;
				} else
					dev_priv->semaphore_obj = obj;
			}
		}
		ring->add_request = gen6_add_request;
		ring->flush = gen8_render_ring_flush;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (i915_semaphore_is_enabled(dev)) {
			WARN_ON(!dev_priv->semaphore_obj);
			ring->semaphore.sync_to = gen8_ring_sync;
			ring->semaphore.signal = gen8_rcs_signal;
			GEN8_RING_SEMAPHORE_INIT;
		}
	} else if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen6_ring_sync;
			ring->semaphore.signal = gen6_signal;
			/*
			 * These semaphores are only used on pre-gen8
			 * platforms, and there is no VCS2 ring on pre-gen8
			 * platforms. So the semaphore between RCS and VCS2 is
			 * initialized as INVALID. Gen8 will initialize the
			 * semaphore between VCS2 and RCS later.
			 */
			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
			ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
			ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
			ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->set_seqno = pc_render_set_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;

	if (IS_HASWELL(dev))
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (IS_GEN8(dev))
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	/* Workaround batchbuffer to combat CS tlb bug. */
	if (HAS_BROKEN_CS_TLB(dev)) {
		obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return -ENOMEM;
		}

		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->scratch.obj = obj;
		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (ringbuf == NULL) {
		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
		if (!ringbuf)
			return -ENOMEM;
		ring->buffer = ringbuf;
	}

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		ret = -ENODEV;
		goto err_ringbuf;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ringbuf->size = size;
	ringbuf->effective_size = ringbuf->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;

	ringbuf->virtual_start = ioremap_wc(start, size);
	if (ringbuf->virtual_start == NULL) {
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		ret = -ENOMEM;
		goto err_ringbuf;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_status_page(ring);
		if (ret)
			goto err_vstart;
	}

	return 0;

err_vstart:
	iounmap(ringbuf->virtual_start);
err_ringbuf:
	kfree(ringbuf);
	ring->buffer = NULL;
	return ret;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_bsd_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (INTEL_INFO(dev)->gen >= 8) {
			ring->irq_enable_mask =
				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
			ring->irq_get = gen8_ring_get_irq;
			ring->irq_put = gen8_ring_put_irq;
			ring->dispatch_execbuffer =
				gen8_ring_dispatch_execbuffer;
			if (i915_semaphore_is_enabled(dev)) {
				ring->semaphore.sync_to = gen8_ring_sync;
				ring->semaphore.signal = gen8_xcs_signal;
				GEN8_RING_SEMAPHORE_INIT;
			}
		} else {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen6_ring_get_irq;
			ring->irq_put = gen6_ring_put_irq;
			ring->dispatch_execbuffer =
				gen6_ring_dispatch_execbuffer;
			if (i915_semaphore_is_enabled(dev)) {
				ring->semaphore.sync_to = gen6_ring_sync;
				ring->semaphore.signal = gen6_signal;
				ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
				ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
				ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
				ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
				ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
				ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
				ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
				ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
				ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
				ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
			}
		}
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

/**
 * Initialize the second BSD ring for Broadwell GT3.
 * Note that this ring only exists on Broadwell GT3.
 */
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	if ((INTEL_INFO(dev)->gen != 8)) {
		DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
		return -EINVAL;
	}

	ring->name = "bsd2 ring";
	ring->id = VCS2;

	ring->write_tail = ring_write_tail;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->flush = gen6_bsd_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	ring->irq_get = gen8_ring_get_irq;
	ring->irq_put = gen8_ring_put_irq;
	ring->dispatch_execbuffer =
		gen8_ring_dispatch_execbuffer;
	if (i915_semaphore_is_enabled(dev)) {
		ring->semaphore.sync_to = gen8_ring_sync;
		ring->semaphore.signal = gen8_xcs_signal;
		GEN8_RING_SEMAPHORE_INIT;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen8_ring_sync;
			ring->semaphore.signal = gen8_xcs_signal;
			GEN8_RING_SEMAPHORE_INIT;
		}
	} else {
		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.signal = gen6_signal;
			ring->semaphore.sync_to = gen6_ring_sync;
			/*
			 * These semaphores are only used on pre-gen8
			 * platforms, and there is no VCS2 ring on pre-gen8
			 * platforms. So the semaphore between BCS and VCS2 is
			 * initialized as INVALID. Gen8 will initialize the
			 * semaphore between BCS and VCS2 later.
			 */
			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
			ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
			ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
			ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;

	ring->mmio_base = VEBOX_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;

	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen8_ring_sync;
			ring->semaphore.signal = gen8_xcs_signal;
			GEN8_RING_SEMAPHORE_INIT;
		}
	} else {
		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		ring->irq_get = hsw_vebox_get_irq;
		ring->irq_put = hsw_vebox_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen6_ring_sync;
			ring->semaphore.signal = gen6_signal;
			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
			ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
			ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
			ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int
intel_ring_flush_all_caches(struct intel_engine_cs *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}

int
intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
{
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}

void
intel_stop_ring_buffer(struct intel_engine_cs *ring)
{
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	stop_ring(ring);
}