Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.1 (2414 lines, 62 kB)
1/* 2 * Copyright © 2008-2010 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Zou Nan hai <nanhai.zou@intel.com> 26 * Xiang Hai hao<haihao.xiang@intel.com> 27 * 28 */ 29 30#include <linux/log2.h> 31 32#include <drm/i915_drm.h> 33 34#include "i915_drv.h" 35#include "i915_gem_render_state.h" 36#include "i915_reset.h" 37#include "i915_trace.h" 38#include "intel_drv.h" 39#include "intel_workarounds.h" 40 41/* Rough estimate of the typical request size, performing a flush, 42 * set-context and then emitting the batch. 43 */ 44#define LEGACY_REQUEST_SIZE 200 45 46static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) 47{ 48 return (i915_ggtt_offset(engine->status_page.vma) + 49 I915_GEM_HWS_INDEX_ADDR); 50} 51 52unsigned int intel_ring_update_space(struct intel_ring *ring) 53{ 54 unsigned int space; 55 56 space = __intel_ring_space(ring->head, ring->emit, ring->size); 57 58 ring->space = space; 59 return space; 60} 61 62static int 63gen2_render_ring_flush(struct i915_request *rq, u32 mode) 64{ 65 unsigned int num_store_dw; 66 u32 cmd, *cs; 67 68 cmd = MI_FLUSH; 69 num_store_dw = 0; 70 if (mode & EMIT_INVALIDATE) 71 cmd |= MI_READ_FLUSH; 72 if (mode & EMIT_FLUSH) 73 num_store_dw = 4; 74 75 cs = intel_ring_begin(rq, 2 + 3 * num_store_dw); 76 if (IS_ERR(cs)) 77 return PTR_ERR(cs); 78 79 *cs++ = cmd; 80 while (num_store_dw--) { 81 *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; 82 *cs++ = i915_scratch_offset(rq->i915); 83 *cs++ = 0; 84 } 85 *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH; 86 87 intel_ring_advance(rq, cs); 88 89 return 0; 90} 91 92static int 93gen4_render_ring_flush(struct i915_request *rq, u32 mode) 94{ 95 u32 cmd, *cs; 96 int i; 97 98 /* 99 * read/write caches: 100 * 101 * I915_GEM_DOMAIN_RENDER is always invalidated, but is 102 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is 103 * also flushed at 2d versus 3d pipeline switches. 104 * 105 * read-only caches: 106 * 107 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if 108 * MI_READ_FLUSH is set, and is always flushed on 965. 109 * 110 * I915_GEM_DOMAIN_COMMAND may not exist? 111 * 112 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is 113 * invalidated when MI_EXE_FLUSH is set. 114 * 115 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is 116 * invalidated with every MI_FLUSH. 
117 * 118 * TLBs: 119 * 120 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND 121 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and 122 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER 123 * are flushed at any MI_FLUSH. 124 */ 125 126 cmd = MI_FLUSH; 127 if (mode & EMIT_INVALIDATE) { 128 cmd |= MI_EXE_FLUSH; 129 if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5)) 130 cmd |= MI_INVALIDATE_ISP; 131 } 132 133 i = 2; 134 if (mode & EMIT_INVALIDATE) 135 i += 20; 136 137 cs = intel_ring_begin(rq, i); 138 if (IS_ERR(cs)) 139 return PTR_ERR(cs); 140 141 *cs++ = cmd; 142 143 /* 144 * A random delay to let the CS invalidate take effect? Without this 145 * delay, the GPU relocation path fails as the CS does not see 146 * the updated contents. Just as important, if we apply the flushes 147 * to the EMIT_FLUSH branch (i.e. immediately after the relocation 148 * write and before the invalidate on the next batch), the relocations 149 * still fail. This implies that is a delay following invalidation 150 * that is required to reset the caches as opposed to a delay to 151 * ensure the memory is written. 152 */ 153 if (mode & EMIT_INVALIDATE) { 154 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; 155 *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; 156 *cs++ = 0; 157 *cs++ = 0; 158 159 for (i = 0; i < 12; i++) 160 *cs++ = MI_FLUSH; 161 162 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; 163 *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; 164 *cs++ = 0; 165 *cs++ = 0; 166 } 167 168 *cs++ = cmd; 169 170 intel_ring_advance(rq, cs); 171 172 return 0; 173} 174 175/* 176 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for 177 * implementing two workarounds on gen6. From section 1.4.7.1 178 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: 179 * 180 * [DevSNB-C+{W/A}] Before any depth stall flush (including those 181 * produced by non-pipelined state commands), software needs to first 182 * send a PIPE_CONTROL with no bits set except Post-Sync Operation != 183 * 0. 184 * 185 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable 186 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. 187 * 188 * And the workaround for these two requires this workaround first: 189 * 190 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent 191 * BEFORE the pipe-control with a post-sync op and no write-cache 192 * flushes. 193 * 194 * And this last workaround is tricky because of the requirements on 195 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM 196 * volume 2 part 1: 197 * 198 * "1 of the following must also be set: 199 * - Render Target Cache Flush Enable ([12] of DW1) 200 * - Depth Cache Flush Enable ([0] of DW1) 201 * - Stall at Pixel Scoreboard ([1] of DW1) 202 * - Depth Stall ([13] of DW1) 203 * - Post-Sync Operation ([13] of DW1) 204 * - Notify Enable ([8] of DW1)" 205 * 206 * The cache flushes require the workaround flush that triggered this 207 * one, so we can't use it. Depth stall would trigger the same. 208 * Post-sync nonzero is what triggered this second workaround, so we 209 * can't use that one either. Notify enable is IRQs, which aren't 210 * really our business. That leaves only stall at scoreboard. 
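 * (Hence the helper below emits two back-to-back PIPE_CONTROLs: first a
 * CS_STALL | STALL_AT_SCOREBOARD with no write at all, then a QW_WRITE
 * post-sync dummy write into the scratch page.)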
211 */ 212static int 213gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) 214{ 215 u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; 216 u32 *cs; 217 218 cs = intel_ring_begin(rq, 6); 219 if (IS_ERR(cs)) 220 return PTR_ERR(cs); 221 222 *cs++ = GFX_OP_PIPE_CONTROL(5); 223 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; 224 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; 225 *cs++ = 0; /* low dword */ 226 *cs++ = 0; /* high dword */ 227 *cs++ = MI_NOOP; 228 intel_ring_advance(rq, cs); 229 230 cs = intel_ring_begin(rq, 6); 231 if (IS_ERR(cs)) 232 return PTR_ERR(cs); 233 234 *cs++ = GFX_OP_PIPE_CONTROL(5); 235 *cs++ = PIPE_CONTROL_QW_WRITE; 236 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; 237 *cs++ = 0; 238 *cs++ = 0; 239 *cs++ = MI_NOOP; 240 intel_ring_advance(rq, cs); 241 242 return 0; 243} 244 245static int 246gen6_render_ring_flush(struct i915_request *rq, u32 mode) 247{ 248 u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; 249 u32 *cs, flags = 0; 250 int ret; 251 252 /* Force SNB workarounds for PIPE_CONTROL flushes */ 253 ret = gen6_emit_post_sync_nonzero_flush(rq); 254 if (ret) 255 return ret; 256 257 /* Just flush everything. Experiments have shown that reducing the 258 * number of bits based on the write domains has little performance 259 * impact. 260 */ 261 if (mode & EMIT_FLUSH) { 262 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 263 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 264 /* 265 * Ensure that any following seqno writes only happen 266 * when the render cache is indeed flushed. 267 */ 268 flags |= PIPE_CONTROL_CS_STALL; 269 } 270 if (mode & EMIT_INVALIDATE) { 271 flags |= PIPE_CONTROL_TLB_INVALIDATE; 272 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 273 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 274 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 275 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 276 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 277 /* 278 * TLB invalidate requires a post-sync write. 
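 * (The QW_WRITE | CS_STALL added just below provides it: the PIPE_CONTROL's
 * post-sync operation dumps a dummy qword into the scratch page at
 * scratch_addr, which is never read back.)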
279 */ 280 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; 281 } 282 283 cs = intel_ring_begin(rq, 4); 284 if (IS_ERR(cs)) 285 return PTR_ERR(cs); 286 287 *cs++ = GFX_OP_PIPE_CONTROL(4); 288 *cs++ = flags; 289 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT; 290 *cs++ = 0; 291 intel_ring_advance(rq, cs); 292 293 return 0; 294} 295 296static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) 297{ 298 /* First we do the gen6_emit_post_sync_nonzero_flush w/a */ 299 *cs++ = GFX_OP_PIPE_CONTROL(4); 300 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; 301 *cs++ = 0; 302 *cs++ = 0; 303 304 *cs++ = GFX_OP_PIPE_CONTROL(4); 305 *cs++ = PIPE_CONTROL_QW_WRITE; 306 *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; 307 *cs++ = 0; 308 309 /* Finally we can flush and with it emit the breadcrumb */ 310 *cs++ = GFX_OP_PIPE_CONTROL(4); 311 *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | 312 PIPE_CONTROL_DEPTH_CACHE_FLUSH | 313 PIPE_CONTROL_DC_FLUSH_ENABLE | 314 PIPE_CONTROL_QW_WRITE | 315 PIPE_CONTROL_CS_STALL); 316 *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT; 317 *cs++ = rq->fence.seqno; 318 319 *cs++ = GFX_OP_PIPE_CONTROL(4); 320 *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; 321 *cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT; 322 *cs++ = rq->global_seqno; 323 324 *cs++ = MI_USER_INTERRUPT; 325 *cs++ = MI_NOOP; 326 327 rq->tail = intel_ring_offset(rq, cs); 328 assert_ring_tail_valid(rq->ring, rq->tail); 329 330 return cs; 331} 332 333static int 334gen7_render_ring_cs_stall_wa(struct i915_request *rq) 335{ 336 u32 *cs; 337 338 cs = intel_ring_begin(rq, 4); 339 if (IS_ERR(cs)) 340 return PTR_ERR(cs); 341 342 *cs++ = GFX_OP_PIPE_CONTROL(4); 343 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD; 344 *cs++ = 0; 345 *cs++ = 0; 346 intel_ring_advance(rq, cs); 347 348 return 0; 349} 350 351static int 352gen7_render_ring_flush(struct i915_request *rq, u32 mode) 353{ 354 u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; 355 u32 *cs, flags = 0; 356 357 /* 358 * Ensure that any following seqno writes only happen when the render 359 * cache is indeed flushed. 360 * 361 * Workaround: 4th PIPE_CONTROL command (except the ones with only 362 * read-cache invalidate bits set) must have the CS_STALL bit set. We 363 * don't try to be clever and just set it unconditionally. 364 */ 365 flags |= PIPE_CONTROL_CS_STALL; 366 367 /* Just flush everything. Experiments have shown that reducing the 368 * number of bits based on the write domains has little performance 369 * impact. 370 */ 371 if (mode & EMIT_FLUSH) { 372 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 373 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 374 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 375 flags |= PIPE_CONTROL_FLUSH_ENABLE; 376 } 377 if (mode & EMIT_INVALIDATE) { 378 flags |= PIPE_CONTROL_TLB_INVALIDATE; 379 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 380 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 381 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 382 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 383 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 384 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR; 385 /* 386 * TLB invalidate requires a post-sync write. 
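 * (Again satisfied by the QW_WRITE below targeting the scratch page; note
 * that on gen7 the GGTT-space selection for that write lives in the flags
 * dword as PIPE_CONTROL_GLOBAL_GTT_IVB rather than being OR'ed into the
 * address as on gen6.)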
387 */ 388 flags |= PIPE_CONTROL_QW_WRITE; 389 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 390 391 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD; 392 393 /* Workaround: we must issue a pipe_control with CS-stall bit 394 * set before a pipe_control command that has the state cache 395 * invalidate bit set. */ 396 gen7_render_ring_cs_stall_wa(rq); 397 } 398 399 cs = intel_ring_begin(rq, 4); 400 if (IS_ERR(cs)) 401 return PTR_ERR(cs); 402 403 *cs++ = GFX_OP_PIPE_CONTROL(4); 404 *cs++ = flags; 405 *cs++ = scratch_addr; 406 *cs++ = 0; 407 intel_ring_advance(rq, cs); 408 409 return 0; 410} 411 412static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) 413{ 414 *cs++ = GFX_OP_PIPE_CONTROL(4); 415 *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | 416 PIPE_CONTROL_DEPTH_CACHE_FLUSH | 417 PIPE_CONTROL_DC_FLUSH_ENABLE | 418 PIPE_CONTROL_FLUSH_ENABLE | 419 PIPE_CONTROL_QW_WRITE | 420 PIPE_CONTROL_GLOBAL_GTT_IVB | 421 PIPE_CONTROL_CS_STALL); 422 *cs++ = rq->timeline->hwsp_offset; 423 *cs++ = rq->fence.seqno; 424 425 *cs++ = GFX_OP_PIPE_CONTROL(4); 426 *cs++ = (PIPE_CONTROL_QW_WRITE | 427 PIPE_CONTROL_GLOBAL_GTT_IVB | 428 PIPE_CONTROL_CS_STALL); 429 *cs++ = intel_hws_seqno_address(rq->engine); 430 *cs++ = rq->global_seqno; 431 432 *cs++ = MI_USER_INTERRUPT; 433 *cs++ = MI_NOOP; 434 435 rq->tail = intel_ring_offset(rq, cs); 436 assert_ring_tail_valid(rq->ring, rq->tail); 437 438 return cs; 439} 440 441static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) 442{ 443 GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); 444 GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); 445 446 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; 447 *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; 448 *cs++ = rq->fence.seqno; 449 450 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; 451 *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT; 452 *cs++ = rq->global_seqno; 453 454 *cs++ = MI_USER_INTERRUPT; 455 *cs++ = MI_NOOP; 456 457 rq->tail = intel_ring_offset(rq, cs); 458 assert_ring_tail_valid(rq->ring, rq->tail); 459 460 return cs; 461} 462 463#define GEN7_XCS_WA 32 464static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) 465{ 466 int i; 467 468 GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); 469 GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); 470 471 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; 472 *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; 473 *cs++ = rq->fence.seqno; 474 475 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; 476 *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT; 477 *cs++ = rq->global_seqno; 478 479 for (i = 0; i < GEN7_XCS_WA; i++) { 480 *cs++ = MI_STORE_DWORD_INDEX; 481 *cs++ = I915_GEM_HWS_SEQNO_ADDR; 482 *cs++ = rq->fence.seqno; 483 } 484 485 *cs++ = MI_FLUSH_DW; 486 *cs++ = 0; 487 *cs++ = 0; 488 489 *cs++ = MI_USER_INTERRUPT; 490 491 rq->tail = intel_ring_offset(rq, cs); 492 assert_ring_tail_valid(rq->ring, rq->tail); 493 494 return cs; 495} 496#undef GEN7_XCS_WA 497 498static void set_hwstam(struct intel_engine_cs *engine, u32 mask) 499{ 500 /* 501 * Keep the render interrupt unmasked as this papers over 502 * lost interrupts following a reset. 
503 */ 504 if (engine->class == RENDER_CLASS) { 505 if (INTEL_GEN(engine->i915) >= 6) 506 mask &= ~BIT(0); 507 else 508 mask &= ~I915_USER_INTERRUPT; 509 } 510 511 intel_engine_set_hwsp_writemask(engine, mask); 512} 513 514static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys) 515{ 516 struct drm_i915_private *dev_priv = engine->i915; 517 u32 addr; 518 519 addr = lower_32_bits(phys); 520 if (INTEL_GEN(dev_priv) >= 4) 521 addr |= (phys >> 28) & 0xf0; 522 523 I915_WRITE(HWS_PGA, addr); 524} 525 526static struct page *status_page(struct intel_engine_cs *engine) 527{ 528 struct drm_i915_gem_object *obj = engine->status_page.vma->obj; 529 530 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 531 return sg_page(obj->mm.pages->sgl); 532} 533 534static void ring_setup_phys_status_page(struct intel_engine_cs *engine) 535{ 536 set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine)))); 537 set_hwstam(engine, ~0u); 538} 539 540static void set_hwsp(struct intel_engine_cs *engine, u32 offset) 541{ 542 struct drm_i915_private *dev_priv = engine->i915; 543 i915_reg_t hwsp; 544 545 /* 546 * The ring status page addresses are no longer next to the rest of 547 * the ring registers as of gen7. 548 */ 549 if (IS_GEN(dev_priv, 7)) { 550 switch (engine->id) { 551 /* 552 * No more rings exist on Gen7. Default case is only to shut up 553 * gcc switch check warning. 554 */ 555 default: 556 GEM_BUG_ON(engine->id); 557 case RCS: 558 hwsp = RENDER_HWS_PGA_GEN7; 559 break; 560 case BCS: 561 hwsp = BLT_HWS_PGA_GEN7; 562 break; 563 case VCS: 564 hwsp = BSD_HWS_PGA_GEN7; 565 break; 566 case VECS: 567 hwsp = VEBOX_HWS_PGA_GEN7; 568 break; 569 } 570 } else if (IS_GEN(dev_priv, 6)) { 571 hwsp = RING_HWS_PGA_GEN6(engine->mmio_base); 572 } else { 573 hwsp = RING_HWS_PGA(engine->mmio_base); 574 } 575 576 I915_WRITE(hwsp, offset); 577 POSTING_READ(hwsp); 578} 579 580static void flush_cs_tlb(struct intel_engine_cs *engine) 581{ 582 struct drm_i915_private *dev_priv = engine->i915; 583 i915_reg_t instpm = RING_INSTPM(engine->mmio_base); 584 585 if (!IS_GEN_RANGE(dev_priv, 6, 7)) 586 return; 587 588 /* ring should be idle before issuing a sync flush*/ 589 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); 590 591 I915_WRITE(instpm, 592 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 593 INSTPM_SYNC_FLUSH)); 594 if (intel_wait_for_register(dev_priv, 595 instpm, INSTPM_SYNC_FLUSH, 0, 596 1000)) 597 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", 598 engine->name); 599} 600 601static void ring_setup_status_page(struct intel_engine_cs *engine) 602{ 603 set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma)); 604 set_hwstam(engine, ~0u); 605 606 flush_cs_tlb(engine); 607} 608 609static bool stop_ring(struct intel_engine_cs *engine) 610{ 611 struct drm_i915_private *dev_priv = engine->i915; 612 613 if (INTEL_GEN(dev_priv) > 2) { 614 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); 615 if (intel_wait_for_register(dev_priv, 616 RING_MI_MODE(engine->mmio_base), 617 MODE_IDLE, 618 MODE_IDLE, 619 1000)) { 620 DRM_ERROR("%s : timed out trying to stop ring\n", 621 engine->name); 622 /* Sometimes we observe that the idle flag is not 623 * set even though the ring is empty. So double 624 * check before giving up. 
625 */ 626 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine)) 627 return false; 628 } 629 } 630 631 I915_WRITE_HEAD(engine, I915_READ_TAIL(engine)); 632 633 I915_WRITE_HEAD(engine, 0); 634 I915_WRITE_TAIL(engine, 0); 635 636 /* The ring must be empty before it is disabled */ 637 I915_WRITE_CTL(engine, 0); 638 639 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0; 640} 641 642static int init_ring_common(struct intel_engine_cs *engine) 643{ 644 struct drm_i915_private *dev_priv = engine->i915; 645 struct intel_ring *ring = engine->buffer; 646 int ret = 0; 647 648 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 649 650 if (!stop_ring(engine)) { 651 /* G45 ring initialization often fails to reset head to zero */ 652 DRM_DEBUG_DRIVER("%s head not reset to zero " 653 "ctl %08x head %08x tail %08x start %08x\n", 654 engine->name, 655 I915_READ_CTL(engine), 656 I915_READ_HEAD(engine), 657 I915_READ_TAIL(engine), 658 I915_READ_START(engine)); 659 660 if (!stop_ring(engine)) { 661 DRM_ERROR("failed to set %s head to zero " 662 "ctl %08x head %08x tail %08x start %08x\n", 663 engine->name, 664 I915_READ_CTL(engine), 665 I915_READ_HEAD(engine), 666 I915_READ_TAIL(engine), 667 I915_READ_START(engine)); 668 ret = -EIO; 669 goto out; 670 } 671 } 672 673 if (HWS_NEEDS_PHYSICAL(dev_priv)) 674 ring_setup_phys_status_page(engine); 675 else 676 ring_setup_status_page(engine); 677 678 intel_engine_reset_breadcrumbs(engine); 679 680 /* Enforce ordering by reading HEAD register back */ 681 I915_READ_HEAD(engine); 682 683 /* Initialize the ring. This must happen _after_ we've cleared the ring 684 * registers with the above sequence (the readback of the HEAD registers 685 * also enforces ordering), otherwise the hw might lose the new ring 686 * register values. */ 687 I915_WRITE_START(engine, i915_ggtt_offset(ring->vma)); 688 689 /* WaClearRingBufHeadRegAtInit:ctg,elk */ 690 if (I915_READ_HEAD(engine)) 691 DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n", 692 engine->name, I915_READ_HEAD(engine)); 693 694 /* Check that the ring offsets point within the ring! */ 695 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); 696 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); 697 intel_ring_update_space(ring); 698 699 /* First wake the ring up to an empty/idle ring */ 700 I915_WRITE_HEAD(engine, ring->head); 701 I915_WRITE_TAIL(engine, ring->head); 702 (void)I915_READ_TAIL(engine); 703 704 I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID); 705 706 /* If the head is still not zero, the ring is dead */ 707 if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base), 708 RING_VALID, RING_VALID, 709 50)) { 710 DRM_ERROR("%s initialization failed " 711 "ctl %08x (valid? 
%d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n", 712 engine->name, 713 I915_READ_CTL(engine), 714 I915_READ_CTL(engine) & RING_VALID, 715 I915_READ_HEAD(engine), ring->head, 716 I915_READ_TAIL(engine), ring->tail, 717 I915_READ_START(engine), 718 i915_ggtt_offset(ring->vma)); 719 ret = -EIO; 720 goto out; 721 } 722 723 if (INTEL_GEN(dev_priv) > 2) 724 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); 725 726 /* Now awake, let it get started */ 727 if (ring->tail != ring->head) { 728 I915_WRITE_TAIL(engine, ring->tail); 729 (void)I915_READ_TAIL(engine); 730 } 731 732 /* Papering over lost _interrupts_ immediately following the restart */ 733 intel_engine_queue_breadcrumbs(engine); 734out: 735 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 736 737 return ret; 738} 739 740static void reset_prepare(struct intel_engine_cs *engine) 741{ 742 intel_engine_stop_cs(engine); 743} 744 745static void reset_ring(struct intel_engine_cs *engine, bool stalled) 746{ 747 struct i915_timeline *tl = &engine->timeline; 748 struct i915_request *pos, *rq; 749 unsigned long flags; 750 u32 head; 751 752 rq = NULL; 753 spin_lock_irqsave(&tl->lock, flags); 754 list_for_each_entry(pos, &tl->requests, link) { 755 if (!i915_request_completed(pos)) { 756 rq = pos; 757 break; 758 } 759 } 760 761 GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n", 762 engine->name, 763 rq ? rq->global_seqno : 0, 764 intel_engine_get_seqno(engine), 765 yesno(stalled)); 766 /* 767 * The guilty request will get skipped on a hung engine. 768 * 769 * Users of client default contexts do not rely on logical 770 * state preserved between batches so it is safe to execute 771 * queued requests following the hang. Non default contexts 772 * rely on preserved state, so skipping a batch loses the 773 * evolution of the state and it needs to be considered corrupted. 774 * Executing more queued batches on top of corrupted state is 775 * risky. But we take the risk by trying to advance through 776 * the queued requests in order to make the client behaviour 777 * more predictable around resets, by not throwing away random 778 * amount of batches it has prepared for execution. Sophisticated 779 * clients can use gem_reset_stats_ioctl and dma fence status 780 * (exported via sync_file info ioctl on explicit fences) to observe 781 * when it loses the context state and should rebuild accordingly. 782 * 783 * The context ban, and ultimately the client ban, mechanism are safety 784 * valves if client submission ends up resulting in nothing more than 785 * subsequent hangs. 786 */ 787 788 if (rq) { 789 /* 790 * Try to restore the logical GPU state to match the 791 * continuation of the request queue. If we skip the 792 * context/PD restore, then the next request may try to execute 793 * assuming that its context is valid and loaded on the GPU and 794 * so may try to access invalid memory, prompting repeated GPU 795 * hangs. 796 * 797 * If the request was guilty, we still restore the logical 798 * state in case the next request requires it (e.g. the 799 * aliasing ppgtt), but skip over the hung batch. 800 * 801 * If the request was innocent, we try to replay the request 802 * with the restored context. 
803 */ 804 i915_reset_request(rq, stalled); 805 806 GEM_BUG_ON(rq->ring != engine->buffer); 807 head = rq->head; 808 } else { 809 head = engine->buffer->tail; 810 } 811 engine->buffer->head = intel_ring_wrap(engine->buffer, head); 812 813 spin_unlock_irqrestore(&tl->lock, flags); 814} 815 816static void reset_finish(struct intel_engine_cs *engine) 817{ 818} 819 820static int intel_rcs_ctx_init(struct i915_request *rq) 821{ 822 int ret; 823 824 ret = intel_engine_emit_ctx_wa(rq); 825 if (ret != 0) 826 return ret; 827 828 ret = i915_gem_render_state_emit(rq); 829 if (ret) 830 return ret; 831 832 return 0; 833} 834 835static int init_render_ring(struct intel_engine_cs *engine) 836{ 837 struct drm_i915_private *dev_priv = engine->i915; 838 int ret = init_ring_common(engine); 839 if (ret) 840 return ret; 841 842 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ 843 if (IS_GEN_RANGE(dev_priv, 4, 6)) 844 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 845 846 /* We need to disable the AsyncFlip performance optimisations in order 847 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 848 * programmed to '1' on all products. 849 * 850 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv 851 */ 852 if (IS_GEN_RANGE(dev_priv, 6, 7)) 853 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 854 855 /* Required for the hardware to program scanline values for waiting */ 856 /* WaEnableFlushTlbInvalidationMode:snb */ 857 if (IS_GEN(dev_priv, 6)) 858 I915_WRITE(GFX_MODE, 859 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); 860 861 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ 862 if (IS_GEN(dev_priv, 7)) 863 I915_WRITE(GFX_MODE_GEN7, 864 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | 865 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 866 867 if (IS_GEN(dev_priv, 6)) { 868 /* From the Sandybridge PRM, volume 1 part 3, page 24: 869 * "If this bit is set, STCunit will have LRA as replacement 870 * policy. [...] This bit must be reset. LRA replacement 871 * policy is not supported." 872 */ 873 I915_WRITE(CACHE_MODE_0, 874 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 875 } 876 877 if (IS_GEN_RANGE(dev_priv, 6, 7)) 878 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 879 880 if (INTEL_GEN(dev_priv) >= 6) 881 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 882 883 return 0; 884} 885 886static void cancel_requests(struct intel_engine_cs *engine) 887{ 888 struct i915_request *request; 889 unsigned long flags; 890 891 spin_lock_irqsave(&engine->timeline.lock, flags); 892 893 /* Mark all submitted requests as skipped. 
*/ 894 list_for_each_entry(request, &engine->timeline.requests, link) { 895 GEM_BUG_ON(!request->global_seqno); 896 897 if (!i915_request_signaled(request)) 898 dma_fence_set_error(&request->fence, -EIO); 899 900 i915_request_mark_complete(request); 901 } 902 903 intel_write_status_page(engine, 904 I915_GEM_HWS_INDEX, 905 intel_engine_last_submit(engine)); 906 907 /* Remaining _unready_ requests will be nop'ed when submitted */ 908 909 spin_unlock_irqrestore(&engine->timeline.lock, flags); 910} 911 912static void i9xx_submit_request(struct i915_request *request) 913{ 914 struct drm_i915_private *dev_priv = request->i915; 915 916 i915_request_submit(request); 917 918 I915_WRITE_TAIL(request->engine, 919 intel_ring_set_tail(request->ring, request->tail)); 920} 921 922static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) 923{ 924 GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); 925 GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); 926 927 *cs++ = MI_FLUSH; 928 929 *cs++ = MI_STORE_DWORD_INDEX; 930 *cs++ = I915_GEM_HWS_SEQNO_ADDR; 931 *cs++ = rq->fence.seqno; 932 933 *cs++ = MI_STORE_DWORD_INDEX; 934 *cs++ = I915_GEM_HWS_INDEX_ADDR; 935 *cs++ = rq->global_seqno; 936 937 *cs++ = MI_USER_INTERRUPT; 938 939 rq->tail = intel_ring_offset(rq, cs); 940 assert_ring_tail_valid(rq->ring, rq->tail); 941 942 return cs; 943} 944 945#define GEN5_WA_STORES 8 /* must be at least 1! */ 946static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) 947{ 948 int i; 949 950 GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); 951 GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); 952 953 *cs++ = MI_FLUSH; 954 955 *cs++ = MI_STORE_DWORD_INDEX; 956 *cs++ = I915_GEM_HWS_SEQNO_ADDR; 957 *cs++ = rq->fence.seqno; 958 959 BUILD_BUG_ON(GEN5_WA_STORES < 1); 960 for (i = 0; i < GEN5_WA_STORES; i++) { 961 *cs++ = MI_STORE_DWORD_INDEX; 962 *cs++ = I915_GEM_HWS_INDEX_ADDR; 963 *cs++ = rq->global_seqno; 964 } 965 966 *cs++ = MI_USER_INTERRUPT; 967 *cs++ = MI_NOOP; 968 969 rq->tail = intel_ring_offset(rq, cs); 970 assert_ring_tail_valid(rq->ring, rq->tail); 971 972 return cs; 973} 974#undef GEN5_WA_STORES 975 976static void 977gen5_irq_enable(struct intel_engine_cs *engine) 978{ 979 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask); 980} 981 982static void 983gen5_irq_disable(struct intel_engine_cs *engine) 984{ 985 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask); 986} 987 988static void 989i9xx_irq_enable(struct intel_engine_cs *engine) 990{ 991 struct drm_i915_private *dev_priv = engine->i915; 992 993 dev_priv->irq_mask &= ~engine->irq_enable_mask; 994 I915_WRITE(IMR, dev_priv->irq_mask); 995 POSTING_READ_FW(RING_IMR(engine->mmio_base)); 996} 997 998static void 999i9xx_irq_disable(struct intel_engine_cs *engine) 1000{ 1001 struct drm_i915_private *dev_priv = engine->i915; 1002 1003 dev_priv->irq_mask |= engine->irq_enable_mask; 1004 I915_WRITE(IMR, dev_priv->irq_mask); 1005} 1006 1007static void 1008i8xx_irq_enable(struct intel_engine_cs *engine) 1009{ 1010 struct drm_i915_private *dev_priv = engine->i915; 1011 1012 dev_priv->irq_mask &= ~engine->irq_enable_mask; 1013 I915_WRITE16(IMR, dev_priv->irq_mask); 1014 POSTING_READ16(RING_IMR(engine->mmio_base)); 1015} 1016 1017static void 1018i8xx_irq_disable(struct intel_engine_cs *engine) 1019{ 1020 struct drm_i915_private *dev_priv = engine->i915; 1021 1022 dev_priv->irq_mask |= engine->irq_enable_mask; 1023 I915_WRITE16(IMR, dev_priv->irq_mask); 
1024} 1025 1026static int 1027bsd_ring_flush(struct i915_request *rq, u32 mode) 1028{ 1029 u32 *cs; 1030 1031 cs = intel_ring_begin(rq, 2); 1032 if (IS_ERR(cs)) 1033 return PTR_ERR(cs); 1034 1035 *cs++ = MI_FLUSH; 1036 *cs++ = MI_NOOP; 1037 intel_ring_advance(rq, cs); 1038 return 0; 1039} 1040 1041static void 1042gen6_irq_enable(struct intel_engine_cs *engine) 1043{ 1044 struct drm_i915_private *dev_priv = engine->i915; 1045 1046 I915_WRITE_IMR(engine, 1047 ~(engine->irq_enable_mask | 1048 engine->irq_keep_mask)); 1049 1050 /* Flush/delay to ensure the RING_IMR is active before the GT IMR */ 1051 POSTING_READ_FW(RING_IMR(engine->mmio_base)); 1052 1053 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); 1054} 1055 1056static void 1057gen6_irq_disable(struct intel_engine_cs *engine) 1058{ 1059 struct drm_i915_private *dev_priv = engine->i915; 1060 1061 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 1062 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); 1063} 1064 1065static void 1066hsw_vebox_irq_enable(struct intel_engine_cs *engine) 1067{ 1068 struct drm_i915_private *dev_priv = engine->i915; 1069 1070 I915_WRITE_IMR(engine, ~engine->irq_enable_mask); 1071 1072 /* Flush/delay to ensure the RING_IMR is active before the GT IMR */ 1073 POSTING_READ_FW(RING_IMR(engine->mmio_base)); 1074 1075 gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask); 1076} 1077 1078static void 1079hsw_vebox_irq_disable(struct intel_engine_cs *engine) 1080{ 1081 struct drm_i915_private *dev_priv = engine->i915; 1082 1083 I915_WRITE_IMR(engine, ~0); 1084 gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask); 1085} 1086 1087static int 1088i965_emit_bb_start(struct i915_request *rq, 1089 u64 offset, u32 length, 1090 unsigned int dispatch_flags) 1091{ 1092 u32 *cs; 1093 1094 cs = intel_ring_begin(rq, 2); 1095 if (IS_ERR(cs)) 1096 return PTR_ERR(cs); 1097 1098 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags & 1099 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965); 1100 *cs++ = offset; 1101 intel_ring_advance(rq, cs); 1102 1103 return 0; 1104} 1105 1106/* Just userspace ABI convention to limit the wa batch bo to a resonable size */ 1107#define I830_BATCH_LIMIT SZ_256K 1108#define I830_TLB_ENTRIES (2) 1109#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) 1110static int 1111i830_emit_bb_start(struct i915_request *rq, 1112 u64 offset, u32 len, 1113 unsigned int dispatch_flags) 1114{ 1115 u32 *cs, cs_offset = i915_scratch_offset(rq->i915); 1116 1117 GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE); 1118 1119 cs = intel_ring_begin(rq, 6); 1120 if (IS_ERR(cs)) 1121 return PTR_ERR(cs); 1122 1123 /* Evict the invalid PTE TLBs */ 1124 *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA; 1125 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096; 1126 *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */ 1127 *cs++ = cs_offset; 1128 *cs++ = 0xdeadbeef; 1129 *cs++ = MI_NOOP; 1130 intel_ring_advance(rq, cs); 1131 1132 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) { 1133 if (len > I830_BATCH_LIMIT) 1134 return -ENOSPC; 1135 1136 cs = intel_ring_begin(rq, 6 + 2); 1137 if (IS_ERR(cs)) 1138 return PTR_ERR(cs); 1139 1140 /* Blit the batch (which has now all relocs applied) to the 1141 * stable batch scratch bo area (so that the CS never 1142 * stumbles over its tlb invalidation bug) ... 
1143 */ 1144 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA; 1145 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096; 1146 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096; 1147 *cs++ = cs_offset; 1148 *cs++ = 4096; 1149 *cs++ = offset; 1150 1151 *cs++ = MI_FLUSH; 1152 *cs++ = MI_NOOP; 1153 intel_ring_advance(rq, cs); 1154 1155 /* ... and execute it. */ 1156 offset = cs_offset; 1157 } 1158 1159 cs = intel_ring_begin(rq, 2); 1160 if (IS_ERR(cs)) 1161 return PTR_ERR(cs); 1162 1163 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; 1164 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : 1165 MI_BATCH_NON_SECURE); 1166 intel_ring_advance(rq, cs); 1167 1168 return 0; 1169} 1170 1171static int 1172i915_emit_bb_start(struct i915_request *rq, 1173 u64 offset, u32 len, 1174 unsigned int dispatch_flags) 1175{ 1176 u32 *cs; 1177 1178 cs = intel_ring_begin(rq, 2); 1179 if (IS_ERR(cs)) 1180 return PTR_ERR(cs); 1181 1182 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; 1183 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : 1184 MI_BATCH_NON_SECURE); 1185 intel_ring_advance(rq, cs); 1186 1187 return 0; 1188} 1189 1190int intel_ring_pin(struct intel_ring *ring) 1191{ 1192 struct i915_vma *vma = ring->vma; 1193 enum i915_map_type map = i915_coherent_map_type(vma->vm->i915); 1194 unsigned int flags; 1195 void *addr; 1196 int ret; 1197 1198 GEM_BUG_ON(ring->vaddr); 1199 1200 ret = i915_timeline_pin(ring->timeline); 1201 if (ret) 1202 return ret; 1203 1204 flags = PIN_GLOBAL; 1205 1206 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ 1207 flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); 1208 1209 if (vma->obj->stolen) 1210 flags |= PIN_MAPPABLE; 1211 else 1212 flags |= PIN_HIGH; 1213 1214 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { 1215 if (flags & PIN_MAPPABLE || map == I915_MAP_WC) 1216 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true); 1217 else 1218 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true); 1219 if (unlikely(ret)) 1220 goto unpin_timeline; 1221 } 1222 1223 ret = i915_vma_pin(vma, 0, 0, flags); 1224 if (unlikely(ret)) 1225 goto unpin_timeline; 1226 1227 if (i915_vma_is_map_and_fenceable(vma)) 1228 addr = (void __force *)i915_vma_pin_iomap(vma); 1229 else 1230 addr = i915_gem_object_pin_map(vma->obj, map); 1231 if (IS_ERR(addr)) { 1232 ret = PTR_ERR(addr); 1233 goto unpin_ring; 1234 } 1235 1236 vma->obj->pin_global++; 1237 1238 ring->vaddr = addr; 1239 return 0; 1240 1241unpin_ring: 1242 i915_vma_unpin(vma); 1243unpin_timeline: 1244 i915_timeline_unpin(ring->timeline); 1245 return ret; 1246} 1247 1248void intel_ring_reset(struct intel_ring *ring, u32 tail) 1249{ 1250 GEM_BUG_ON(!intel_ring_offset_valid(ring, tail)); 1251 1252 ring->tail = tail; 1253 ring->head = tail; 1254 ring->emit = tail; 1255 intel_ring_update_space(ring); 1256} 1257 1258void intel_ring_unpin(struct intel_ring *ring) 1259{ 1260 GEM_BUG_ON(!ring->vma); 1261 GEM_BUG_ON(!ring->vaddr); 1262 1263 /* Discard any unused bytes beyond that submitted to hw. 
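 * (intel_ring_reset() below collapses head/emit back onto the last
 * submitted tail, so anything emitted but never submitted is forgotten
 * before the mapping is torn down.)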
*/ 1264 intel_ring_reset(ring, ring->tail); 1265 1266 if (i915_vma_is_map_and_fenceable(ring->vma)) 1267 i915_vma_unpin_iomap(ring->vma); 1268 else 1269 i915_gem_object_unpin_map(ring->vma->obj); 1270 ring->vaddr = NULL; 1271 1272 ring->vma->obj->pin_global--; 1273 i915_vma_unpin(ring->vma); 1274 1275 i915_timeline_unpin(ring->timeline); 1276} 1277 1278static struct i915_vma * 1279intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) 1280{ 1281 struct i915_address_space *vm = &dev_priv->ggtt.vm; 1282 struct drm_i915_gem_object *obj; 1283 struct i915_vma *vma; 1284 1285 obj = i915_gem_object_create_stolen(dev_priv, size); 1286 if (!obj) 1287 obj = i915_gem_object_create_internal(dev_priv, size); 1288 if (IS_ERR(obj)) 1289 return ERR_CAST(obj); 1290 1291 /* 1292 * Mark ring buffers as read-only from GPU side (so no stray overwrites) 1293 * if supported by the platform's GGTT. 1294 */ 1295 if (vm->has_read_only) 1296 i915_gem_object_set_readonly(obj); 1297 1298 vma = i915_vma_instance(obj, vm, NULL); 1299 if (IS_ERR(vma)) 1300 goto err; 1301 1302 return vma; 1303 1304err: 1305 i915_gem_object_put(obj); 1306 return vma; 1307} 1308 1309struct intel_ring * 1310intel_engine_create_ring(struct intel_engine_cs *engine, 1311 struct i915_timeline *timeline, 1312 int size) 1313{ 1314 struct intel_ring *ring; 1315 struct i915_vma *vma; 1316 1317 GEM_BUG_ON(!is_power_of_2(size)); 1318 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES); 1319 GEM_BUG_ON(timeline == &engine->timeline); 1320 lockdep_assert_held(&engine->i915->drm.struct_mutex); 1321 1322 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 1323 if (!ring) 1324 return ERR_PTR(-ENOMEM); 1325 1326 INIT_LIST_HEAD(&ring->request_list); 1327 ring->timeline = i915_timeline_get(timeline); 1328 1329 ring->size = size; 1330 /* Workaround an erratum on the i830 which causes a hang if 1331 * the TAIL pointer points to within the last 2 cachelines 1332 * of the buffer. 
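 * (Hence effective_size is trimmed by 2 * CACHELINE_BYTES for i830/i845
 * just below, so the TAIL we program can never land in that window.)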
1333 */ 1334 ring->effective_size = size; 1335 if (IS_I830(engine->i915) || IS_I845G(engine->i915)) 1336 ring->effective_size -= 2 * CACHELINE_BYTES; 1337 1338 intel_ring_update_space(ring); 1339 1340 vma = intel_ring_create_vma(engine->i915, size); 1341 if (IS_ERR(vma)) { 1342 kfree(ring); 1343 return ERR_CAST(vma); 1344 } 1345 ring->vma = vma; 1346 1347 return ring; 1348} 1349 1350void 1351intel_ring_free(struct intel_ring *ring) 1352{ 1353 struct drm_i915_gem_object *obj = ring->vma->obj; 1354 1355 i915_vma_close(ring->vma); 1356 __i915_gem_object_release_unless_active(obj); 1357 1358 i915_timeline_put(ring->timeline); 1359 kfree(ring); 1360} 1361 1362static void intel_ring_context_destroy(struct intel_context *ce) 1363{ 1364 GEM_BUG_ON(ce->pin_count); 1365 1366 if (!ce->state) 1367 return; 1368 1369 GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); 1370 i915_gem_object_put(ce->state->obj); 1371} 1372 1373static int __context_pin_ppgtt(struct i915_gem_context *ctx) 1374{ 1375 struct i915_hw_ppgtt *ppgtt; 1376 int err = 0; 1377 1378 ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt; 1379 if (ppgtt) 1380 err = gen6_ppgtt_pin(ppgtt); 1381 1382 return err; 1383} 1384 1385static void __context_unpin_ppgtt(struct i915_gem_context *ctx) 1386{ 1387 struct i915_hw_ppgtt *ppgtt; 1388 1389 ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt; 1390 if (ppgtt) 1391 gen6_ppgtt_unpin(ppgtt); 1392} 1393 1394static int __context_pin(struct intel_context *ce) 1395{ 1396 struct i915_vma *vma; 1397 int err; 1398 1399 vma = ce->state; 1400 if (!vma) 1401 return 0; 1402 1403 /* 1404 * Clear this page out of any CPU caches for coherent swap-in/out. 1405 * We only want to do this on the first bind so that we do not stall 1406 * on an active context (which by nature is already on the GPU). 1407 */ 1408 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { 1409 err = i915_gem_object_set_to_gtt_domain(vma->obj, true); 1410 if (err) 1411 return err; 1412 } 1413 1414 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); 1415 if (err) 1416 return err; 1417 1418 /* 1419 * And mark is as a globally pinned object to let the shrinker know 1420 * it cannot reclaim the object until we release it. 
1421 */ 1422 vma->obj->pin_global++; 1423 1424 return 0; 1425} 1426 1427static void __context_unpin(struct intel_context *ce) 1428{ 1429 struct i915_vma *vma; 1430 1431 vma = ce->state; 1432 if (!vma) 1433 return; 1434 1435 vma->obj->pin_global--; 1436 i915_vma_unpin(vma); 1437} 1438 1439static void intel_ring_context_unpin(struct intel_context *ce) 1440{ 1441 __context_unpin_ppgtt(ce->gem_context); 1442 __context_unpin(ce); 1443 1444 i915_gem_context_put(ce->gem_context); 1445} 1446 1447static struct i915_vma * 1448alloc_context_vma(struct intel_engine_cs *engine) 1449{ 1450 struct drm_i915_private *i915 = engine->i915; 1451 struct drm_i915_gem_object *obj; 1452 struct i915_vma *vma; 1453 int err; 1454 1455 obj = i915_gem_object_create(i915, engine->context_size); 1456 if (IS_ERR(obj)) 1457 return ERR_CAST(obj); 1458 1459 if (engine->default_state) { 1460 void *defaults, *vaddr; 1461 1462 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); 1463 if (IS_ERR(vaddr)) { 1464 err = PTR_ERR(vaddr); 1465 goto err_obj; 1466 } 1467 1468 defaults = i915_gem_object_pin_map(engine->default_state, 1469 I915_MAP_WB); 1470 if (IS_ERR(defaults)) { 1471 err = PTR_ERR(defaults); 1472 goto err_map; 1473 } 1474 1475 memcpy(vaddr, defaults, engine->context_size); 1476 1477 i915_gem_object_unpin_map(engine->default_state); 1478 i915_gem_object_unpin_map(obj); 1479 } 1480 1481 /* 1482 * Try to make the context utilize L3 as well as LLC. 1483 * 1484 * On VLV we don't have L3 controls in the PTEs so we 1485 * shouldn't touch the cache level, especially as that 1486 * would make the object snooped which might have a 1487 * negative performance impact. 1488 * 1489 * Snooping is required on non-llc platforms in execlist 1490 * mode, but since all GGTT accesses use PAT entry 0 we 1491 * get snooping anyway regardless of cache_level. 1492 * 1493 * This is only applicable for Ivy Bridge devices since 1494 * later platforms don't have L3 control bits in the PTE. 
1495 */ 1496 if (IS_IVYBRIDGE(i915)) { 1497 /* Ignore any error, regard it as a simple optimisation */ 1498 i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); 1499 } 1500 1501 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); 1502 if (IS_ERR(vma)) { 1503 err = PTR_ERR(vma); 1504 goto err_obj; 1505 } 1506 1507 return vma; 1508 1509err_map: 1510 i915_gem_object_unpin_map(obj); 1511err_obj: 1512 i915_gem_object_put(obj); 1513 return ERR_PTR(err); 1514} 1515 1516static struct intel_context * 1517__ring_context_pin(struct intel_engine_cs *engine, 1518 struct i915_gem_context *ctx, 1519 struct intel_context *ce) 1520{ 1521 int err; 1522 1523 if (!ce->state && engine->context_size) { 1524 struct i915_vma *vma; 1525 1526 vma = alloc_context_vma(engine); 1527 if (IS_ERR(vma)) { 1528 err = PTR_ERR(vma); 1529 goto err; 1530 } 1531 1532 ce->state = vma; 1533 } 1534 1535 err = __context_pin(ce); 1536 if (err) 1537 goto err; 1538 1539 err = __context_pin_ppgtt(ce->gem_context); 1540 if (err) 1541 goto err_unpin; 1542 1543 i915_gem_context_get(ctx); 1544 1545 /* One ringbuffer to rule them all */ 1546 GEM_BUG_ON(!engine->buffer); 1547 ce->ring = engine->buffer; 1548 1549 return ce; 1550 1551err_unpin: 1552 __context_unpin(ce); 1553err: 1554 ce->pin_count = 0; 1555 return ERR_PTR(err); 1556} 1557 1558static const struct intel_context_ops ring_context_ops = { 1559 .unpin = intel_ring_context_unpin, 1560 .destroy = intel_ring_context_destroy, 1561}; 1562 1563static struct intel_context * 1564intel_ring_context_pin(struct intel_engine_cs *engine, 1565 struct i915_gem_context *ctx) 1566{ 1567 struct intel_context *ce = to_intel_context(ctx, engine); 1568 1569 lockdep_assert_held(&ctx->i915->drm.struct_mutex); 1570 1571 if (likely(ce->pin_count++)) 1572 return ce; 1573 GEM_BUG_ON(!ce->pin_count); /* no overflow please! 
*/ 1574 1575 ce->ops = &ring_context_ops; 1576 1577 return __ring_context_pin(engine, ctx, ce); 1578} 1579 1580static int intel_init_ring_buffer(struct intel_engine_cs *engine) 1581{ 1582 struct i915_timeline *timeline; 1583 struct intel_ring *ring; 1584 int err; 1585 1586 err = intel_engine_setup_common(engine); 1587 if (err) 1588 return err; 1589 1590 timeline = i915_timeline_create(engine->i915, 1591 engine->name, 1592 engine->status_page.vma); 1593 if (IS_ERR(timeline)) { 1594 err = PTR_ERR(timeline); 1595 goto err; 1596 } 1597 GEM_BUG_ON(timeline->has_initial_breadcrumb); 1598 1599 ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE); 1600 i915_timeline_put(timeline); 1601 if (IS_ERR(ring)) { 1602 err = PTR_ERR(ring); 1603 goto err; 1604 } 1605 1606 err = intel_ring_pin(ring); 1607 if (err) 1608 goto err_ring; 1609 1610 GEM_BUG_ON(engine->buffer); 1611 engine->buffer = ring; 1612 1613 err = intel_engine_init_common(engine); 1614 if (err) 1615 goto err_unpin; 1616 1617 GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma); 1618 1619 return 0; 1620 1621err_unpin: 1622 intel_ring_unpin(ring); 1623err_ring: 1624 intel_ring_free(ring); 1625err: 1626 intel_engine_cleanup_common(engine); 1627 return err; 1628} 1629 1630void intel_engine_cleanup(struct intel_engine_cs *engine) 1631{ 1632 struct drm_i915_private *dev_priv = engine->i915; 1633 1634 WARN_ON(INTEL_GEN(dev_priv) > 2 && 1635 (I915_READ_MODE(engine) & MODE_IDLE) == 0); 1636 1637 intel_ring_unpin(engine->buffer); 1638 intel_ring_free(engine->buffer); 1639 1640 if (engine->cleanup) 1641 engine->cleanup(engine); 1642 1643 intel_engine_cleanup_common(engine); 1644 1645 dev_priv->engine[engine->id] = NULL; 1646 kfree(engine); 1647} 1648 1649void intel_legacy_submission_resume(struct drm_i915_private *dev_priv) 1650{ 1651 struct intel_engine_cs *engine; 1652 enum intel_engine_id id; 1653 1654 /* Restart from the beginning of the rings for convenience */ 1655 for_each_engine(engine, dev_priv, id) 1656 intel_ring_reset(engine->buffer, 0); 1657} 1658 1659static int load_pd_dir(struct i915_request *rq, 1660 const struct i915_hw_ppgtt *ppgtt) 1661{ 1662 const struct intel_engine_cs * const engine = rq->engine; 1663 u32 *cs; 1664 1665 cs = intel_ring_begin(rq, 6); 1666 if (IS_ERR(cs)) 1667 return PTR_ERR(cs); 1668 1669 *cs++ = MI_LOAD_REGISTER_IMM(1); 1670 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine)); 1671 *cs++ = PP_DIR_DCLV_2G; 1672 1673 *cs++ = MI_LOAD_REGISTER_IMM(1); 1674 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); 1675 *cs++ = ppgtt->pd.base.ggtt_offset << 10; 1676 1677 intel_ring_advance(rq, cs); 1678 1679 return 0; 1680} 1681 1682static int flush_pd_dir(struct i915_request *rq) 1683{ 1684 const struct intel_engine_cs * const engine = rq->engine; 1685 u32 *cs; 1686 1687 cs = intel_ring_begin(rq, 4); 1688 if (IS_ERR(cs)) 1689 return PTR_ERR(cs); 1690 1691 /* Stall until the page table load is complete */ 1692 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; 1693 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); 1694 *cs++ = i915_scratch_offset(rq->i915); 1695 *cs++ = MI_NOOP; 1696 1697 intel_ring_advance(rq, cs); 1698 return 0; 1699} 1700 1701static inline int mi_set_context(struct i915_request *rq, u32 flags) 1702{ 1703 struct drm_i915_private *i915 = rq->i915; 1704 struct intel_engine_cs *engine = rq->engine; 1705 enum intel_engine_id id; 1706 const int num_rings = 1707 IS_HSW_GT1(i915) ? 
RUNTIME_INFO(i915)->num_rings - 1 : 0; 1708 bool force_restore = false; 1709 int len; 1710 u32 *cs; 1711 1712 flags |= MI_MM_SPACE_GTT; 1713 if (IS_HASWELL(i915)) 1714 /* These flags are for resource streamer on HSW+ */ 1715 flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN; 1716 else 1717 flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN; 1718 1719 len = 4; 1720 if (IS_GEN(i915, 7)) 1721 len += 2 + (num_rings ? 4*num_rings + 6 : 0); 1722 if (flags & MI_FORCE_RESTORE) { 1723 GEM_BUG_ON(flags & MI_RESTORE_INHIBIT); 1724 flags &= ~MI_FORCE_RESTORE; 1725 force_restore = true; 1726 len += 2; 1727 } 1728 1729 cs = intel_ring_begin(rq, len); 1730 if (IS_ERR(cs)) 1731 return PTR_ERR(cs); 1732 1733 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ 1734 if (IS_GEN(i915, 7)) { 1735 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 1736 if (num_rings) { 1737 struct intel_engine_cs *signaller; 1738 1739 *cs++ = MI_LOAD_REGISTER_IMM(num_rings); 1740 for_each_engine(signaller, i915, id) { 1741 if (signaller == engine) 1742 continue; 1743 1744 *cs++ = i915_mmio_reg_offset( 1745 RING_PSMI_CTL(signaller->mmio_base)); 1746 *cs++ = _MASKED_BIT_ENABLE( 1747 GEN6_PSMI_SLEEP_MSG_DISABLE); 1748 } 1749 } 1750 } 1751 1752 if (force_restore) { 1753 /* 1754 * The HW doesn't handle being told to restore the current 1755 * context very well. Quite often it likes goes to go off and 1756 * sulk, especially when it is meant to be reloading PP_DIR. 1757 * A very simple fix to force the reload is to simply switch 1758 * away from the current context and back again. 1759 * 1760 * Note that the kernel_context will contain random state 1761 * following the INHIBIT_RESTORE. We accept this since we 1762 * never use the kernel_context state; it is merely a 1763 * placeholder we use to flush other contexts. 1764 */ 1765 *cs++ = MI_SET_CONTEXT; 1766 *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context, 1767 engine)->state) | 1768 MI_MM_SPACE_GTT | 1769 MI_RESTORE_INHIBIT; 1770 } 1771 1772 *cs++ = MI_NOOP; 1773 *cs++ = MI_SET_CONTEXT; 1774 *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags; 1775 /* 1776 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP 1777 * WaMiSetContext_Hang:snb,ivb,vlv 1778 */ 1779 *cs++ = MI_NOOP; 1780 1781 if (IS_GEN(i915, 7)) { 1782 if (num_rings) { 1783 struct intel_engine_cs *signaller; 1784 i915_reg_t last_reg = {}; /* keep gcc quiet */ 1785 1786 *cs++ = MI_LOAD_REGISTER_IMM(num_rings); 1787 for_each_engine(signaller, i915, id) { 1788 if (signaller == engine) 1789 continue; 1790 1791 last_reg = RING_PSMI_CTL(signaller->mmio_base); 1792 *cs++ = i915_mmio_reg_offset(last_reg); 1793 *cs++ = _MASKED_BIT_DISABLE( 1794 GEN6_PSMI_SLEEP_MSG_DISABLE); 1795 } 1796 1797 /* Insert a delay before the next switch! 
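 * (The 'delay' is the MI_STORE_REGISTER_MEM below: it reads back the last
 * PSMI register written and stores it into the scratch page, which
 * presumably gives the preceding LRIs time to land before MI_SET_CONTEXT.)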
*/ 1798 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; 1799 *cs++ = i915_mmio_reg_offset(last_reg); 1800 *cs++ = i915_scratch_offset(rq->i915); 1801 *cs++ = MI_NOOP; 1802 } 1803 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 1804 } 1805 1806 intel_ring_advance(rq, cs); 1807 1808 return 0; 1809} 1810 1811static int remap_l3(struct i915_request *rq, int slice) 1812{ 1813 u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice]; 1814 int i; 1815 1816 if (!remap_info) 1817 return 0; 1818 1819 cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2); 1820 if (IS_ERR(cs)) 1821 return PTR_ERR(cs); 1822 1823 /* 1824 * Note: We do not worry about the concurrent register cacheline hang 1825 * here because no other code should access these registers other than 1826 * at initialization time. 1827 */ 1828 *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4); 1829 for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) { 1830 *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i)); 1831 *cs++ = remap_info[i]; 1832 } 1833 *cs++ = MI_NOOP; 1834 intel_ring_advance(rq, cs); 1835 1836 return 0; 1837} 1838 1839static int switch_context(struct i915_request *rq) 1840{ 1841 struct intel_engine_cs *engine = rq->engine; 1842 struct i915_gem_context *ctx = rq->gem_context; 1843 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; 1844 unsigned int unwind_mm = 0; 1845 u32 hw_flags = 0; 1846 int ret, i; 1847 1848 lockdep_assert_held(&rq->i915->drm.struct_mutex); 1849 GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); 1850 1851 if (ppgtt) { 1852 int loops; 1853 1854 /* 1855 * Baytail takes a little more convincing that it really needs 1856 * to reload the PD between contexts. It is not just a little 1857 * longer, as adding more stalls after the load_pd_dir (i.e. 1858 * adding a long loop around flush_pd_dir) is not as effective 1859 * as reloading the PD umpteen times. 32 is derived from 1860 * experimentation (gem_exec_parallel/fds) and has no good 1861 * explanation. 1862 */ 1863 loops = 1; 1864 if (engine->id == BCS && IS_VALLEYVIEW(engine->i915)) 1865 loops = 32; 1866 1867 do { 1868 ret = load_pd_dir(rq, ppgtt); 1869 if (ret) 1870 goto err; 1871 } while (--loops); 1872 1873 if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) { 1874 unwind_mm = intel_engine_flag(engine); 1875 ppgtt->pd_dirty_rings &= ~unwind_mm; 1876 hw_flags = MI_FORCE_RESTORE; 1877 } 1878 } 1879 1880 if (rq->hw_context->state) { 1881 GEM_BUG_ON(engine->id != RCS); 1882 1883 /* 1884 * The kernel context(s) is treated as pure scratch and is not 1885 * expected to retain any state (as we sacrifice it during 1886 * suspend and on resume it may be corrupted). This is ok, 1887 * as nothing actually executes using the kernel context; it 1888 * is purely used for flushing user contexts. 1889 */ 1890 if (i915_gem_context_is_kernel(ctx)) 1891 hw_flags = MI_RESTORE_INHIBIT; 1892 1893 ret = mi_set_context(rq, hw_flags); 1894 if (ret) 1895 goto err_mm; 1896 } 1897 1898 if (ppgtt) { 1899 ret = engine->emit_flush(rq, EMIT_INVALIDATE); 1900 if (ret) 1901 goto err_mm; 1902 1903 ret = flush_pd_dir(rq); 1904 if (ret) 1905 goto err_mm; 1906 1907 /* 1908 * Not only do we need a full barrier (post-sync write) after 1909 * invalidating the TLBs, but we need to wait a little bit 1910 * longer. Whether this is merely delaying us, or the 1911 * subsequent flush is a key part of serialising with the 1912 * post-sync op, this extra pass appears vital before a 1913 * mm switch! 
1914 */ 1915 ret = engine->emit_flush(rq, EMIT_INVALIDATE); 1916 if (ret) 1917 goto err_mm; 1918 1919 ret = engine->emit_flush(rq, EMIT_FLUSH); 1920 if (ret) 1921 goto err_mm; 1922 } 1923 1924 if (ctx->remap_slice) { 1925 for (i = 0; i < MAX_L3_SLICES; i++) { 1926 if (!(ctx->remap_slice & BIT(i))) 1927 continue; 1928 1929 ret = remap_l3(rq, i); 1930 if (ret) 1931 goto err_mm; 1932 } 1933 1934 ctx->remap_slice = 0; 1935 } 1936 1937 return 0; 1938 1939err_mm: 1940 if (unwind_mm) 1941 ppgtt->pd_dirty_rings |= unwind_mm; 1942err: 1943 return ret; 1944} 1945 1946static int ring_request_alloc(struct i915_request *request) 1947{ 1948 int ret; 1949 1950 GEM_BUG_ON(!request->hw_context->pin_count); 1951 GEM_BUG_ON(request->timeline->has_initial_breadcrumb); 1952 1953 /* 1954 * Flush enough space to reduce the likelihood of waiting after 1955 * we start building the request - in which case we will just 1956 * have to repeat work. 1957 */ 1958 request->reserved_space += LEGACY_REQUEST_SIZE; 1959 1960 ret = switch_context(request); 1961 if (ret) 1962 return ret; 1963 1964 /* Unconditionally invalidate GPU caches and TLBs. */ 1965 ret = request->engine->emit_flush(request, EMIT_INVALIDATE); 1966 if (ret) 1967 return ret; 1968 1969 request->reserved_space -= LEGACY_REQUEST_SIZE; 1970 return 0; 1971} 1972 1973static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes) 1974{ 1975 struct i915_request *target; 1976 long timeout; 1977 1978 lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex); 1979 1980 if (intel_ring_update_space(ring) >= bytes) 1981 return 0; 1982 1983 GEM_BUG_ON(list_empty(&ring->request_list)); 1984 list_for_each_entry(target, &ring->request_list, ring_link) { 1985 /* Would completion of this request free enough space? */ 1986 if (bytes <= __intel_ring_space(target->postfix, 1987 ring->emit, ring->size)) 1988 break; 1989 } 1990 1991 if (WARN_ON(&target->ring_link == &ring->request_list)) 1992 return -ENOSPC; 1993 1994 timeout = i915_request_wait(target, 1995 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, 1996 MAX_SCHEDULE_TIMEOUT); 1997 if (timeout < 0) 1998 return timeout; 1999 2000 i915_request_retire_upto(target); 2001 2002 intel_ring_update_space(ring); 2003 GEM_BUG_ON(ring->space < bytes); 2004 return 0; 2005} 2006 2007u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) 2008{ 2009 struct intel_ring *ring = rq->ring; 2010 const unsigned int remain_usable = ring->effective_size - ring->emit; 2011 const unsigned int bytes = num_dwords * sizeof(u32); 2012 unsigned int need_wrap = 0; 2013 unsigned int total_bytes; 2014 u32 *cs; 2015 2016 /* Packets must be qword aligned. */ 2017 GEM_BUG_ON(num_dwords & 1); 2018 2019 total_bytes = bytes + rq->reserved_space; 2020 GEM_BUG_ON(total_bytes > ring->effective_size); 2021 2022 if (unlikely(total_bytes > remain_usable)) { 2023 const int remain_actual = ring->size - ring->emit; 2024 2025 if (bytes > remain_usable) { 2026 /* 2027 * Not enough space for the basic request. So need to 2028 * flush out the remainder and then wait for 2029 * base + reserved. 2030 */ 2031 total_bytes += remain_actual; 2032 need_wrap = remain_actual | 1; 2033 } else { 2034 /* 2035 * The base request will fit but the reserved space 2036 * falls off the end. So we don't need an immediate 2037 * wrap and only need to effectively wait for the 2038 * reserved size from the start of ringbuffer. 
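 * (Illustration with made-up numbers: on a hypothetical 4096-byte ring with
 * emit at 4016, remain_usable and remain_actual are both 80. A 64-byte
 * packet with 32 bytes reserved gives total_bytes = 96 > 80, but the packet
 * itself still fits before the end, so need_wrap stays 0 and we merely wait
 * until 32 + 80 = 112 bytes are free instead of forcing an immediate wrap.)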
2039 */ 2040 total_bytes = rq->reserved_space + remain_actual; 2041 } 2042 } 2043 2044 if (unlikely(total_bytes > ring->space)) { 2045 int ret; 2046 2047 /* 2048 * Space is reserved in the ringbuffer for finalising the 2049 * request, as that cannot be allowed to fail. During request 2050 * finalisation, reserved_space is set to 0 to stop the 2051 * overallocation and the assumption is that then we never need 2052 * to wait (which has the risk of failing with EINTR). 2053 * 2054 * See also i915_request_alloc() and i915_request_add(). 2055 */ 2056 GEM_BUG_ON(!rq->reserved_space); 2057 2058 ret = wait_for_space(ring, total_bytes); 2059 if (unlikely(ret)) 2060 return ERR_PTR(ret); 2061 } 2062 2063 if (unlikely(need_wrap)) { 2064 need_wrap &= ~1; 2065 GEM_BUG_ON(need_wrap > ring->space); 2066 GEM_BUG_ON(ring->emit + need_wrap > ring->size); 2067 GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64))); 2068 2069 /* Fill the tail with MI_NOOP */ 2070 memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64)); 2071 ring->space -= need_wrap; 2072 ring->emit = 0; 2073 } 2074 2075 GEM_BUG_ON(ring->emit > ring->size - bytes); 2076 GEM_BUG_ON(ring->space < bytes); 2077 cs = ring->vaddr + ring->emit; 2078 GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs))); 2079 ring->emit += bytes; 2080 ring->space -= bytes; 2081 2082 return cs; 2083} 2084 2085/* Align the ring tail to a cacheline boundary */ 2086int intel_ring_cacheline_align(struct i915_request *rq) 2087{ 2088 int num_dwords; 2089 void *cs; 2090 2091 num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32); 2092 if (num_dwords == 0) 2093 return 0; 2094 2095 num_dwords = CACHELINE_DWORDS - num_dwords; 2096 GEM_BUG_ON(num_dwords & 1); 2097 2098 cs = intel_ring_begin(rq, num_dwords); 2099 if (IS_ERR(cs)) 2100 return PTR_ERR(cs); 2101 2102 memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2); 2103 intel_ring_advance(rq, cs); 2104 2105 GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1)); 2106 return 0; 2107} 2108 2109static void gen6_bsd_submit_request(struct i915_request *request) 2110{ 2111 struct drm_i915_private *dev_priv = request->i915; 2112 2113 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2114 2115 /* Every tail move must follow the sequence below */ 2116 2117 /* Disable notification that the ring is IDLE. The GT 2118 * will then assume that it is busy and bring it out of rc6. 2119 */ 2120 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 2121 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2122 2123 /* Clear the context id. Here be magic! */ 2124 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0); 2125 2126 /* Wait for the ring not to be idle, i.e. for it to wake up. */ 2127 if (__intel_wait_for_register_fw(dev_priv, 2128 GEN6_BSD_SLEEP_PSMI_CONTROL, 2129 GEN6_BSD_SLEEP_INDICATOR, 2130 0, 2131 1000, 0, NULL)) 2132 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 2133 2134 /* Now that the ring is fully powered up, update the tail */ 2135 i9xx_submit_request(request); 2136 2137 /* Let the ring send IDLE messages to the GT again, 2138 * and so let it sleep to conserve power when idle. 
2139 */ 2140 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, 2141 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2142 2143 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2144} 2145 2146static int mi_flush_dw(struct i915_request *rq, u32 flags) 2147{ 2148 u32 cmd, *cs; 2149 2150 cs = intel_ring_begin(rq, 4); 2151 if (IS_ERR(cs)) 2152 return PTR_ERR(cs); 2153 2154 cmd = MI_FLUSH_DW; 2155 2156 /* 2157 * We always require a command barrier so that subsequent 2158 * commands, such as breadcrumb interrupts, are strictly ordered 2159 * wrt the contents of the write cache being flushed to memory 2160 * (and thus being coherent from the CPU). 2161 */ 2162 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2163 2164 /* 2165 * Bspec vol 1c.3 - blitter engine command streamer: 2166 * "If ENABLED, all TLBs will be invalidated once the flush 2167 * operation is complete. This bit is only valid when the 2168 * Post-Sync Operation field is a value of 1h or 3h." 2169 */ 2170 cmd |= flags; 2171 2172 *cs++ = cmd; 2173 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; 2174 *cs++ = 0; 2175 *cs++ = MI_NOOP; 2176 2177 intel_ring_advance(rq, cs); 2178 2179 return 0; 2180} 2181 2182static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags) 2183{ 2184 return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0); 2185} 2186 2187static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode) 2188{ 2189 return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD); 2190} 2191 2192static int 2193hsw_emit_bb_start(struct i915_request *rq, 2194 u64 offset, u32 len, 2195 unsigned int dispatch_flags) 2196{ 2197 u32 *cs; 2198 2199 cs = intel_ring_begin(rq, 2); 2200 if (IS_ERR(cs)) 2201 return PTR_ERR(cs); 2202 2203 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? 2204 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW); 2205 /* bit0-7 is the length on GEN6+ */ 2206 *cs++ = offset; 2207 intel_ring_advance(rq, cs); 2208 2209 return 0; 2210} 2211 2212static int 2213gen6_emit_bb_start(struct i915_request *rq, 2214 u64 offset, u32 len, 2215 unsigned int dispatch_flags) 2216{ 2217 u32 *cs; 2218 2219 cs = intel_ring_begin(rq, 2); 2220 if (IS_ERR(cs)) 2221 return PTR_ERR(cs); 2222 2223 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? 
2224 0 : MI_BATCH_NON_SECURE_I965); 2225 /* bit0-7 is the length on GEN6+ */ 2226 *cs++ = offset; 2227 intel_ring_advance(rq, cs); 2228 2229 return 0; 2230} 2231 2232/* Blitter support (SandyBridge+) */ 2233 2234static int gen6_ring_flush(struct i915_request *rq, u32 mode) 2235{ 2236 return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB); 2237} 2238 2239static void intel_ring_init_irq(struct drm_i915_private *dev_priv, 2240 struct intel_engine_cs *engine) 2241{ 2242 if (INTEL_GEN(dev_priv) >= 6) { 2243 engine->irq_enable = gen6_irq_enable; 2244 engine->irq_disable = gen6_irq_disable; 2245 } else if (INTEL_GEN(dev_priv) >= 5) { 2246 engine->irq_enable = gen5_irq_enable; 2247 engine->irq_disable = gen5_irq_disable; 2248 } else if (INTEL_GEN(dev_priv) >= 3) { 2249 engine->irq_enable = i9xx_irq_enable; 2250 engine->irq_disable = i9xx_irq_disable; 2251 } else { 2252 engine->irq_enable = i8xx_irq_enable; 2253 engine->irq_disable = i8xx_irq_disable; 2254 } 2255} 2256 2257static void i9xx_set_default_submission(struct intel_engine_cs *engine) 2258{ 2259 engine->submit_request = i9xx_submit_request; 2260 engine->cancel_requests = cancel_requests; 2261 2262 engine->park = NULL; 2263 engine->unpark = NULL; 2264} 2265 2266static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine) 2267{ 2268 i9xx_set_default_submission(engine); 2269 engine->submit_request = gen6_bsd_submit_request; 2270} 2271 2272static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, 2273 struct intel_engine_cs *engine) 2274{ 2275 /* gen8+ are only supported with execlists */ 2276 GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8); 2277 2278 intel_ring_init_irq(dev_priv, engine); 2279 2280 engine->init_hw = init_ring_common; 2281 engine->reset.prepare = reset_prepare; 2282 engine->reset.reset = reset_ring; 2283 engine->reset.finish = reset_finish; 2284 2285 engine->context_pin = intel_ring_context_pin; 2286 engine->request_alloc = ring_request_alloc; 2287 2288 /* 2289 * Using a global execution timeline; the previous final breadcrumb is 2290 * equivalent to our next initial breadcrumb so we can elide 2291 * engine->emit_init_breadcrumb().
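 * The default chosen below is overridden again per engine for gen6+
 * (gen6/7_rcs_emit_breadcrumb for render, gen6/7_xcs_emit_breadcrumb for
 * the video, blitter and vebox rings).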
2292 */ 2293 engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb; 2294 if (IS_GEN(dev_priv, 5)) 2295 engine->emit_fini_breadcrumb = gen5_emit_breadcrumb; 2296 2297 engine->set_default_submission = i9xx_set_default_submission; 2298 2299 if (INTEL_GEN(dev_priv) >= 6) 2300 engine->emit_bb_start = gen6_emit_bb_start; 2301 else if (INTEL_GEN(dev_priv) >= 4) 2302 engine->emit_bb_start = i965_emit_bb_start; 2303 else if (IS_I830(dev_priv) || IS_I845G(dev_priv)) 2304 engine->emit_bb_start = i830_emit_bb_start; 2305 else 2306 engine->emit_bb_start = i915_emit_bb_start; 2307} 2308 2309int intel_init_render_ring_buffer(struct intel_engine_cs *engine) 2310{ 2311 struct drm_i915_private *dev_priv = engine->i915; 2312 int ret; 2313 2314 intel_ring_default_vfuncs(dev_priv, engine); 2315 2316 if (HAS_L3_DPF(dev_priv)) 2317 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2318 2319 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 2320 2321 if (INTEL_GEN(dev_priv) >= 7) { 2322 engine->init_context = intel_rcs_ctx_init; 2323 engine->emit_flush = gen7_render_ring_flush; 2324 engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb; 2325 } else if (IS_GEN(dev_priv, 6)) { 2326 engine->init_context = intel_rcs_ctx_init; 2327 engine->emit_flush = gen6_render_ring_flush; 2328 engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb; 2329 } else if (IS_GEN(dev_priv, 5)) { 2330 engine->emit_flush = gen4_render_ring_flush; 2331 } else { 2332 if (INTEL_GEN(dev_priv) < 4) 2333 engine->emit_flush = gen2_render_ring_flush; 2334 else 2335 engine->emit_flush = gen4_render_ring_flush; 2336 engine->irq_enable_mask = I915_USER_INTERRUPT; 2337 } 2338 2339 if (IS_HASWELL(dev_priv)) 2340 engine->emit_bb_start = hsw_emit_bb_start; 2341 2342 engine->init_hw = init_render_ring; 2343 2344 ret = intel_init_ring_buffer(engine); 2345 if (ret) 2346 return ret; 2347 2348 return 0; 2349} 2350 2351int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine) 2352{ 2353 struct drm_i915_private *dev_priv = engine->i915; 2354 2355 intel_ring_default_vfuncs(dev_priv, engine); 2356 2357 if (INTEL_GEN(dev_priv) >= 6) { 2358 /* gen6 bsd needs a special wa for tail updates */ 2359 if (IS_GEN(dev_priv, 6)) 2360 engine->set_default_submission = gen6_bsd_set_default_submission; 2361 engine->emit_flush = gen6_bsd_ring_flush; 2362 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2363 2364 if (IS_GEN(dev_priv, 6)) 2365 engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb; 2366 else 2367 engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; 2368 } else { 2369 engine->emit_flush = bsd_ring_flush; 2370 if (IS_GEN(dev_priv, 5)) 2371 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 2372 else 2373 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; 2374 } 2375 2376 return intel_init_ring_buffer(engine); 2377} 2378 2379int intel_init_blt_ring_buffer(struct intel_engine_cs *engine) 2380{ 2381 struct drm_i915_private *dev_priv = engine->i915; 2382 2383 GEM_BUG_ON(INTEL_GEN(dev_priv) < 6); 2384 2385 intel_ring_default_vfuncs(dev_priv, engine); 2386 2387 engine->emit_flush = gen6_ring_flush; 2388 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2389 2390 if (IS_GEN(dev_priv, 6)) 2391 engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb; 2392 else 2393 engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; 2394 2395 return intel_init_ring_buffer(engine); 2396} 2397 2398int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine) 2399{ 2400 struct drm_i915_private *dev_priv = engine->i915; 2401 2402 GEM_BUG_ON(INTEL_GEN(dev_priv) < 7); 
2403 2404 intel_ring_default_vfuncs(dev_priv, engine); 2405 2406 engine->emit_flush = gen6_ring_flush; 2407 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 2408 engine->irq_enable = hsw_vebox_irq_enable; 2409 engine->irq_disable = hsw_vebox_irq_disable; 2410 2411 engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb; 2412 2413 return intel_init_ring_buffer(engine); 2414}
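/*
 * A minimal usage sketch (not part of the driver) of the command-emission
 * pattern used throughout this file: reserve an even number of dwords with
 * intel_ring_begin(), write them, then commit with intel_ring_advance().
 * Any error from intel_ring_begin() must be returned without touching the
 * ring.  emit_two_noops() is a hypothetical illustration only.
 *
 *	static int emit_two_noops(struct i915_request *rq)
 *	{
 *		u32 *cs;
 *
 *		cs = intel_ring_begin(rq, 2);
 *		if (IS_ERR(cs))
 *			return PTR_ERR(cs);
 *
 *		*cs++ = MI_NOOP;
 *		*cs++ = MI_NOOP;
 *		intel_ring_advance(rq, cs);
 *
 *		return 0;
 *	}
 */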