/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
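 * Hence HSW_CXT_TOTAL_SIZE below is hard-coded rather than derived from
 * the CXT_SIZE register at runtime.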
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);

	u8 uabi_class;
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};

#define MAX_MMIO_BASES 3
struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS4] = {
		.hw_id = VCS4_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS2] = {
		.hw_id = VECS2_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
};

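/*
 * Note: the mmio_bases[] tables above are walked by __engine_mmio_base()
 * below, which returns the first entry whose .gen is not newer than the
 * running device; e.g. on a gen9 part the VCS engine resolves to
 * GEN6_BSD_RING_BASE, while a gen11 part matches GEN11_BSD_RING_BASE.
 */
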
/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}

static void __sprint_engine_name(char *name, const struct engine_info *info)
{
	WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
			 intel_engine_classes[info->class].name,
			 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}

static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
		return -EINVAL;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	__sprint_engine_name(engine->name, info);
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
	engine->class = info->class;
	engine->instance = info->instance;

	engine->uabi_id = info->uabi_id;
	engine->uabi_class = intel_engine_classes[info->class].uabi_class;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	seqlock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;
	return 0;
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	device_info->num_rings = hweight32(mask);

	i915_check_and_clear_faults(dev_priv);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (HAS_EXECLISTS(dev_priv))
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		err = -EINVAL;
		err_id = id;

		if (GEM_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno,
	 * so long as we reset the tracking semaphore value to 0, it will
	 * always be before the next request's seqno. If we don't reset
	 * the semaphore value, then when the seqno moves backwards all
	 * future waits will complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
	i915_gem_batch_pool_init(&engine->batch_pool, engine);
}

static bool csb_force_mmio(struct drm_i915_private *i915)
{
	/* Older GVT emulation depends upon intercepting CSB mmio */
	if (intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915))
		return true;

	return false;
}

static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->csb_use_mmio = csb_force_mmio(engine->i915);

	execlists->port_mask = 1;
	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue_priority = INT_MIN;
	execlists->queue = RB_ROOT;
	execlists->first = NULL;
}

/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine@ structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
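 *
 * The state initialised here is released again by intel_engine_cleanup_common().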
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	i915_timeline_init(engine->i915, &engine->timeline, engine->name);

	intel_engine_init_execlist(engine);
	intel_engine_init_hangcheck(engine);
	intel_engine_init_batch_pool(engine);
	intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch);
}

static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!dev_priv->status_page_dmah)
		return;

	drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
	engine->status_page.page_addr = NULL;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	obj = vma->obj;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_gem_object_unpin_map(obj);
	__i915_gem_object_release_unless_active(obj);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;
	ret = i915_vma_pin(vma, 0, 4096, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(engine->id != RCS);

	dev_priv->status_page_dmah =
		drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
	if (!dev_priv->status_page_dmah)
		return -ENOMEM;

	engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(engine->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine@ structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_ring *ring;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ring = intel_context_pin(engine->i915->kernel_context, engine);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (engine->i915->preempt_context) {
		ring = intel_context_pin(engine->i915->preempt_context, engine);
		if (IS_ERR(ring)) {
			ret = PTR_ERR(ring);
			goto err_unpin_kernel;
		}
	}

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin_preempt;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ret = init_phys_status_page(engine);
	else
		ret = init_status_page(engine);
	if (ret)
		goto err_breadcrumbs;

	return 0;

err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (engine->i915->preempt_context)
		intel_context_unpin(engine->i915->preempt_context, engine);
err_unpin_kernel:
	intel_context_unpin(engine->i915->kernel_context, engine);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
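 * (scratch, status page, breadcrumbs, command parser state, batch pool,
 * the default context image, pinned kernel/preempt contexts and the
 * engine timeline).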
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	intel_engine_cleanup_scratch(engine);

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		cleanup_phys_status_page(engine);
	else
		cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (engine->i915->preempt_context)
		intel_context_unpin(engine->i915->preempt_context, engine);
	intel_context_unpin(engine->i915->kernel_context, engine);

	i915_timeline_fini(&engine->timeline);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr_slice_subslice_mask;
	uint32_t mcr_slice_subslice_select;
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(dev_priv) >= 11) {
		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
					  GEN11_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
					    GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
					  GEN8_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
					    GEN8_MCR_SUBSLICE(subslice);
	}

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
	/*
	 * The HW expects the slice and subslice selectors to be reset to 0
	 * after reading out the registers.
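	 * Leaving a non-zero selector behind would steer subsequent reads of
	 * MCR-multiplexed registers to that slice/subslice rather than the
	 * hardware default.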
	 */
	WARN_ON_ONCE(mcr & mcr_slice_subslice_mask);
	mcr &= ~mcr_slice_subslice_mask;
	mcr |= mcr_slice_subslice_select;
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~mcr_slice_subslice_mask;
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}

static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	/* If the whole device is asleep, the engine must be idle */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return true;

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);

	return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine),
			       intel_engine_last_submit(engine)))
		return false;

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
		return true;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active))
		return false;

	/* ELSP is empty, but there are ready requests? */
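	/* (execlists->first points at runnable requests not yet submitted) */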
	if (READ_ONCE(engine->execlists.first))
		return false;

	/* Ring stopped? */
	if (!ring_is_idle(engine))
		return false;

	return true;
}

bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the last context to be executed on this engine, or to have
 * been executed if the engine is already idle, is the kernel context
 * (#i915.kernel_context).
 */
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	const struct i915_gem_context * const kernel_context =
		engine->i915->kernel_context;
	struct i915_request *rq;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	/*
	 * Check the last context seen by the engine. If active, it will be
	 * the last request that remains in the timeline. When idle, it is
	 * the last executed context as tracked by retirement.
	 */
	rq = __i915_gem_active_peek(&engine->timeline.last_request);
	if (rq)
		return rq->ctx == kernel_context;
	else
		return engine->last_retired_context == kernel_context;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are truly idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
				engine->name);
			intel_engine_dump(engine, &p, NULL);
		}

		/* Must be reset upon idling, or we may miss the busy wakeup. */
		GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);

		if (engine->park)
			engine->park(engine);

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}
}

/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and now about to fire up with some new user requests.
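 * Each engine's unpark hook is called and hangcheck is re-initialised
 * before new work arrives.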
 */
void intel_engines_unpark(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		if (engine->unpark)
			engine->unpark(engine);

		intel_engine_init_hangcheck(engine);
	}
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int which;

	which = 0;
	for_each_engine(engine, i915, id)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}

static int print_sched_attr(struct drm_i915_private *i915,
			    const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{
	if (attr->priority == I915_PRIORITY_INVALID)
		return x;

	x += snprintf(buf + x, len - x,
		      " prio=%d", attr->priority);

	return x;
}

static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
	char buf[80] = "";
	int x = 0;

	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));

	drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n",
		   prefix,
		   rq->global_seqno,
		   i915_request_completed(rq) ? "!" : "",
		   rq->fence.context, rq->fence.seqno,
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}

static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "%08zx %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

static void intel_engine_print_registers(const struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const struct intel_engine_execlists * const execlists =
		&engine->execlists;
	u64 addr;

	drm_printf(m, "\tRING_START: 0x%08x\n",
		   I915_READ(RING_START(engine->mmio_base)));
	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL: 0x%08x\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   I915_READ(RING_MI_MODE(engine->mmio_base)),
			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
	}

	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
		drm_printf(m, "\tSYNC_0: 0x%08x\n",
			   I915_READ(RING_SYNC_0(engine->mmio_base)));
		drm_printf(m, "\tSYNC_1: 0x%08x\n",
			   I915_READ(RING_SYNC_1(engine->mmio_base)));
		if (HAS_VEBOX(dev_priv))
			drm_printf(m, "\tSYNC_2: 0x%08x\n",
				   I915_READ(RING_SYNC_2(engine->mmio_base)));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
					RING_DMA_FADD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
	else
		addr = I915_READ(DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   I915_READ(RING_IPEIR(engine->mmio_base)));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   I915_READ(RING_IPEHR(engine->mmio_base)));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
		u32 ptr, read, write;
		unsigned int idx;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
		read = GEN8_CSB_READ_PTR(ptr);
		write = GEN8_CSB_WRITE_PTR(ptr);
		drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s, tasklet queued? %s (%s)\n",
			   read, execlists->csb_head,
			   write,
			   intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
			   yesno(test_bit(ENGINE_IRQ_EXECLIST,
					  &engine->irq_posted)),
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
		if (read >= GEN8_CSB_ENTRIES)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
				   idx,
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   hws[idx * 2],
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
				   hws[idx * 2 + 1]);
		}

		rcu_read_lock();
		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			struct i915_request *rq;
			unsigned int count;

			rq = port_unpack(&execlists->port[idx], &count);
			if (rq) {
				char hdr[80];

				snprintf(hdr, sizeof(hdr),
					 "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ",
					 idx, count,
					 i915_ggtt_offset(rq->ring->vma));
				print_request(m, rq, hdr);
			} else {
				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
			}
		}
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
		rcu_read_unlock();
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
}

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	const int MAX_REQUESTS_TO_SHOW = 8;
	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq, *last;
	struct rb_node *rb;
	int count;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (i915_terminally_wedged(&engine->i915->gpu_error))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
		   intel_engine_get_seqno(engine),
		   intel_engine_last_submit(engine),
		   engine->hangcheck.seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	rcu_read_lock();

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline.requests,
			      struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tfirst ");

	rq = list_last_entry(&engine->timeline.requests,
			     struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tlast ");

	rq = i915_gem_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");
		drm_printf(m,
			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
			   rq->head, rq->postfix, rq->tail,
			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
		drm_printf(m, "\t\tring->start: 0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head: 0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail: 0x%08x\n",
			   rq->ring->tail);
		drm_printf(m, "\t\tring->emit: 0x%08x\n",
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space: 0x%08x\n",
			   rq->ring->space);
	}

	rcu_read_unlock();

	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->i915);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	spin_lock_irq(&engine->timeline.lock);

	last = NULL;
	count = 0;
	list_for_each_entry(rq, &engine->timeline.requests, link) {
		if (count++ < MAX_REQUESTS_TO_SHOW - 1)
			print_request(m, rq, "\t\tE ");
		else
			last = rq;
	}
	if (last) {
		if (count > MAX_REQUESTS_TO_SHOW) {
			drm_printf(m,
				   "\t\t...skipping %d executing requests...\n",
				   count - MAX_REQUESTS_TO_SHOW);
		}
		print_request(m, last, "\t\tE ");
	}

	last = NULL;
	count = 0;
	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
	for (rb = execlists->first; rb; rb = rb_next(rb)) {
		struct i915_priolist *p =
			rb_entry(rb, typeof(*p), node);

		list_for_each_entry(rq, &p->requests, sched.link) {
			if (count++ < MAX_REQUESTS_TO_SHOW - 1)
				print_request(m, rq, "\t\tQ ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > MAX_REQUESTS_TO_SHOW) {
			drm_printf(m,
				   "\t\t...skipping %d queued requests...\n",
				   count - MAX_REQUESTS_TO_SHOW);
		}
		print_request(m, last, "\t\tQ ");
	}

	spin_unlock_irq(&engine->timeline.lock);

	spin_lock_irq(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		drm_printf(m, "\t%s [%d] waiting for %x\n",
			   w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock_irq(&b->rb_lock);

	drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
		   engine->irq_posted,
		   yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
				  &engine->irq_posted)),
		   yesno(test_bit(ENGINE_IRQ_EXECLIST,
				  &engine->irq_posted)));

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.page_addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
}

static u8 user_class_map[] = {
	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
};

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	if (class >= ARRAY_SIZE(user_class_map))
		return NULL;

	class = user_class_map[class];

	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	if (instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}

/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
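 *
 * Only engines that support busy stats (intel_engine_supports_stats())
 * can be tracked; for others this returns -ENODEV. Busyness accumulates
 * from the moment stats are first enabled.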
1459 * 1460 * Returns 0 on success or a negative error code. 1461 */ 1462int intel_enable_engine_stats(struct intel_engine_cs *engine) 1463{ 1464 struct intel_engine_execlists *execlists = &engine->execlists; 1465 unsigned long flags; 1466 int err = 0; 1467 1468 if (!intel_engine_supports_stats(engine)) 1469 return -ENODEV; 1470 1471 tasklet_disable(&execlists->tasklet); 1472 write_seqlock_irqsave(&engine->stats.lock, flags); 1473 1474 if (unlikely(engine->stats.enabled == ~0)) { 1475 err = -EBUSY; 1476 goto unlock; 1477 } 1478 1479 if (engine->stats.enabled++ == 0) { 1480 const struct execlist_port *port = execlists->port; 1481 unsigned int num_ports = execlists_num_ports(execlists); 1482 1483 engine->stats.enabled_at = ktime_get(); 1484 1485 /* XXX submission method oblivious? */ 1486 while (num_ports-- && port_isset(port)) { 1487 engine->stats.active++; 1488 port++; 1489 } 1490 1491 if (engine->stats.active) 1492 engine->stats.start = engine->stats.enabled_at; 1493 } 1494 1495unlock: 1496 write_sequnlock_irqrestore(&engine->stats.lock, flags); 1497 tasklet_enable(&execlists->tasklet); 1498 1499 return err; 1500} 1501 1502static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) 1503{ 1504 ktime_t total = engine->stats.total; 1505 1506 /* 1507 * If the engine is executing something at the moment 1508 * add it to the total. 1509 */ 1510 if (engine->stats.active) 1511 total = ktime_add(total, 1512 ktime_sub(ktime_get(), engine->stats.start)); 1513 1514 return total; 1515} 1516 1517/** 1518 * intel_engine_get_busy_time() - Return current accumulated engine busyness 1519 * @engine: engine to report on 1520 * 1521 * Returns accumulated time @engine was busy since engine stats were enabled. 1522 */ 1523ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine) 1524{ 1525 unsigned int seq; 1526 ktime_t total; 1527 1528 do { 1529 seq = read_seqbegin(&engine->stats.lock); 1530 total = __intel_engine_get_busy_time(engine); 1531 } while (read_seqretry(&engine->stats.lock, seq)); 1532 1533 return total; 1534} 1535 1536/** 1537 * intel_disable_engine_stats() - Disable engine busy tracking on engine 1538 * @engine: engine to disable stats collection 1539 * 1540 * Stops collecting the engine busyness data for @engine. 1541 */ 1542void intel_disable_engine_stats(struct intel_engine_cs *engine) 1543{ 1544 unsigned long flags; 1545 1546 if (!intel_engine_supports_stats(engine)) 1547 return; 1548 1549 write_seqlock_irqsave(&engine->stats.lock, flags); 1550 WARN_ON_ONCE(engine->stats.enabled == 0); 1551 if (--engine->stats.enabled == 0) { 1552 engine->stats.total = __intel_engine_get_busy_time(engine); 1553 engine->stats.active = 0; 1554 } 1555 write_sequnlock_irqrestore(&engine->stats.lock, flags); 1556} 1557 1558#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1559#include "selftests/mock_engine.c" 1560#include "selftests/intel_engine_cs.c" 1561#endif