Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v4.19 1717 lines 46 kB view raw
1/* 2 * Copyright © 2016 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 */ 24 25#include <drm/drm_print.h> 26 27#include "i915_drv.h" 28#include "intel_ringbuffer.h" 29#include "intel_lrc.h" 30 31/* Haswell does have the CXT_SIZE register however it does not appear to be 32 * valid. Now, docs explain in dwords what is in the context object. The full 33 * size is 70720 bytes, however, the power context and execlist context will 34 * never be saved (power context is stored elsewhere, and execlists don't work 35 * on HSW) - so the final size, including the extra state required for the 36 * Resource Streamer, is 66944 bytes, which rounds to 17 pages. 37 */ 38#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) 39 40#define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) 41#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) 42#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) 43#define GEN10_LR_CONTEXT_RENDER_SIZE (18 * PAGE_SIZE) 44#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE) 45 46#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE) 47 48struct engine_class_info { 49 const char *name; 50 int (*init_legacy)(struct intel_engine_cs *engine); 51 int (*init_execlists)(struct intel_engine_cs *engine); 52 53 u8 uabi_class; 54}; 55 56static const struct engine_class_info intel_engine_classes[] = { 57 [RENDER_CLASS] = { 58 .name = "rcs", 59 .init_execlists = logical_render_ring_init, 60 .init_legacy = intel_init_render_ring_buffer, 61 .uabi_class = I915_ENGINE_CLASS_RENDER, 62 }, 63 [COPY_ENGINE_CLASS] = { 64 .name = "bcs", 65 .init_execlists = logical_xcs_ring_init, 66 .init_legacy = intel_init_blt_ring_buffer, 67 .uabi_class = I915_ENGINE_CLASS_COPY, 68 }, 69 [VIDEO_DECODE_CLASS] = { 70 .name = "vcs", 71 .init_execlists = logical_xcs_ring_init, 72 .init_legacy = intel_init_bsd_ring_buffer, 73 .uabi_class = I915_ENGINE_CLASS_VIDEO, 74 }, 75 [VIDEO_ENHANCEMENT_CLASS] = { 76 .name = "vecs", 77 .init_execlists = logical_xcs_ring_init, 78 .init_legacy = intel_init_vebox_ring_buffer, 79 .uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE, 80 }, 81}; 82 83#define MAX_MMIO_BASES 3 84struct engine_info { 85 unsigned int hw_id; 86 unsigned int uabi_id; 87 u8 class; 88 u8 instance; 89 /* mmio bases table *must* be sorted in reverse gen order */ 90 struct engine_mmio_base { 91 u32 gen : 8; 92 u32 base : 24; 93 } mmio_bases[MAX_MMIO_BASES]; 94}; 95 96static const struct engine_info intel_engines[] = { 97 [RCS] = { 98 .hw_id = RCS_HW, 99 .uabi_id = I915_EXEC_RENDER, 100 .class = RENDER_CLASS, 101 .instance = 0, 102 .mmio_bases = { 103 { .gen = 1, .base = RENDER_RING_BASE } 104 }, 105 }, 106 [BCS] = { 107 .hw_id = BCS_HW, 108 .uabi_id = I915_EXEC_BLT, 109 .class = COPY_ENGINE_CLASS, 110 .instance = 0, 111 .mmio_bases = { 112 { .gen = 6, .base = BLT_RING_BASE } 113 }, 114 }, 115 [VCS] = { 116 .hw_id = VCS_HW, 117 .uabi_id = I915_EXEC_BSD, 118 .class = VIDEO_DECODE_CLASS, 119 .instance = 0, 120 .mmio_bases = { 121 { .gen = 11, .base = GEN11_BSD_RING_BASE }, 122 { .gen = 6, .base = GEN6_BSD_RING_BASE }, 123 { .gen = 4, .base = BSD_RING_BASE } 124 }, 125 }, 126 [VCS2] = { 127 .hw_id = VCS2_HW, 128 .uabi_id = I915_EXEC_BSD, 129 .class = VIDEO_DECODE_CLASS, 130 .instance = 1, 131 .mmio_bases = { 132 { .gen = 11, .base = GEN11_BSD2_RING_BASE }, 133 { .gen = 8, .base = GEN8_BSD2_RING_BASE } 134 }, 135 }, 136 [VCS3] = { 137 .hw_id = VCS3_HW, 138 .uabi_id = I915_EXEC_BSD, 139 .class = VIDEO_DECODE_CLASS, 140 .instance = 2, 141 .mmio_bases = { 142 { .gen = 11, .base = GEN11_BSD3_RING_BASE } 143 }, 144 }, 145 [VCS4] = { 146 .hw_id = VCS4_HW, 147 .uabi_id = I915_EXEC_BSD, 148 .class = VIDEO_DECODE_CLASS, 149 .instance = 3, 150 .mmio_bases = { 151 { .gen = 11, .base = GEN11_BSD4_RING_BASE } 152 }, 153 }, 154 [VECS] = { 155 .hw_id = VECS_HW, 156 .uabi_id = I915_EXEC_VEBOX, 157 .class = VIDEO_ENHANCEMENT_CLASS, 158 .instance = 0, 159 .mmio_bases = { 160 { .gen = 11, .base = GEN11_VEBOX_RING_BASE }, 161 { .gen = 7, .base = VEBOX_RING_BASE } 162 }, 163 }, 164 [VECS2] = { 165 .hw_id = VECS2_HW, 166 .uabi_id = I915_EXEC_VEBOX, 167 .class = VIDEO_ENHANCEMENT_CLASS, 168 .instance = 1, 169 .mmio_bases = { 170 { .gen = 11, .base = GEN11_VEBOX2_RING_BASE } 171 }, 172 }, 173}; 174 175/** 176 * ___intel_engine_context_size() - return the size of the context for an engine 177 * @dev_priv: i915 device private 178 * @class: engine class 179 * 180 * Each engine class may require a different amount of space for a context 181 * image. 182 * 183 * Return: size (in bytes) of an engine class specific context image 184 * 185 * Note: this size includes the HWSP, which is part of the context image 186 * in LRC mode, but does not include the "shared data page" used with 187 * GuC submission. The caller should account for this if using the GuC. 188 */ 189static u32 190__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) 191{ 192 u32 cxt_size; 193 194 BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE); 195 196 switch (class) { 197 case RENDER_CLASS: 198 switch (INTEL_GEN(dev_priv)) { 199 default: 200 MISSING_CASE(INTEL_GEN(dev_priv)); 201 return DEFAULT_LR_CONTEXT_RENDER_SIZE; 202 case 11: 203 return GEN11_LR_CONTEXT_RENDER_SIZE; 204 case 10: 205 return GEN10_LR_CONTEXT_RENDER_SIZE; 206 case 9: 207 return GEN9_LR_CONTEXT_RENDER_SIZE; 208 case 8: 209 return GEN8_LR_CONTEXT_RENDER_SIZE; 210 case 7: 211 if (IS_HASWELL(dev_priv)) 212 return HSW_CXT_TOTAL_SIZE; 213 214 cxt_size = I915_READ(GEN7_CXT_SIZE); 215 return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64, 216 PAGE_SIZE); 217 case 6: 218 cxt_size = I915_READ(CXT_SIZE); 219 return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64, 220 PAGE_SIZE); 221 case 5: 222 case 4: 223 case 3: 224 case 2: 225 /* For the special day when i810 gets merged. */ 226 case 1: 227 return 0; 228 } 229 break; 230 default: 231 MISSING_CASE(class); 232 /* fall through */ 233 case VIDEO_DECODE_CLASS: 234 case VIDEO_ENHANCEMENT_CLASS: 235 case COPY_ENGINE_CLASS: 236 if (INTEL_GEN(dev_priv) < 8) 237 return 0; 238 return GEN8_LR_CONTEXT_OTHER_SIZE; 239 } 240} 241 242static u32 __engine_mmio_base(struct drm_i915_private *i915, 243 const struct engine_mmio_base *bases) 244{ 245 int i; 246 247 for (i = 0; i < MAX_MMIO_BASES; i++) 248 if (INTEL_GEN(i915) >= bases[i].gen) 249 break; 250 251 GEM_BUG_ON(i == MAX_MMIO_BASES); 252 GEM_BUG_ON(!bases[i].base); 253 254 return bases[i].base; 255} 256 257static void __sprint_engine_name(char *name, const struct engine_info *info) 258{ 259 WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u", 260 intel_engine_classes[info->class].name, 261 info->instance) >= INTEL_ENGINE_CS_MAX_NAME); 262} 263 264static int 265intel_engine_setup(struct drm_i915_private *dev_priv, 266 enum intel_engine_id id) 267{ 268 const struct engine_info *info = &intel_engines[id]; 269 struct intel_engine_cs *engine; 270 271 GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes)); 272 273 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); 274 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); 275 276 if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS)) 277 return -EINVAL; 278 279 if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE)) 280 return -EINVAL; 281 282 if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance])) 283 return -EINVAL; 284 285 GEM_BUG_ON(dev_priv->engine[id]); 286 engine = kzalloc(sizeof(*engine), GFP_KERNEL); 287 if (!engine) 288 return -ENOMEM; 289 290 engine->id = id; 291 engine->i915 = dev_priv; 292 __sprint_engine_name(engine->name, info); 293 engine->hw_id = engine->guc_id = info->hw_id; 294 engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases); 295 engine->class = info->class; 296 engine->instance = info->instance; 297 298 engine->uabi_id = info->uabi_id; 299 engine->uabi_class = intel_engine_classes[info->class].uabi_class; 300 301 engine->context_size = __intel_engine_context_size(dev_priv, 302 engine->class); 303 if (WARN_ON(engine->context_size > BIT(20))) 304 engine->context_size = 0; 305 if (engine->context_size) 306 DRIVER_CAPS(dev_priv)->has_logical_contexts = true; 307 308 /* Nothing to do here, execute in order of dependencies */ 309 engine->schedule = NULL; 310 311 seqlock_init(&engine->stats.lock); 312 313 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); 314 315 dev_priv->engine_class[info->class][info->instance] = engine; 316 dev_priv->engine[id] = engine; 317 return 0; 318} 319 320/** 321 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers 322 * @dev_priv: i915 device private 323 * 324 * Return: non-zero if the initialization failed. 325 */ 326int intel_engines_init_mmio(struct drm_i915_private *dev_priv) 327{ 328 struct intel_device_info *device_info = mkwrite_device_info(dev_priv); 329 const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask; 330 struct intel_engine_cs *engine; 331 enum intel_engine_id id; 332 unsigned int mask = 0; 333 unsigned int i; 334 int err; 335 336 WARN_ON(ring_mask == 0); 337 WARN_ON(ring_mask & 338 GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES)); 339 340 for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { 341 if (!HAS_ENGINE(dev_priv, i)) 342 continue; 343 344 err = intel_engine_setup(dev_priv, i); 345 if (err) 346 goto cleanup; 347 348 mask |= ENGINE_MASK(i); 349 } 350 351 /* 352 * Catch failures to update intel_engines table when the new engines 353 * are added to the driver by a warning and disabling the forgotten 354 * engines. 355 */ 356 if (WARN_ON(mask != ring_mask)) 357 device_info->ring_mask = mask; 358 359 /* We always presume we have at least RCS available for later probing */ 360 if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) { 361 err = -ENODEV; 362 goto cleanup; 363 } 364 365 device_info->num_rings = hweight32(mask); 366 367 i915_check_and_clear_faults(dev_priv); 368 369 return 0; 370 371cleanup: 372 for_each_engine(engine, dev_priv, id) 373 kfree(engine); 374 return err; 375} 376 377/** 378 * intel_engines_init() - init the Engine Command Streamers 379 * @dev_priv: i915 device private 380 * 381 * Return: non-zero if the initialization failed. 382 */ 383int intel_engines_init(struct drm_i915_private *dev_priv) 384{ 385 struct intel_engine_cs *engine; 386 enum intel_engine_id id, err_id; 387 int err; 388 389 for_each_engine(engine, dev_priv, id) { 390 const struct engine_class_info *class_info = 391 &intel_engine_classes[engine->class]; 392 int (*init)(struct intel_engine_cs *engine); 393 394 if (HAS_EXECLISTS(dev_priv)) 395 init = class_info->init_execlists; 396 else 397 init = class_info->init_legacy; 398 399 err = -EINVAL; 400 err_id = id; 401 402 if (GEM_WARN_ON(!init)) 403 goto cleanup; 404 405 err = init(engine); 406 if (err) 407 goto cleanup; 408 409 GEM_BUG_ON(!engine->submit_request); 410 } 411 412 return 0; 413 414cleanup: 415 for_each_engine(engine, dev_priv, id) { 416 if (id >= err_id) { 417 kfree(engine); 418 dev_priv->engine[id] = NULL; 419 } else { 420 dev_priv->gt.cleanup_engine(engine); 421 } 422 } 423 return err; 424} 425 426void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno) 427{ 428 struct drm_i915_private *dev_priv = engine->i915; 429 430 /* Our semaphore implementation is strictly monotonic (i.e. we proceed 431 * so long as the semaphore value in the register/page is greater 432 * than the sync value), so whenever we reset the seqno, 433 * so long as we reset the tracking semaphore value to 0, it will 434 * always be before the next request's seqno. If we don't reset 435 * the semaphore value, then when the seqno moves backwards all 436 * future waits will complete instantly (causing rendering corruption). 437 */ 438 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { 439 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); 440 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); 441 if (HAS_VEBOX(dev_priv)) 442 I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); 443 } 444 445 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); 446 clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); 447 448 /* After manually advancing the seqno, fake the interrupt in case 449 * there are any waiters for that seqno. 450 */ 451 intel_engine_wakeup(engine); 452 453 GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno); 454} 455 456static void intel_engine_init_batch_pool(struct intel_engine_cs *engine) 457{ 458 i915_gem_batch_pool_init(&engine->batch_pool, engine); 459} 460 461static void intel_engine_init_execlist(struct intel_engine_cs *engine) 462{ 463 struct intel_engine_execlists * const execlists = &engine->execlists; 464 465 execlists->port_mask = 1; 466 BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); 467 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); 468 469 execlists->queue_priority = INT_MIN; 470 execlists->queue = RB_ROOT_CACHED; 471} 472 473/** 474 * intel_engines_setup_common - setup engine state not requiring hw access 475 * @engine: Engine to setup. 476 * 477 * Initializes @engine@ structure members shared between legacy and execlists 478 * submission modes which do not require hardware access. 479 * 480 * Typically done early in the submission mode specific engine setup stage. 481 */ 482void intel_engine_setup_common(struct intel_engine_cs *engine) 483{ 484 i915_timeline_init(engine->i915, &engine->timeline, engine->name); 485 lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE); 486 487 intel_engine_init_execlist(engine); 488 intel_engine_init_hangcheck(engine); 489 intel_engine_init_batch_pool(engine); 490 intel_engine_init_cmd_parser(engine); 491} 492 493int intel_engine_create_scratch(struct intel_engine_cs *engine, 494 unsigned int size) 495{ 496 struct drm_i915_gem_object *obj; 497 struct i915_vma *vma; 498 int ret; 499 500 WARN_ON(engine->scratch); 501 502 obj = i915_gem_object_create_stolen(engine->i915, size); 503 if (!obj) 504 obj = i915_gem_object_create_internal(engine->i915, size); 505 if (IS_ERR(obj)) { 506 DRM_ERROR("Failed to allocate scratch page\n"); 507 return PTR_ERR(obj); 508 } 509 510 vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); 511 if (IS_ERR(vma)) { 512 ret = PTR_ERR(vma); 513 goto err_unref; 514 } 515 516 ret = i915_vma_pin(vma, 0, 4096, PIN_GLOBAL | PIN_HIGH); 517 if (ret) 518 goto err_unref; 519 520 engine->scratch = vma; 521 return 0; 522 523err_unref: 524 i915_gem_object_put(obj); 525 return ret; 526} 527 528void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) 529{ 530 i915_vma_unpin_and_release(&engine->scratch); 531} 532 533static void cleanup_phys_status_page(struct intel_engine_cs *engine) 534{ 535 struct drm_i915_private *dev_priv = engine->i915; 536 537 if (!dev_priv->status_page_dmah) 538 return; 539 540 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); 541 engine->status_page.page_addr = NULL; 542} 543 544static void cleanup_status_page(struct intel_engine_cs *engine) 545{ 546 struct i915_vma *vma; 547 struct drm_i915_gem_object *obj; 548 549 vma = fetch_and_zero(&engine->status_page.vma); 550 if (!vma) 551 return; 552 553 obj = vma->obj; 554 555 i915_vma_unpin(vma); 556 i915_vma_close(vma); 557 558 i915_gem_object_unpin_map(obj); 559 __i915_gem_object_release_unless_active(obj); 560} 561 562static int init_status_page(struct intel_engine_cs *engine) 563{ 564 struct drm_i915_gem_object *obj; 565 struct i915_vma *vma; 566 unsigned int flags; 567 void *vaddr; 568 int ret; 569 570 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); 571 if (IS_ERR(obj)) { 572 DRM_ERROR("Failed to allocate status page\n"); 573 return PTR_ERR(obj); 574 } 575 576 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 577 if (ret) 578 goto err; 579 580 vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); 581 if (IS_ERR(vma)) { 582 ret = PTR_ERR(vma); 583 goto err; 584 } 585 586 flags = PIN_GLOBAL; 587 if (!HAS_LLC(engine->i915)) 588 /* On g33, we cannot place HWS above 256MiB, so 589 * restrict its pinning to the low mappable arena. 590 * Though this restriction is not documented for 591 * gen4, gen5, or byt, they also behave similarly 592 * and hang if the HWS is placed at the top of the 593 * GTT. To generalise, it appears that all !llc 594 * platforms have issues with us placing the HWS 595 * above the mappable region (even though we never 596 * actually map it). 597 */ 598 flags |= PIN_MAPPABLE; 599 else 600 flags |= PIN_HIGH; 601 ret = i915_vma_pin(vma, 0, 4096, flags); 602 if (ret) 603 goto err; 604 605 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); 606 if (IS_ERR(vaddr)) { 607 ret = PTR_ERR(vaddr); 608 goto err_unpin; 609 } 610 611 engine->status_page.vma = vma; 612 engine->status_page.ggtt_offset = i915_ggtt_offset(vma); 613 engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE); 614 return 0; 615 616err_unpin: 617 i915_vma_unpin(vma); 618err: 619 i915_gem_object_put(obj); 620 return ret; 621} 622 623static int init_phys_status_page(struct intel_engine_cs *engine) 624{ 625 struct drm_i915_private *dev_priv = engine->i915; 626 627 GEM_BUG_ON(engine->id != RCS); 628 629 dev_priv->status_page_dmah = 630 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); 631 if (!dev_priv->status_page_dmah) 632 return -ENOMEM; 633 634 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 635 memset(engine->status_page.page_addr, 0, PAGE_SIZE); 636 637 return 0; 638} 639 640static void __intel_context_unpin(struct i915_gem_context *ctx, 641 struct intel_engine_cs *engine) 642{ 643 intel_context_unpin(to_intel_context(ctx, engine)); 644} 645 646/** 647 * intel_engines_init_common - initialize cengine state which might require hw access 648 * @engine: Engine to initialize. 649 * 650 * Initializes @engine@ structure members shared between legacy and execlists 651 * submission modes which do require hardware access. 652 * 653 * Typcally done at later stages of submission mode specific engine setup. 654 * 655 * Returns zero on success or an error code on failure. 656 */ 657int intel_engine_init_common(struct intel_engine_cs *engine) 658{ 659 struct drm_i915_private *i915 = engine->i915; 660 struct intel_context *ce; 661 int ret; 662 663 engine->set_default_submission(engine); 664 665 /* We may need to do things with the shrinker which 666 * require us to immediately switch back to the default 667 * context. This can cause a problem as pinning the 668 * default context also requires GTT space which may not 669 * be available. To avoid this we always pin the default 670 * context. 671 */ 672 ce = intel_context_pin(i915->kernel_context, engine); 673 if (IS_ERR(ce)) 674 return PTR_ERR(ce); 675 676 /* 677 * Similarly the preempt context must always be available so that 678 * we can interrupt the engine at any time. 679 */ 680 if (i915->preempt_context) { 681 ce = intel_context_pin(i915->preempt_context, engine); 682 if (IS_ERR(ce)) { 683 ret = PTR_ERR(ce); 684 goto err_unpin_kernel; 685 } 686 } 687 688 ret = intel_engine_init_breadcrumbs(engine); 689 if (ret) 690 goto err_unpin_preempt; 691 692 if (HWS_NEEDS_PHYSICAL(i915)) 693 ret = init_phys_status_page(engine); 694 else 695 ret = init_status_page(engine); 696 if (ret) 697 goto err_breadcrumbs; 698 699 return 0; 700 701err_breadcrumbs: 702 intel_engine_fini_breadcrumbs(engine); 703err_unpin_preempt: 704 if (i915->preempt_context) 705 __intel_context_unpin(i915->preempt_context, engine); 706 707err_unpin_kernel: 708 __intel_context_unpin(i915->kernel_context, engine); 709 return ret; 710} 711 712/** 713 * intel_engines_cleanup_common - cleans up the engine state created by 714 * the common initiailizers. 715 * @engine: Engine to cleanup. 716 * 717 * This cleans up everything created by the common helpers. 718 */ 719void intel_engine_cleanup_common(struct intel_engine_cs *engine) 720{ 721 struct drm_i915_private *i915 = engine->i915; 722 723 intel_engine_cleanup_scratch(engine); 724 725 if (HWS_NEEDS_PHYSICAL(engine->i915)) 726 cleanup_phys_status_page(engine); 727 else 728 cleanup_status_page(engine); 729 730 intel_engine_fini_breadcrumbs(engine); 731 intel_engine_cleanup_cmd_parser(engine); 732 i915_gem_batch_pool_fini(&engine->batch_pool); 733 734 if (engine->default_state) 735 i915_gem_object_put(engine->default_state); 736 737 if (i915->preempt_context) 738 __intel_context_unpin(i915->preempt_context, engine); 739 __intel_context_unpin(i915->kernel_context, engine); 740 741 i915_timeline_fini(&engine->timeline); 742} 743 744u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) 745{ 746 struct drm_i915_private *dev_priv = engine->i915; 747 u64 acthd; 748 749 if (INTEL_GEN(dev_priv) >= 8) 750 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), 751 RING_ACTHD_UDW(engine->mmio_base)); 752 else if (INTEL_GEN(dev_priv) >= 4) 753 acthd = I915_READ(RING_ACTHD(engine->mmio_base)); 754 else 755 acthd = I915_READ(ACTHD); 756 757 return acthd; 758} 759 760u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine) 761{ 762 struct drm_i915_private *dev_priv = engine->i915; 763 u64 bbaddr; 764 765 if (INTEL_GEN(dev_priv) >= 8) 766 bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base), 767 RING_BBADDR_UDW(engine->mmio_base)); 768 else 769 bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); 770 771 return bbaddr; 772} 773 774int intel_engine_stop_cs(struct intel_engine_cs *engine) 775{ 776 struct drm_i915_private *dev_priv = engine->i915; 777 const u32 base = engine->mmio_base; 778 const i915_reg_t mode = RING_MI_MODE(base); 779 int err; 780 781 if (INTEL_GEN(dev_priv) < 3) 782 return -ENODEV; 783 784 GEM_TRACE("%s\n", engine->name); 785 786 I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING)); 787 788 err = 0; 789 if (__intel_wait_for_register_fw(dev_priv, 790 mode, MODE_IDLE, MODE_IDLE, 791 1000, 0, 792 NULL)) { 793 GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name); 794 err = -ETIMEDOUT; 795 } 796 797 /* A final mmio read to let GPU writes be hopefully flushed to memory */ 798 POSTING_READ_FW(mode); 799 800 return err; 801} 802 803const char *i915_cache_level_str(struct drm_i915_private *i915, int type) 804{ 805 switch (type) { 806 case I915_CACHE_NONE: return " uncached"; 807 case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped"; 808 case I915_CACHE_L3_LLC: return " L3+LLC"; 809 case I915_CACHE_WT: return " WT"; 810 default: return ""; 811 } 812} 813 814u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) 815{ 816 const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu); 817 u32 mcr_s_ss_select; 818 u32 slice = fls(sseu->slice_mask); 819 u32 subslice = fls(sseu->subslice_mask[slice]); 820 821 if (INTEL_GEN(dev_priv) == 10) 822 mcr_s_ss_select = GEN8_MCR_SLICE(slice) | 823 GEN8_MCR_SUBSLICE(subslice); 824 else if (INTEL_GEN(dev_priv) >= 11) 825 mcr_s_ss_select = GEN11_MCR_SLICE(slice) | 826 GEN11_MCR_SUBSLICE(subslice); 827 else 828 mcr_s_ss_select = 0; 829 830 return mcr_s_ss_select; 831} 832 833static inline uint32_t 834read_subslice_reg(struct drm_i915_private *dev_priv, int slice, 835 int subslice, i915_reg_t reg) 836{ 837 uint32_t mcr_slice_subslice_mask; 838 uint32_t mcr_slice_subslice_select; 839 uint32_t default_mcr_s_ss_select; 840 uint32_t mcr; 841 uint32_t ret; 842 enum forcewake_domains fw_domains; 843 844 if (INTEL_GEN(dev_priv) >= 11) { 845 mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | 846 GEN11_MCR_SUBSLICE_MASK; 847 mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) | 848 GEN11_MCR_SUBSLICE(subslice); 849 } else { 850 mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK | 851 GEN8_MCR_SUBSLICE_MASK; 852 mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) | 853 GEN8_MCR_SUBSLICE(subslice); 854 } 855 856 default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv); 857 858 fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, 859 FW_REG_READ); 860 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, 861 GEN8_MCR_SELECTOR, 862 FW_REG_READ | FW_REG_WRITE); 863 864 spin_lock_irq(&dev_priv->uncore.lock); 865 intel_uncore_forcewake_get__locked(dev_priv, fw_domains); 866 867 mcr = I915_READ_FW(GEN8_MCR_SELECTOR); 868 869 WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) != 870 default_mcr_s_ss_select); 871 872 mcr &= ~mcr_slice_subslice_mask; 873 mcr |= mcr_slice_subslice_select; 874 I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); 875 876 ret = I915_READ_FW(reg); 877 878 mcr &= ~mcr_slice_subslice_mask; 879 mcr |= default_mcr_s_ss_select; 880 881 I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); 882 883 intel_uncore_forcewake_put__locked(dev_priv, fw_domains); 884 spin_unlock_irq(&dev_priv->uncore.lock); 885 886 return ret; 887} 888 889/* NB: please notice the memset */ 890void intel_engine_get_instdone(struct intel_engine_cs *engine, 891 struct intel_instdone *instdone) 892{ 893 struct drm_i915_private *dev_priv = engine->i915; 894 u32 mmio_base = engine->mmio_base; 895 int slice; 896 int subslice; 897 898 memset(instdone, 0, sizeof(*instdone)); 899 900 switch (INTEL_GEN(dev_priv)) { 901 default: 902 instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); 903 904 if (engine->id != RCS) 905 break; 906 907 instdone->slice_common = I915_READ(GEN7_SC_INSTDONE); 908 for_each_instdone_slice_subslice(dev_priv, slice, subslice) { 909 instdone->sampler[slice][subslice] = 910 read_subslice_reg(dev_priv, slice, subslice, 911 GEN7_SAMPLER_INSTDONE); 912 instdone->row[slice][subslice] = 913 read_subslice_reg(dev_priv, slice, subslice, 914 GEN7_ROW_INSTDONE); 915 } 916 break; 917 case 7: 918 instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); 919 920 if (engine->id != RCS) 921 break; 922 923 instdone->slice_common = I915_READ(GEN7_SC_INSTDONE); 924 instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE); 925 instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE); 926 927 break; 928 case 6: 929 case 5: 930 case 4: 931 instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); 932 933 if (engine->id == RCS) 934 /* HACK: Using the wrong struct member */ 935 instdone->slice_common = I915_READ(GEN4_INSTDONE1); 936 break; 937 case 3: 938 case 2: 939 instdone->instdone = I915_READ(GEN2_INSTDONE); 940 break; 941 } 942} 943 944static bool ring_is_idle(struct intel_engine_cs *engine) 945{ 946 struct drm_i915_private *dev_priv = engine->i915; 947 bool idle = true; 948 949 /* If the whole device is asleep, the engine must be idle */ 950 if (!intel_runtime_pm_get_if_in_use(dev_priv)) 951 return true; 952 953 /* First check that no commands are left in the ring */ 954 if ((I915_READ_HEAD(engine) & HEAD_ADDR) != 955 (I915_READ_TAIL(engine) & TAIL_ADDR)) 956 idle = false; 957 958 /* No bit for gen2, so assume the CS parser is idle */ 959 if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) 960 idle = false; 961 962 intel_runtime_pm_put(dev_priv); 963 964 return idle; 965} 966 967/** 968 * intel_engine_is_idle() - Report if the engine has finished process all work 969 * @engine: the intel_engine_cs 970 * 971 * Return true if there are no requests pending, nothing left to be submitted 972 * to hardware, and that the engine is idle. 973 */ 974bool intel_engine_is_idle(struct intel_engine_cs *engine) 975{ 976 struct drm_i915_private *dev_priv = engine->i915; 977 978 /* More white lies, if wedged, hw state is inconsistent */ 979 if (i915_terminally_wedged(&dev_priv->gpu_error)) 980 return true; 981 982 /* Any inflight/incomplete requests? */ 983 if (!i915_seqno_passed(intel_engine_get_seqno(engine), 984 intel_engine_last_submit(engine))) 985 return false; 986 987 if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock)) 988 return true; 989 990 /* Waiting to drain ELSP? */ 991 if (READ_ONCE(engine->execlists.active)) { 992 struct tasklet_struct *t = &engine->execlists.tasklet; 993 994 local_bh_disable(); 995 if (tasklet_trylock(t)) { 996 /* Must wait for any GPU reset in progress. */ 997 if (__tasklet_is_enabled(t)) 998 t->func(t->data); 999 tasklet_unlock(t); 1000 } 1001 local_bh_enable(); 1002 1003 if (READ_ONCE(engine->execlists.active)) 1004 return false; 1005 } 1006 1007 /* ELSP is empty, but there are ready requests? E.g. after reset */ 1008 if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) 1009 return false; 1010 1011 /* Ring stopped? */ 1012 if (!ring_is_idle(engine)) 1013 return false; 1014 1015 return true; 1016} 1017 1018bool intel_engines_are_idle(struct drm_i915_private *dev_priv) 1019{ 1020 struct intel_engine_cs *engine; 1021 enum intel_engine_id id; 1022 1023 /* 1024 * If the driver is wedged, HW state may be very inconsistent and 1025 * report that it is still busy, even though we have stopped using it. 1026 */ 1027 if (i915_terminally_wedged(&dev_priv->gpu_error)) 1028 return true; 1029 1030 for_each_engine(engine, dev_priv, id) { 1031 if (!intel_engine_is_idle(engine)) 1032 return false; 1033 } 1034 1035 return true; 1036} 1037 1038/** 1039 * intel_engine_has_kernel_context: 1040 * @engine: the engine 1041 * 1042 * Returns true if the last context to be executed on this engine, or has been 1043 * executed if the engine is already idle, is the kernel context 1044 * (#i915.kernel_context). 1045 */ 1046bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine) 1047{ 1048 const struct intel_context *kernel_context = 1049 to_intel_context(engine->i915->kernel_context, engine); 1050 struct i915_request *rq; 1051 1052 lockdep_assert_held(&engine->i915->drm.struct_mutex); 1053 1054 /* 1055 * Check the last context seen by the engine. If active, it will be 1056 * the last request that remains in the timeline. When idle, it is 1057 * the last executed context as tracked by retirement. 1058 */ 1059 rq = __i915_gem_active_peek(&engine->timeline.last_request); 1060 if (rq) 1061 return rq->hw_context == kernel_context; 1062 else 1063 return engine->last_retired_context == kernel_context; 1064} 1065 1066void intel_engines_reset_default_submission(struct drm_i915_private *i915) 1067{ 1068 struct intel_engine_cs *engine; 1069 enum intel_engine_id id; 1070 1071 for_each_engine(engine, i915, id) 1072 engine->set_default_submission(engine); 1073} 1074 1075/** 1076 * intel_engines_sanitize: called after the GPU has lost power 1077 * @i915: the i915 device 1078 * 1079 * Anytime we reset the GPU, either with an explicit GPU reset or through a 1080 * PCI power cycle, the GPU loses state and we must reset our state tracking 1081 * to match. Note that calling intel_engines_sanitize() if the GPU has not 1082 * been reset results in much confusion! 1083 */ 1084void intel_engines_sanitize(struct drm_i915_private *i915) 1085{ 1086 struct intel_engine_cs *engine; 1087 enum intel_engine_id id; 1088 1089 GEM_TRACE("\n"); 1090 1091 for_each_engine(engine, i915, id) { 1092 if (engine->reset.reset) 1093 engine->reset.reset(engine, NULL); 1094 } 1095} 1096 1097/** 1098 * intel_engines_park: called when the GT is transitioning from busy->idle 1099 * @i915: the i915 device 1100 * 1101 * The GT is now idle and about to go to sleep (maybe never to wake again?). 1102 * Time for us to tidy and put away our toys (release resources back to the 1103 * system). 1104 */ 1105void intel_engines_park(struct drm_i915_private *i915) 1106{ 1107 struct intel_engine_cs *engine; 1108 enum intel_engine_id id; 1109 1110 for_each_engine(engine, i915, id) { 1111 /* Flush the residual irq tasklets first. */ 1112 intel_engine_disarm_breadcrumbs(engine); 1113 tasklet_kill(&engine->execlists.tasklet); 1114 1115 /* 1116 * We are committed now to parking the engines, make sure there 1117 * will be no more interrupts arriving later and the engines 1118 * are truly idle. 1119 */ 1120 if (wait_for(intel_engine_is_idle(engine), 10)) { 1121 struct drm_printer p = drm_debug_printer(__func__); 1122 1123 dev_err(i915->drm.dev, 1124 "%s is not idle before parking\n", 1125 engine->name); 1126 intel_engine_dump(engine, &p, NULL); 1127 } 1128 1129 /* Must be reset upon idling, or we may miss the busy wakeup. */ 1130 GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN); 1131 1132 if (engine->park) 1133 engine->park(engine); 1134 1135 if (engine->pinned_default_state) { 1136 i915_gem_object_unpin_map(engine->default_state); 1137 engine->pinned_default_state = NULL; 1138 } 1139 1140 i915_gem_batch_pool_fini(&engine->batch_pool); 1141 engine->execlists.no_priolist = false; 1142 } 1143} 1144 1145/** 1146 * intel_engines_unpark: called when the GT is transitioning from idle->busy 1147 * @i915: the i915 device 1148 * 1149 * The GT was idle and now about to fire up with some new user requests. 1150 */ 1151void intel_engines_unpark(struct drm_i915_private *i915) 1152{ 1153 struct intel_engine_cs *engine; 1154 enum intel_engine_id id; 1155 1156 for_each_engine(engine, i915, id) { 1157 void *map; 1158 1159 /* Pin the default state for fast resets from atomic context. */ 1160 map = NULL; 1161 if (engine->default_state) 1162 map = i915_gem_object_pin_map(engine->default_state, 1163 I915_MAP_WB); 1164 if (!IS_ERR_OR_NULL(map)) 1165 engine->pinned_default_state = map; 1166 1167 if (engine->unpark) 1168 engine->unpark(engine); 1169 1170 intel_engine_init_hangcheck(engine); 1171 } 1172} 1173 1174/** 1175 * intel_engine_lost_context: called when the GPU is reset into unknown state 1176 * @engine: the engine 1177 * 1178 * We have either reset the GPU or otherwise about to lose state tracking of 1179 * the current GPU logical state (e.g. suspend). On next use, it is therefore 1180 * imperative that we make no presumptions about the current state and load 1181 * from scratch. 1182 */ 1183void intel_engine_lost_context(struct intel_engine_cs *engine) 1184{ 1185 struct intel_context *ce; 1186 1187 lockdep_assert_held(&engine->i915->drm.struct_mutex); 1188 1189 ce = fetch_and_zero(&engine->last_retired_context); 1190 if (ce) 1191 intel_context_unpin(ce); 1192} 1193 1194bool intel_engine_can_store_dword(struct intel_engine_cs *engine) 1195{ 1196 switch (INTEL_GEN(engine->i915)) { 1197 case 2: 1198 return false; /* uses physical not virtual addresses */ 1199 case 3: 1200 /* maybe only uses physical not virtual addresses */ 1201 return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); 1202 case 6: 1203 return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ 1204 default: 1205 return true; 1206 } 1207} 1208 1209unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915) 1210{ 1211 struct intel_engine_cs *engine; 1212 enum intel_engine_id id; 1213 unsigned int which; 1214 1215 which = 0; 1216 for_each_engine(engine, i915, id) 1217 if (engine->default_state) 1218 which |= BIT(engine->uabi_class); 1219 1220 return which; 1221} 1222 1223static int print_sched_attr(struct drm_i915_private *i915, 1224 const struct i915_sched_attr *attr, 1225 char *buf, int x, int len) 1226{ 1227 if (attr->priority == I915_PRIORITY_INVALID) 1228 return x; 1229 1230 x += snprintf(buf + x, len - x, 1231 " prio=%d", attr->priority); 1232 1233 return x; 1234} 1235 1236static void print_request(struct drm_printer *m, 1237 struct i915_request *rq, 1238 const char *prefix) 1239{ 1240 const char *name = rq->fence.ops->get_timeline_name(&rq->fence); 1241 char buf[80] = ""; 1242 int x = 0; 1243 1244 x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf)); 1245 1246 drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n", 1247 prefix, 1248 rq->global_seqno, 1249 i915_request_completed(rq) ? "!" : "", 1250 rq->fence.context, rq->fence.seqno, 1251 buf, 1252 jiffies_to_msecs(jiffies - rq->emitted_jiffies), 1253 name); 1254} 1255 1256static void hexdump(struct drm_printer *m, const void *buf, size_t len) 1257{ 1258 const size_t rowsize = 8 * sizeof(u32); 1259 const void *prev = NULL; 1260 bool skip = false; 1261 size_t pos; 1262 1263 for (pos = 0; pos < len; pos += rowsize) { 1264 char line[128]; 1265 1266 if (prev && !memcmp(prev, buf + pos, rowsize)) { 1267 if (!skip) { 1268 drm_printf(m, "*\n"); 1269 skip = true; 1270 } 1271 continue; 1272 } 1273 1274 WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos, 1275 rowsize, sizeof(u32), 1276 line, sizeof(line), 1277 false) >= sizeof(line)); 1278 drm_printf(m, "[%04zx] %s\n", pos, line); 1279 1280 prev = buf + pos; 1281 skip = false; 1282 } 1283} 1284 1285static void intel_engine_print_registers(const struct intel_engine_cs *engine, 1286 struct drm_printer *m) 1287{ 1288 struct drm_i915_private *dev_priv = engine->i915; 1289 const struct intel_engine_execlists * const execlists = 1290 &engine->execlists; 1291 u64 addr; 1292 1293 if (engine->id == RCS && IS_GEN(dev_priv, 4, 7)) 1294 drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID)); 1295 drm_printf(m, "\tRING_START: 0x%08x\n", 1296 I915_READ(RING_START(engine->mmio_base))); 1297 drm_printf(m, "\tRING_HEAD: 0x%08x\n", 1298 I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR); 1299 drm_printf(m, "\tRING_TAIL: 0x%08x\n", 1300 I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR); 1301 drm_printf(m, "\tRING_CTL: 0x%08x%s\n", 1302 I915_READ(RING_CTL(engine->mmio_base)), 1303 I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : ""); 1304 if (INTEL_GEN(engine->i915) > 2) { 1305 drm_printf(m, "\tRING_MODE: 0x%08x%s\n", 1306 I915_READ(RING_MI_MODE(engine->mmio_base)), 1307 I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : ""); 1308 } 1309 1310 if (INTEL_GEN(dev_priv) >= 6) { 1311 drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine)); 1312 } 1313 1314 if (HAS_LEGACY_SEMAPHORES(dev_priv)) { 1315 drm_printf(m, "\tSYNC_0: 0x%08x\n", 1316 I915_READ(RING_SYNC_0(engine->mmio_base))); 1317 drm_printf(m, "\tSYNC_1: 0x%08x\n", 1318 I915_READ(RING_SYNC_1(engine->mmio_base))); 1319 if (HAS_VEBOX(dev_priv)) 1320 drm_printf(m, "\tSYNC_2: 0x%08x\n", 1321 I915_READ(RING_SYNC_2(engine->mmio_base))); 1322 } 1323 1324 addr = intel_engine_get_active_head(engine); 1325 drm_printf(m, "\tACTHD: 0x%08x_%08x\n", 1326 upper_32_bits(addr), lower_32_bits(addr)); 1327 addr = intel_engine_get_last_batch_head(engine); 1328 drm_printf(m, "\tBBADDR: 0x%08x_%08x\n", 1329 upper_32_bits(addr), lower_32_bits(addr)); 1330 if (INTEL_GEN(dev_priv) >= 8) 1331 addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base), 1332 RING_DMA_FADD_UDW(engine->mmio_base)); 1333 else if (INTEL_GEN(dev_priv) >= 4) 1334 addr = I915_READ(RING_DMA_FADD(engine->mmio_base)); 1335 else 1336 addr = I915_READ(DMA_FADD_I8XX); 1337 drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n", 1338 upper_32_bits(addr), lower_32_bits(addr)); 1339 if (INTEL_GEN(dev_priv) >= 4) { 1340 drm_printf(m, "\tIPEIR: 0x%08x\n", 1341 I915_READ(RING_IPEIR(engine->mmio_base))); 1342 drm_printf(m, "\tIPEHR: 0x%08x\n", 1343 I915_READ(RING_IPEHR(engine->mmio_base))); 1344 } else { 1345 drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR)); 1346 drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR)); 1347 } 1348 1349 if (HAS_EXECLISTS(dev_priv)) { 1350 const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; 1351 u32 ptr, read, write; 1352 unsigned int idx; 1353 1354 drm_printf(m, "\tExeclist status: 0x%08x %08x\n", 1355 I915_READ(RING_EXECLIST_STATUS_LO(engine)), 1356 I915_READ(RING_EXECLIST_STATUS_HI(engine))); 1357 1358 ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); 1359 read = GEN8_CSB_READ_PTR(ptr); 1360 write = GEN8_CSB_WRITE_PTR(ptr); 1361 drm_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], tasklet queued? %s (%s)\n", 1362 read, execlists->csb_head, 1363 write, 1364 intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)), 1365 yesno(test_bit(TASKLET_STATE_SCHED, 1366 &engine->execlists.tasklet.state)), 1367 enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); 1368 if (read >= GEN8_CSB_ENTRIES) 1369 read = 0; 1370 if (write >= GEN8_CSB_ENTRIES) 1371 write = 0; 1372 if (read > write) 1373 write += GEN8_CSB_ENTRIES; 1374 while (read < write) { 1375 idx = ++read % GEN8_CSB_ENTRIES; 1376 drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n", 1377 idx, 1378 I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), 1379 hws[idx * 2], 1380 I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)), 1381 hws[idx * 2 + 1]); 1382 } 1383 1384 rcu_read_lock(); 1385 for (idx = 0; idx < execlists_num_ports(execlists); idx++) { 1386 struct i915_request *rq; 1387 unsigned int count; 1388 1389 rq = port_unpack(&execlists->port[idx], &count); 1390 if (rq) { 1391 char hdr[80]; 1392 1393 snprintf(hdr, sizeof(hdr), 1394 "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ", 1395 idx, count, 1396 i915_ggtt_offset(rq->ring->vma)); 1397 print_request(m, rq, hdr); 1398 } else { 1399 drm_printf(m, "\t\tELSP[%d] idle\n", idx); 1400 } 1401 } 1402 drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active); 1403 rcu_read_unlock(); 1404 } else if (INTEL_GEN(dev_priv) > 6) { 1405 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", 1406 I915_READ(RING_PP_DIR_BASE(engine))); 1407 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", 1408 I915_READ(RING_PP_DIR_BASE_READ(engine))); 1409 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", 1410 I915_READ(RING_PP_DIR_DCLV(engine))); 1411 } 1412} 1413 1414static void print_request_ring(struct drm_printer *m, struct i915_request *rq) 1415{ 1416 void *ring; 1417 int size; 1418 1419 drm_printf(m, 1420 "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n", 1421 rq->head, rq->postfix, rq->tail, 1422 rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, 1423 rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); 1424 1425 size = rq->tail - rq->head; 1426 if (rq->tail < rq->head) 1427 size += rq->ring->size; 1428 1429 ring = kmalloc(size, GFP_ATOMIC); 1430 if (ring) { 1431 const void *vaddr = rq->ring->vaddr; 1432 unsigned int head = rq->head; 1433 unsigned int len = 0; 1434 1435 if (rq->tail < head) { 1436 len = rq->ring->size - head; 1437 memcpy(ring, vaddr + head, len); 1438 head = 0; 1439 } 1440 memcpy(ring + len, vaddr + head, size - len); 1441 1442 hexdump(m, ring, size); 1443 kfree(ring); 1444 } 1445} 1446 1447void intel_engine_dump(struct intel_engine_cs *engine, 1448 struct drm_printer *m, 1449 const char *header, ...) 1450{ 1451 const int MAX_REQUESTS_TO_SHOW = 8; 1452 struct intel_breadcrumbs * const b = &engine->breadcrumbs; 1453 const struct intel_engine_execlists * const execlists = &engine->execlists; 1454 struct i915_gpu_error * const error = &engine->i915->gpu_error; 1455 struct i915_request *rq, *last; 1456 unsigned long flags; 1457 struct rb_node *rb; 1458 int count; 1459 1460 if (header) { 1461 va_list ap; 1462 1463 va_start(ap, header); 1464 drm_vprintf(m, header, &ap); 1465 va_end(ap); 1466 } 1467 1468 if (i915_terminally_wedged(&engine->i915->gpu_error)) 1469 drm_printf(m, "*** WEDGED ***\n"); 1470 1471 drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n", 1472 intel_engine_get_seqno(engine), 1473 intel_engine_last_submit(engine), 1474 engine->hangcheck.seqno, 1475 jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp)); 1476 drm_printf(m, "\tReset count: %d (global %d)\n", 1477 i915_reset_engine_count(error, engine), 1478 i915_reset_count(error)); 1479 1480 rcu_read_lock(); 1481 1482 drm_printf(m, "\tRequests:\n"); 1483 1484 rq = list_first_entry(&engine->timeline.requests, 1485 struct i915_request, link); 1486 if (&rq->link != &engine->timeline.requests) 1487 print_request(m, rq, "\t\tfirst "); 1488 1489 rq = list_last_entry(&engine->timeline.requests, 1490 struct i915_request, link); 1491 if (&rq->link != &engine->timeline.requests) 1492 print_request(m, rq, "\t\tlast "); 1493 1494 rq = i915_gem_find_active_request(engine); 1495 if (rq) { 1496 print_request(m, rq, "\t\tactive "); 1497 1498 drm_printf(m, "\t\tring->start: 0x%08x\n", 1499 i915_ggtt_offset(rq->ring->vma)); 1500 drm_printf(m, "\t\tring->head: 0x%08x\n", 1501 rq->ring->head); 1502 drm_printf(m, "\t\tring->tail: 0x%08x\n", 1503 rq->ring->tail); 1504 drm_printf(m, "\t\tring->emit: 0x%08x\n", 1505 rq->ring->emit); 1506 drm_printf(m, "\t\tring->space: 0x%08x\n", 1507 rq->ring->space); 1508 1509 print_request_ring(m, rq); 1510 } 1511 1512 rcu_read_unlock(); 1513 1514 if (intel_runtime_pm_get_if_in_use(engine->i915)) { 1515 intel_engine_print_registers(engine, m); 1516 intel_runtime_pm_put(engine->i915); 1517 } else { 1518 drm_printf(m, "\tDevice is asleep; skipping register dump\n"); 1519 } 1520 1521 local_irq_save(flags); 1522 spin_lock(&engine->timeline.lock); 1523 1524 last = NULL; 1525 count = 0; 1526 list_for_each_entry(rq, &engine->timeline.requests, link) { 1527 if (count++ < MAX_REQUESTS_TO_SHOW - 1) 1528 print_request(m, rq, "\t\tE "); 1529 else 1530 last = rq; 1531 } 1532 if (last) { 1533 if (count > MAX_REQUESTS_TO_SHOW) { 1534 drm_printf(m, 1535 "\t\t...skipping %d executing requests...\n", 1536 count - MAX_REQUESTS_TO_SHOW); 1537 } 1538 print_request(m, last, "\t\tE "); 1539 } 1540 1541 last = NULL; 1542 count = 0; 1543 drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); 1544 for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { 1545 struct i915_priolist *p = 1546 rb_entry(rb, typeof(*p), node); 1547 1548 list_for_each_entry(rq, &p->requests, sched.link) { 1549 if (count++ < MAX_REQUESTS_TO_SHOW - 1) 1550 print_request(m, rq, "\t\tQ "); 1551 else 1552 last = rq; 1553 } 1554 } 1555 if (last) { 1556 if (count > MAX_REQUESTS_TO_SHOW) { 1557 drm_printf(m, 1558 "\t\t...skipping %d queued requests...\n", 1559 count - MAX_REQUESTS_TO_SHOW); 1560 } 1561 print_request(m, last, "\t\tQ "); 1562 } 1563 1564 spin_unlock(&engine->timeline.lock); 1565 1566 spin_lock(&b->rb_lock); 1567 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { 1568 struct intel_wait *w = rb_entry(rb, typeof(*w), node); 1569 1570 drm_printf(m, "\t%s [%d] waiting for %x\n", 1571 w->tsk->comm, w->tsk->pid, w->seqno); 1572 } 1573 spin_unlock(&b->rb_lock); 1574 local_irq_restore(flags); 1575 1576 drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n", 1577 engine->irq_posted, 1578 yesno(test_bit(ENGINE_IRQ_BREADCRUMB, 1579 &engine->irq_posted))); 1580 1581 drm_printf(m, "HWSP:\n"); 1582 hexdump(m, engine->status_page.page_addr, PAGE_SIZE); 1583 1584 drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine))); 1585} 1586 1587static u8 user_class_map[] = { 1588 [I915_ENGINE_CLASS_RENDER] = RENDER_CLASS, 1589 [I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS, 1590 [I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS, 1591 [I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS, 1592}; 1593 1594struct intel_engine_cs * 1595intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance) 1596{ 1597 if (class >= ARRAY_SIZE(user_class_map)) 1598 return NULL; 1599 1600 class = user_class_map[class]; 1601 1602 GEM_BUG_ON(class > MAX_ENGINE_CLASS); 1603 1604 if (instance > MAX_ENGINE_INSTANCE) 1605 return NULL; 1606 1607 return i915->engine_class[class][instance]; 1608} 1609 1610/** 1611 * intel_enable_engine_stats() - Enable engine busy tracking on engine 1612 * @engine: engine to enable stats collection 1613 * 1614 * Start collecting the engine busyness data for @engine. 1615 * 1616 * Returns 0 on success or a negative error code. 1617 */ 1618int intel_enable_engine_stats(struct intel_engine_cs *engine) 1619{ 1620 struct intel_engine_execlists *execlists = &engine->execlists; 1621 unsigned long flags; 1622 int err = 0; 1623 1624 if (!intel_engine_supports_stats(engine)) 1625 return -ENODEV; 1626 1627 spin_lock_irqsave(&engine->timeline.lock, flags); 1628 write_seqlock(&engine->stats.lock); 1629 1630 if (unlikely(engine->stats.enabled == ~0)) { 1631 err = -EBUSY; 1632 goto unlock; 1633 } 1634 1635 if (engine->stats.enabled++ == 0) { 1636 const struct execlist_port *port = execlists->port; 1637 unsigned int num_ports = execlists_num_ports(execlists); 1638 1639 engine->stats.enabled_at = ktime_get(); 1640 1641 /* XXX submission method oblivious? */ 1642 while (num_ports-- && port_isset(port)) { 1643 engine->stats.active++; 1644 port++; 1645 } 1646 1647 if (engine->stats.active) 1648 engine->stats.start = engine->stats.enabled_at; 1649 } 1650 1651unlock: 1652 write_sequnlock(&engine->stats.lock); 1653 spin_unlock_irqrestore(&engine->timeline.lock, flags); 1654 1655 return err; 1656} 1657 1658static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) 1659{ 1660 ktime_t total = engine->stats.total; 1661 1662 /* 1663 * If the engine is executing something at the moment 1664 * add it to the total. 1665 */ 1666 if (engine->stats.active) 1667 total = ktime_add(total, 1668 ktime_sub(ktime_get(), engine->stats.start)); 1669 1670 return total; 1671} 1672 1673/** 1674 * intel_engine_get_busy_time() - Return current accumulated engine busyness 1675 * @engine: engine to report on 1676 * 1677 * Returns accumulated time @engine was busy since engine stats were enabled. 1678 */ 1679ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine) 1680{ 1681 unsigned int seq; 1682 ktime_t total; 1683 1684 do { 1685 seq = read_seqbegin(&engine->stats.lock); 1686 total = __intel_engine_get_busy_time(engine); 1687 } while (read_seqretry(&engine->stats.lock, seq)); 1688 1689 return total; 1690} 1691 1692/** 1693 * intel_disable_engine_stats() - Disable engine busy tracking on engine 1694 * @engine: engine to disable stats collection 1695 * 1696 * Stops collecting the engine busyness data for @engine. 1697 */ 1698void intel_disable_engine_stats(struct intel_engine_cs *engine) 1699{ 1700 unsigned long flags; 1701 1702 if (!intel_engine_supports_stats(engine)) 1703 return; 1704 1705 write_seqlock_irqsave(&engine->stats.lock, flags); 1706 WARN_ON_ONCE(engine->stats.enabled == 0); 1707 if (--engine->stats.enabled == 0) { 1708 engine->stats.total = __intel_engine_get_busy_time(engine); 1709 engine->stats.active = 0; 1710 } 1711 write_sequnlock_irqrestore(&engine->stats.lock, flags); 1712} 1713 1714#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1715#include "selftests/mock_engine.c" 1716#include "selftests/intel_engine_cs.c" 1717#endif