Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Move the engine mask to intel_gt_info

Since the engines belong to the GT, move the runtime-updated list of
available engines to the intel_gt struct. The original mask has been
renamed to indicate it contains the maximum engine list that can be
found on a matching device.

In preparation for other info being moved to the gt in follow up patches
(sseu), introduce an intel_gt_info structure to group all gt-related
runtime info.

v2: s/max_engine_mask/platform_engine_mask (tvrtko), fix selftest

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Andi Shyti <andi.shyti@intel.com>
Cc: Venkata Sandeep Dhanalakota <venkata.s.dhanalakota@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> #v1
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-5-daniele.ceraolospurio@intel.com

Authored by Daniele Ceraolo Spurio; committed by Chris Wilson.
792592e7 f6beb381

+84 -62
+1 -2
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
··· 1980 1980 1981 1981 static int num_vcs_engines(const struct drm_i915_private *i915) 1982 1982 { 1983 - return hweight64(INTEL_INFO(i915)->engine_mask & 1984 - GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0)); 1983 + return hweight64(VDBOX_MASK(&i915->gt)); 1985 1984 } 1986 1985 1987 1986 /*
+7 -6
drivers/gpu/drm/i915/gt/intel_engine_cs.c
··· 370 370 * instances. 371 371 */ 372 372 if ((INTEL_GEN(i915) >= 11 && 373 - RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) || 373 + engine->gt->info.vdbox_sfc_access & engine->mask) || 374 374 (INTEL_GEN(i915) >= 9 && engine->instance == 0)) 375 375 engine->uabi_capabilities |= 376 376 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; ··· 463 463 static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) 464 464 { 465 465 struct drm_i915_private *i915 = gt->i915; 466 - struct intel_device_info *info = mkwrite_device_info(i915); 466 + struct intel_gt_info *info = &gt->info; 467 467 struct intel_uncore *uncore = gt->uncore; 468 468 unsigned int logical_vdbox = 0; 469 469 unsigned int i; 470 470 u32 media_fuse; 471 471 u16 vdbox_mask; 472 472 u16 vebox_mask; 473 + 474 + info->engine_mask = INTEL_INFO(i915)->platform_engine_mask; 473 475 474 476 if (INTEL_GEN(i915) < 11) 475 477 return info->engine_mask; ··· 500 498 * In TGL each VDBOX has access to an SFC. 501 499 */ 502 500 if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0) 503 - RUNTIME_INFO(i915)->vdbox_sfc_access |= BIT(i); 501 + gt->info.vdbox_sfc_access |= BIT(i); 504 502 } 505 503 drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n", 506 504 vdbox_mask, VDBOX_MASK(gt)); ··· 533 531 int intel_engines_init_mmio(struct intel_gt *gt) 534 532 { 535 533 struct drm_i915_private *i915 = gt->i915; 536 - struct intel_device_info *device_info = mkwrite_device_info(i915); 537 534 const unsigned int engine_mask = init_engine_mask(gt); 538 535 unsigned int mask = 0; 539 536 unsigned int i; ··· 562 561 * engines. 563 562 */ 564 563 if (drm_WARN_ON(&i915->drm, mask != engine_mask)) 565 - device_info->engine_mask = mask; 564 + gt->info.engine_mask = mask; 566 565 567 - RUNTIME_INFO(i915)->num_engines = hweight32(mask); 566 + gt->info.num_engines = hweight32(mask); 568 567 569 568 intel_gt_check_and_clear_faults(gt); 570 569
+6
drivers/gpu/drm/i915/gt/intel_gt.c
··· 642 642 intel_gt_fini_timelines(gt); 643 643 intel_engines_free(gt); 644 644 } 645 + 646 + void intel_gt_info_print(const struct intel_gt_info *info, 647 + struct drm_printer *p) 648 + { 649 + drm_printf(p, "available engines: %x\n", info->engine_mask); 650 + }
+4
drivers/gpu/drm/i915/gt/intel_gt.h
··· 11 11 #include "intel_reset.h" 12 12 13 13 struct drm_i915_private; 14 + struct drm_printer; 14 15 15 16 #define GT_TRACE(gt, fmt, ...) do { \ 16 17 const struct intel_gt *gt__ __maybe_unused = (gt); \ ··· 72 71 73 72 return unlikely(test_bit(I915_WEDGED, &gt->reset.flags)); 74 73 } 74 + 75 + void intel_gt_info_print(const struct intel_gt_info *info, 76 + struct drm_printer *p); 75 77 76 78 #endif /* __INTEL_GT_H__ */
+8
drivers/gpu/drm/i915/gt/intel_gt_types.h
··· 109 109 struct intel_gt_buffer_pool buffer_pool; 110 110 111 111 struct i915_vma *scratch; 112 + 113 + struct intel_gt_info { 114 + intel_engine_mask_t engine_mask; 115 + u8 num_engines; 116 + 117 + /* Media engine access to SFC per instance */ 118 + u8 vdbox_sfc_access; 119 + } info; 112 120 }; 113 121 114 122 enum intel_gt_scratch_field {
+3 -3
drivers/gpu/drm/i915/gt/intel_reset.c
··· 342 342 static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) 343 343 { 344 344 struct intel_uncore *uncore = engine->uncore; 345 - u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; 345 + u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; 346 346 i915_reg_t sfc_forced_lock, sfc_forced_lock_ack; 347 347 u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit; 348 348 i915_reg_t sfc_usage; ··· 417 417 static void gen11_unlock_sfc(struct intel_engine_cs *engine) 418 418 { 419 419 struct intel_uncore *uncore = engine->uncore; 420 - u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; 420 + u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; 421 421 i915_reg_t sfc_forced_lock; 422 422 u32 sfc_forced_lock_bit; 423 423 ··· 1246 1246 */ 1247 1247 wakeref = intel_runtime_pm_get(gt->uncore->rpm); 1248 1248 1249 - engine_mask &= INTEL_INFO(gt->i915)->engine_mask; 1249 + engine_mask &= gt->info.engine_mask; 1250 1250 1251 1251 if (flags & I915_ERROR_CAPTURE) { 1252 1252 i915_capture_error_state(gt->i915);
+1 -1
drivers/gpu/drm/i915/gt/intel_ring_submission.c
··· 649 649 struct drm_i915_private *i915 = engine->i915; 650 650 enum intel_engine_id id; 651 651 const int num_engines = 652 - IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0; 652 + IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0; 653 653 bool force_restore = false; 654 654 int len; 655 655 u32 *cs;
+3 -5
drivers/gpu/drm/i915/gt/selftest_lrc.c
··· 963 963 goto out; 964 964 965 965 if (i915_request_wait(head, 0, 966 - 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) { 966 + 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) { 967 967 pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n", 968 968 count, n); 969 969 GEM_TRACE_DUMP(); ··· 3569 3569 } 3570 3570 3571 3571 pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", 3572 - count, flags, 3573 - RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext); 3572 + count, flags, smoke->gt->info.num_engines, smoke->ncontext); 3574 3573 return 0; 3575 3574 } 3576 3575 ··· 3596 3597 } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL)); 3597 3598 3598 3599 pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", 3599 - count, flags, 3600 - RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext); 3600 + count, flags, smoke->gt->info.num_engines, smoke->ncontext); 3601 3601 return 0; 3602 3602 } 3603 3603
+1 -1
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
··· 106 106 107 107 blob->system_info.vdbox_enable_mask = VDBOX_MASK(gt); 108 108 blob->system_info.vebox_enable_mask = VEBOX_MASK(gt); 109 - blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; 109 + blob->system_info.vdbox_sfc_support_mask = gt->info.vdbox_sfc_access; 110 110 111 111 base = intel_guc_ggtt_offset(guc, guc->ads_vma); 112 112
+1 -1
drivers/gpu/drm/i915/gvt/handlers.c
··· 347 347 gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id); 348 348 vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; 349 349 } 350 - engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask; 350 + engine_mask &= vgpu->gvt->gt->info.engine_mask; 351 351 } 352 352 353 353 /* vgpu_lock already hold by emulate mmio r/w */
+2
drivers/gpu/drm/i915/i915_debugfs.c
··· 34 34 #include "gem/i915_gem_context.h" 35 35 #include "gt/intel_gt_buffer_pool.h" 36 36 #include "gt/intel_gt_clock_utils.h" 37 + #include "gt/intel_gt.h" 37 38 #include "gt/intel_gt_pm.h" 38 39 #include "gt/intel_gt_requests.h" 39 40 #include "gt/intel_reset.h" ··· 62 61 63 62 intel_device_info_print_static(INTEL_INFO(i915), &p); 64 63 intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); 64 + intel_gt_info_print(&i915->gt.info, &p); 65 65 intel_driver_caps_print(&i915->caps, &p); 66 66 67 67 kernel_param_lock(THIS_MODULE);
+1
drivers/gpu/drm/i915/i915_drv.c
··· 886 886 887 887 intel_device_info_print_static(INTEL_INFO(dev_priv), &p); 888 888 intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); 889 + intel_gt_info_print(&dev_priv->gt.info, &p); 889 890 } 890 891 891 892 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+3 -3
drivers/gpu/drm/i915/i915_drv.h
··· 1256 1256 1257 1257 /* Iterator over subset of engines selected by mask */ 1258 1258 #define for_each_engine_masked(engine__, gt__, mask__, tmp__) \ 1259 - for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \ 1259 + for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \ 1260 1260 (tmp__) ? \ 1261 1261 ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \ 1262 1262 0;) ··· 1563 1563 #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) 1564 1564 1565 1565 #define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id)) 1566 - #define HAS_ENGINE(gt, id) __HAS_ENGINE(INTEL_INFO((gt)->i915)->engine_mask, id) 1566 + #define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id) 1567 1567 1568 1568 #define ENGINE_INSTANCES_MASK(gt, first, count) ({ \ 1569 1569 unsigned int first__ = (first); \ 1570 1570 unsigned int count__ = (count); \ 1571 - (INTEL_INFO((gt)->i915)->engine_mask & \ 1571 + ((gt)->info.engine_mask & \ 1572 1572 GENMASK(first__ + count__ - 1, first__)) >> first__; \ 1573 1573 }) 1574 1574 #define VDBOX_MASK(gt) \
+14 -9
drivers/gpu/drm/i915/i915_gpu_error.c
··· 42 42 43 43 #include "gem/i915_gem_context.h" 44 44 #include "gem/i915_gem_lmem.h" 45 + #include "gt/intel_gt.h" 45 46 #include "gt/intel_gt_pm.h" 46 47 47 48 #include "i915_drv.h" ··· 620 619 } 621 620 622 621 static void err_print_capabilities(struct drm_i915_error_state_buf *m, 623 - const struct intel_device_info *info, 624 - const struct intel_runtime_info *runtime, 625 - const struct intel_driver_caps *caps) 622 + struct i915_gpu_coredump *error) 626 623 { 627 624 struct drm_printer p = i915_error_printer(m); 628 625 629 - intel_device_info_print_static(info, &p); 630 - intel_device_info_print_runtime(runtime, &p); 631 - intel_device_info_print_topology(&runtime->sseu, &p); 632 - intel_driver_caps_print(caps, &p); 626 + intel_device_info_print_static(&error->device_info, &p); 627 + intel_device_info_print_runtime(&error->runtime_info, &p); 628 + intel_device_info_print_topology(&error->runtime_info.sseu, &p); 629 + intel_gt_info_print(&error->gt->info, &p); 630 + intel_driver_caps_print(&error->driver_caps, &p); 633 631 } 634 632 635 633 static void err_print_params(struct drm_i915_error_state_buf *m, ··· 798 798 if (error->display) 799 799 intel_display_print_error_state(m, error->display); 800 800 801 - err_print_capabilities(m, &error->device_info, &error->runtime_info, 802 - &error->driver_caps); 801 + err_print_capabilities(m, error); 803 802 err_print_params(m, &error->params); 804 803 } 805 804 ··· 1629 1630 gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER); 1630 1631 } 1631 1632 1633 + static void gt_record_info(struct intel_gt_coredump *gt) 1634 + { 1635 + memcpy(&gt->info, &gt->_gt->info, sizeof(struct intel_gt_info)); 1636 + } 1637 + 1632 1638 /* 1633 1639 * Generate a semi-unique error code. 
The code is not meant to have meaning, The 1634 1640 * code's only purpose is to try to prevent false duplicated bug reports by ··· 1812 1808 return ERR_PTR(-ENOMEM); 1813 1809 } 1814 1810 1811 + gt_record_info(error->gt); 1815 1812 gt_record_engines(error->gt, compress); 1816 1813 1817 1814 if (INTEL_INFO(i915)->has_gt_uc)
+3
drivers/gpu/drm/i915/i915_gpu_error.h
··· 15 15 #include <drm/drm_mm.h> 16 16 17 17 #include "gt/intel_engine.h" 18 + #include "gt/intel_gt_types.h" 18 19 #include "gt/uc/intel_uc_fw.h" 19 20 20 21 #include "intel_device_info.h" ··· 118 117 const struct intel_gt *_gt; 119 118 bool awake; 120 119 bool simulated; 120 + 121 + struct intel_gt_info info; 121 122 122 123 /* Generic register state */ 123 124 u32 eir;
+21 -21
drivers/gpu/drm/i915/i915_pci.c
··· 168 168 .gpu_reset_clobbers_display = true, \ 169 169 .hws_needs_physical = 1, \ 170 170 .unfenced_needs_alignment = 1, \ 171 - .engine_mask = BIT(RCS0), \ 171 + .platform_engine_mask = BIT(RCS0), \ 172 172 .has_snoop = true, \ 173 173 .has_coherent_ggtt = false, \ 174 174 .dma_mask_size = 32, \ ··· 188 188 .gpu_reset_clobbers_display = true, \ 189 189 .hws_needs_physical = 1, \ 190 190 .unfenced_needs_alignment = 1, \ 191 - .engine_mask = BIT(RCS0), \ 191 + .platform_engine_mask = BIT(RCS0), \ 192 192 .has_snoop = true, \ 193 193 .has_coherent_ggtt = false, \ 194 194 .dma_mask_size = 32, \ ··· 226 226 .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ 227 227 .display.has_gmch = 1, \ 228 228 .gpu_reset_clobbers_display = true, \ 229 - .engine_mask = BIT(RCS0), \ 229 + .platform_engine_mask = BIT(RCS0), \ 230 230 .has_snoop = true, \ 231 231 .has_coherent_ggtt = true, \ 232 232 .dma_mask_size = 32, \ ··· 317 317 .display.has_hotplug = 1, \ 318 318 .display.has_gmch = 1, \ 319 319 .gpu_reset_clobbers_display = true, \ 320 - .engine_mask = BIT(RCS0), \ 320 + .platform_engine_mask = BIT(RCS0), \ 321 321 .has_snoop = true, \ 322 322 .has_coherent_ggtt = true, \ 323 323 .dma_mask_size = 36, \ ··· 349 349 static const struct intel_device_info g45_info = { 350 350 GEN4_FEATURES, 351 351 PLATFORM(INTEL_G45), 352 - .engine_mask = BIT(RCS0) | BIT(VCS0), 352 + .platform_engine_mask = BIT(RCS0) | BIT(VCS0), 353 353 .gpu_reset_clobbers_display = false, 354 354 }; 355 355 ··· 359 359 .is_mobile = 1, 360 360 .display.has_fbc = 1, 361 361 .display.supports_tv = 1, 362 - .engine_mask = BIT(RCS0) | BIT(VCS0), 362 + .platform_engine_mask = BIT(RCS0) | BIT(VCS0), 363 363 .gpu_reset_clobbers_display = false, 364 364 }; 365 365 ··· 368 368 .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ 369 369 .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ 370 370 .display.has_hotplug = 1, \ 371 - .engine_mask = BIT(RCS0) | BIT(VCS0), \ 371 + .platform_engine_mask = 
BIT(RCS0) | BIT(VCS0), \ 372 372 .has_snoop = true, \ 373 373 .has_coherent_ggtt = true, \ 374 374 /* ilk does support rc6, but we do not implement [power] contexts */ \ ··· 398 398 .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ 399 399 .display.has_hotplug = 1, \ 400 400 .display.has_fbc = 1, \ 401 - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ 401 + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ 402 402 .has_coherent_ggtt = true, \ 403 403 .has_llc = 1, \ 404 404 .has_rc6 = 1, \ ··· 449 449 .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ 450 450 .display.has_hotplug = 1, \ 451 451 .display.has_fbc = 1, \ 452 - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ 452 + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ 453 453 .has_coherent_ggtt = true, \ 454 454 .has_llc = 1, \ 455 455 .has_rc6 = 1, \ ··· 520 520 .ppgtt_size = 31, 521 521 .has_snoop = true, 522 522 .has_coherent_ggtt = false, 523 - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), 523 + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), 524 524 .display_mmio_offset = VLV_DISPLAY_BASE, 525 525 I9XX_PIPE_OFFSETS, 526 526 I9XX_CURSOR_OFFSETS, ··· 531 531 532 532 #define G75_FEATURES \ 533 533 GEN7_FEATURES, \ 534 - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ 534 + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ 535 535 .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ 536 536 BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \ 537 537 .display.has_ddi = 1, \ ··· 598 598 static const struct intel_device_info bdw_gt3_info = { 599 599 BDW_PLATFORM, 600 600 .gt = 3, 601 - .engine_mask = 601 + .platform_engine_mask = 602 602 BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), 603 603 }; 604 604 ··· 609 609 .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), 610 610 .display.has_hotplug = 1, 611 611 .is_lp = 1, 612 - 
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), 612 + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), 613 613 .has_64bit_reloc = 1, 614 614 .has_runtime_pm = 1, 615 615 .has_rc6 = 1, ··· 662 662 663 663 #define SKL_GT3_PLUS_PLATFORM \ 664 664 SKL_PLATFORM, \ 665 - .engine_mask = \ 665 + .platform_engine_mask = \ 666 666 BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1) 667 667 668 668 ··· 681 681 .is_lp = 1, \ 682 682 .num_supported_dbuf_slices = 1, \ 683 683 .display.has_hotplug = 1, \ 684 - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ 684 + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ 685 685 .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ 686 686 .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ 687 687 BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ ··· 744 744 static const struct intel_device_info kbl_gt3_info = { 745 745 KBL_PLATFORM, 746 746 .gt = 3, 747 - .engine_mask = 747 + .platform_engine_mask = 748 748 BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), 749 749 }; 750 750 ··· 765 765 static const struct intel_device_info cfl_gt3_info = { 766 766 CFL_PLATFORM, 767 767 .gt = 3, 768 - .engine_mask = 768 + .platform_engine_mask = 769 769 BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), 770 770 }; 771 771 ··· 834 834 static const struct intel_device_info icl_info = { 835 835 GEN11_FEATURES, 836 836 PLATFORM(INTEL_ICELAKE), 837 - .engine_mask = 837 + .platform_engine_mask = 838 838 BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), 839 839 }; 840 840 ··· 842 842 GEN11_FEATURES, 843 843 PLATFORM(INTEL_ELKHARTLAKE), 844 844 .require_force_probe = 1, 845 - .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), 845 + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), 846 846 .ppgtt_size = 36, 847 847 }; 848 848 ··· 878 878 GEN12_FEATURES, 879 879 PLATFORM(INTEL_TIGERLAKE), 880 880 
.display.has_modular_fia = 1, 881 - .engine_mask = 881 + .platform_engine_mask = 882 882 BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), 883 883 }; 884 884 ··· 891 891 BIT(TRANSCODER_C), 892 892 .require_force_probe = 1, 893 893 .display.has_psr_hw_tracking = 0, 894 - .engine_mask = 894 + .platform_engine_mask = 895 895 BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0), 896 896 }; 897 897
-1
drivers/gpu/drm/i915/intel_device_info.c
··· 92 92 void intel_device_info_print_static(const struct intel_device_info *info, 93 93 struct drm_printer *p) 94 94 { 95 - drm_printf(p, "engines: %x\n", info->engine_mask); 96 95 drm_printf(p, "gen: %d\n", info->gen); 97 96 drm_printf(p, "gt: %d\n", info->gt); 98 97 drm_printf(p, "iommu: %s\n", iommu_name());
+1 -6
drivers/gpu/drm/i915/intel_device_info.h
··· 157 157 158 158 u8 gen; 159 159 u8 gt; /* GT number, 0 if undefined */ 160 - intel_engine_mask_t engine_mask; /* Engines supported by the HW */ 160 + intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */ 161 161 162 162 enum intel_platform platform; 163 163 ··· 219 219 u8 num_sprites[I915_MAX_PIPES]; 220 220 u8 num_scalers[I915_MAX_PIPES]; 221 221 222 - u8 num_engines; 223 - 224 222 /* Slice/subslice/EU info */ 225 223 struct sseu_dev_info sseu; 226 224 ··· 226 228 227 229 u32 cs_timestamp_frequency_hz; 228 230 u32 cs_timestamp_period_ns; 229 - 230 - /* Media engine access to SFC per instance */ 231 - u8 vdbox_sfc_access; 232 231 }; 233 232 234 233 struct intel_driver_caps {
+1 -1
drivers/gpu/drm/i915/intel_uncore.c
··· 1530 1530 1531 1531 if (INTEL_GEN(i915) >= 11) { 1532 1532 /* we'll prune the domains of missing engines later */ 1533 - intel_engine_mask_t emask = INTEL_INFO(i915)->engine_mask; 1533 + intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask; 1534 1534 int i; 1535 1535 1536 1536 uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
+1 -1
drivers/gpu/drm/i915/selftests/i915_request.c
··· 1454 1454 idx++; 1455 1455 } 1456 1456 pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n", 1457 - num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus); 1457 + num_waits, num_fences, idx, ncpus); 1458 1458 1459 1459 ret = igt_live_test_end(&live) ?: ret; 1460 1460 out_contexts:
+2 -1
drivers/gpu/drm/i915/selftests/mock_gem_device.c
··· 190 190 mock_init_ggtt(i915, &i915->ggtt); 191 191 i915->gt.vm = i915_vm_get(&i915->ggtt.vm); 192 192 193 - mkwrite_device_info(i915)->engine_mask = BIT(0); 193 + mkwrite_device_info(i915)->platform_engine_mask = BIT(0); 194 + i915->gt.info.engine_mask = BIT(0); 194 195 195 196 i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0); 196 197 if (!i915->gt.engine[RCS0])