Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-gt-next-2022-09-16' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Cross-subsystem Changes:

- MEI subsystem pieces for XeHP SDV GSC support
These are Acked-by Greg.

Driver Changes:

- Release mmaps on RPM suspend on discrete GPUs (Anshuman)
- Update GuC version to 7.5 on DG1, DG2 and ADL
- Revert "drm/i915/dg2: extend Wa_1409120013 to DG2" (Lucas)
- MTL enabling incl. standalone media (Matt R, Lucas)
- Explicitly clear BB_OFFSET for new contexts on Gen8+ (Chris)
- Fix throttling / perf limit reason decoding (Ashutosh)
- XeHP SDV GSC support (Vitaly, Alexander, Tomas)

- Fix issues with overriding firmware file paths (John)
- Invert if-else ladders to check latest version first (Lucas)
- Cancel GuC engine busyness worker synchronously (Umesh)

- Skip applying copy engine fuses outside PVC (Lucas)
- Eliminate Gen10 frequency read function (Lucas)
- Static code checker fixes (Gaosheng)
- Selftest improvements (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YyQ4Jgl3cpGL1/As@jlahtine-mobl.ger.corp.intel.com

+1426 -537
+1
drivers/gpu/drm/i915/Makefile
··· 123 123 gt/intel_ring.o \ 124 124 gt/intel_ring_submission.o \ 125 125 gt/intel_rps.o \ 126 + gt/intel_sa_media.o \ 126 127 gt/intel_sseu.o \ 127 128 gt/intel_sseu_debugfs.o \ 128 129 gt/intel_timeline.o \
-2
drivers/gpu/drm/i915/gem/i915_gem_lmem.h
··· 12 12 struct drm_i915_gem_object; 13 13 struct intel_memory_region; 14 14 15 - extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops; 16 - 17 15 void __iomem * 18 16 i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, 19 17 unsigned long n,
+22 -1
drivers/gpu/drm/i915/gem/i915_gem_mman.c
··· 413 413 vma->mmo = mmo; 414 414 415 415 if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) 416 - intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 416 + intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 417 417 msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); 418 418 419 419 if (write) { ··· 550 550 intel_runtime_pm_put(&i915->runtime_pm, wakeref); 551 551 } 552 552 553 + void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj) 554 + { 555 + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); 556 + struct ttm_device *bdev = bo->bdev; 557 + 558 + drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping); 559 + 560 + if (obj->userfault_count) { 561 + /* rpm wakeref provide exclusive access */ 562 + list_del(&obj->userfault_link); 563 + obj->userfault_count = 0; 564 + } 565 + } 566 + 553 567 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) 554 568 { 555 569 struct i915_mmap_offset *mmo, *mn; ··· 587 573 spin_lock(&obj->mmo.lock); 588 574 } 589 575 spin_unlock(&obj->mmo.lock); 576 + 577 + if (obj->userfault_count) { 578 + mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); 579 + list_del(&obj->userfault_link); 580 + mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); 581 + obj->userfault_count = 0; 582 + } 590 583 } 591 584 592 585 static struct i915_mmap_offset *
+1
drivers/gpu/drm/i915/gem/i915_gem_mman.h
··· 27 27 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); 28 28 void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); 29 29 30 + void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj); 30 31 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj); 31 32 32 33 #endif
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_object.c
··· 238 238 { 239 239 /* Skip serialisation and waking the device if known to be not used. */ 240 240 241 - if (obj->userfault_count) 241 + if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev))) 242 242 i915_gem_object_release_mmap_gtt(obj); 243 243 244 244 if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
+2 -1
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
··· 298 298 }; 299 299 300 300 /** 301 - * Whether the object is currently in the GGTT mmap. 301 + * Whether the object is currently in the GGTT or any other supported 302 + * fake offset mmap backed by lmem. 302 303 */ 303 304 unsigned int userfault_count; 304 305 struct list_head userfault_link;
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_pm.c
··· 24 24 { 25 25 GEM_TRACE("%s\n", dev_name(i915->drm.dev)); 26 26 27 - intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0); 27 + intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0); 28 28 flush_workqueue(i915->wq); 29 29 30 30 /*
+16 -35
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
··· 430 430 reserved_base = stolen_top; 431 431 reserved_size = 0; 432 432 433 - switch (GRAPHICS_VER(i915)) { 434 - case 2: 435 - case 3: 436 - break; 437 - case 4: 438 - if (!IS_G4X(i915)) 439 - break; 440 - fallthrough; 441 - case 5: 442 - g4x_get_stolen_reserved(i915, uncore, 433 + if (GRAPHICS_VER(i915) >= 11) { 434 + icl_get_stolen_reserved(i915, uncore, 443 435 &reserved_base, &reserved_size); 444 - break; 445 - case 6: 446 - gen6_get_stolen_reserved(i915, uncore, 447 - &reserved_base, &reserved_size); 448 - break; 449 - case 7: 450 - if (IS_VALLEYVIEW(i915)) 451 - vlv_get_stolen_reserved(i915, uncore, 452 - &reserved_base, &reserved_size); 453 - else 454 - gen7_get_stolen_reserved(i915, uncore, 455 - &reserved_base, &reserved_size); 456 - break; 457 - case 8: 458 - case 9: 436 + } else if (GRAPHICS_VER(i915) >= 8) { 459 437 if (IS_LP(i915)) 460 438 chv_get_stolen_reserved(i915, uncore, 461 439 &reserved_base, &reserved_size); 462 440 else 463 441 bdw_get_stolen_reserved(i915, uncore, 464 442 &reserved_base, &reserved_size); 465 - break; 466 - default: 467 - MISSING_CASE(GRAPHICS_VER(i915)); 468 - fallthrough; 469 - case 11: 470 - case 12: 471 - icl_get_stolen_reserved(i915, uncore, 472 - &reserved_base, 473 - &reserved_size); 474 - break; 443 + } else if (GRAPHICS_VER(i915) >= 7) { 444 + if (IS_VALLEYVIEW(i915)) 445 + vlv_get_stolen_reserved(i915, uncore, 446 + &reserved_base, &reserved_size); 447 + else 448 + gen7_get_stolen_reserved(i915, uncore, 449 + &reserved_base, &reserved_size); 450 + } else if (GRAPHICS_VER(i915) >= 6) { 451 + gen6_get_stolen_reserved(i915, uncore, 452 + &reserved_base, &reserved_size); 453 + } else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) { 454 + g4x_get_stolen_reserved(i915, uncore, 455 + &reserved_base, &reserved_size); 475 456 } 476 457 477 458 /*
+34 -2
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
··· 509 509 static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) 510 510 { 511 511 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); 512 + intel_wakeref_t wakeref = 0; 512 513 513 514 if (likely(obj)) { 515 + /* ttm_bo_release() already has dma_resv_lock */ 516 + if (i915_ttm_cpu_maps_iomem(bo->resource)) 517 + wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); 518 + 514 519 __i915_gem_object_pages_fini(obj); 520 + 521 + if (wakeref) 522 + intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref); 523 + 515 524 i915_ttm_free_cached_io_rsgt(obj); 516 525 } 517 526 } ··· 990 981 struct ttm_buffer_object *bo = area->vm_private_data; 991 982 struct drm_device *dev = bo->base.dev; 992 983 struct drm_i915_gem_object *obj; 984 + intel_wakeref_t wakeref = 0; 993 985 vm_fault_t ret; 994 986 int idx; 995 987 ··· 1011 1001 dma_resv_unlock(bo->base.resv); 1012 1002 return VM_FAULT_SIGBUS; 1013 1003 } 1004 + 1005 + if (i915_ttm_cpu_maps_iomem(bo->resource)) 1006 + wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); 1014 1007 1015 1008 if (!i915_ttm_resource_mappable(bo->resource)) { 1016 1009 int err = -ENODEV; ··· 1036 1023 if (err) { 1037 1024 drm_dbg(dev, "Unable to make resource CPU accessible\n"); 1038 1025 dma_resv_unlock(bo->base.resv); 1039 - return VM_FAULT_SIGBUS; 1026 + ret = VM_FAULT_SIGBUS; 1027 + goto out_rpm; 1040 1028 } 1041 1029 } 1042 1030 ··· 1048 1034 } else { 1049 1035 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); 1050 1036 } 1037 + 1051 1038 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) 1052 - return ret; 1039 + goto out_rpm; 1040 + 1041 + /* ttm_bo_vm_reserve() already has dma_resv_lock */ 1042 + if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) { 1043 + obj->userfault_count = 1; 1044 + mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); 1045 + list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list); 
1046 + mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock); 1047 + } 1048 + 1049 + if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) 1050 + intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref, 1051 + msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); 1053 1052 1054 1053 i915_ttm_adjust_lru(obj); 1055 1054 1056 1055 dma_resv_unlock(bo->base.resv); 1056 + 1057 + out_rpm: 1058 + if (wakeref) 1059 + intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref); 1060 + 1057 1061 return ret; 1058 1062 } 1059 1063
+10 -5
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
··· 165 165 return MI_ARB_CHECK | 1 << 8 | state; 166 166 } 167 167 168 - u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg) 168 + u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg) 169 169 { 170 + u32 gsi_offset = gt->uncore->gsi_offset; 171 + 170 172 *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN; 171 - *cs++ = i915_mmio_reg_offset(inv_reg); 173 + *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; 172 174 *cs++ = AUX_INV; 173 175 *cs++ = MI_NOOP; 174 176 ··· 256 254 257 255 if (!HAS_FLAT_CCS(rq->engine->i915)) { 258 256 /* hsdes: 1809175790 */ 259 - cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV); 257 + cs = gen12_emit_aux_table_inv(rq->engine->gt, 258 + cs, GEN12_GFX_CCS_AUX_NV); 260 259 } 261 260 262 261 *cs++ = preparser_disable(false); ··· 316 313 317 314 if (aux_inv) { /* hsdes: 1809175790 */ 318 315 if (rq->engine->class == VIDEO_DECODE_CLASS) 319 - cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV); 316 + cs = gen12_emit_aux_table_inv(rq->engine->gt, 317 + cs, GEN12_VD0_AUX_NV); 320 318 else 321 - cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV); 319 + cs = gen12_emit_aux_table_inv(rq->engine->gt, 320 + cs, GEN12_VE0_AUX_NV); 322 321 } 323 322 324 323 if (mode & EMIT_INVALIDATE)
+2 -1
drivers/gpu/drm/i915/gt/gen8_engine_cs.h
··· 13 13 #include "intel_gt_regs.h" 14 14 #include "intel_gpu_commands.h" 15 15 16 + struct intel_gt; 16 17 struct i915_request; 17 18 18 19 int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode); ··· 46 45 u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); 47 46 u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); 48 47 49 - u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg); 48 + u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg); 50 49 51 50 static inline u32 * 52 51 __gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+83 -73
drivers/gpu/drm/i915/gt/intel_engine_cs.c
··· 654 654 */ 655 655 if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0) 656 656 return false; 657 - else if (GRAPHICS_VER(i915) == 12) 657 + else if (MEDIA_VER(i915) >= 12) 658 658 return (physical_vdbox % 2 == 0) || 659 659 !(BIT(physical_vdbox - 1) & vdbox_mask); 660 - else if (GRAPHICS_VER(i915) == 11) 660 + else if (MEDIA_VER(i915) == 11) 661 661 return logical_vdbox % 2 == 0; 662 662 663 - MISSING_CASE(GRAPHICS_VER(i915)); 664 663 return false; 664 + } 665 + 666 + static void engine_mask_apply_media_fuses(struct intel_gt *gt) 667 + { 668 + struct drm_i915_private *i915 = gt->i915; 669 + unsigned int logical_vdbox = 0; 670 + unsigned int i; 671 + u32 media_fuse, fuse1; 672 + u16 vdbox_mask; 673 + u16 vebox_mask; 674 + 675 + if (MEDIA_VER(gt->i915) < 11) 676 + return; 677 + 678 + /* 679 + * On newer platforms the fusing register is called 'enable' and has 680 + * enable semantics, while on older platforms it is called 'disable' 681 + * and bits have disable semantices. 682 + */ 683 + media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); 684 + if (MEDIA_VER_FULL(i915) < IP_VER(12, 50)) 685 + media_fuse = ~media_fuse; 686 + 687 + vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; 688 + vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> 689 + GEN11_GT_VEBOX_DISABLE_SHIFT; 690 + 691 + if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) { 692 + fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1); 693 + gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1); 694 + } else { 695 + gt->info.sfc_mask = ~0; 696 + } 697 + 698 + for (i = 0; i < I915_MAX_VCS; i++) { 699 + if (!HAS_ENGINE(gt, _VCS(i))) { 700 + vdbox_mask &= ~BIT(i); 701 + continue; 702 + } 703 + 704 + if (!(BIT(i) & vdbox_mask)) { 705 + gt->info.engine_mask &= ~BIT(_VCS(i)); 706 + drm_dbg(&i915->drm, "vcs%u fused off\n", i); 707 + continue; 708 + } 709 + 710 + if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask)) 711 + gt->info.vdbox_sfc_access |= BIT(i); 712 + 
logical_vdbox++; 713 + } 714 + drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n", 715 + vdbox_mask, VDBOX_MASK(gt)); 716 + GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt)); 717 + 718 + for (i = 0; i < I915_MAX_VECS; i++) { 719 + if (!HAS_ENGINE(gt, _VECS(i))) { 720 + vebox_mask &= ~BIT(i); 721 + continue; 722 + } 723 + 724 + if (!(BIT(i) & vebox_mask)) { 725 + gt->info.engine_mask &= ~BIT(_VECS(i)); 726 + drm_dbg(&i915->drm, "vecs%u fused off\n", i); 727 + } 728 + } 729 + drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n", 730 + vebox_mask, VEBOX_MASK(gt)); 731 + GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt)); 665 732 } 666 733 667 734 static void engine_mask_apply_compute_fuses(struct intel_gt *gt) ··· 738 671 int ss_per_ccs = info->sseu.max_subslices / I915_MAX_CCS; 739 672 unsigned long ccs_mask; 740 673 unsigned int i; 674 + 675 + if (GRAPHICS_VER(i915) < 11) 676 + return; 741 677 742 678 if (hweight32(CCS_MASK(gt)) <= 1) 743 679 return; ··· 763 693 struct intel_gt_info *info = &gt->info; 764 694 unsigned long meml3_mask; 765 695 unsigned long quad; 696 + 697 + if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) && 698 + GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))) 699 + return; 766 700 767 701 meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3); 768 702 meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask); ··· 801 727 */ 802 728 static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) 803 729 { 804 - struct drm_i915_private *i915 = gt->i915; 805 730 struct intel_gt_info *info = &gt->info; 806 - struct intel_uncore *uncore = gt->uncore; 807 - unsigned int logical_vdbox = 0; 808 - unsigned int i; 809 - u32 media_fuse, fuse1; 810 - u16 vdbox_mask; 811 - u16 vebox_mask; 812 731 813 - info->engine_mask = RUNTIME_INFO(i915)->platform_engine_mask; 732 + GEM_BUG_ON(!info->engine_mask); 814 733 815 - if (GRAPHICS_VER(i915) < 11) 816 - return info->engine_mask; 817 - 818 - /* 819 - * On newer platforms the fusing register is called 'enable' 
and has 820 - * enable semantics, while on older platforms it is called 'disable' 821 - * and bits have disable semantices. 822 - */ 823 - media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); 824 - if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) 825 - media_fuse = ~media_fuse; 826 - 827 - vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; 828 - vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> 829 - GEN11_GT_VEBOX_DISABLE_SHIFT; 830 - 831 - if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { 832 - fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1); 833 - gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1); 834 - } else { 835 - gt->info.sfc_mask = ~0; 836 - } 837 - 838 - for (i = 0; i < I915_MAX_VCS; i++) { 839 - if (!HAS_ENGINE(gt, _VCS(i))) { 840 - vdbox_mask &= ~BIT(i); 841 - continue; 842 - } 843 - 844 - if (!(BIT(i) & vdbox_mask)) { 845 - info->engine_mask &= ~BIT(_VCS(i)); 846 - drm_dbg(&i915->drm, "vcs%u fused off\n", i); 847 - continue; 848 - } 849 - 850 - if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask)) 851 - gt->info.vdbox_sfc_access |= BIT(i); 852 - logical_vdbox++; 853 - } 854 - drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n", 855 - vdbox_mask, VDBOX_MASK(gt)); 856 - GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt)); 857 - 858 - for (i = 0; i < I915_MAX_VECS; i++) { 859 - if (!HAS_ENGINE(gt, _VECS(i))) { 860 - vebox_mask &= ~BIT(i); 861 - continue; 862 - } 863 - 864 - if (!(BIT(i) & vebox_mask)) { 865 - info->engine_mask &= ~BIT(_VECS(i)); 866 - drm_dbg(&i915->drm, "vecs%u fused off\n", i); 867 - } 868 - } 869 - drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n", 870 - vebox_mask, VEBOX_MASK(gt)); 871 - GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt)); 872 - 734 + engine_mask_apply_media_fuses(gt); 873 735 engine_mask_apply_compute_fuses(gt); 874 736 engine_mask_apply_copy_fuses(gt); 875 737 ··· 1698 1688 return false; 1699 1689 1700 1690 /* Caller disables interrupts */ 1701 - 
spin_lock(&engine->gt->irq_lock); 1691 + spin_lock(engine->gt->irq_lock); 1702 1692 engine->irq_enable(engine); 1703 - spin_unlock(&engine->gt->irq_lock); 1693 + spin_unlock(engine->gt->irq_lock); 1704 1694 1705 1695 return true; 1706 1696 } ··· 1711 1701 return; 1712 1702 1713 1703 /* Caller disables interrupts */ 1714 - spin_lock(&engine->gt->irq_lock); 1704 + spin_lock(engine->gt->irq_lock); 1715 1705 engine->irq_disable(engine); 1716 - spin_unlock(&engine->gt->irq_lock); 1706 + spin_unlock(engine->gt->irq_lock); 1717 1707 } 1718 1708 1719 1709 void intel_engines_reset_default_submission(struct intel_gt *gt)
+1
drivers/gpu/drm/i915/gt/intel_engine_regs.h
··· 110 110 #define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */ 111 111 #define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */ 112 112 #define RING_BBADDR(base) _MMIO((base) + 0x140) 113 + #define RING_BB_OFFSET(base) _MMIO((base) + 0x158) 113 114 #define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */ 114 115 #define CCID(base) _MMIO((base) + 0x180) 115 116 #define CCID_EN BIT(0)
-1
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
··· 842 842 843 843 INIT_LIST_HEAD(&ggtt->fence_list); 844 844 INIT_LIST_HEAD(&ggtt->userfault_list); 845 - intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm); 846 845 847 846 detect_bit_6_swizzle(ggtt); 848 847
+96 -10
drivers/gpu/drm/i915/gt/intel_gsc.c
··· 7 7 #include <linux/mei_aux.h> 8 8 #include "i915_drv.h" 9 9 #include "i915_reg.h" 10 + #include "gem/i915_gem_region.h" 10 11 #include "gt/intel_gsc.h" 11 12 #include "gt/intel_gt.h" 12 13 ··· 37 36 return irq_set_chip_data(irq, NULL); 38 37 } 39 38 39 + static int 40 + gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size) 41 + { 42 + struct intel_gt *gt = gsc_to_gt(gsc); 43 + struct drm_i915_gem_object *obj; 44 + int err; 45 + 46 + obj = i915_gem_object_create_lmem(gt->i915, size, 47 + I915_BO_ALLOC_CONTIGUOUS | 48 + I915_BO_ALLOC_CPU_CLEAR); 49 + if (IS_ERR(obj)) { 50 + drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n"); 51 + return PTR_ERR(obj); 52 + } 53 + 54 + err = i915_gem_object_pin_pages_unlocked(obj); 55 + if (err) { 56 + drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n"); 57 + goto out_put; 58 + } 59 + 60 + intf->gem_obj = obj; 61 + 62 + return 0; 63 + 64 + out_put: 65 + i915_gem_object_put(obj); 66 + return err; 67 + } 68 + 69 + static void gsc_ext_om_destroy(struct intel_gsc_intf *intf) 70 + { 71 + struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj); 72 + 73 + if (!obj) 74 + return; 75 + 76 + if (i915_gem_object_has_pinned_pages(obj)) 77 + i915_gem_object_unpin_pages(obj); 78 + 79 + i915_gem_object_put(obj); 80 + } 81 + 40 82 struct gsc_def { 41 83 const char *name; 42 84 unsigned long bar; 43 85 size_t bar_size; 86 + bool use_polling; 87 + bool slow_firmware; 88 + size_t lmem_size; 44 89 }; 45 90 46 91 /* gsc resources and definitions (HECI1 and HECI2) */ ··· 101 54 } 102 55 }; 103 56 57 + static const struct gsc_def gsc_def_xehpsdv[] = { 58 + { 59 + /* HECI1 not enabled on the device. 
*/ 60 + }, 61 + { 62 + .name = "mei-gscfi", 63 + .bar = DG1_GSC_HECI2_BASE, 64 + .bar_size = GSC_BAR_LENGTH, 65 + .use_polling = true, 66 + .slow_firmware = true, 67 + } 68 + }; 69 + 104 70 static const struct gsc_def gsc_def_dg2[] = { 105 71 { 106 72 .name = "mei-gsc", 107 73 .bar = DG2_GSC_HECI1_BASE, 108 74 .bar_size = GSC_BAR_LENGTH, 75 + .lmem_size = SZ_4M, 109 76 }, 110 77 { 111 78 .name = "mei-gscfi", ··· 136 75 kfree(adev); 137 76 } 138 77 139 - static void gsc_destroy_one(struct intel_gsc_intf *intf) 78 + static void gsc_destroy_one(struct drm_i915_private *i915, 79 + struct intel_gsc *gsc, unsigned int intf_id) 140 80 { 81 + struct intel_gsc_intf *intf = &gsc->intf[intf_id]; 82 + 141 83 if (intf->adev) { 142 84 auxiliary_device_delete(&intf->adev->aux_dev); 143 85 auxiliary_device_uninit(&intf->adev->aux_dev); 144 86 intf->adev = NULL; 145 87 } 88 + 146 89 if (intf->irq >= 0) 147 90 irq_free_desc(intf->irq); 148 91 intf->irq = -1; 92 + 93 + gsc_ext_om_destroy(intf); 149 94 } 150 95 151 - static void gsc_init_one(struct drm_i915_private *i915, 152 - struct intel_gsc_intf *intf, 96 + static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc, 153 97 unsigned int intf_id) 154 98 { 155 99 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 156 100 struct mei_aux_device *adev; 157 101 struct auxiliary_device *aux_dev; 158 102 const struct gsc_def *def; 103 + struct intel_gsc_intf *intf = &gsc->intf[intf_id]; 159 104 int ret; 160 105 161 106 intf->irq = -1; ··· 172 105 173 106 if (IS_DG1(i915)) { 174 107 def = &gsc_def_dg1[intf_id]; 108 + } else if (IS_XEHPSDV(i915)) { 109 + def = &gsc_def_xehpsdv[intf_id]; 175 110 } else if (IS_DG2(i915)) { 176 111 def = &gsc_def_dg2[intf_id]; 177 112 } else { ··· 186 117 return; 187 118 } 188 119 120 + /* skip irq initialization */ 121 + if (def->use_polling) 122 + goto add_device; 123 + 189 124 intf->irq = irq_alloc_desc(0); 190 125 if (intf->irq < 0) { 191 126 drm_err(&i915->drm, "gsc irq error %d\n", 
intf->irq); 192 - return; 127 + goto fail; 193 128 } 194 129 195 130 ret = gsc_irq_init(intf->irq); ··· 202 129 goto fail; 203 130 } 204 131 132 + add_device: 205 133 adev = kzalloc(sizeof(*adev), GFP_KERNEL); 206 134 if (!adev) 207 135 goto fail; 136 + 137 + if (def->lmem_size) { 138 + drm_dbg(&i915->drm, "setting up GSC lmem\n"); 139 + 140 + if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) { 141 + drm_err(&i915->drm, "setting up gsc extended operational memory failed\n"); 142 + kfree(adev); 143 + goto fail; 144 + } 145 + 146 + adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0); 147 + adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size; 148 + } 208 149 209 150 adev->irq = intf->irq; 210 151 adev->bar.parent = &pdev->resource[0]; ··· 226 139 adev->bar.end = adev->bar.start + def->bar_size - 1; 227 140 adev->bar.flags = IORESOURCE_MEM; 228 141 adev->bar.desc = IORES_DESC_NONE; 142 + adev->slow_firmware = def->slow_firmware; 229 143 230 144 aux_dev = &adev->aux_dev; 231 145 aux_dev->name = def->name; ··· 253 165 254 166 return; 255 167 fail: 256 - gsc_destroy_one(intf); 168 + gsc_destroy_one(i915, gsc, intf->id); 257 169 } 258 170 259 171 static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) ··· 270 182 return; 271 183 } 272 184 273 - if (gt->gsc.intf[intf_id].irq < 0) { 274 - drm_err_ratelimited(&gt->i915->drm, "GSC irq: irq not set"); 185 + if (gt->gsc.intf[intf_id].irq < 0) 275 186 return; 276 - } 277 187 278 188 ret = generic_handle_irq(gt->gsc.intf[intf_id].irq); 279 189 if (ret) ··· 294 208 return; 295 209 296 210 for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++) 297 - gsc_init_one(i915, &gsc->intf[i], i); 211 + gsc_init_one(i915, gsc, i); 298 212 } 299 213 300 214 void intel_gsc_fini(struct intel_gsc *gsc) ··· 306 220 return; 307 221 308 222 for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++) 309 - gsc_destroy_one(&gsc->intf[i]); 223 + gsc_destroy_one(gt->i915, gsc, i); 310 224 }
+3
drivers/gpu/drm/i915/gt/intel_gsc.h
··· 20 20 21 21 /** 22 22 * struct intel_gsc - graphics security controller 23 + * 24 + * @gem_obj: scratch memory GSC operations 23 25 * @intf : gsc interface 24 26 */ 25 27 struct intel_gsc { 26 28 struct intel_gsc_intf { 27 29 struct mei_aux_device *adev; 30 + struct drm_i915_gem_object *gem_obj; 28 31 int irq; 29 32 unsigned int id; 30 33 } intf[INTEL_GSC_NUM_INTERFACES];
+83 -28
drivers/gpu/drm/i915/gt/intel_gt.c
··· 31 31 #include "intel_rc6.h" 32 32 #include "intel_renderstate.h" 33 33 #include "intel_rps.h" 34 + #include "intel_sa_media.h" 34 35 #include "intel_gt_sysfs.h" 35 36 #include "intel_uncore.h" 36 37 #include "shmem_utils.h" 37 38 38 - static void __intel_gt_init_early(struct intel_gt *gt) 39 + void intel_gt_common_init_early(struct intel_gt *gt) 39 40 { 40 - spin_lock_init(&gt->irq_lock); 41 + spin_lock_init(gt->irq_lock); 41 42 43 + INIT_LIST_HEAD(&gt->lmem_userfault_list); 44 + mutex_init(&gt->lmem_userfault_lock); 42 45 INIT_LIST_HEAD(&gt->closed_vma); 43 46 spin_lock_init(&gt->closed_lock); 44 47 ··· 61 58 } 62 59 63 60 /* Preliminary initialization of Tile 0 */ 64 - void intel_root_gt_init_early(struct drm_i915_private *i915) 61 + int intel_root_gt_init_early(struct drm_i915_private *i915) 65 62 { 66 63 struct intel_gt *gt = to_gt(i915); 67 64 68 65 gt->i915 = i915; 69 66 gt->uncore = &i915->uncore; 67 + gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL); 68 + if (!gt->irq_lock) 69 + return -ENOMEM; 70 70 71 - __intel_gt_init_early(gt); 71 + intel_gt_common_init_early(gt); 72 + 73 + return 0; 72 74 } 73 75 74 76 static int intel_gt_probe_lmem(struct intel_gt *gt) ··· 789 781 int ret; 790 782 791 783 if (!gt_is_root(gt)) { 792 - struct intel_uncore_mmio_debug *mmio_debug; 793 784 struct intel_uncore *uncore; 785 + spinlock_t *irq_lock; 794 786 795 - uncore = kzalloc(sizeof(*uncore), GFP_KERNEL); 787 + uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL); 796 788 if (!uncore) 797 789 return -ENOMEM; 798 790 799 - mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL); 800 - if (!mmio_debug) { 801 - kfree(uncore); 791 + irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL); 792 + if (!irq_lock) 802 793 return -ENOMEM; 803 - } 804 794 805 795 gt->uncore = uncore; 806 - gt->uncore->debug = mmio_debug; 796 + gt->irq_lock = irq_lock; 807 797 808 - __intel_gt_init_early(gt); 798 + 
intel_gt_common_init_early(gt); 809 799 } 810 800 811 801 intel_uncore_init_early(gt->uncore, gt); 802 + intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm); 812 803 813 804 ret = intel_uncore_setup_mmio(gt->uncore, phys_addr); 814 805 if (ret) ··· 818 811 return 0; 819 812 } 820 813 821 - static void 822 - intel_gt_tile_cleanup(struct intel_gt *gt) 823 - { 824 - intel_uncore_cleanup_mmio(gt->uncore); 825 - 826 - if (!gt_is_root(gt)) { 827 - kfree(gt->uncore->debug); 828 - kfree(gt->uncore); 829 - kfree(gt); 830 - } 831 - } 832 - 833 814 int intel_gt_probe_all(struct drm_i915_private *i915) 834 815 { 835 816 struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 836 817 struct intel_gt *gt = &i915->gt0; 818 + const struct intel_gt_definition *gtdef; 837 819 phys_addr_t phys_addr; 838 820 unsigned int mmio_bar; 821 + unsigned int i; 839 822 int ret; 840 823 841 824 mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR; ··· 836 839 * and it has been already initialized early during probe 837 840 * in i915_driver_probe() 838 841 */ 842 + gt->i915 = i915; 843 + gt->name = "Primary GT"; 844 + gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask; 845 + 846 + drm_dbg(&i915->drm, "Setting up %s\n", gt->name); 839 847 ret = intel_gt_tile_setup(gt, phys_addr); 840 848 if (ret) 841 849 return ret; 842 850 843 851 i915->gt[0] = gt; 844 852 845 - /* TODO: add more tiles */ 853 + if (!HAS_EXTRA_GT_LIST(i915)) 854 + return 0; 855 + 856 + for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]; 857 + gtdef->name != NULL; 858 + i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) { 859 + gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL); 860 + if (!gt) { 861 + ret = -ENOMEM; 862 + goto err; 863 + } 864 + 865 + gt->i915 = i915; 866 + gt->name = gtdef->name; 867 + gt->type = gtdef->type; 868 + gt->info.engine_mask = gtdef->engine_mask; 869 + gt->info.id = i; 870 + 871 + drm_dbg(&i915->drm, "Setting up %s\n", gt->name); 872 + if 
(GEM_WARN_ON(range_overflows_t(resource_size_t, 873 + gtdef->mapping_base, 874 + SZ_16M, 875 + pci_resource_len(pdev, mmio_bar)))) { 876 + ret = -ENODEV; 877 + goto err; 878 + } 879 + 880 + switch (gtdef->type) { 881 + case GT_TILE: 882 + ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base); 883 + break; 884 + 885 + case GT_MEDIA: 886 + ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base, 887 + gtdef->gsi_offset); 888 + break; 889 + 890 + case GT_PRIMARY: 891 + /* Primary GT should not appear in extra GT list */ 892 + default: 893 + MISSING_CASE(gtdef->type); 894 + ret = -ENODEV; 895 + } 896 + 897 + if (ret) 898 + goto err; 899 + 900 + i915->gt[i] = gt; 901 + } 902 + 846 903 return 0; 904 + 905 + err: 906 + i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret); 907 + intel_gt_release_all(i915); 908 + 909 + return ret; 847 910 } 848 911 849 912 int intel_gt_tiles_init(struct drm_i915_private *i915) ··· 926 869 struct intel_gt *gt; 927 870 unsigned int id; 928 871 929 - for_each_gt(gt, i915, id) { 930 - intel_gt_tile_cleanup(gt); 872 + for_each_gt(gt, i915, id) 931 873 i915->gt[id] = NULL; 932 - } 933 874 } 934 875 935 876 void intel_gt_info_print(const struct intel_gt_info *info,
+2 -2
drivers/gpu/drm/i915/gt/intel_gt.h
··· 44 44 return container_of(gsc, struct intel_gt, gsc); 45 45 } 46 46 47 - void intel_root_gt_init_early(struct drm_i915_private *i915); 47 + void intel_gt_common_init_early(struct intel_gt *gt); 48 + int intel_root_gt_init_early(struct drm_i915_private *i915); 48 49 int intel_gt_assign_ggtt(struct intel_gt *gt); 49 50 int intel_gt_init_mmio(struct intel_gt *gt); 50 51 int __must_check intel_gt_init_hw(struct intel_gt *gt); ··· 55 54 void intel_gt_driver_unregister(struct intel_gt *gt); 56 55 void intel_gt_driver_remove(struct intel_gt *gt); 57 56 void intel_gt_driver_release(struct intel_gt *gt); 58 - 59 57 void intel_gt_driver_late_release_all(struct drm_i915_private *i915); 60 58 61 59 int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
+83 -95
drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
··· 26 26 return base_freq + frac_freq; 27 27 } 28 28 29 - static u32 gen9_get_crystal_clock_freq(struct intel_uncore *uncore, 30 - u32 rpm_config_reg) 31 - { 32 - u32 f19_2_mhz = 19200000; 33 - u32 f24_mhz = 24000000; 34 - u32 crystal_clock = 35 - (rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> 36 - GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; 37 - 38 - switch (crystal_clock) { 39 - case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: 40 - return f19_2_mhz; 41 - case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: 42 - return f24_mhz; 43 - default: 44 - MISSING_CASE(crystal_clock); 45 - return 0; 46 - } 47 - } 48 - 49 29 static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore, 50 30 u32 rpm_config_reg) 51 31 { ··· 52 72 } 53 73 } 54 74 55 - static u32 read_clock_frequency(struct intel_uncore *uncore) 75 + static u32 gen11_read_clock_frequency(struct intel_uncore *uncore) 56 76 { 57 - u32 f12_5_mhz = 12500000; 58 - u32 f19_2_mhz = 19200000; 59 - u32 f24_mhz = 24000000; 77 + u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); 78 + u32 freq = 0; 60 79 61 - if (GRAPHICS_VER(uncore->i915) <= 4) { 62 - /* 63 - * PRMs say: 64 - * 65 - * "The value in this register increments once every 16 66 - * hclks." (through the “Clocking Configuration” 67 - * (“CLKCFG”) MCHBAR register) 68 - */ 69 - return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16; 70 - } else if (GRAPHICS_VER(uncore->i915) <= 8) { 71 - /* 72 - * PRMs say: 73 - * 74 - * "The PCU TSC counts 10ns increments; this timestamp 75 - * reflects bits 38:3 of the TSC (i.e. 80ns granularity, 76 - * rolling over every 1.5 hours). 77 - */ 78 - return f12_5_mhz; 79 - } else if (GRAPHICS_VER(uncore->i915) <= 9) { 80 - u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); 81 - u32 freq = 0; 80 + /* 81 + * Note that on gen11+, the clock frequency may be reconfigured. 82 + * We do not, and we assume nobody else does. 83 + * 84 + * First figure out the reference frequency. 
There are 2 ways 85 + * we can compute the frequency, either through the 86 + * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE 87 + * tells us which one we should use. 88 + */ 89 + if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { 90 + freq = read_reference_ts_freq(uncore); 91 + } else { 92 + u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0); 82 93 83 - if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { 84 - freq = read_reference_ts_freq(uncore); 85 - } else { 86 - freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz; 87 - 88 - /* 89 - * Now figure out how the command stream's timestamp 90 - * register increments from this frequency (it might 91 - * increment only every few clock cycle). 92 - */ 93 - freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >> 94 - CTC_SHIFT_PARAMETER_SHIFT); 95 - } 96 - 97 - return freq; 98 - } else if (GRAPHICS_VER(uncore->i915) <= 12) { 99 - u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); 100 - u32 freq = 0; 94 + freq = gen11_get_crystal_clock_freq(uncore, c0); 101 95 102 96 /* 103 - * First figure out the reference frequency. There are 2 ways 104 - * we can compute the frequency, either through the 105 - * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE 106 - * tells us which one we should use. 97 + * Now figure out how the command stream's timestamp 98 + * register increments from this frequency (it might 99 + * increment only every few clock cycle). 
107 100 */ 108 - if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { 109 - freq = read_reference_ts_freq(uncore); 110 - } else { 111 - u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0); 112 - 113 - if (GRAPHICS_VER(uncore->i915) >= 11) 114 - freq = gen11_get_crystal_clock_freq(uncore, c0); 115 - else 116 - freq = gen9_get_crystal_clock_freq(uncore, c0); 117 - 118 - /* 119 - * Now figure out how the command stream's timestamp 120 - * register increments from this frequency (it might 121 - * increment only every few clock cycle). 122 - */ 123 - freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> 124 - GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT); 125 - } 126 - 127 - return freq; 101 + freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> 102 + GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT); 128 103 } 129 104 130 - MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n"); 131 - return 0; 105 + return freq; 106 + } 107 + 108 + static u32 gen9_read_clock_frequency(struct intel_uncore *uncore) 109 + { 110 + u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); 111 + u32 freq = 0; 112 + 113 + if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { 114 + freq = read_reference_ts_freq(uncore); 115 + } else { 116 + freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000; 117 + 118 + /* 119 + * Now figure out how the command stream's timestamp 120 + * register increments from this frequency (it might 121 + * increment only every few clock cycle). 122 + */ 123 + freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >> 124 + CTC_SHIFT_PARAMETER_SHIFT); 125 + } 126 + 127 + return freq; 128 + } 129 + 130 + static u32 gen5_read_clock_frequency(struct intel_uncore *uncore) 131 + { 132 + /* 133 + * PRMs say: 134 + * 135 + * "The PCU TSC counts 10ns increments; this timestamp 136 + * reflects bits 38:3 of the TSC (i.e. 80ns granularity, 137 + * rolling over every 1.5 hours). 
138 + */ 139 + return 12500000; 140 + } 141 + 142 + static u32 gen2_read_clock_frequency(struct intel_uncore *uncore) 143 + { 144 + /* 145 + * PRMs say: 146 + * 147 + * "The value in this register increments once every 16 148 + * hclks." (through the “Clocking Configuration” 149 + * (“CLKCFG”) MCHBAR register) 150 + */ 151 + return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16; 152 + } 153 + 154 + static u32 read_clock_frequency(struct intel_uncore *uncore) 155 + { 156 + if (GRAPHICS_VER(uncore->i915) >= 11) 157 + return gen11_read_clock_frequency(uncore); 158 + else if (GRAPHICS_VER(uncore->i915) >= 9) 159 + return gen9_read_clock_frequency(uncore); 160 + else if (GRAPHICS_VER(uncore->i915) >= 5) 161 + return gen5_read_clock_frequency(uncore); 162 + else 163 + return gen2_read_clock_frequency(uncore); 132 164 } 133 165 134 166 void intel_gt_init_clock_frequency(struct intel_gt *gt) 135 167 { 136 - /* 137 - * Note that on gen11+, the clock frequency may be reconfigured. 138 - * We do not, and we assume nobody else does. 139 - */ 140 168 gt->clock_frequency = read_clock_frequency(gt->uncore); 141 - if (gt->clock_frequency) 142 - gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1); 143 169 144 170 /* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */ 145 171 if (GRAPHICS_VER(gt->i915) == 11) 146 172 gt->clock_period_ns = NSEC_PER_SEC / 13750000; 173 + else if (gt->clock_frequency) 174 + gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1); 147 175 148 176 GT_TRACE(gt, 149 177 "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
+27 -8
drivers/gpu/drm/i915/gt/intel_gt_irq.c
··· 29 29 u32 timeout_ts; 30 30 u32 ident; 31 31 32 - lockdep_assert_held(&gt->irq_lock); 32 + lockdep_assert_held(gt->irq_lock); 33 33 34 34 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 35 35 ··· 59 59 gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, 60 60 const u16 iir) 61 61 { 62 + struct intel_gt *media_gt = gt->i915->media_gt; 63 + 62 64 if (instance == OTHER_GUC_INSTANCE) 63 65 return guc_irq_handler(&gt->uc.guc, iir); 66 + if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt) 67 + return guc_irq_handler(&media_gt->uc.guc, iir); 64 68 65 69 if (instance == OTHER_GTPM_INSTANCE) 66 70 return gen11_rps_irq_handler(&gt->rps, iir); 71 + if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt) 72 + return gen11_rps_irq_handler(&media_gt->rps, iir); 67 73 68 74 if (instance == OTHER_KCR_INSTANCE) 69 75 return intel_pxp_irq_handler(&gt->pxp, iir); ··· 87 81 { 88 82 struct intel_engine_cs *engine; 89 83 84 + /* 85 + * Platforms with standalone media have their media engines in another 86 + * GT. 
87 + */ 88 + if (MEDIA_VER(gt->i915) >= 13 && 89 + (class == VIDEO_DECODE_CLASS || class == VIDEO_ENHANCEMENT_CLASS)) { 90 + if (!gt->i915->media_gt) 91 + goto err; 92 + 93 + gt = gt->i915->media_gt; 94 + } 95 + 90 96 if (instance <= MAX_ENGINE_INSTANCE) 91 97 engine = gt->engine_class[class][instance]; 92 98 else ··· 107 89 if (likely(engine)) 108 90 return intel_engine_cs_irq(engine, iir); 109 91 92 + err: 110 93 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 111 94 class, instance); 112 95 } ··· 139 120 unsigned long intr_dw; 140 121 unsigned int bit; 141 122 142 - lockdep_assert_held(&gt->irq_lock); 123 + lockdep_assert_held(gt->irq_lock); 143 124 144 125 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 145 126 ··· 157 138 { 158 139 unsigned int bank; 159 140 160 - spin_lock(&gt->irq_lock); 141 + spin_lock(gt->irq_lock); 161 142 162 143 for (bank = 0; bank < 2; bank++) { 163 144 if (master_ctl & GEN11_GT_DW_IRQ(bank)) 164 145 gen11_gt_bank_handler(gt, bank); 165 146 } 166 147 167 - spin_unlock(&gt->irq_lock); 148 + spin_unlock(gt->irq_lock); 168 149 } 169 150 170 151 bool gen11_gt_reset_one_iir(struct intel_gt *gt, ··· 173 154 void __iomem * const regs = gt->uncore->regs; 174 155 u32 dw; 175 156 176 - lockdep_assert_held(&gt->irq_lock); 157 + lockdep_assert_held(gt->irq_lock); 177 158 178 159 dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 179 160 if (dw & BIT(bit)) { ··· 329 310 if (!HAS_L3_DPF(gt->i915)) 330 311 return; 331 312 332 - spin_lock(&gt->irq_lock); 313 + spin_lock(gt->irq_lock); 333 314 gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915)); 334 - spin_unlock(&gt->irq_lock); 315 + spin_unlock(gt->irq_lock); 335 316 336 317 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 337 318 gt->i915->l3_parity.which_slice |= 1 << 1; ··· 453 434 u32 interrupt_mask, 454 435 u32 enabled_irq_mask) 455 436 { 456 - lockdep_assert_held(&gt->irq_lock); 437 + lockdep_assert_held(gt->irq_lock); 457 438 458 439 GEM_BUG_ON(enabled_irq_mask & 
~interrupt_mask); 459 440
+4 -4
drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c
··· 37 37 38 38 WARN_ON(enabled_irq_mask & ~interrupt_mask); 39 39 40 - lockdep_assert_held(&gt->irq_lock); 40 + lockdep_assert_held(gt->irq_lock); 41 41 42 42 new_val = gt->pm_imr; 43 43 new_val &= ~interrupt_mask; ··· 64 64 struct intel_uncore *uncore = gt->uncore; 65 65 i915_reg_t reg = GRAPHICS_VER(gt->i915) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 66 66 67 - lockdep_assert_held(&gt->irq_lock); 67 + lockdep_assert_held(gt->irq_lock); 68 68 69 69 intel_uncore_write(uncore, reg, reset_mask); 70 70 intel_uncore_write(uncore, reg, reset_mask); ··· 92 92 93 93 void gen6_gt_pm_enable_irq(struct intel_gt *gt, u32 enable_mask) 94 94 { 95 - lockdep_assert_held(&gt->irq_lock); 95 + lockdep_assert_held(gt->irq_lock); 96 96 97 97 gt->pm_ier |= enable_mask; 98 98 write_pm_ier(gt); ··· 101 101 102 102 void gen6_gt_pm_disable_irq(struct intel_gt *gt, u32 disable_mask) 103 103 { 104 - lockdep_assert_held(&gt->irq_lock); 104 + lockdep_assert_held(gt->irq_lock); 105 105 106 106 gt->pm_ier &= ~disable_mask; 107 107 gen6_gt_pm_mask_irq(gt, disable_mask);
+10
drivers/gpu/drm/i915/gt/intel_gt_regs.h
··· 1554 1554 #define OTHER_GTPM_INSTANCE 1 1555 1555 #define OTHER_KCR_INSTANCE 4 1556 1556 #define OTHER_GSC_INSTANCE 6 1557 + #define OTHER_MEDIA_GUC_INSTANCE 16 1558 + #define OTHER_MEDIA_GTPM_INSTANCE 17 1557 1559 1558 1560 #define GEN11_IIR_REG_SELECTOR(x) _MMIO(0x190070 + ((x) * 4)) 1559 1561 ··· 1579 1577 #define XEHPC_BCS7_BCS8_INTR_MASK _MMIO(0x19011c) 1580 1578 1581 1579 #define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000) 1580 + 1581 + /* 1582 + * Standalone Media's non-engine GT registers are located at their regular GT 1583 + * offsets plus 0x380000. This extra offset is stored inside the intel_uncore 1584 + * structure so that the existing code can be used for both GTs without 1585 + * modification. 1586 + */ 1587 + #define MTL_MEDIA_GSI_BASE 0x380000 1582 1588 1583 1589 #endif /* __INTEL_GT_REGS__ */
+35 -1
drivers/gpu/drm/i915/gt/intel_gt_types.h
··· 81 81 u32 max_freq; 82 82 }; 83 83 84 + enum intel_gt_type { 85 + GT_PRIMARY, 86 + GT_TILE, 87 + GT_MEDIA, 88 + }; 89 + 84 90 struct intel_gt { 85 91 struct drm_i915_private *i915; 92 + const char *name; 93 + enum intel_gt_type type; 94 + 86 95 struct intel_uncore *uncore; 87 96 struct i915_ggtt *ggtt; 88 97 ··· 141 132 struct intel_wakeref wakeref; 142 133 atomic_t user_wakeref; 143 134 135 + /** 136 + * Protects access to lmem userfault list. 137 + * It is required, if we are outside of the runtime suspend path, 138 + * access to @lmem_userfault_list requires always first grabbing the 139 + * runtime pm, to ensure we can't race against runtime suspend. 140 + * Once we have that we also need to grab @lmem_userfault_lock, 141 + * at which point we have exclusive access. 142 + * The runtime suspend path is special since it doesn't really hold any locks, 143 + * but instead has exclusive access by virtue of all other accesses requiring 144 + * holding the runtime pm wakeref. 145 + */ 146 + struct mutex lmem_userfault_lock; 147 + struct list_head lmem_userfault_list; 148 + 144 149 struct list_head closed_vma; 145 150 spinlock_t closed_lock; /* guards the list of closed_vma */ 146 151 ··· 170 147 */ 171 148 intel_wakeref_t awake; 172 149 150 + /* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */ 151 + struct intel_wakeref_auto userfault_wakeref; 152 + 173 153 u32 clock_frequency; 174 154 u32 clock_period_ns; 175 155 ··· 180 154 struct intel_rc6 rc6; 181 155 struct intel_rps rps; 182 156 183 - spinlock_t irq_lock; 157 + spinlock_t *irq_lock; 184 158 u32 gt_imr; 185 159 u32 pm_ier; 186 160 u32 pm_imr; ··· 286 260 /* sysfs defaults per gt */ 287 261 struct gt_defaults defaults; 288 262 struct kobject *sysfs_defaults; 263 + }; 264 + 265 + struct intel_gt_definition { 266 + enum intel_gt_type type; 267 + char *name; 268 + u32 mapping_base; 269 + u32 gsi_offset; 270 + intel_engine_mask_t engine_mask; 289 271 }; 290 272 291 273 enum intel_gt_scratch_field {
-3
drivers/gpu/drm/i915/gt/intel_gtt.h
··· 386 386 */ 387 387 struct list_head userfault_list; 388 388 389 - /* Manual runtime pm autosuspend delay for user GGTT mmaps */ 390 - struct intel_wakeref_auto userfault_wakeref; 391 - 392 389 struct mutex error_mutex; 393 390 struct drm_mm_node error_capture; 394 391 struct drm_mm_node uc_fw;
+26 -3
drivers/gpu/drm/i915/gt/intel_lrc.c
··· 662 662 return -1; 663 663 } 664 664 665 + static int lrc_ring_bb_offset(const struct intel_engine_cs *engine) 666 + { 667 + if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) 668 + return 0x80; 669 + else if (GRAPHICS_VER(engine->i915) >= 12) 670 + return 0x70; 671 + else if (GRAPHICS_VER(engine->i915) >= 9) 672 + return 0x64; 673 + else if (GRAPHICS_VER(engine->i915) >= 8 && 674 + engine->class == RENDER_CLASS) 675 + return 0xc4; 676 + else 677 + return -1; 678 + } 679 + 665 680 static int lrc_ring_gpr0(const struct intel_engine_cs *engine) 666 681 { 667 682 if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) ··· 783 768 bool inhibit) 784 769 { 785 770 u32 ctl; 771 + int loc; 786 772 787 773 ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); 788 774 ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); ··· 795 779 regs[CTX_CONTEXT_CONTROL] = ctl; 796 780 797 781 regs[CTX_TIMESTAMP] = ce->stats.runtime.last; 782 + 783 + loc = lrc_ring_bb_offset(engine); 784 + if (loc != -1) 785 + regs[loc + 1] = 0; 798 786 } 799 787 800 788 static void init_wa_bb_regs(u32 * const regs, ··· 1298 1278 1299 1279 /* hsdes: 1809175790 */ 1300 1280 if (!HAS_FLAT_CCS(ce->engine->i915)) 1301 - cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV); 1281 + cs = gen12_emit_aux_table_inv(ce->engine->gt, 1282 + cs, GEN12_GFX_CCS_AUX_NV); 1302 1283 1303 1284 /* Wa_16014892111 */ 1304 1285 if (IS_DG2(ce->engine->i915)) ··· 1325 1304 /* hsdes: 1809175790 */ 1326 1305 if (!HAS_FLAT_CCS(ce->engine->i915)) { 1327 1306 if (ce->engine->class == VIDEO_DECODE_CLASS) 1328 - cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV); 1307 + cs = gen12_emit_aux_table_inv(ce->engine->gt, 1308 + cs, GEN12_VD0_AUX_NV); 1329 1309 else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS) 1330 - cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV); 1310 + cs = gen12_emit_aux_table_inv(ce->engine->gt, 1311 + cs, GEN12_VE0_AUX_NV); 1331 1312 } 1332 1313 1333 1314 return cs;
+13 -13
drivers/gpu/drm/i915/gt/intel_rps.c
··· 194 194 195 195 rps_reset_ei(rps); 196 196 197 - spin_lock_irq(&gt->irq_lock); 197 + spin_lock_irq(gt->irq_lock); 198 198 gen6_gt_pm_enable_irq(gt, rps->pm_events); 199 - spin_unlock_irq(&gt->irq_lock); 199 + spin_unlock_irq(gt->irq_lock); 200 200 201 201 intel_uncore_write(gt->uncore, 202 202 GEN6_PMINTRMSK, rps_pm_mask(rps, rps->last_freq)); ··· 217 217 { 218 218 struct intel_gt *gt = rps_to_gt(rps); 219 219 220 - spin_lock_irq(&gt->irq_lock); 220 + spin_lock_irq(gt->irq_lock); 221 221 if (GRAPHICS_VER(gt->i915) >= 11) 222 222 gen11_rps_reset_interrupts(rps); 223 223 else 224 224 gen6_rps_reset_interrupts(rps); 225 225 226 226 rps->pm_iir = 0; 227 - spin_unlock_irq(&gt->irq_lock); 227 + spin_unlock_irq(gt->irq_lock); 228 228 } 229 229 230 230 static void rps_disable_interrupts(struct intel_rps *rps) ··· 234 234 intel_uncore_write(gt->uncore, 235 235 GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u)); 236 236 237 - spin_lock_irq(&gt->irq_lock); 237 + spin_lock_irq(gt->irq_lock); 238 238 gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); 239 - spin_unlock_irq(&gt->irq_lock); 239 + spin_unlock_irq(gt->irq_lock); 240 240 241 241 intel_synchronize_irq(gt->i915); 242 242 ··· 1797 1797 int new_freq, adj, min, max; 1798 1798 u32 pm_iir = 0; 1799 1799 1800 - spin_lock_irq(&gt->irq_lock); 1800 + spin_lock_irq(gt->irq_lock); 1801 1801 pm_iir = fetch_and_zero(&rps->pm_iir) & rps->pm_events; 1802 1802 client_boost = atomic_read(&rps->num_waiters); 1803 - spin_unlock_irq(&gt->irq_lock); 1803 + spin_unlock_irq(gt->irq_lock); 1804 1804 1805 1805 /* Make sure we didn't queue anything we're not going to process. 
*/ 1806 1806 if (!pm_iir && !client_boost) ··· 1873 1873 mutex_unlock(&rps->lock); 1874 1874 1875 1875 out: 1876 - spin_lock_irq(&gt->irq_lock); 1876 + spin_lock_irq(gt->irq_lock); 1877 1877 gen6_gt_pm_unmask_irq(gt, rps->pm_events); 1878 - spin_unlock_irq(&gt->irq_lock); 1878 + spin_unlock_irq(gt->irq_lock); 1879 1879 } 1880 1880 1881 1881 void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) ··· 1883 1883 struct intel_gt *gt = rps_to_gt(rps); 1884 1884 const u32 events = rps->pm_events & pm_iir; 1885 1885 1886 - lockdep_assert_held(&gt->irq_lock); 1886 + lockdep_assert_held(gt->irq_lock); 1887 1887 1888 1888 if (unlikely(!events)) 1889 1889 return; ··· 1903 1903 1904 1904 events = pm_iir & rps->pm_events; 1905 1905 if (events) { 1906 - spin_lock(&gt->irq_lock); 1906 + spin_lock(gt->irq_lock); 1907 1907 1908 1908 GT_TRACE(gt, "irq events:%x\n", events); 1909 1909 ··· 1911 1911 rps->pm_iir |= events; 1912 1912 1913 1913 schedule_work(&rps->work); 1914 - spin_unlock(&gt->irq_lock); 1914 + spin_unlock(gt->irq_lock); 1915 1915 } 1916 1916 1917 1917 if (GRAPHICS_VER(gt->i915) >= 8)
+47
drivers/gpu/drm/i915/gt/intel_sa_media.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #include <drm/drm_managed.h> 7 + 8 + #include "i915_drv.h" 9 + #include "gt/intel_gt.h" 10 + #include "gt/intel_sa_media.h" 11 + 12 + int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr, 13 + u32 gsi_offset) 14 + { 15 + struct drm_i915_private *i915 = gt->i915; 16 + struct intel_uncore *uncore; 17 + 18 + uncore = drmm_kzalloc(&i915->drm, sizeof(*uncore), GFP_KERNEL); 19 + if (!uncore) 20 + return -ENOMEM; 21 + 22 + uncore->gsi_offset = gsi_offset; 23 + 24 + gt->irq_lock = to_gt(i915)->irq_lock; 25 + intel_gt_common_init_early(gt); 26 + intel_uncore_init_early(uncore, gt); 27 + 28 + /* 29 + * Standalone media shares the general MMIO space with the primary 30 + * GT. We'll re-use the primary GT's mapping. 31 + */ 32 + uncore->regs = i915->uncore.regs; 33 + if (drm_WARN_ON(&i915->drm, uncore->regs == NULL)) 34 + return -EIO; 35 + 36 + gt->uncore = uncore; 37 + gt->phys_addr = phys_addr; 38 + 39 + /* 40 + * For current platforms we can assume there's only a single 41 + * media GT and cache it for quick lookup. 42 + */ 43 + drm_WARN_ON(&i915->drm, i915->media_gt); 44 + i915->media_gt = gt; 45 + 46 + return 0; 47 + }
+15
drivers/gpu/drm/i915/gt/intel_sa_media.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + #ifndef __INTEL_SA_MEDIA__ 6 + #define __INTEL_SA_MEDIA__ 7 + 8 + #include <linux/types.h> 9 + 10 + struct intel_gt; 11 + 12 + int intel_sa_mediagt_setup(struct intel_gt *gt, phys_addr_t phys_addr, 13 + u32 gsi_offset); 14 + 15 + #endif /* __INTEL_SA_MEDIA__ */
+95 -20
drivers/gpu/drm/i915/gt/selftest_lrc.c
··· 27 27 #define NUM_GPR 16 28 28 #define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */ 29 29 30 + #define LRI_HEADER MI_INSTR(0x22, 0) 31 + #define LRI_LENGTH_MASK GENMASK(7, 0) 32 + 30 33 static struct i915_vma *create_scratch(struct intel_gt *gt) 31 34 { 32 35 return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE); ··· 205 202 continue; 206 203 } 207 204 208 - if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { 205 + if ((lri & GENMASK(31, 23)) != LRI_HEADER) { 209 206 pr_err("%s: Expected LRI command at dword %d, found %08x\n", 210 207 engine->name, dw, lri); 211 208 err = -EINVAL; ··· 359 356 i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)), 360 357 lrc_ring_cmd_buf_cctl(engine), 361 358 "RING_CMD_BUF_CCTL" 359 + }, 360 + { 361 + i915_mmio_reg_offset(RING_BB_OFFSET(engine->mmio_base)), 362 + lrc_ring_bb_offset(engine), 363 + "RING_BB_OFFSET" 362 364 }, 363 365 { }, 364 366 }, *t; ··· 995 987 hw = defaults; 996 988 hw += LRC_STATE_OFFSET / sizeof(*hw); 997 989 do { 998 - u32 len = hw[dw] & 0x7f; 990 + u32 len = hw[dw] & LRI_LENGTH_MASK; 991 + 992 + /* 993 + * Keep it simple, skip parsing complex commands 994 + * 995 + * At present, there are no more MI_LOAD_REGISTER_IMM 996 + * commands after the first 3D state command. Rather 997 + * than include a table (see i915_cmd_parser.c) of all 998 + * the possible commands and their instruction lengths 999 + * (or mask for variable length instructions), assume 1000 + * we have gathered the complete list of registers and 1001 + * bail out. 
1002 + */ 1003 + if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT) 1004 + break; 999 1005 1000 1006 if (hw[dw] == 0) { 1001 1007 dw++; 1002 1008 continue; 1003 1009 } 1004 1010 1005 - if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { 1011 + if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) { 1012 + /* Assume all other MI commands match LRI length mask */ 1006 1013 dw += len + 2; 1007 1014 continue; 1015 + } 1016 + 1017 + if (!len) { 1018 + pr_err("%s: invalid LRI found in context image\n", 1019 + ce->engine->name); 1020 + igt_hexdump(defaults, PAGE_SIZE); 1021 + break; 1008 1022 } 1009 1023 1010 1024 dw++; ··· 1180 1150 hw = defaults; 1181 1151 hw += LRC_STATE_OFFSET / sizeof(*hw); 1182 1152 do { 1183 - u32 len = hw[dw] & 0x7f; 1153 + u32 len = hw[dw] & LRI_LENGTH_MASK; 1154 + 1155 + /* For simplicity, break parsing at the first complex command */ 1156 + if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT) 1157 + break; 1184 1158 1185 1159 if (hw[dw] == 0) { 1186 1160 dw++; 1187 1161 continue; 1188 1162 } 1189 1163 1190 - if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { 1164 + if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) { 1191 1165 dw += len + 2; 1192 1166 continue; 1167 + } 1168 + 1169 + if (!len) { 1170 + pr_err("%s: invalid LRI found in context image\n", 1171 + ce->engine->name); 1172 + igt_hexdump(defaults, PAGE_SIZE); 1173 + break; 1193 1174 } 1194 1175 1195 1176 dw++; ··· 1333 1292 hw = defaults; 1334 1293 hw += LRC_STATE_OFFSET / sizeof(*hw); 1335 1294 do { 1336 - u32 len = hw[dw] & 0x7f; 1295 + u32 len = hw[dw] & LRI_LENGTH_MASK; 1296 + 1297 + /* For simplicity, break parsing at the first complex command */ 1298 + if ((hw[dw] >> INSTR_CLIENT_SHIFT) != INSTR_MI_CLIENT) 1299 + break; 1337 1300 1338 1301 if (hw[dw] == 0) { 1339 1302 dw++; 1340 1303 continue; 1341 1304 } 1342 1305 1343 - if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { 1306 + if ((hw[dw] & GENMASK(31, 23)) != LRI_HEADER) { 1344 1307 dw += len + 2; 1345 1308 continue; 1309 
+ } 1310 + 1311 + if (!len) { 1312 + pr_err("%s: invalid LRI found in context image\n", 1313 + engine->name); 1314 + igt_hexdump(defaults, PAGE_SIZE); 1315 + break; 1346 1316 } 1347 1317 1348 1318 dw++; ··· 1395 1343 return err; 1396 1344 } 1397 1345 1346 + static struct i915_vma * 1347 + create_result_vma(struct i915_address_space *vm, unsigned long sz) 1348 + { 1349 + struct i915_vma *vma; 1350 + void *ptr; 1351 + 1352 + vma = create_user_vma(vm, sz); 1353 + if (IS_ERR(vma)) 1354 + return vma; 1355 + 1356 + /* Set the results to a known value distinct from the poison */ 1357 + ptr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC); 1358 + if (IS_ERR(ptr)) { 1359 + i915_vma_put(vma); 1360 + return ERR_CAST(ptr); 1361 + } 1362 + 1363 + memset(ptr, POISON_INUSE, vma->size); 1364 + i915_gem_object_flush_map(vma->obj); 1365 + i915_gem_object_unpin_map(vma->obj); 1366 + 1367 + return vma; 1368 + } 1369 + 1398 1370 static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison) 1399 1371 { 1400 1372 u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1); ··· 1437 1361 goto err_A; 1438 1362 } 1439 1363 1440 - ref[0] = create_user_vma(A->vm, SZ_64K); 1364 + ref[0] = create_result_vma(A->vm, SZ_64K); 1441 1365 if (IS_ERR(ref[0])) { 1442 1366 err = PTR_ERR(ref[0]); 1443 1367 goto err_B; 1444 1368 } 1445 1369 1446 - ref[1] = create_user_vma(A->vm, SZ_64K); 1370 + ref[1] = create_result_vma(A->vm, SZ_64K); 1447 1371 if (IS_ERR(ref[1])) { 1448 1372 err = PTR_ERR(ref[1]); 1449 1373 goto err_ref0; ··· 1465 1389 } 1466 1390 i915_request_put(rq); 1467 1391 1468 - result[0] = create_user_vma(A->vm, SZ_64K); 1392 + result[0] = create_result_vma(A->vm, SZ_64K); 1469 1393 if (IS_ERR(result[0])) { 1470 1394 err = PTR_ERR(result[0]); 1471 1395 goto err_ref1; 1472 1396 } 1473 1397 1474 - result[1] = create_user_vma(A->vm, SZ_64K); 1398 + result[1] = create_result_vma(A->vm, SZ_64K); 1475 1399 if (IS_ERR(result[1])) { 1476 1400 err = PTR_ERR(result[1]); 1477 1401 goto 
err_result0; ··· 1484 1408 } 1485 1409 1486 1410 err = poison_registers(B, poison, sema); 1487 - if (err) { 1488 - WRITE_ONCE(*sema, -1); 1489 - i915_request_put(rq); 1490 - goto err_result1; 1411 + if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) { 1412 + pr_err("%s(%s): wait for results timed out\n", 1413 + __func__, engine->name); 1414 + err = -ETIME; 1491 1415 } 1492 1416 1493 - if (i915_request_wait(rq, 0, HZ / 2) < 0) { 1494 - i915_request_put(rq); 1495 - err = -ETIME; 1496 - goto err_result1; 1497 - } 1417 + /* Always cancel the semaphore wait, just in case the GPU gets stuck */ 1418 + WRITE_ONCE(*sema, -1); 1498 1419 i915_request_put(rq); 1420 + if (err) 1421 + goto err_result1; 1499 1422 1500 1423 err = compare_isolation(engine, ref, result, A, poison); 1501 1424
+12 -12
drivers/gpu/drm/i915/gt/uc/intel_guc.c
··· 82 82 83 83 assert_rpm_wakelock_held(&gt->i915->runtime_pm); 84 84 85 - spin_lock_irq(&gt->irq_lock); 85 + spin_lock_irq(gt->irq_lock); 86 86 gen6_gt_pm_reset_iir(gt, gt->pm_guc_events); 87 - spin_unlock_irq(&gt->irq_lock); 87 + spin_unlock_irq(gt->irq_lock); 88 88 } 89 89 90 90 static void gen9_enable_guc_interrupts(struct intel_guc *guc) ··· 93 93 94 94 assert_rpm_wakelock_held(&gt->i915->runtime_pm); 95 95 96 - spin_lock_irq(&gt->irq_lock); 96 + spin_lock_irq(gt->irq_lock); 97 97 WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & 98 98 gt->pm_guc_events); 99 99 gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); 100 - spin_unlock_irq(&gt->irq_lock); 100 + spin_unlock_irq(gt->irq_lock); 101 101 } 102 102 103 103 static void gen9_disable_guc_interrupts(struct intel_guc *guc) ··· 106 106 107 107 assert_rpm_wakelock_held(&gt->i915->runtime_pm); 108 108 109 - spin_lock_irq(&gt->irq_lock); 109 + spin_lock_irq(gt->irq_lock); 110 110 111 111 gen6_gt_pm_disable_irq(gt, gt->pm_guc_events); 112 112 113 - spin_unlock_irq(&gt->irq_lock); 113 + spin_unlock_irq(gt->irq_lock); 114 114 intel_synchronize_irq(gt->i915); 115 115 116 116 gen9_reset_guc_interrupts(guc); ··· 120 120 { 121 121 struct intel_gt *gt = guc_to_gt(guc); 122 122 123 - spin_lock_irq(&gt->irq_lock); 123 + spin_lock_irq(gt->irq_lock); 124 124 gen11_gt_reset_one_iir(gt, 0, GEN11_GUC); 125 - spin_unlock_irq(&gt->irq_lock); 125 + spin_unlock_irq(gt->irq_lock); 126 126 } 127 127 128 128 static void gen11_enable_guc_interrupts(struct intel_guc *guc) ··· 130 130 struct intel_gt *gt = guc_to_gt(guc); 131 131 u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); 132 132 133 - spin_lock_irq(&gt->irq_lock); 133 + spin_lock_irq(gt->irq_lock); 134 134 WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); 135 135 intel_uncore_write(gt->uncore, 136 136 GEN11_GUC_SG_INTR_ENABLE, events); 137 137 intel_uncore_write(gt->uncore, 138 138 GEN11_GUC_SG_INTR_MASK, ~events); 139 - spin_unlock_irq(&gt->irq_lock); 139 
+ spin_unlock_irq(gt->irq_lock); 140 140 } 141 141 142 142 static void gen11_disable_guc_interrupts(struct intel_guc *guc) 143 143 { 144 144 struct intel_gt *gt = guc_to_gt(guc); 145 145 146 - spin_lock_irq(&gt->irq_lock); 146 + spin_lock_irq(gt->irq_lock); 147 147 148 148 intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); 149 149 intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); 150 150 151 - spin_unlock_irq(&gt->irq_lock); 151 + spin_unlock_irq(gt->irq_lock); 152 152 intel_synchronize_irq(gt->i915); 153 153 154 154 gen11_reset_guc_interrupts(guc);
+8 -3
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
··· 1438 1438 if (!guc_submission_initialized(guc)) 1439 1439 return; 1440 1440 1441 - cancel_delayed_work(&guc->timestamp.work); 1441 + /* 1442 + * There is a race with suspend flow where the worker runs after suspend 1443 + * and causes an unclaimed register access warning. Cancel the worker 1444 + * synchronously here. 1445 + */ 1446 + cancel_delayed_work_sync(&guc->timestamp.work); 1442 1447 1443 1448 /* 1444 1449 * Before parking, we should sample engine busyness stats if we need to. ··· 1537 1532 __reset_guc_busyness_stats(guc); 1538 1533 1539 1534 /* Flush IRQ handler */ 1540 - spin_lock_irq(&guc_to_gt(guc)->irq_lock); 1541 - spin_unlock_irq(&guc_to_gt(guc)->irq_lock); 1535 + spin_lock_irq(guc_to_gt(guc)->irq_lock); 1536 + spin_unlock_irq(guc_to_gt(guc)->irq_lock); 1542 1537 1543 1538 guc_flush_submissions(guc); 1544 1539 guc_flush_destroyed_contexts(guc);
+2 -2
drivers/gpu/drm/i915/gt/uc/intel_uc.c
··· 245 245 intel_guc_enable_interrupts(guc); 246 246 247 247 /* check for CT messages received before we enabled interrupts */ 248 - spin_lock_irq(&gt->irq_lock); 248 + spin_lock_irq(gt->irq_lock); 249 249 intel_guc_ct_event_handler(&guc->ct); 250 - spin_unlock_irq(&gt->irq_lock); 250 + spin_unlock_irq(gt->irq_lock); 251 251 252 252 drm_dbg(&i915->drm, "GuC communication enabled\n"); 253 253
+22 -8
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
··· 72 72 * security fixes, etc. to be enabled. 73 73 */ 74 74 #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \ 75 - fw_def(DG2, 0, guc_mmp(dg2, 70, 4, 1)) \ 75 + fw_def(DG2, 0, guc_maj(dg2, 70, 5)) \ 76 + fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5)) \ 76 77 fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \ 77 78 fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \ 79 + fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5)) \ 78 80 fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \ 79 81 fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \ 80 - fw_def(DG1, 0, guc_mmp(dg1, 70, 1, 1)) \ 82 + fw_def(DG1, 0, guc_maj(dg1, 70, 5)) \ 81 83 fw_def(ROCKETLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \ 82 84 fw_def(TIGERLAKE, 0, guc_mmp(tgl, 70, 1, 1)) \ 83 85 fw_def(JASPERLAKE, 0, guc_mmp(ehl, 70, 1, 1)) \ ··· 94 92 fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1)) 95 93 96 94 #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp) \ 95 + fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \ 97 96 fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \ 97 + fw_def(ALDERLAKE_S, 0, huc_raw(tgl)) \ 98 98 fw_def(ALDERLAKE_S, 0, huc_mmp(tgl, 7, 9, 3)) \ 99 - fw_def(DG1, 0, huc_mmp(dg1, 7, 9, 3)) \ 99 + fw_def(DG1, 0, huc_raw(dg1)) \ 100 100 fw_def(ROCKETLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \ 101 101 fw_def(TIGERLAKE, 0, huc_mmp(tgl, 7, 9, 3)) \ 102 102 fw_def(JASPERLAKE, 0, huc_mmp(ehl, 9, 0, 0)) \ ··· 236 232 u32 fw_count; 237 233 u8 rev = INTEL_REVID(i915); 238 234 int i; 235 + bool found; 239 236 240 237 /* 241 238 * The only difference between the ADL GuC FWs is the HWConfig support. 
··· 251 246 fw_blobs = blobs_all[uc_fw->type].blobs; 252 247 fw_count = blobs_all[uc_fw->type].count; 253 248 249 + found = false; 254 250 for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) { 255 251 const struct uc_fw_blob *blob = &fw_blobs[i].blob; 256 252 ··· 272 266 uc_fw->file_wanted.path = blob->path; 273 267 uc_fw->file_wanted.major_ver = blob->major; 274 268 uc_fw->file_wanted.minor_ver = blob->minor; 269 + found = true; 275 270 break; 271 + } 272 + 273 + if (!found && uc_fw->file_selected.path) { 274 + /* Failed to find a match for the last attempt?! */ 275 + uc_fw->file_selected.path = NULL; 276 276 } 277 277 278 278 /* make sure the list is ordered as expected */ ··· 334 322 continue; 335 323 336 324 bad: 337 - drm_err(&i915->drm, "\x1B[35;1mInvalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n", 325 + drm_err(&i915->drm, "Invalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n", 338 326 intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev, 339 327 fw_blobs[i - 1].blob.legacy ? "L" : "v", 340 328 fw_blobs[i - 1].blob.major, ··· 565 553 566 554 err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev); 567 555 memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal)); 568 - if (!err || intel_uc_fw_is_overridden(uc_fw)) 569 - goto done; 556 + 557 + /* Any error is terminal if overriding. Don't bother searching for older versions */ 558 + if (err && intel_uc_fw_is_overridden(uc_fw)) 559 + goto fail; 570 560 571 561 while (err == -ENOENT) { 562 + old_ver = true; 563 + 572 564 __uc_fw_auto_select(i915, uc_fw); 573 565 if (!uc_fw->file_selected.path) { 574 566 /* ··· 592 576 if (err) 593 577 goto fail; 594 578 595 - old_ver = true; 596 - done: 597 579 if (uc_fw->loaded_via_gsc) 598 580 err = check_gsc_manifest(fw, uc_fw); 599 581 else
+74 -31
drivers/gpu/drm/i915/i915_driver.c
··· 105 105 106 106 static const struct drm_driver i915_drm_driver; 107 107 108 + static void i915_release_bridge_dev(struct drm_device *dev, 109 + void *bridge) 110 + { 111 + pci_dev_put(bridge); 112 + } 113 + 108 114 static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) 109 115 { 110 116 int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus); ··· 121 115 drm_err(&dev_priv->drm, "bridge device not found\n"); 122 116 return -EIO; 123 117 } 124 - return 0; 118 + 119 + return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev, 120 + dev_priv->bridge_dev); 125 121 } 126 122 127 123 /* Allocate space for the MCH regs if needed, return nonzero on error */ ··· 310 302 311 303 static void sanitize_gpu(struct drm_i915_private *i915) 312 304 { 313 - if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) 314 - __intel_gt_reset(to_gt(i915), ALL_ENGINES); 305 + if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) { 306 + struct intel_gt *gt; 307 + unsigned int i; 308 + 309 + for_each_gt(gt, i915, i) 310 + __intel_gt_reset(gt, ALL_ENGINES); 311 + } 315 312 } 316 313 317 314 /** ··· 339 326 intel_device_info_subplatform_init(dev_priv); 340 327 intel_step_init(dev_priv); 341 328 342 - intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug); 329 + intel_uncore_mmio_debug_init_early(dev_priv); 343 330 344 331 spin_lock_init(&dev_priv->irq_lock); 345 332 spin_lock_init(&dev_priv->gpu_error.lock); ··· 370 357 371 358 intel_wopcm_init_early(&dev_priv->wopcm); 372 359 373 - intel_root_gt_init_early(dev_priv); 360 + ret = intel_root_gt_init_early(dev_priv); 361 + if (ret < 0) 362 + goto err_rootgt; 374 363 375 364 i915_drm_clients_init(&dev_priv->clients, dev_priv); 376 365 ··· 397 382 i915_gem_cleanup_early(dev_priv); 398 383 intel_gt_driver_late_release_all(dev_priv); 399 384 i915_drm_clients_fini(&dev_priv->clients); 385 + err_rootgt: 400 386 intel_region_ttm_device_fini(dev_priv); 401 387 err_ttm: 402 388 vlv_suspend_cleanup(dev_priv); ··· 439 423 */ 
440 424 static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) 441 425 { 442 - int ret; 426 + struct intel_gt *gt; 427 + int ret, i; 443 428 444 429 if (i915_inject_probe_failure(dev_priv)) 445 430 return -ENODEV; ··· 449 432 if (ret < 0) 450 433 return ret; 451 434 452 - ret = intel_uncore_init_mmio(&dev_priv->uncore); 453 - if (ret) 454 - return ret; 435 + for_each_gt(gt, dev_priv, i) { 436 + ret = intel_uncore_init_mmio(gt->uncore); 437 + if (ret) 438 + return ret; 439 + 440 + ret = drmm_add_action_or_reset(&dev_priv->drm, 441 + intel_uncore_fini_mmio, 442 + gt->uncore); 443 + if (ret) 444 + return ret; 445 + } 455 446 456 447 /* Try to make sure MCHBAR is enabled before poking at it */ 457 448 intel_setup_mchbar(dev_priv); 458 449 intel_device_info_runtime_init(dev_priv); 459 450 460 - ret = intel_gt_init_mmio(to_gt(dev_priv)); 461 - if (ret) 462 - goto err_uncore; 451 + for_each_gt(gt, dev_priv, i) { 452 + ret = intel_gt_init_mmio(gt); 453 + if (ret) 454 + goto err_uncore; 455 + } 463 456 464 457 /* As early as possible, scrub existing GPU state before clobbering */ 465 458 sanitize_gpu(dev_priv); ··· 478 451 479 452 err_uncore: 480 453 intel_teardown_mchbar(dev_priv); 481 - intel_uncore_fini_mmio(&dev_priv->uncore); 482 - pci_dev_put(dev_priv->bridge_dev); 483 454 484 455 return ret; 485 456 } ··· 489 464 static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) 490 465 { 491 466 intel_teardown_mchbar(dev_priv); 492 - intel_uncore_fini_mmio(&dev_priv->uncore); 493 - pci_dev_put(dev_priv->bridge_dev); 494 467 } 495 468 496 469 /** ··· 738 715 static void i915_driver_register(struct drm_i915_private *dev_priv) 739 716 { 740 717 struct drm_device *dev = &dev_priv->drm; 718 + struct intel_gt *gt; 719 + unsigned int i; 741 720 742 721 i915_gem_driver_register(dev_priv); 743 722 i915_pmu_register(dev_priv); ··· 759 734 /* Depends on sysfs having been initialized */ 760 735 i915_perf_register(dev_priv); 761 736 762 - 
intel_gt_driver_register(to_gt(dev_priv)); 737 + for_each_gt(gt, dev_priv, i) 738 + intel_gt_driver_register(gt); 763 739 764 740 intel_display_driver_register(dev_priv); 765 741 ··· 779 753 */ 780 754 static void i915_driver_unregister(struct drm_i915_private *dev_priv) 781 755 { 756 + struct intel_gt *gt; 757 + unsigned int i; 758 + 782 759 i915_switcheroo_unregister(dev_priv); 783 760 784 761 intel_unregister_dsm_handler(); ··· 791 762 792 763 intel_display_driver_unregister(dev_priv); 793 764 794 - intel_gt_driver_unregister(to_gt(dev_priv)); 765 + for_each_gt(gt, dev_priv, i) 766 + intel_gt_driver_unregister(gt); 795 767 796 768 i915_perf_unregister(dev_priv); 797 769 i915_pmu_unregister(dev_priv); ··· 814 784 { 815 785 if (drm_debug_enabled(DRM_UT_DRIVER)) { 816 786 struct drm_printer p = drm_debug_printer("i915 device info:"); 787 + struct intel_gt *gt; 788 + unsigned int i; 817 789 818 790 drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n", 819 791 INTEL_DEVID(dev_priv), ··· 828 796 intel_device_info_print(INTEL_INFO(dev_priv), 829 797 RUNTIME_INFO(dev_priv), &p); 830 798 i915_print_iommu_status(dev_priv, &p); 831 - intel_gt_info_print(&to_gt(dev_priv)->info, &p); 799 + for_each_gt(gt, dev_priv, i) 800 + intel_gt_info_print(&gt->info, &p); 832 801 } 833 802 834 803 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) ··· 1244 1211 struct drm_i915_private *dev_priv = to_i915(dev); 1245 1212 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 1246 1213 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 1247 - int ret; 1214 + struct intel_gt *gt; 1215 + int ret, i; 1248 1216 1249 1217 disable_rpm_wakeref_asserts(rpm); 1250 1218 1251 1219 i915_gem_suspend_late(dev_priv); 1252 1220 1253 - intel_uncore_suspend(&dev_priv->uncore); 1221 + for_each_gt(gt, dev_priv, i) 1222 + intel_uncore_suspend(gt->uncore); 1254 1223 1255 1224 intel_power_domains_suspend(dev_priv, 1256 1225 get_suspend_mode(dev_priv, hibernation)); ··· 1384 1349 { 1385 1350 
struct drm_i915_private *dev_priv = to_i915(dev); 1386 1351 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 1387 - int ret; 1352 + struct intel_gt *gt; 1353 + int ret, i; 1388 1354 1389 1355 /* 1390 1356 * We have a resume ordering issue with the snd-hda driver also ··· 1439 1403 drm_err(&dev_priv->drm, 1440 1404 "Resume prepare failed: %d, continuing anyway\n", ret); 1441 1405 1442 - intel_uncore_resume_early(&dev_priv->uncore); 1443 - 1444 - intel_gt_check_and_clear_faults(to_gt(dev_priv)); 1406 + for_each_gt(gt, dev_priv, i) { 1407 + intel_uncore_resume_early(gt->uncore); 1408 + intel_gt_check_and_clear_faults(gt); 1409 + } 1445 1410 1446 1411 intel_display_power_resume_early(dev_priv); 1447 1412 ··· 1622 1585 { 1623 1586 struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 1624 1587 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 1625 - int ret; 1588 + struct intel_gt *gt; 1589 + int ret, i; 1626 1590 1627 1591 if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) 1628 1592 return -ENODEV; ··· 1638 1600 */ 1639 1601 i915_gem_runtime_suspend(dev_priv); 1640 1602 1641 - intel_gt_runtime_suspend(to_gt(dev_priv)); 1603 + for_each_gt(gt, dev_priv, i) 1604 + intel_gt_runtime_suspend(gt); 1642 1605 1643 1606 intel_runtime_pm_disable_interrupts(dev_priv); 1644 1607 1645 - intel_uncore_suspend(&dev_priv->uncore); 1608 + for_each_gt(gt, dev_priv, i) 1609 + intel_uncore_suspend(gt->uncore); 1646 1610 1647 1611 intel_display_power_suspend(dev_priv); 1648 1612 ··· 1708 1668 { 1709 1669 struct drm_i915_private *dev_priv = kdev_to_i915(kdev); 1710 1670 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 1711 - int ret; 1671 + struct intel_gt *gt; 1672 + int ret, i; 1712 1673 1713 1674 if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) 1714 1675 return -ENODEV; ··· 1729 1688 1730 1689 ret = vlv_resume_prepare(dev_priv, true); 1731 1690 1732 - intel_uncore_runtime_resume(&dev_priv->uncore); 1691 + for_each_gt(gt, dev_priv, i) 1692 + 
intel_uncore_runtime_resume(gt->uncore); 1733 1693 1734 1694 intel_runtime_pm_enable_interrupts(dev_priv); 1735 1695 ··· 1738 1696 * No point of rolling back things in case of an error, as the best 1739 1697 * we can do is to hope that things will still work (and disable RPM). 1740 1698 */ 1741 - intel_gt_runtime_resume(to_gt(dev_priv)); 1699 + for_each_gt(gt, dev_priv, i) 1700 + intel_gt_runtime_resume(gt); 1742 1701 1743 1702 /* 1744 1703 * On VLV/CHV display interrupts are part of the display
+5
drivers/gpu/drm/i915/i915_drv.h
··· 497 497 498 498 struct kobject *sysfs_gt; 499 499 500 + /* Quick lookup of media GT (current platforms only have one) */ 501 + struct intel_gt *media_gt; 502 + 500 503 struct { 501 504 struct i915_gem_contexts { 502 505 spinlock_t lock; /* locks list */ ··· 1063 1060 1064 1061 #define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i)) 1065 1062 #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) 1063 + 1064 + #define HAS_EXTRA_GT_LIST(dev_priv) (INTEL_INFO(dev_priv)->extra_gt_list) 1066 1065 1067 1066 /* 1068 1067 * Platform has the dedicated compression control state for each lmem surfaces
+5 -1
drivers/gpu/drm/i915/i915_gem.c
··· 842 842 &to_gt(i915)->ggtt->userfault_list, userfault_link) 843 843 __i915_gem_object_release_mmap_gtt(obj); 844 844 845 + list_for_each_entry_safe(obj, on, 846 + &to_gt(i915)->lmem_userfault_list, userfault_link) 847 + i915_gem_object_runtime_pm_release_mmap_offset(obj); 848 + 845 849 /* 846 850 * The fence will be lost when the device powers down. If any were 847 851 * in use by hardware (i.e. they are pinned), we should not be powering ··· 1176 1172 1177 1173 void i915_gem_driver_remove(struct drm_i915_private *dev_priv) 1178 1174 { 1179 - intel_wakeref_auto_fini(&to_gt(dev_priv)->ggtt->userfault_wakeref); 1175 + intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref); 1180 1176 1181 1177 i915_gem_suspend_late(dev_priv); 1182 1178 intel_gt_driver_remove(to_gt(dev_priv));
+2 -2
drivers/gpu/drm/i915/i915_irq.c
··· 1104 1104 1105 1105 out: 1106 1106 drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice); 1107 - spin_lock_irq(&gt->irq_lock); 1107 + spin_lock_irq(gt->irq_lock); 1108 1108 gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv)); 1109 - spin_unlock_irq(&gt->irq_lock); 1109 + spin_unlock_irq(gt->irq_lock); 1110 1110 1111 1111 mutex_unlock(&dev_priv->drm.struct_mutex); 1112 1112 }
+14
drivers/gpu/drm/i915/i915_pci.c
··· 26 26 #include <drm/drm_drv.h> 27 27 #include <drm/i915_pciids.h> 28 28 29 + #include "gt/intel_gt_regs.h" 30 + #include "gt/intel_sa_media.h" 31 + 29 32 #include "i915_driver.h" 30 33 #include "i915_drv.h" 31 34 #include "i915_pci.h" ··· 1118 1115 .display.has_cdclk_crawl = 1, \ 1119 1116 .__runtime.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) 1120 1117 1118 + static const struct intel_gt_definition xelpmp_extra_gt[] = { 1119 + { 1120 + .type = GT_MEDIA, 1121 + .name = "Standalone Media GT", 1122 + .gsi_offset = MTL_MEDIA_GSI_BASE, 1123 + .engine_mask = BIT(VECS0) | BIT(VCS0) | BIT(VCS2), 1124 + }, 1125 + {} 1126 + }; 1127 + 1121 1128 __maybe_unused 1122 1129 static const struct intel_device_info mtl_info = { 1123 1130 XE_HP_FEATURES, ··· 1141 1128 .media.ver = 13, 1142 1129 PLATFORM(INTEL_METEORLAKE), 1143 1130 .display.has_modular_fia = 1, 1131 + .extra_gt_list = xelpmp_extra_gt, 1144 1132 .has_flat_ccs = 0, 1145 1133 .has_snoop = 1, 1146 1134 .__runtime.memory_regions = REGION_SMEM | REGION_STOLEN_LMEM,
+8 -8
drivers/gpu/drm/i915/i915_reg.h
··· 1857 1857 1858 1858 #define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8) 1859 1859 #define GT0_PERF_LIMIT_REASONS_MASK 0xde3 1860 - #define PROCHOT_MASK REG_BIT(1) 1861 - #define THERMAL_LIMIT_MASK REG_BIT(2) 1862 - #define RATL_MASK REG_BIT(6) 1863 - #define VR_THERMALERT_MASK REG_BIT(7) 1864 - #define VR_TDC_MASK REG_BIT(8) 1865 - #define POWER_LIMIT_4_MASK REG_BIT(9) 1866 - #define POWER_LIMIT_1_MASK REG_BIT(11) 1867 - #define POWER_LIMIT_2_MASK REG_BIT(12) 1860 + #define PROCHOT_MASK REG_BIT(0) 1861 + #define THERMAL_LIMIT_MASK REG_BIT(1) 1862 + #define RATL_MASK REG_BIT(5) 1863 + #define VR_THERMALERT_MASK REG_BIT(6) 1864 + #define VR_TDC_MASK REG_BIT(7) 1865 + #define POWER_LIMIT_4_MASK REG_BIT(8) 1866 + #define POWER_LIMIT_1_MASK REG_BIT(10) 1867 + #define POWER_LIMIT_2_MASK REG_BIT(11) 1868 1868 1869 1869 #define CHV_CLK_CTL1 _MMIO(0x101100) 1870 1870 #define VLV_CLK_CTL2 _MMIO(0x101104)
+3
drivers/gpu/drm/i915/intel_device_info.h
··· 37 37 38 38 struct drm_printer; 39 39 struct drm_i915_private; 40 + struct intel_gt_definition; 40 41 41 42 /* Keep in gen based order, and chronological order within a gen */ 42 43 enum intel_platform { ··· 252 251 enum intel_platform platform; 253 252 254 253 unsigned int dma_mask_size; /* available DMA address bits */ 254 + 255 + const struct intel_gt_definition *extra_gt_list; 255 256 256 257 u8 gt; /* GT number, 0 if undefined */ 257 258
+2 -3
drivers/gpu/drm/i915/intel_pm.c
··· 7614 7614 7615 7615 static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) 7616 7616 { 7617 - /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */ 7618 - if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) || 7619 - IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv)) 7617 + /* Wa_1409120013 */ 7618 + if (DISPLAY_VER(dev_priv) == 12) 7620 7619 intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), 7621 7620 DPFC_CHICKEN_COMP_DUMMY_PIXEL); 7622 7621
+59 -35
drivers/gpu/drm/i915/intel_uncore.c
··· 21 21 * IN THE SOFTWARE. 22 22 */ 23 23 24 + #include <drm/drm_managed.h> 24 25 #include <linux/pm_runtime.h> 25 26 26 27 #include "gt/intel_engine_regs.h" ··· 45 44 } 46 45 47 46 void 48 - intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug) 47 + intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915) 49 48 { 50 - spin_lock_init(&mmio_debug->lock); 51 - mmio_debug->unclaimed_mmio_check = 1; 49 + spin_lock_init(&i915->mmio_debug.lock); 50 + i915->mmio_debug.unclaimed_mmio_check = 1; 51 + 52 + i915->uncore.debug = &i915->mmio_debug; 52 53 } 53 54 54 - static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug) 55 + static void mmio_debug_suspend(struct intel_uncore *uncore) 55 56 { 56 - lockdep_assert_held(&mmio_debug->lock); 57 + if (!uncore->debug) 58 + return; 59 + 60 + spin_lock(&uncore->debug->lock); 57 61 58 62 /* Save and disable mmio debugging for the user bypass */ 59 - if (!mmio_debug->suspend_count++) { 60 - mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check; 61 - mmio_debug->unclaimed_mmio_check = 0; 63 + if (!uncore->debug->suspend_count++) { 64 + uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check; 65 + uncore->debug->unclaimed_mmio_check = 0; 62 66 } 67 + 68 + spin_unlock(&uncore->debug->lock); 63 69 } 64 70 65 - static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug) 66 - { 67 - lockdep_assert_held(&mmio_debug->lock); 71 + static bool check_for_unclaimed_mmio(struct intel_uncore *uncore); 68 72 69 - if (!--mmio_debug->suspend_count) 70 - mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check; 73 + static void mmio_debug_resume(struct intel_uncore *uncore) 74 + { 75 + if (!uncore->debug) 76 + return; 77 + 78 + spin_lock(&uncore->debug->lock); 79 + 80 + if (!--uncore->debug->suspend_count) 81 + uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check; 82 + 83 + if (check_for_unclaimed_mmio(uncore)) 84 + 
drm_info(&uncore->i915->drm, 85 + "Invalid mmio detected during user access\n"); 86 + 87 + spin_unlock(&uncore->debug->lock); 71 88 } 72 89 73 90 static const char * const forcewake_domain_names[] = { ··· 696 677 spin_lock_irq(&uncore->lock); 697 678 if (!uncore->user_forcewake_count++) { 698 679 intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL); 699 - spin_lock(&uncore->debug->lock); 700 - mmio_debug_suspend(uncore->debug); 701 - spin_unlock(&uncore->debug->lock); 680 + mmio_debug_suspend(uncore); 702 681 } 703 682 spin_unlock_irq(&uncore->lock); 704 683 } ··· 712 695 { 713 696 spin_lock_irq(&uncore->lock); 714 697 if (!--uncore->user_forcewake_count) { 715 - spin_lock(&uncore->debug->lock); 716 - mmio_debug_resume(uncore->debug); 717 - 718 - if (check_for_unclaimed_mmio(uncore)) 719 - drm_info(&uncore->i915->drm, 720 - "Invalid mmio detected during user access\n"); 721 - spin_unlock(&uncore->debug->lock); 722 - 698 + mmio_debug_resume(uncore); 723 699 intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL); 724 700 } 725 701 spin_unlock_irq(&uncore->lock); ··· 928 918 { 929 919 const struct intel_forcewake_range *entry; 930 920 921 + if (IS_GSI_REG(offset)) 922 + offset += uncore->gsi_offset; 923 + 931 924 entry = BSEARCH(offset, 932 925 uncore->fw_domains_table, 933 926 uncore->fw_domains_table_entries, ··· 1145 1132 { 1146 1133 if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table)) 1147 1134 return false; 1135 + 1136 + if (IS_GSI_REG(offset)) 1137 + offset += uncore->gsi_offset; 1148 1138 1149 1139 return BSEARCH(offset, 1150 1140 uncore->shadowed_reg_table, ··· 1720 1704 const bool read, 1721 1705 const bool before) 1722 1706 { 1723 - if (likely(!uncore->i915->params.mmio_debug)) 1707 + if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug) 1724 1708 return; 1725 1709 1726 1710 /* interrupts are disabled and re-enabled around uncore->lock usage */ ··· 2001 1985 2002 1986 d->uncore = uncore; 2003 1987 d->wake_count = 0; 2004 - 
d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set); 2005 - d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack); 1988 + d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset; 1989 + d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset; 2006 1990 2007 1991 d->id = domain_id; 2008 1992 ··· 2239 2223 return NOTIFY_OK; 2240 2224 } 2241 2225 2226 + static void uncore_unmap_mmio(struct drm_device *drm, void *regs) 2227 + { 2228 + iounmap(regs); 2229 + } 2230 + 2242 2231 int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr) 2243 2232 { 2244 2233 struct drm_i915_private *i915 = uncore->i915; ··· 2272 2251 return -EIO; 2273 2252 } 2274 2253 2275 - return 0; 2276 - } 2277 - 2278 - void intel_uncore_cleanup_mmio(struct intel_uncore *uncore) 2279 - { 2280 - iounmap(uncore->regs); 2254 + return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs); 2281 2255 } 2282 2256 2283 2257 void intel_uncore_init_early(struct intel_uncore *uncore, ··· 2282 2266 uncore->i915 = gt->i915; 2283 2267 uncore->gt = gt; 2284 2268 uncore->rpm = &gt->i915->runtime_pm; 2285 - uncore->debug = &gt->i915->mmio_debug; 2286 2269 } 2287 2270 2288 2271 static void uncore_raw_init(struct intel_uncore *uncore) ··· 2461 2446 } 2462 2447 } 2463 2448 2464 - void intel_uncore_fini_mmio(struct intel_uncore *uncore) 2449 + /* Called via drm-managed action */ 2450 + void intel_uncore_fini_mmio(struct drm_device *dev, void *data) 2465 2451 { 2452 + struct intel_uncore *uncore = data; 2453 + 2466 2454 if (intel_uncore_has_forcewake(uncore)) { 2467 2455 iosf_mbi_punit_acquire(); 2468 2456 iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( ··· 2595 2577 { 2596 2578 bool ret; 2597 2579 2580 + if (!uncore->debug) 2581 + return false; 2582 + 2598 2583 spin_lock_irq(&uncore->debug->lock); 2599 2584 ret = check_for_unclaimed_mmio(uncore); 2600 2585 spin_unlock_irq(&uncore->debug->lock); ··· 2609 2588 
intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore) 2610 2589 { 2611 2590 bool ret = false; 2591 + 2592 + if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug)) 2593 + return false; 2612 2594 2613 2595 spin_lock_irq(&uncore->debug->lock); 2614 2596
+35 -5
drivers/gpu/drm/i915/intel_uncore.h
··· 33 33 34 34 #include "i915_reg_defs.h" 35 35 36 + struct drm_device; 36 37 struct drm_i915_private; 37 38 struct intel_runtime_pm; 38 39 struct intel_uncore; ··· 136 135 137 136 spinlock_t lock; /** lock is also taken in irq contexts. */ 138 137 138 + /* 139 + * Do we need to apply an additional offset to reach the beginning 140 + * of the basic non-engine GT registers (referred to as "GSI" on 141 + * newer platforms, or "GT block" on older platforms)? If so, we'll 142 + * track that here and apply it transparently to registers in the 143 + * appropriate range to maintain compatibility with our existing 144 + * register definitions and GT code. 145 + */ 146 + u32 gsi_offset; 147 + 139 148 unsigned int flags; 140 149 #define UNCORE_HAS_FORCEWAKE BIT(0) 141 150 #define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1) ··· 221 210 return uncore->flags & UNCORE_HAS_FIFO; 222 211 } 223 212 224 - void 225 - intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug); 213 + void intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915); 226 214 void intel_uncore_init_early(struct intel_uncore *uncore, 227 215 struct intel_gt *gt); 228 216 int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr); ··· 231 221 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); 232 222 bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore); 233 223 void intel_uncore_cleanup_mmio(struct intel_uncore *uncore); 234 - void intel_uncore_fini_mmio(struct intel_uncore *uncore); 224 + void intel_uncore_fini_mmio(struct drm_device *dev, void *data); 235 225 void intel_uncore_suspend(struct intel_uncore *uncore); 236 226 void intel_uncore_resume_early(struct intel_uncore *uncore); 237 227 void intel_uncore_runtime_resume(struct intel_uncore *uncore); ··· 304 294 2, timeout_ms, NULL); 305 295 } 306 296 297 + #define IS_GSI_REG(reg) ((reg) < 0x40000) 298 + 307 299 /* register access functions */ 308 300 #define __raw_read(x__, s__) 
\ 309 301 static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \ 310 302 i915_reg_t reg) \ 311 303 { \ 312 - return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \ 304 + u32 offset = i915_mmio_reg_offset(reg); \ 305 + if (IS_GSI_REG(offset)) \ 306 + offset += uncore->gsi_offset; \ 307 + return read##s__(uncore->regs + offset); \ 313 308 } 314 309 315 310 #define __raw_write(x__, s__) \ 316 311 static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \ 317 312 i915_reg_t reg, u##x__ val) \ 318 313 { \ 319 - write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \ 314 + u32 offset = i915_mmio_reg_offset(reg); \ 315 + if (IS_GSI_REG(offset)) \ 316 + offset += uncore->gsi_offset; \ 317 + write##s__(val, uncore->regs + offset); \ 320 318 } 321 319 __raw_read(8, b) 322 320 __raw_read(16, w) ··· 465 447 return (reg_val & mask) != expected_val ? -EINVAL : 0; 466 448 } 467 449 450 + /* 451 + * The raw_reg_{read,write} macros are intended as a micro-optimization for 452 + * interrupt handlers so that the pointer indirection on uncore->regs can 453 + * be computed once (and presumably cached in a register) instead of generating 454 + * extra load instructions for each MMIO access. 455 + * 456 + * Given that these macros are only intended for non-GSI interrupt registers 457 + * (and the goal is to avoid extra instructions generated by the compiler), 458 + * these macros do not account for uncore->gsi_offset. Any caller that needs 459 + * to use these macros on a GSI register is responsible for adding the 460 + * appropriate GSI offset to the 'base' parameter. 461 + */ 468 462 #define raw_reg_read(base, reg) \ 469 463 readl(base + i915_mmio_reg_offset(reg)) 470 464 #define raw_reg_write(base, reg, value) \
+2 -2
drivers/gpu/drm/i915/pxp/intel_pxp.c
··· 169 169 * We want to get the same effect as if we received a termination 170 170 * interrupt, so just pretend that we did. 171 171 */ 172 - spin_lock_irq(&gt->irq_lock); 172 + spin_lock_irq(gt->irq_lock); 173 173 intel_pxp_mark_termination_in_progress(pxp); 174 174 pxp->session_events |= PXP_TERMINATION_REQUEST; 175 175 queue_work(system_unbound_wq, &pxp->session_work); 176 - spin_unlock_irq(&gt->irq_lock); 176 + spin_unlock_irq(gt->irq_lock); 177 177 } 178 178 179 179 static bool pxp_component_bound(struct intel_pxp *pxp)
+2 -2
drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
··· 47 47 return -ENODEV; 48 48 49 49 /* simulate a termination interrupt */ 50 - spin_lock_irq(&gt->irq_lock); 50 + spin_lock_irq(gt->irq_lock); 51 51 intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT); 52 - spin_unlock_irq(&gt->irq_lock); 52 + spin_unlock_irq(gt->irq_lock); 53 53 54 54 if (!wait_for_completion_timeout(&pxp->termination, 55 55 msecs_to_jiffies(100)))
+7 -7
drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
··· 25 25 if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp))) 26 26 return; 27 27 28 - lockdep_assert_held(&gt->irq_lock); 28 + lockdep_assert_held(gt->irq_lock); 29 29 30 30 if (unlikely(!iir)) 31 31 return; ··· 55 55 56 56 static inline void pxp_irq_reset(struct intel_gt *gt) 57 57 { 58 - spin_lock_irq(&gt->irq_lock); 58 + spin_lock_irq(gt->irq_lock); 59 59 gen11_gt_reset_one_iir(gt, 0, GEN11_KCR); 60 - spin_unlock_irq(&gt->irq_lock); 60 + spin_unlock_irq(gt->irq_lock); 61 61 } 62 62 63 63 void intel_pxp_irq_enable(struct intel_pxp *pxp) 64 64 { 65 65 struct intel_gt *gt = pxp_to_gt(pxp); 66 66 67 - spin_lock_irq(&gt->irq_lock); 67 + spin_lock_irq(gt->irq_lock); 68 68 69 69 if (!pxp->irq_enabled) 70 70 WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR)); ··· 72 72 __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS); 73 73 pxp->irq_enabled = true; 74 74 75 - spin_unlock_irq(&gt->irq_lock); 75 + spin_unlock_irq(gt->irq_lock); 76 76 } 77 77 78 78 void intel_pxp_irq_disable(struct intel_pxp *pxp) ··· 88 88 */ 89 89 GEM_WARN_ON(intel_pxp_is_active(pxp)); 90 90 91 - spin_lock_irq(&gt->irq_lock); 91 + spin_lock_irq(gt->irq_lock); 92 92 93 93 pxp->irq_enabled = false; 94 94 __pxp_set_interrupts(gt, 0); 95 95 96 - spin_unlock_irq(&gt->irq_lock); 96 + spin_unlock_irq(gt->irq_lock); 97 97 intel_synchronize_irq(gt->i915); 98 98 99 99 pxp_irq_reset(gt);
+2 -2
drivers/gpu/drm/i915/pxp/intel_pxp_session.c
··· 144 144 intel_wakeref_t wakeref; 145 145 u32 events = 0; 146 146 147 - spin_lock_irq(&gt->irq_lock); 147 + spin_lock_irq(gt->irq_lock); 148 148 events = fetch_and_zero(&pxp->session_events); 149 - spin_unlock_irq(&gt->irq_lock); 149 + spin_unlock_irq(gt->irq_lock); 150 150 151 151 if (!events) 152 152 return;
+1
drivers/gpu/drm/i915/selftests/mock_gem_device.c
··· 115 115 static void mock_gt_probe(struct drm_i915_private *i915) 116 116 { 117 117 i915->gt[0] = &i915->gt0; 118 + i915->gt[0]->name = "Mock GT"; 118 119 } 119 120 120 121 struct drm_i915_private *mock_gem_device(void)
+71 -35
drivers/misc/mei/bus-fixup.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright (c) 2013-2020, Intel Corporation. All rights reserved. 3 + * Copyright (c) 2013-2022, Intel Corporation. All rights reserved. 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 ··· 15 15 16 16 #include "mei_dev.h" 17 17 #include "client.h" 18 + #include "mkhi.h" 18 19 19 20 #define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \ 20 21 0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06) ··· 90 89 u8 reserved2; 91 90 } __packed; 92 91 93 - #define MKHI_FEATURE_PTT 0x10 94 - 95 - struct mkhi_rule_id { 96 - __le16 rule_type; 97 - u8 feature_id; 98 - u8 reserved; 99 - } __packed; 100 - 101 - struct mkhi_fwcaps { 102 - struct mkhi_rule_id id; 103 - u8 len; 104 - u8 data[]; 105 - } __packed; 106 - 107 92 struct mkhi_fw_ver_block { 108 93 u16 minor; 109 94 u8 major; ··· 100 113 101 114 struct mkhi_fw_ver { 102 115 struct mkhi_fw_ver_block ver[MEI_MAX_FW_VER_BLOCKS]; 103 - } __packed; 104 - 105 - #define MKHI_FWCAPS_GROUP_ID 0x3 106 - #define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6 107 - #define MKHI_GEN_GROUP_ID 0xFF 108 - #define MKHI_GEN_GET_FW_VERSION_CMD 0x2 109 - struct mkhi_msg_hdr { 110 - u8 group_id; 111 - u8 command; 112 - u8 reserved; 113 - u8 result; 114 - } __packed; 115 - 116 - struct mkhi_msg { 117 - struct mkhi_msg_hdr hdr; 118 - u8 data[]; 119 116 } __packed; 120 117 121 118 #define MKHI_OSVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \ ··· 135 164 sizeof(struct mkhi_fw_ver)) 136 165 #define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \ 137 166 sizeof(struct mkhi_fw_ver_block) * (__num)) 138 - #define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */ 139 167 static int mei_fwver(struct mei_cl_device *cldev) 140 168 { 141 169 char buf[MKHI_FWVER_BUF_LEN]; ··· 157 187 158 188 ret = 0; 159 189 bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), NULL, 0, 160 - MKHI_RCV_TIMEOUT); 190 + cldev->bus->timeouts.mkhi_recv); 161 191 if (bytes_recv < 0 || 
(size_t)bytes_recv < MKHI_FWVER_LEN(1)) { 162 192 /* 163 193 * Should be at least one version block, ··· 186 216 } 187 217 188 218 return ret; 219 + } 220 + 221 + static int mei_gfx_memory_ready(struct mei_cl_device *cldev) 222 + { 223 + struct mkhi_gfx_mem_ready req = {0}; 224 + unsigned int mode = MEI_CL_IO_TX_INTERNAL; 225 + 226 + req.hdr.group_id = MKHI_GROUP_ID_GFX; 227 + req.hdr.command = MKHI_GFX_MEMORY_READY_CMD_REQ; 228 + req.flags = MKHI_GFX_MEM_READY_PXP_ALLOWED; 229 + 230 + dev_dbg(&cldev->dev, "Sending memory ready command\n"); 231 + return __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, mode); 189 232 } 190 233 191 234 static void mei_mkhi_fix(struct mei_cl_device *cldev) ··· 247 264 dev_err(&cldev->dev, "FW version command failed %d\n", ret); 248 265 mei_cldev_disable(cldev); 249 266 } 267 + 268 + static void mei_gsc_mkhi_fix_ver(struct mei_cl_device *cldev) 269 + { 270 + int ret; 271 + 272 + /* No need to enable the client if nothing is needed from it */ 273 + if (!cldev->bus->fw_f_fw_ver_supported && 274 + cldev->bus->pxp_mode != MEI_DEV_PXP_INIT) 275 + return; 276 + 277 + ret = mei_cldev_enable(cldev); 278 + if (ret) 279 + return; 280 + 281 + if (cldev->bus->pxp_mode == MEI_DEV_PXP_INIT) { 282 + ret = mei_gfx_memory_ready(cldev); 283 + if (ret < 0) 284 + dev_err(&cldev->dev, "memory ready command failed %d\n", ret); 285 + else 286 + dev_dbg(&cldev->dev, "memory ready command sent\n"); 287 + /* we go to reset after that */ 288 + cldev->bus->pxp_mode = MEI_DEV_PXP_SETUP; 289 + goto out; 290 + } 291 + 292 + ret = mei_fwver(cldev); 293 + if (ret < 0) 294 + dev_err(&cldev->dev, "FW version command failed %d\n", 295 + ret); 296 + out: 297 + mei_cldev_disable(cldev); 298 + } 299 + 250 300 /** 251 301 * mei_wd - wd client on the bus, change protocol version 252 302 * as the API has changed. 
··· 519 503 cldev->do_match = 1; 520 504 } 521 505 506 + /** 507 + * pxp_is_ready - enable bus client if pxp is ready 508 + * 509 + * @cldev: me clients device 510 + */ 511 + static void pxp_is_ready(struct mei_cl_device *cldev) 512 + { 513 + struct mei_device *bus = cldev->bus; 514 + 515 + switch (bus->pxp_mode) { 516 + case MEI_DEV_PXP_READY: 517 + case MEI_DEV_PXP_DEFAULT: 518 + cldev->do_match = 1; 519 + break; 520 + default: 521 + cldev->do_match = 0; 522 + break; 523 + } 524 + } 525 + 522 526 #define MEI_FIXUP(_uuid, _hook) { _uuid, _hook } 523 527 524 528 static struct mei_fixup { ··· 552 516 MEI_FIXUP(MEI_UUID_WD, mei_wd), 553 517 MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix), 554 518 MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver), 555 - MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_ver), 519 + MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_fix_ver), 556 520 MEI_FIXUP(MEI_UUID_HDCP, whitelist), 557 521 MEI_FIXUP(MEI_UUID_ANY, vt_support), 558 - MEI_FIXUP(MEI_UUID_PAVP, whitelist), 522 + MEI_FIXUP(MEI_UUID_PAVP, pxp_is_ready), 559 523 }; 560 524 561 525 /**
+8 -8
drivers/misc/mei/client.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. 3 + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 ··· 870 870 } 871 871 872 872 list_move_tail(&cb->list, &dev->ctrl_rd_list); 873 - cl->timer_count = MEI_CONNECT_TIMEOUT; 873 + cl->timer_count = dev->timeouts.connect; 874 874 mei_schedule_stall_timer(dev); 875 875 876 876 return 0; ··· 945 945 wait_event_timeout(cl->wait, 946 946 cl->state == MEI_FILE_DISCONNECT_REPLY || 947 947 cl->state == MEI_FILE_DISCONNECTED, 948 - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 948 + dev->timeouts.cl_connect); 949 949 mutex_lock(&dev->device_lock); 950 950 951 951 rets = cl->status; ··· 1065 1065 } 1066 1066 1067 1067 list_move_tail(&cb->list, &dev->ctrl_rd_list); 1068 - cl->timer_count = MEI_CONNECT_TIMEOUT; 1068 + cl->timer_count = dev->timeouts.connect; 1069 1069 mei_schedule_stall_timer(dev); 1070 1070 return 0; 1071 1071 } ··· 1164 1164 cl->state == MEI_FILE_DISCONNECTED || 1165 1165 cl->state == MEI_FILE_DISCONNECT_REQUIRED || 1166 1166 cl->state == MEI_FILE_DISCONNECT_REPLY), 1167 - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 1167 + dev->timeouts.cl_connect); 1168 1168 mutex_lock(&dev->device_lock); 1169 1169 1170 1170 if (!mei_cl_is_connected(cl)) { ··· 1562 1562 cl->notify_en == request || 1563 1563 cl->status || 1564 1564 !mei_cl_is_connected(cl), 1565 - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 1565 + dev->timeouts.cl_connect); 1566 1566 mutex_lock(&dev->device_lock); 1567 1567 1568 1568 if (cl->notify_en != request && !cl->status) ··· 2336 2336 mutex_unlock(&dev->device_lock); 2337 2337 wait_event_timeout(cl->wait, 2338 2338 cl->dma_mapped || cl->status, 2339 - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 2339 + dev->timeouts.cl_connect); 2340 2340 mutex_lock(&dev->device_lock); 2341 2341 2342 2342 if (!cl->dma_mapped && !cl->status) ··· 2415 
2415 mutex_unlock(&dev->device_lock); 2416 2416 wait_event_timeout(cl->wait, 2417 2417 !cl->dma_mapped || cl->status, 2418 - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 2418 + dev->timeouts.cl_connect); 2419 2419 mutex_lock(&dev->device_lock); 2420 2420 2421 2421 if (cl->dma_mapped && !cl->status)
+18 -1
drivers/misc/mei/debugfs.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright (c) 2012-2016, Intel Corporation. All rights reserved 3 + * Copyright (c) 2012-2022, Intel Corporation. All rights reserved 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 ··· 86 86 } 87 87 DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_active); 88 88 89 + static const char *mei_dev_pxp_mode_str(enum mei_dev_pxp_mode state) 90 + { 91 + #define MEI_PXP_MODE(state) case MEI_DEV_PXP_##state: return #state 92 + switch (state) { 93 + MEI_PXP_MODE(DEFAULT); 94 + MEI_PXP_MODE(INIT); 95 + MEI_PXP_MODE(SETUP); 96 + MEI_PXP_MODE(READY); 97 + default: 98 + return "unknown"; 99 + } 100 + #undef MEI_PXP_MODE 101 + } 102 + 89 103 static int mei_dbgfs_devstate_show(struct seq_file *m, void *unused) 90 104 { 91 105 struct mei_device *dev = m->private; ··· 126 112 seq_printf(m, "pg: %s, %s\n", 127 113 mei_pg_is_enabled(dev) ? "ENABLED" : "DISABLED", 128 114 mei_pg_state_str(mei_pg_state(dev))); 115 + 116 + seq_printf(m, "pxp: %s\n", mei_dev_pxp_mode_str(dev->pxp_mode)); 117 + 129 118 return 0; 130 119 } 131 120 DEFINE_SHOW_ATTRIBUTE(mei_dbgfs_devstate);
+67 -10
drivers/misc/mei/gsc-me.c
··· 13 13 #include <linux/ktime.h> 14 14 #include <linux/delay.h> 15 15 #include <linux/pm_runtime.h> 16 + #include <linux/kthread.h> 16 17 17 18 #include "mei_dev.h" 18 19 #include "hw-me.h" ··· 32 31 return 0; 33 32 } 34 33 34 + static void mei_gsc_set_ext_op_mem(const struct mei_me_hw *hw, struct resource *mem) 35 + { 36 + u32 low = lower_32_bits(mem->start); 37 + u32 hi = upper_32_bits(mem->start); 38 + u32 limit = (resource_size(mem) / SZ_4K) | GSC_EXT_OP_MEM_VALID; 39 + 40 + iowrite32(low, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG); 41 + iowrite32(hi, hw->mem_addr + H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG); 42 + iowrite32(limit, hw->mem_addr + H_GSC_EXT_OP_MEM_LIMIT_REG); 43 + } 44 + 35 45 static int mei_gsc_probe(struct auxiliary_device *aux_dev, 36 46 const struct auxiliary_device_id *aux_dev_id) 37 47 { ··· 59 47 60 48 device = &aux_dev->dev; 61 49 62 - dev = mei_me_dev_init(device, cfg); 50 + dev = mei_me_dev_init(device, cfg, adev->slow_firmware); 63 51 if (!dev) { 64 52 ret = -ENOMEM; 65 53 goto err; ··· 78 66 79 67 dev_set_drvdata(device, dev); 80 68 81 - ret = devm_request_threaded_irq(device, hw->irq, 82 - mei_me_irq_quick_handler, 83 - mei_me_irq_thread_handler, 84 - IRQF_ONESHOT, KBUILD_MODNAME, dev); 85 - if (ret) { 86 - dev_err(device, "irq register failed %d\n", ret); 87 - goto err; 69 + if (adev->ext_op_mem.start) { 70 + mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem); 71 + dev->pxp_mode = MEI_DEV_PXP_INIT; 72 + } 73 + 74 + /* use polling */ 75 + if (mei_me_hw_use_polling(hw)) { 76 + mei_disable_interrupts(dev); 77 + mei_clear_interrupts(dev); 78 + init_waitqueue_head(&hw->wait_active); 79 + hw->is_active = true; /* start in active mode for initialization */ 80 + hw->polling_thread = kthread_run(mei_me_polling_thread, dev, 81 + "kmegscirqd/%s", dev_name(device)); 82 + if (IS_ERR(hw->polling_thread)) { 83 + ret = PTR_ERR(hw->polling_thread); 84 + dev_err(device, "unable to create kernel thread: %d\n", ret); 85 + goto err; 86 + } 87 + } else { 88 
+ ret = devm_request_threaded_irq(device, hw->irq, 89 + mei_me_irq_quick_handler, 90 + mei_me_irq_thread_handler, 91 + IRQF_ONESHOT, KBUILD_MODNAME, dev); 92 + if (ret) { 93 + dev_err(device, "irq register failed %d\n", ret); 94 + goto err; 95 + } 88 96 } 89 97 90 98 pm_runtime_get_noresume(device); ··· 130 98 131 99 register_err: 132 100 mei_stop(dev); 133 - devm_free_irq(device, hw->irq, dev); 101 + if (!mei_me_hw_use_polling(hw)) 102 + devm_free_irq(device, hw->irq, dev); 134 103 135 104 err: 136 105 dev_err(device, "probe failed: %d\n", ret); ··· 152 119 153 120 mei_stop(dev); 154 121 122 + hw = to_me_hw(dev); 123 + if (mei_me_hw_use_polling(hw)) 124 + kthread_stop(hw->polling_thread); 125 + 155 126 mei_deregister(dev); 156 127 157 128 pm_runtime_disable(&aux_dev->dev); 158 129 159 130 mei_disable_interrupts(dev); 160 - devm_free_irq(&aux_dev->dev, hw->irq, dev); 131 + if (!mei_me_hw_use_polling(hw)) 132 + devm_free_irq(&aux_dev->dev, hw->irq, dev); 161 133 } 162 134 163 135 static int __maybe_unused mei_gsc_pm_suspend(struct device *device) ··· 182 144 static int __maybe_unused mei_gsc_pm_resume(struct device *device) 183 145 { 184 146 struct mei_device *dev = dev_get_drvdata(device); 147 + struct auxiliary_device *aux_dev; 148 + struct mei_aux_device *adev; 185 149 int err; 150 + struct mei_me_hw *hw; 186 151 187 152 if (!dev) 188 153 return -ENODEV; 154 + 155 + hw = to_me_hw(dev); 156 + aux_dev = to_auxiliary_dev(device); 157 + adev = auxiliary_dev_to_mei_aux_dev(aux_dev); 158 + if (adev->ext_op_mem.start) { 159 + mei_gsc_set_ext_op_mem(hw, &adev->ext_op_mem); 160 + dev->pxp_mode = MEI_DEV_PXP_INIT; 161 + } 189 162 190 163 err = mei_restart(dev); 191 164 if (err) ··· 234 185 if (mei_write_is_idle(dev)) { 235 186 hw = to_me_hw(dev); 236 187 hw->pg_state = MEI_PG_ON; 188 + 189 + if (mei_me_hw_use_polling(hw)) 190 + hw->is_active = false; 237 191 ret = 0; 238 192 } else { 239 193 ret = -EAGAIN; ··· 260 208 261 209 hw = to_me_hw(dev); 262 210 hw->pg_state = 
MEI_PG_OFF; 211 + 212 + if (mei_me_hw_use_polling(hw)) { 213 + hw->is_active = true; 214 + wake_up(&hw->wait_active); 215 + } 263 216 264 217 mutex_unlock(&dev->device_lock); 265 218
+7 -7
drivers/misc/mei/hbm.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. 3 + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 #include <linux/export.h> ··· 232 232 mutex_unlock(&dev->device_lock); 233 233 ret = wait_event_timeout(dev->wait_hbm_start, 234 234 dev->hbm_state != MEI_HBM_STARTING, 235 - mei_secs_to_jiffies(MEI_HBM_TIMEOUT)); 235 + dev->timeouts.hbm); 236 236 mutex_lock(&dev->device_lock); 237 237 238 238 if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) { ··· 275 275 } 276 276 277 277 dev->hbm_state = MEI_HBM_STARTING; 278 - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; 278 + dev->init_clients_timer = dev->timeouts.client_init; 279 279 mei_schedule_stall_timer(dev); 280 280 return 0; 281 281 } ··· 316 316 } 317 317 318 318 dev->hbm_state = MEI_HBM_DR_SETUP; 319 - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; 319 + dev->init_clients_timer = dev->timeouts.client_init; 320 320 mei_schedule_stall_timer(dev); 321 321 return 0; 322 322 } ··· 351 351 } 352 352 353 353 dev->hbm_state = MEI_HBM_CAP_SETUP; 354 - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; 354 + dev->init_clients_timer = dev->timeouts.client_init; 355 355 mei_schedule_stall_timer(dev); 356 356 return 0; 357 357 } ··· 385 385 return ret; 386 386 } 387 387 dev->hbm_state = MEI_HBM_ENUM_CLIENTS; 388 - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; 388 + dev->init_clients_timer = dev->timeouts.client_init; 389 389 mei_schedule_stall_timer(dev); 390 390 return 0; 391 391 } ··· 751 751 return ret; 752 752 } 753 753 754 - dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT; 754 + dev->init_clients_timer = dev->timeouts.client_init; 755 755 mei_schedule_stall_timer(dev); 756 756 757 757 return 0;
+8 -1
drivers/misc/mei/hw-me-regs.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ 2 2 /* 3 - * Copyright (c) 2003-2019, Intel Corporation. All rights reserved. 3 + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 #ifndef _MEI_HW_MEI_REGS_H_ ··· 127 127 # define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060 128 128 #define PCI_CFG_HFS_4 0x64 129 129 #define PCI_CFG_HFS_5 0x68 130 + # define GSC_CFG_HFS_5_BOOT_TYPE_MSK 0x00000003 131 + # define GSC_CFG_HFS_5_BOOT_TYPE_PXP 3 130 132 #define PCI_CFG_HFS_6 0x6C 131 133 132 134 /* MEI registers */ ··· 144 142 #define H_HPG_CSR 0x10 145 143 /* H_D0I3C - D0I3 Control */ 146 144 #define H_D0I3C 0x800 145 + 146 + #define H_GSC_EXT_OP_MEM_BASE_ADDR_LO_REG 0x100 147 + #define H_GSC_EXT_OP_MEM_BASE_ADDR_HI_REG 0x104 148 + #define H_GSC_EXT_OP_MEM_LIMIT_REG 0x108 149 + #define GSC_EXT_OP_MEM_VALID BIT(31) 147 150 148 151 /* register bits of H_CSR (Host Control Status register) */ 149 152 /* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
+118 -20
drivers/misc/mei/hw-me.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. 3 + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 ··· 10 10 #include <linux/interrupt.h> 11 11 #include <linux/pm_runtime.h> 12 12 #include <linux/sizes.h> 13 + #include <linux/delay.h> 13 14 14 15 #include "mei_dev.h" 15 16 #include "hbm.h" ··· 328 327 */ 329 328 static void mei_me_intr_enable(struct mei_device *dev) 330 329 { 331 - u32 hcsr = mei_hcsr_read(dev); 330 + u32 hcsr; 332 331 333 - hcsr |= H_CSR_IE_MASK; 332 + if (mei_me_hw_use_polling(to_me_hw(dev))) 333 + return; 334 + 335 + hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK; 334 336 mei_hcsr_set(dev, hcsr); 335 337 } 336 338 ··· 357 353 static void mei_me_synchronize_irq(struct mei_device *dev) 358 354 { 359 355 struct mei_me_hw *hw = to_me_hw(dev); 356 + 357 + if (mei_me_hw_use_polling(hw)) 358 + return; 360 359 361 360 synchronize_irq(hw->irq); 362 361 } ··· 387 380 { 388 381 u32 hcsr = mei_hcsr_read(dev); 389 382 390 - hcsr |= H_CSR_IE_MASK | H_IG | H_RDY; 383 + if (!mei_me_hw_use_polling(to_me_hw(dev))) 384 + hcsr |= H_CSR_IE_MASK; 385 + 386 + hcsr |= H_IG | H_RDY; 391 387 mei_hcsr_set(dev, hcsr); 392 388 } 393 389 ··· 434 424 } 435 425 436 426 /** 427 + * mei_gsc_pxp_check - check for gsc firmware entering pxp mode 428 + * 429 + * @dev: the device structure 430 + */ 431 + static void mei_gsc_pxp_check(struct mei_device *dev) 432 + { 433 + struct mei_me_hw *hw = to_me_hw(dev); 434 + u32 fwsts5 = 0; 435 + 436 + if (dev->pxp_mode == MEI_DEV_PXP_DEFAULT) 437 + return; 438 + 439 + hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5); 440 + trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5); 441 + if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) { 442 + dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5); 443 + dev->pxp_mode = MEI_DEV_PXP_READY; 444 + } 
else { 445 + dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5); 446 + } 447 + } 448 + 449 + /** 437 450 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready 438 451 * or timeout is reached 439 452 * ··· 468 435 mutex_unlock(&dev->device_lock); 469 436 wait_event_timeout(dev->wait_hw_ready, 470 437 dev->recvd_hw_ready, 471 - mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT)); 438 + dev->timeouts.hw_ready); 472 439 mutex_lock(&dev->device_lock); 473 440 if (!dev->recvd_hw_ready) { 474 441 dev_err(dev->dev, "wait hw ready failed\n"); 475 442 return -ETIME; 476 443 } 444 + 445 + mei_gsc_pxp_check(dev); 477 446 478 447 mei_me_hw_reset_release(dev); 479 448 dev->recvd_hw_ready = false; ··· 732 697 static int mei_me_pg_legacy_enter_sync(struct mei_device *dev) 733 698 { 734 699 struct mei_me_hw *hw = to_me_hw(dev); 735 - unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); 736 700 int ret; 737 701 738 702 dev->pg_event = MEI_PG_EVENT_WAIT; ··· 742 708 743 709 mutex_unlock(&dev->device_lock); 744 710 wait_event_timeout(dev->wait_pg, 745 - dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); 711 + dev->pg_event == MEI_PG_EVENT_RECEIVED, 712 + dev->timeouts.pgi); 746 713 mutex_lock(&dev->device_lock); 747 714 748 715 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) { ··· 769 734 static int mei_me_pg_legacy_exit_sync(struct mei_device *dev) 770 735 { 771 736 struct mei_me_hw *hw = to_me_hw(dev); 772 - unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); 773 737 int ret; 774 738 775 739 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) ··· 780 746 781 747 mutex_unlock(&dev->device_lock); 782 748 wait_event_timeout(dev->wait_pg, 783 - dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout); 749 + dev->pg_event == MEI_PG_EVENT_RECEIVED, 750 + dev->timeouts.pgi); 784 751 mutex_lock(&dev->device_lock); 785 752 786 753 reply: ··· 797 762 798 763 mutex_unlock(&dev->device_lock); 799 764 wait_event_timeout(dev->wait_pg, 800 - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, 
timeout); 765 + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, 766 + dev->timeouts.pgi); 801 767 mutex_lock(&dev->device_lock); 802 768 803 769 if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED) ··· 913 877 static int mei_me_d0i3_enter_sync(struct mei_device *dev) 914 878 { 915 879 struct mei_me_hw *hw = to_me_hw(dev); 916 - unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); 917 - unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); 918 880 int ret; 919 881 u32 reg; 920 882 ··· 934 900 935 901 mutex_unlock(&dev->device_lock); 936 902 wait_event_timeout(dev->wait_pg, 937 - dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout); 903 + dev->pg_event == MEI_PG_EVENT_RECEIVED, 904 + dev->timeouts.pgi); 938 905 mutex_lock(&dev->device_lock); 939 906 940 907 if (dev->pg_event != MEI_PG_EVENT_RECEIVED) { ··· 955 920 956 921 mutex_unlock(&dev->device_lock); 957 922 wait_event_timeout(dev->wait_pg, 958 - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout); 923 + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, 924 + dev->timeouts.d0i3); 959 925 mutex_lock(&dev->device_lock); 960 926 961 927 if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { ··· 1016 980 static int mei_me_d0i3_exit_sync(struct mei_device *dev) 1017 981 { 1018 982 struct mei_me_hw *hw = to_me_hw(dev); 1019 - unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); 1020 983 int ret; 1021 984 u32 reg; 1022 985 ··· 1038 1003 1039 1004 mutex_unlock(&dev->device_lock); 1040 1005 wait_event_timeout(dev->wait_pg, 1041 - dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout); 1006 + dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, 1007 + dev->timeouts.d0i3); 1042 1008 mutex_lock(&dev->device_lock); 1043 1009 1044 1010 if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) { ··· 1212 1176 1213 1177 hcsr |= H_RST | H_IG | H_CSR_IS_MASK; 1214 1178 1215 - if (!intr_enable) 1179 + if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev))) 1216 1180 hcsr &= ~H_CSR_IE_MASK; 1217 1181 1218 1182 
dev->recvd_hw_ready = false; ··· 1295 1259 1296 1260 /* check if ME wants a reset */ 1297 1261 if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) { 1298 - dev_warn(dev->dev, "FW not ready: resetting.\n"); 1262 + dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d pxp = %d\n", 1263 + dev->dev_state, dev->pxp_mode); 1299 1264 if (dev->dev_state == MEI_DEV_POWERING_DOWN || 1300 1265 dev->dev_state == MEI_DEV_POWER_DOWN) 1301 1266 mei_cl_all_disconnect(dev); ··· 1367 1330 return IRQ_HANDLED; 1368 1331 } 1369 1332 EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler); 1333 + 1334 + #define MEI_POLLING_TIMEOUT_ACTIVE 100 1335 + #define MEI_POLLING_TIMEOUT_IDLE 500 1336 + 1337 + /** 1338 + * mei_me_polling_thread - interrupt register polling thread 1339 + * 1340 + * The thread monitors the interrupt source register and calls 1341 + * mei_me_irq_thread_handler() to handle the firmware 1342 + * input. 1343 + * 1344 + * The function polls in MEI_POLLING_TIMEOUT_ACTIVE timeout 1345 + * in case there was an event, in idle case the polling 1346 + * time increases yet again by MEI_POLLING_TIMEOUT_ACTIVE 1347 + * up to MEI_POLLING_TIMEOUT_IDLE. 
1348 + * 1349 + * @_dev: mei device 1350 + * 1351 + * Return: always 0 1352 + */ 1353 + int mei_me_polling_thread(void *_dev) 1354 + { 1355 + struct mei_device *dev = _dev; 1356 + irqreturn_t irq_ret; 1357 + long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE; 1358 + 1359 + dev_dbg(dev->dev, "kernel thread is running\n"); 1360 + while (!kthread_should_stop()) { 1361 + struct mei_me_hw *hw = to_me_hw(dev); 1362 + u32 hcsr; 1363 + 1364 + wait_event_timeout(hw->wait_active, 1365 + hw->is_active || kthread_should_stop(), 1366 + msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE)); 1367 + 1368 + if (kthread_should_stop()) 1369 + break; 1370 + 1371 + hcsr = mei_hcsr_read(dev); 1372 + if (me_intr_src(hcsr)) { 1373 + polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE; 1374 + irq_ret = mei_me_irq_thread_handler(1, dev); 1375 + if (irq_ret != IRQ_HANDLED) 1376 + dev_err(dev->dev, "irq_ret %d\n", irq_ret); 1377 + } else { 1378 + /* 1379 + * Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE 1380 + * up to MEI_POLLING_TIMEOUT_IDLE 1381 + */ 1382 + polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE, 1383 + MEI_POLLING_TIMEOUT_ACTIVE, 1384 + MEI_POLLING_TIMEOUT_IDLE); 1385 + } 1386 + 1387 + schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout)); 1388 + } 1389 + 1390 + return 0; 1391 + } 1392 + EXPORT_SYMBOL_GPL(mei_me_polling_thread); 1370 1393 1371 1394 static const struct mei_hw_ops mei_me_hw_ops = { 1372 1395 ··· 1733 1636 * 1734 1637 * @parent: device associated with physical device (pci/platform) 1735 1638 * @cfg: per device generation config 1639 + * @slow_fw: configure longer timeouts as FW is slow 1736 1640 * 1737 1641 * Return: The mei_device pointer on success, NULL on failure. 
1738 1642 */ 1739 1643 struct mei_device *mei_me_dev_init(struct device *parent, 1740 - const struct mei_cfg *cfg) 1644 + const struct mei_cfg *cfg, bool slow_fw) 1741 1645 { 1742 1646 struct mei_device *dev; 1743 1647 struct mei_me_hw *hw; ··· 1753 1655 for (i = 0; i < DMA_DSCR_NUM; i++) 1754 1656 dev->dr_dscr[i].size = cfg->dma_size[i]; 1755 1657 1756 - mei_device_init(dev, parent, &mei_me_hw_ops); 1658 + mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops); 1757 1659 hw->cfg = cfg; 1758 1660 1759 1661 dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
+15 -2
drivers/misc/mei/hw-me.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 - * Copyright (c) 2012-2020, Intel Corporation. All rights reserved. 3 + * Copyright (c) 2012-2022, Intel Corporation. All rights reserved. 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 ··· 51 51 * @d0i3_supported: di03 support 52 52 * @hbuf_depth: depth of hardware host/write buffer in slots 53 53 * @read_fws: read FW status register handler 54 + * @polling_thread: interrupt polling thread 55 + * @wait_active: the polling thread activity wait queue 56 + * @is_active: the device is active 54 57 */ 55 58 struct mei_me_hw { 56 59 const struct mei_cfg *cfg; ··· 63 60 bool d0i3_supported; 64 61 u8 hbuf_depth; 65 62 int (*read_fws)(const struct mei_device *dev, int where, u32 *val); 63 + /* polling */ 64 + struct task_struct *polling_thread; 65 + wait_queue_head_t wait_active; 66 + bool is_active; 66 67 }; 67 68 68 69 #define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw) 70 + 71 + static inline bool mei_me_hw_use_polling(const struct mei_me_hw *hw) 72 + { 73 + return hw->irq < 0; 74 + } 69 75 70 76 /** 71 77 * enum mei_cfg_idx - indices to platform specific configurations. ··· 132 120 const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx); 133 121 134 122 struct mei_device *mei_me_dev_init(struct device *parent, 135 - const struct mei_cfg *cfg); 123 + const struct mei_cfg *cfg, bool slow_fw); 136 124 137 125 int mei_me_pg_enter_sync(struct mei_device *dev); 138 126 int mei_me_pg_exit_sync(struct mei_device *dev); 139 127 140 128 irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id); 141 129 irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id); 130 + int mei_me_polling_thread(void *_dev); 142 131 143 132 #endif /* _MEI_INTERFACE_H_ */
+2 -2
drivers/misc/mei/hw-txe.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright (c) 2013-2020, Intel Corporation. All rights reserved. 3 + * Copyright (c) 2013-2022, Intel Corporation. All rights reserved. 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 ··· 1201 1201 if (!dev) 1202 1202 return NULL; 1203 1203 1204 - mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops); 1204 + mei_device_init(dev, &pdev->dev, false, &mei_txe_hw_ops); 1205 1205 1206 1206 hw = to_txe_hw(dev); 1207 1207
+6 -1
drivers/misc/mei/hw.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved 3 + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 ··· 16 16 #define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */ 17 17 18 18 #define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */ 19 + #define MEI_CL_CONNECT_TIMEOUT_SLOW 30 /* HPS: Client Connect Timeout, slow FW */ 19 20 #define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */ 20 21 21 22 #define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */ 22 23 #define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */ 23 24 #define MEI_HBM_TIMEOUT 1 /* 1 second */ 25 + #define MEI_HBM_TIMEOUT_SLOW 5 /* 5 second, slow FW */ 26 + 27 + #define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */ 28 + #define MKHI_RCV_TIMEOUT_SLOW 10000 /* receive timeout in msec, slow FW */ 24 29 25 30 /* 26 31 * FW page size for DMA allocations
+23 -12
drivers/misc/mei/init.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright (c) 2012-2019, Intel Corporation. All rights reserved. 3 + * Copyright (c) 2012-2022, Intel Corporation. All rights reserved. 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 ··· 218 218 goto err; 219 219 } 220 220 221 - if (!mei_host_is_ready(dev)) { 222 - dev_err(dev->dev, "host is not ready.\n"); 223 - goto err; 224 - } 225 - 226 - if (!mei_hw_is_ready(dev)) { 227 - dev_err(dev->dev, "ME is not ready.\n"); 228 - goto err; 229 - } 230 - 231 221 if (!mei_hbm_version_is_supported(dev)) { 232 222 dev_dbg(dev->dev, "MEI start failed.\n"); 233 223 goto err; ··· 310 320 311 321 mei_clear_interrupts(dev); 312 322 mei_synchronize_irq(dev); 323 + /* to catch HW-initiated reset */ 324 + mei_cancel_work(dev); 313 325 314 326 mutex_lock(&dev->device_lock); 315 327 ··· 349 357 EXPORT_SYMBOL_GPL(mei_write_is_idle); 350 358 351 359 /** 352 - * mei_device_init -- initialize mei_device structure 360 + * mei_device_init - initialize mei_device structure 353 361 * 354 362 * @dev: the mei device 355 363 * @device: the device structure 364 + * @slow_fw: configure longer timeouts as FW is slow 356 365 * @hw_ops: hw operations 357 366 */ 358 367 void mei_device_init(struct mei_device *dev, 359 368 struct device *device, 369 + bool slow_fw, 360 370 const struct mei_hw_ops *hw_ops) 361 371 { 362 372 /* setup our list array */ ··· 387 393 bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX); 388 394 dev->open_handle_count = 0; 389 395 396 + dev->pxp_mode = MEI_DEV_PXP_DEFAULT; 397 + 390 398 /* 391 399 * Reserving the first client ID 392 400 * 0: Reserved for MEI Bus Message communications ··· 398 402 dev->pg_event = MEI_PG_EVENT_IDLE; 399 403 dev->ops = hw_ops; 400 404 dev->dev = device; 405 + 406 + dev->timeouts.hw_ready = mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT); 407 + dev->timeouts.connect = MEI_CONNECT_TIMEOUT; 408 + dev->timeouts.client_init = MEI_CLIENTS_INIT_TIMEOUT; 409 + 
dev->timeouts.pgi = mei_secs_to_jiffies(MEI_PGI_TIMEOUT); 410 + dev->timeouts.d0i3 = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT); 411 + if (slow_fw) { 412 + dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT_SLOW); 413 + dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT_SLOW); 414 + dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT_SLOW); 415 + } else { 416 + dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT); 417 + dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT); 418 + dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT); 419 + } 401 420 } 402 421 EXPORT_SYMBOL_GPL(mei_device_init); 403 422
+2 -2
drivers/misc/mei/main.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. 3 + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 ··· 571 571 cl->state == MEI_FILE_DISCONNECTED || 572 572 cl->state == MEI_FILE_DISCONNECT_REQUIRED || 573 573 cl->state == MEI_FILE_DISCONNECT_REPLY), 574 - mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); 574 + dev->timeouts.cl_connect); 575 575 mutex_lock(&dev->device_lock); 576 576 } 577 577
+34 -1
drivers/misc/mei/mei_dev.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 2 /* 3 - * Copyright (c) 2003-2019, Intel Corporation. All rights reserved. 3 + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 ··· 60 60 MEI_DEV_POWERING_DOWN, 61 61 MEI_DEV_POWER_DOWN, 62 62 MEI_DEV_POWER_UP 63 + }; 64 + 65 + /** 66 + * enum mei_dev_pxp_mode - MEI PXP mode state 67 + * 68 + * @MEI_DEV_PXP_DEFAULT: PCH based device, no initailization required 69 + * @MEI_DEV_PXP_INIT: device requires initialization, send setup message to firmware 70 + * @MEI_DEV_PXP_SETUP: device is in setup stage, waiting for firmware repsonse 71 + * @MEI_DEV_PXP_READY: device initialized 72 + */ 73 + enum mei_dev_pxp_mode { 74 + MEI_DEV_PXP_DEFAULT = 0, 75 + MEI_DEV_PXP_INIT = 1, 76 + MEI_DEV_PXP_SETUP = 2, 77 + MEI_DEV_PXP_READY = 3, 63 78 }; 64 79 65 80 const char *mei_dev_state_str(int state); ··· 430 415 431 416 #define MEI_MAX_FW_VER_BLOCKS 3 432 417 418 + struct mei_dev_timeouts { 419 + unsigned long hw_ready; /* Timeout on ready message, in jiffies */ 420 + int connect; /* HPS: at least 2 seconds, in seconds */ 421 + unsigned long cl_connect; /* HPS: Client Connect Timeout, in jiffies */ 422 + int client_init; /* HPS: Clients Enumeration Timeout, in seconds */ 423 + unsigned long pgi; /* PG Isolation time response, in jiffies */ 424 + unsigned int d0i3; /* D0i3 set/unset max response time, in jiffies */ 425 + unsigned long hbm; /* HBM operation timeout, in jiffies */ 426 + unsigned long mkhi_recv; /* receive timeout, in jiffies */ 427 + }; 428 + 433 429 /** 434 430 * struct mei_device - MEI private device struct 435 431 * ··· 469 443 * @reset_count : number of consecutive resets 470 444 * @dev_state : device state 471 445 * @hbm_state : state of host bus message protocol 446 + * @pxp_mode : PXP device mode 472 447 * @init_clients_timer : HBM init handshake timeout 473 448 * 474 449 * @pg_event : power gating event ··· 506 479 * 
507 480 * @allow_fixed_address: allow user space to connect a fixed client 508 481 * @override_fixed_address: force allow fixed address behavior 482 + * 483 + * @timeouts: actual timeout values 509 484 * 510 485 * @reset_work : work item for the device reset 511 486 * @bus_rescan_work : work item for the bus rescan ··· 553 524 unsigned long reset_count; 554 525 enum mei_dev_state dev_state; 555 526 enum mei_hbm_state hbm_state; 527 + enum mei_dev_pxp_mode pxp_mode; 556 528 u16 init_clients_timer; 557 529 558 530 /* ··· 597 567 598 568 bool allow_fixed_address; 599 569 bool override_fixed_address; 570 + 571 + struct mei_dev_timeouts timeouts; 600 572 601 573 struct work_struct reset_work; 602 574 struct work_struct bus_rescan_work; ··· 664 632 */ 665 633 void mei_device_init(struct mei_device *dev, 666 634 struct device *device, 635 + bool slow_fw, 667 636 const struct mei_hw_ops *hw_ops); 668 637 int mei_reset(struct mei_device *dev); 669 638 int mei_start(struct mei_device *dev);
+55
drivers/misc/mei/mkhi.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 4 + * Intel Management Engine Interface (Intel MEI) Linux driver 5 + */ 6 + 7 + #ifndef _MEI_MKHI_H_ 8 + #define _MEI_MKHI_H_ 9 + 10 + #include <linux/types.h> 11 + 12 + #define MKHI_FEATURE_PTT 0x10 13 + 14 + #define MKHI_FWCAPS_GROUP_ID 0x3 15 + #define MKHI_FWCAPS_SET_OS_VER_APP_RULE_CMD 6 16 + #define MKHI_GEN_GROUP_ID 0xFF 17 + #define MKHI_GEN_GET_FW_VERSION_CMD 0x2 18 + 19 + #define MKHI_GROUP_ID_GFX 0x30 20 + #define MKHI_GFX_RESET_WARN_CMD_REQ 0x0 21 + #define MKHI_GFX_MEMORY_READY_CMD_REQ 0x1 22 + 23 + /* Allow transition to PXP mode without approval */ 24 + #define MKHI_GFX_MEM_READY_PXP_ALLOWED 0x1 25 + 26 + struct mkhi_rule_id { 27 + __le16 rule_type; 28 + u8 feature_id; 29 + u8 reserved; 30 + } __packed; 31 + 32 + struct mkhi_fwcaps { 33 + struct mkhi_rule_id id; 34 + u8 len; 35 + u8 data[]; 36 + } __packed; 37 + 38 + struct mkhi_msg_hdr { 39 + u8 group_id; 40 + u8 command; 41 + u8 reserved; 42 + u8 result; 43 + } __packed; 44 + 45 + struct mkhi_msg { 46 + struct mkhi_msg_hdr hdr; 47 + u8 data[]; 48 + } __packed; 49 + 50 + struct mkhi_gfx_mem_ready { 51 + struct mkhi_msg_hdr hdr; 52 + u32 flags; 53 + } __packed; 54 + 55 + #endif /* _MEI_MKHI_H_ */
+2 -2
drivers/misc/mei/pci-me.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 - * Copyright (c) 2003-2020, Intel Corporation. All rights reserved. 3 + * Copyright (c) 2003-2022, Intel Corporation. All rights reserved. 4 4 * Intel Management Engine Interface (Intel MEI) Linux driver 5 5 */ 6 6 ··· 203 203 } 204 204 205 205 /* allocates and initializes the mei dev structure */ 206 - dev = mei_me_dev_init(&pdev->dev, cfg); 206 + dev = mei_me_dev_init(&pdev->dev, cfg, false); 207 207 if (!dev) { 208 208 err = -ENOMEM; 209 209 goto end;
+12
include/linux/mei_aux.h
··· 7 7 8 8 #include <linux/auxiliary_bus.h> 9 9 10 + /** 11 + * struct mei_aux_device - mei auxiliary device 12 + * @aux_dev: - auxiliary device object 13 + * @irq: interrupt driving the mei auxiliary device 14 + * @bar: mmio resource bar reserved to mei auxiliary device 15 + * @ext_op_mem: resource for extend operational memory 16 + * used in graphics PXP mode. 17 + * @slow_firmware: The device has slow underlying firmware. 18 + * Such firmware will require to use larger operation timeouts. 19 + */ 10 20 struct mei_aux_device { 11 21 struct auxiliary_device aux_dev; 12 22 int irq; 13 23 struct resource bar; 24 + struct resource ext_op_mem; 25 + bool slow_firmware; 14 26 }; 15 27 16 28 #define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \