Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: Drop dead code for xehpsdv

PCI IDs for XEHPSDV were never added and the platform was always marked
with force_probe. Drop what's not used and rename some places to either
be xehp or dg2, depending on the platform/IP checks.

The registers not used anymore are also removed.

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Tvrtko Ursulin <tursulin@ursulin.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20240320060543.4034215-2-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>

+55 -384
+5 -6
Documentation/gpu/rfc/i915_vm_bind.h
··· 93 93 * Multiple VA mappings can be created to the same section of the object 94 94 * (aliasing). 95 95 * 96 - * The @start, @offset and @length must be 4K page aligned. However the DG2 97 - * and XEHPSDV has 64K page size for device local memory and has compact page 98 - * table. On those platforms, for binding device local-memory objects, the 99 - * @start, @offset and @length must be 64K aligned. Also, UMDs should not mix 100 - * the local memory 64K page and the system memory 4K page bindings in the same 101 - * 2M range. 96 + * The @start, @offset and @length must be 4K page aligned. However the DG2 has 97 + * 64K page size for device local memory and has compact page table. On that 98 + * platform, for binding device local-memory objects, the @start, @offset and 99 + * @length must be 64K aligned. Also, UMDs should not mix the local memory 64K 100 + * page and the system memory 4K page bindings in the same 2M range. 102 101 * 103 102 * Error code -EINVAL will be returned if @start, @offset and @length are not 104 103 * properly aligned. In version 1 (See I915_PARAM_VM_BIND_VERSION), error code
+20 -20
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
··· 500 500 } 501 501 502 502 static void 503 - xehpsdv_ppgtt_insert_huge(struct i915_address_space *vm, 504 - struct i915_vma_resource *vma_res, 505 - struct sgt_dma *iter, 506 - unsigned int pat_index, 507 - u32 flags) 503 + xehp_ppgtt_insert_huge(struct i915_address_space *vm, 504 + struct i915_vma_resource *vma_res, 505 + struct sgt_dma *iter, 506 + unsigned int pat_index, 507 + u32 flags) 508 508 { 509 509 const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags); 510 510 unsigned int rem = sg_dma_len(iter->sg); ··· 741 741 struct sgt_dma iter = sgt_dma(vma_res); 742 742 743 743 if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) { 744 - if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 50)) 745 - xehpsdv_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags); 744 + if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 55)) 745 + xehp_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags); 746 746 else 747 747 gen8_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags); 748 748 } else { ··· 781 781 drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr)); 782 782 } 783 783 784 - static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm, 785 - dma_addr_t addr, 786 - u64 offset, 787 - unsigned int pat_index, 788 - u32 flags) 784 + static void xehp_ppgtt_insert_entry_lm(struct i915_address_space *vm, 785 + dma_addr_t addr, 786 + u64 offset, 787 + unsigned int pat_index, 788 + u32 flags) 789 789 { 790 790 u64 idx = offset >> GEN8_PTE_SHIFT; 791 791 struct i915_page_directory * const pdp = ··· 810 810 vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, pat_index, flags); 811 811 } 812 812 813 - static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm, 814 - dma_addr_t addr, 815 - u64 offset, 816 - unsigned int pat_index, 817 - u32 flags) 813 + static void xehp_ppgtt_insert_entry(struct i915_address_space *vm, 814 + dma_addr_t addr, 815 + u64 offset, 816 + unsigned int pat_index, 817 + u32 flags) 818 818 { 819 819 if 
(flags & PTE_LM) 820 - return __xehpsdv_ppgtt_insert_entry_lm(vm, addr, offset, 821 - pat_index, flags); 820 + return xehp_ppgtt_insert_entry_lm(vm, addr, offset, 821 + pat_index, flags); 822 822 823 823 return gen8_ppgtt_insert_entry(vm, addr, offset, pat_index, flags); 824 824 } ··· 1042 1042 ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND; 1043 1043 ppgtt->vm.insert_entries = gen8_ppgtt_insert; 1044 1044 if (HAS_64K_PAGES(gt->i915)) 1045 - ppgtt->vm.insert_page = xehpsdv_ppgtt_insert_entry; 1045 + ppgtt->vm.insert_page = xehp_ppgtt_insert_entry; 1046 1046 else 1047 1047 ppgtt->vm.insert_page = gen8_ppgtt_insert_entry; 1048 1048 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
-15
drivers/gpu/drm/i915/gt/intel_gsc.c
··· 103 103 } 104 104 }; 105 105 106 - static const struct gsc_def gsc_def_xehpsdv[] = { 107 - { 108 - /* HECI1 not enabled on the device. */ 109 - }, 110 - { 111 - .name = "mei-gscfi", 112 - .bar = DG1_GSC_HECI2_BASE, 113 - .bar_size = GSC_BAR_LENGTH, 114 - .use_polling = true, 115 - .slow_firmware = true, 116 - } 117 - }; 118 - 119 106 static const struct gsc_def gsc_def_dg2[] = { 120 107 { 121 108 .name = "mei-gsc", ··· 175 188 176 189 if (IS_DG1(i915)) { 177 190 def = &gsc_def_dg1[intf_id]; 178 - } else if (IS_XEHPSDV(i915)) { 179 - def = &gsc_def_xehpsdv[intf_id]; 180 191 } else if (IS_DG2(i915)) { 181 192 def = &gsc_def_dg2[intf_id]; 182 193 } else {
+2 -18
drivers/gpu/drm/i915/gt/intel_gt_mcr.c
··· 57 57 * are of a "GAM" subclass that has special rules. Thus we use a separate 58 58 * GAM table farther down for those. 59 59 */ 60 - static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = { 60 + static const struct intel_mmio_range dg2_mslice_steering_table[] = { 61 61 { 0x00DD00, 0x00DDFF }, 62 62 { 0x00E900, 0x00FFFF }, /* 0xEA00 - OxEFFF is unused */ 63 - {}, 64 - }; 65 - 66 - static const struct intel_mmio_range xehpsdv_gam_steering_table[] = { 67 - { 0x004000, 0x004AFF }, 68 - { 0x00C800, 0x00CFFF }, 69 - {}, 70 - }; 71 - 72 - static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = { 73 - { 0x00B000, 0x00B0FF }, 74 - { 0x00D800, 0x00D8FF }, 75 63 {}, 76 64 }; 77 65 ··· 176 188 } else if (IS_PONTEVECCHIO(i915)) { 177 189 gt->steering_table[INSTANCE0] = pvc_instance0_steering_table; 178 190 } else if (IS_DG2(i915)) { 179 - gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table; 191 + gt->steering_table[MSLICE] = dg2_mslice_steering_table; 180 192 gt->steering_table[LNCF] = dg2_lncf_steering_table; 181 193 /* 182 194 * No need to hook up the GAM table since it has a dedicated 183 195 * steering control register on DG2 and can use implicit 184 196 * steering. 185 197 */ 186 - } else if (IS_XEHPSDV(i915)) { 187 - gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table; 188 - gt->steering_table[LNCF] = xehpsdv_lncf_steering_table; 189 - gt->steering_table[GAM] = xehpsdv_gam_steering_table; 190 198 } else if (GRAPHICS_VER(i915) >= 11 && 191 199 GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) { 192 200 gt->steering_table[L3BANK] = icl_l3bank_steering_table;
-50
drivers/gpu/drm/i915/gt/intel_gt_regs.h
··· 718 718 719 719 #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) 720 720 #define VFUNIT_CLKGATE_DIS REG_BIT(20) 721 - #define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */ 722 721 #define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */ 723 722 #define GAMEDIA_CLKGATE_DIS REG_BIT(11) 724 723 #define HSUNIT_CLKGATE_DIS REG_BIT(8) 725 724 #define VSUNIT_CLKGATE_DIS REG_BIT(3) 726 - 727 - #define UNSLCGCTL9440 _MMIO(0x9440) 728 - #define GAMTLBOACS_CLKGATE_DIS REG_BIT(28) 729 - #define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27) 730 - #define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26) 731 - #define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24) 732 - #define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23) 733 - #define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22) 734 - #define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21) 735 - #define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17) 736 - #define GAMTLBKCR_CLKGATE_DIS REG_BIT(16) 737 - #define GAMTLBGUC_CLKGATE_DIS REG_BIT(15) 738 - #define GAMTLBBLT_CLKGATE_DIS REG_BIT(14) 739 - #define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6) 740 - 741 - #define UNSLCGCTL9444 _MMIO(0x9444) 742 - #define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30) 743 - #define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29) 744 - #define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28) 745 - #define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27) 746 - #define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26) 747 - #define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25) 748 - #define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24) 749 - #define GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23) 750 - #define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22) 751 - #define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21) 752 - #define GAMTLBMERT_CLKGATE_DIS REG_BIT(20) 753 - #define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19) 754 - #define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18) 755 - #define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17) 756 - #define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16) 757 - #define LTCDD_CLKGATE_DIS REG_BIT(10) 758 725 759 726 #define GEN11_SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4) 760 727 #define XEHP_SLICE_UNIT_LEVEL_CLKGATE 
MCR_REG(0x94d4) ··· 731 764 #define NODEDSS_CLKGATE_DIS REG_BIT(12) 732 765 #define L3_CLKGATE_DIS REG_BIT(16) 733 766 #define L3_CR2X_CLKGATE_DIS REG_BIT(17) 734 - 735 - #define SCCGCTL94DC MCR_REG(0x94dc) 736 - #define CG3DDISURB REG_BIT(14) 737 767 738 768 #define UNSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x94e4) 739 769 #define VSUNIT_CLKGATE_DIS_TGL REG_BIT(19) ··· 1010 1046 #define XEHP_L3SQCREG5 MCR_REG(0xb158) 1011 1047 #define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0) 1012 1048 1013 - #define MLTICTXCTL MCR_REG(0xb170) 1014 - #define TDONRENDER REG_BIT(2) 1015 - 1016 1049 #define XEHP_L3SCQREG7 MCR_REG(0xb188) 1017 1050 #define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3) 1018 1051 ··· 1017 1056 #define SCRUB_CL_DWNGRADE_SHARED REG_BIT(12) 1018 1057 #define SCRUB_RATE_PER_BANK_MASK REG_GENMASK(2, 0) 1019 1058 #define SCRUB_RATE_4B_PER_CLK REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6) 1020 - 1021 - #define L3SQCREG1_CCS0 MCR_REG(0xb200) 1022 - #define FLUSHALLNONCOH REG_BIT(5) 1023 1059 1024 1060 #define GEN11_GLBLINVL _MMIO(0xb404) 1025 1061 #define GEN11_BANK_HASH_ADDR_EXCL_MASK (0x7f << 5) ··· 1067 1109 #define XEHP_COMPCTX_TLB_INV_CR MCR_REG(0xcf04) 1068 1110 #define XELPMP_GSC_TLB_INV_CR _MMIO(0xcf04) /* media GT only */ 1069 1111 1070 - #define XEHP_MERT_MOD_CTRL MCR_REG(0xcf28) 1071 1112 #define RENDER_MOD_CTRL MCR_REG(0xcf2c) 1072 1113 #define COMP_MOD_CTRL MCR_REG(0xcf30) 1073 1114 #define XELPMP_GSC_MOD_CTRL _MMIO(0xcf30) /* media GT only */ ··· 1142 1185 #define EU_PERF_CNTL4 PERF_REG(0xe45c) 1143 1186 1144 1187 #define GEN9_ROW_CHICKEN4 MCR_REG(0xe48c) 1145 - #define GEN12_DISABLE_GRF_CLEAR REG_BIT(13) 1146 1188 #define XEHP_DIS_BBL_SYSPIPE REG_BIT(11) 1147 1189 #define GEN12_DISABLE_TDL_PUSH REG_BIT(9) 1148 1190 #define GEN11_DIS_PICK_2ND_EU REG_BIT(7) ··· 1158 1202 #define FLOW_CONTROL_ENABLE REG_BIT(15) 1159 1203 #define UGM_BACKUP_MODE REG_BIT(13) 1160 1204 #define MDQ_ARBITRATION_MODE REG_BIT(12) 1161 - #define SYSTOLIC_DOP_CLOCK_GATING_DIS REG_BIT(10) 
1162 1205 #define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8) 1163 1206 #define STALL_DOP_GATING_DISABLE REG_BIT(5) 1164 1207 #define THROTTLE_12_5 REG_GENMASK(4, 2) ··· 1633 1678 #define XEHPC_BCS7_BCS8_INTR_MASK _MMIO(0x19011c) 1634 1679 1635 1680 #define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000) 1636 - 1637 - #define GT0_PACKAGE_ENERGY_STATUS _MMIO(0x250004) 1638 - #define GT0_PACKAGE_RAPL_LIMIT _MMIO(0x250008) 1639 - #define GT0_PACKAGE_POWER_SKU_UNIT _MMIO(0x250068) 1640 - #define GT0_PLATFORM_ENERGY_STATUS _MMIO(0x25006c) 1641 1681 1642 1682 /* 1643 1683 * Standalone Media's non-engine GT registers are located at their regular GT
+6 -15
drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
··· 573 573 char *buff) 574 574 { 575 575 struct intel_gt *gt = intel_gt_sysfs_get_drvdata(kobj, attr->attr.name); 576 - struct intel_guc_slpc *slpc = &gt->uc.guc.slpc; 577 576 intel_wakeref_t wakeref; 578 577 u32 mode; 579 578 ··· 580 581 * Retrieve media_ratio_mode from GEN6_RPNSWREQ bit 13 set by 581 582 * GuC. GEN6_RPNSWREQ:13 value 0 represents 1:2 and 1 represents 1:1 582 583 */ 583 - if (IS_XEHPSDV(gt->i915) && 584 - slpc->media_ratio_mode == SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL) { 585 - /* 586 - * For XEHPSDV dynamic mode GEN6_RPNSWREQ:13 does not contain 587 - * the media_ratio_mode, just return the cached media ratio 588 - */ 589 - mode = slpc->media_ratio_mode; 590 - } else { 591 - with_intel_runtime_pm(gt->uncore->rpm, wakeref) 592 - mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ); 593 - mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ? 594 - SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE : 595 - SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO; 596 - } 584 + with_intel_runtime_pm(gt->uncore->rpm, wakeref) 585 + mode = intel_uncore_read(gt->uncore, GEN6_RPNSWREQ); 586 + 587 + mode = REG_FIELD_GET(GEN12_MEDIA_FREQ_RATIO, mode) ? 588 + SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_ONE : 589 + SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO; 597 590 598 591 return sysfs_emit(buff, "%u\n", media_ratio_mode_to_factor(mode)); 599 592 }
-43
drivers/gpu/drm/i915/gt/intel_lrc.c
··· 546 546 END 547 547 }; 548 548 549 - static const u8 xehp_rcs_offsets[] = { 550 - NOP(1), 551 - LRI(13, POSTED), 552 - REG16(0x244), 553 - REG(0x034), 554 - REG(0x030), 555 - REG(0x038), 556 - REG(0x03c), 557 - REG(0x168), 558 - REG(0x140), 559 - REG(0x110), 560 - REG(0x1c0), 561 - REG(0x1c4), 562 - REG(0x1c8), 563 - REG(0x180), 564 - REG16(0x2b4), 565 - 566 - NOP(5), 567 - LRI(9, POSTED), 568 - REG16(0x3a8), 569 - REG16(0x28c), 570 - REG16(0x288), 571 - REG16(0x284), 572 - REG16(0x280), 573 - REG16(0x27c), 574 - REG16(0x278), 575 - REG16(0x274), 576 - REG16(0x270), 577 - 578 - LRI(3, POSTED), 579 - REG(0x1b0), 580 - REG16(0x5a8), 581 - REG16(0x5ac), 582 - 583 - NOP(6), 584 - LRI(1, 0), 585 - REG(0x0c8), 586 - 587 - END 588 - }; 589 - 590 549 static const u8 dg2_rcs_offsets[] = { 591 550 NOP(1), 592 551 LRI(15, POSTED), ··· 654 695 return mtl_rcs_offsets; 655 696 else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) 656 697 return dg2_rcs_offsets; 657 - else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) 658 - return xehp_rcs_offsets; 659 698 else if (GRAPHICS_VER(engine->i915) >= 12) 660 699 return gen12_rcs_offsets; 661 700 else if (GRAPHICS_VER(engine->i915) >= 11)
+9 -9
drivers/gpu/drm/i915/gt/intel_migrate.c
··· 35 35 return true; 36 36 } 37 37 38 - static void xehpsdv_toggle_pdes(struct i915_address_space *vm, 39 - struct i915_page_table *pt, 40 - void *data) 38 + static void xehp_toggle_pdes(struct i915_address_space *vm, 39 + struct i915_page_table *pt, 40 + void *data) 41 41 { 42 42 struct insert_pte_data *d = data; 43 43 ··· 52 52 d->offset += SZ_2M; 53 53 } 54 54 55 - static void xehpsdv_insert_pte(struct i915_address_space *vm, 56 - struct i915_page_table *pt, 57 - void *data) 55 + static void xehp_insert_pte(struct i915_address_space *vm, 56 + struct i915_page_table *pt, 57 + void *data) 58 58 { 59 59 struct insert_pte_data *d = data; 60 60 ··· 120 120 * 512 entry layout using 4K GTT pages. The other two windows just map 121 121 * lmem pages and must use the new compact 32 entry layout using 64K GTT 122 122 * pages, which ensures we can address any lmem object that the user 123 - * throws at us. We then also use the xehpsdv_toggle_pdes as a way of 123 + * throws at us. We then also use the xehp_toggle_pdes as a way of 124 124 * just toggling the PDE bit(GEN12_PDE_64K) for us, to enable the 125 125 * compact layout for each of these page-tables, that fall within the 126 126 * [CHUNK_SIZE, 3 * CHUNK_SIZE) range. ··· 209 209 /* Now allow the GPU to rewrite the PTE via its own ppGTT */ 210 210 if (HAS_64K_PAGES(gt->i915)) { 211 211 vm->vm.foreach(&vm->vm, base, d.offset - base, 212 - xehpsdv_insert_pte, &d); 212 + xehp_insert_pte, &d); 213 213 d.offset = base + CHUNK_SZ; 214 214 vm->vm.foreach(&vm->vm, 215 215 d.offset, 216 216 2 * CHUNK_SZ, 217 - xehpsdv_toggle_pdes, &d); 217 + xehp_toggle_pdes, &d); 218 218 } else { 219 219 vm->vm.foreach(&vm->vm, base, d.offset - base, 220 220 insert_pte, &d);
-31
drivers/gpu/drm/i915/gt/intel_mocs.c
··· 367 367 L3_3_WB), 368 368 }; 369 369 370 - static const struct drm_i915_mocs_entry xehpsdv_mocs_table[] = { 371 - /* wa_1608975824 */ 372 - MOCS_ENTRY(0, 0, L3_3_WB | L3_LKUP(1)), 373 - 374 - /* UC - Coherent; GO:L3 */ 375 - MOCS_ENTRY(1, 0, L3_1_UC | L3_LKUP(1)), 376 - /* UC - Coherent; GO:Memory */ 377 - MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)), 378 - /* UC - Non-Coherent; GO:Memory */ 379 - MOCS_ENTRY(3, 0, L3_1_UC | L3_GLBGO(1)), 380 - /* UC - Non-Coherent; GO:L3 */ 381 - MOCS_ENTRY(4, 0, L3_1_UC), 382 - 383 - /* WB */ 384 - MOCS_ENTRY(5, 0, L3_3_WB | L3_LKUP(1)), 385 - 386 - /* HW Reserved - SW program but never use. */ 387 - MOCS_ENTRY(48, 0, L3_3_WB | L3_LKUP(1)), 388 - MOCS_ENTRY(49, 0, L3_1_UC | L3_LKUP(1)), 389 - MOCS_ENTRY(60, 0, L3_1_UC), 390 - MOCS_ENTRY(61, 0, L3_1_UC), 391 - MOCS_ENTRY(62, 0, L3_1_UC), 392 - MOCS_ENTRY(63, 0, L3_1_UC), 393 - }; 394 - 395 370 static const struct drm_i915_mocs_entry dg2_mocs_table[] = { 396 371 /* UC - Coherent; GO:L3 */ 397 372 MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)), ··· 489 514 table->uc_index = 1; 490 515 table->n_entries = GEN9_NUM_MOCS_ENTRIES; 491 516 table->unused_entries_index = 3; 492 - } else if (IS_XEHPSDV(i915)) { 493 - table->size = ARRAY_SIZE(xehpsdv_mocs_table); 494 - table->table = xehpsdv_mocs_table; 495 - table->uc_index = 2; 496 - table->n_entries = GEN9_NUM_MOCS_ENTRIES; 497 - table->unused_entries_index = 5; 498 517 } else if (IS_DG1(i915)) { 499 518 table->size = ARRAY_SIZE(dg1_mocs_table); 500 519 table->table = dg1_mocs_table;
-2
drivers/gpu/drm/i915/gt/intel_rps.c
··· 1088 1088 1089 1089 if (IS_PONTEVECCHIO(i915)) 1090 1090 return intel_uncore_read(uncore, PVC_RP_STATE_CAP); 1091 - else if (IS_XEHPSDV(i915)) 1092 - return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP); 1093 1091 else if (IS_GEN9_LP(i915)) 1094 1092 return intel_uncore_read(uncore, BXT_RP_STATE_CAP); 1095 1093 else
-95
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 922 922 ; /* noop; none at this time */ 923 923 else if (IS_DG2(i915)) 924 924 dg2_ctx_workarounds_init(engine, wal); 925 - else if (IS_XEHPSDV(i915)) 926 - ; /* noop; none at this time */ 927 925 else if (IS_DG1(i915)) 928 926 dg1_ctx_workarounds_init(engine, wal); 929 927 else if (GRAPHICS_VER(i915) == 12) ··· 1348 1350 gt->steering_table[MSLICE] = NULL; 1349 1351 } 1350 1352 1351 - if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0)) 1352 - gt->steering_table[GAM] = NULL; 1353 - 1354 1353 slice = __ffs(slice_mask); 1355 1354 subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) % 1356 1355 GEN_DSS_PER_GSLICE; ··· 1512 1517 /* Wa_1408615072:dg1 */ 1513 1518 /* Empirical testing shows this register is unaffected by engine reset. */ 1514 1519 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL); 1515 - } 1516 - 1517 - static void 1518 - xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1519 - { 1520 - struct drm_i915_private *i915 = gt->i915; 1521 - 1522 - xehp_init_mcr(gt, wal); 1523 - 1524 - /* Wa_1409757795:xehpsdv */ 1525 - wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB); 1526 - 1527 - /* Wa_18011725039:xehpsdv */ 1528 - if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) { 1529 - wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER); 1530 - wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH); 1531 - } 1532 - 1533 - /* Wa_16011155590:xehpsdv */ 1534 - if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) 1535 - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, 1536 - TSGUNIT_CLKGATE_DIS); 1537 - 1538 - /* Wa_14011780169:xehpsdv */ 1539 - if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) { 1540 - wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | 1541 - GAMTLBVDBOX7_CLKGATE_DIS | 1542 - GAMTLBVDBOX6_CLKGATE_DIS | 1543 - GAMTLBVDBOX5_CLKGATE_DIS | 1544 - GAMTLBVDBOX4_CLKGATE_DIS | 1545 - GAMTLBVDBOX3_CLKGATE_DIS | 1546 - GAMTLBVDBOX2_CLKGATE_DIS | 1547 - GAMTLBVDBOX1_CLKGATE_DIS | 1548 - 
GAMTLBVDBOX0_CLKGATE_DIS | 1549 - GAMTLBKCR_CLKGATE_DIS | 1550 - GAMTLBGUC_CLKGATE_DIS | 1551 - GAMTLBBLT_CLKGATE_DIS); 1552 - wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS | 1553 - GAMTLBGFXA1_CLKGATE_DIS | 1554 - GAMTLBCOMPA0_CLKGATE_DIS | 1555 - GAMTLBCOMPA1_CLKGATE_DIS | 1556 - GAMTLBCOMPB0_CLKGATE_DIS | 1557 - GAMTLBCOMPB1_CLKGATE_DIS | 1558 - GAMTLBCOMPC0_CLKGATE_DIS | 1559 - GAMTLBCOMPC1_CLKGATE_DIS | 1560 - GAMTLBCOMPD0_CLKGATE_DIS | 1561 - GAMTLBCOMPD1_CLKGATE_DIS | 1562 - GAMTLBMERT_CLKGATE_DIS | 1563 - GAMTLBVEBOX3_CLKGATE_DIS | 1564 - GAMTLBVEBOX2_CLKGATE_DIS | 1565 - GAMTLBVEBOX1_CLKGATE_DIS | 1566 - GAMTLBVEBOX0_CLKGATE_DIS); 1567 - } 1568 - 1569 - /* Wa_16012725990:xehpsdv */ 1570 - if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER)) 1571 - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS); 1572 - 1573 - /* Wa_14011060649:xehpsdv */ 1574 - wa_14011060649(gt, wal); 1575 - 1576 - /* Wa_14012362059:xehpsdv */ 1577 - wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB); 1578 - 1579 - /* Wa_14014368820:xehpsdv */ 1580 - wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL, 1581 - INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE); 1582 - 1583 - /* Wa_14010670810:xehpsdv */ 1584 - wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE); 1585 1520 } 1586 1521 1587 1522 static void ··· 1683 1758 pvc_gt_workarounds_init(gt, wal); 1684 1759 else if (IS_DG2(i915)) 1685 1760 dg2_gt_workarounds_init(gt, wal); 1686 - else if (IS_XEHPSDV(i915)) 1687 - xehpsdv_gt_workarounds_init(gt, wal); 1688 1761 else if (IS_DG1(i915)) 1689 1762 dg1_gt_workarounds_init(gt, wal); 1690 1763 else if (GRAPHICS_VER(i915) == 12) ··· 2154 2231 pvc_whitelist_build(engine); 2155 2232 else if (IS_DG2(i915)) 2156 2233 dg2_whitelist_build(engine); 2157 - else if (IS_XEHPSDV(i915)) 2158 - ; /* none needed */ 2159 2234 else if (GRAPHICS_VER(i915) == 12) 2160 2235 tgl_whitelist_build(engine); 2161 2236 else if (GRAPHICS_VER(i915) == 11) ··· 2892 2971 
_MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC), 2893 2972 0 /* write-only, so skip validation */, 2894 2973 true); 2895 - } 2896 - 2897 - if (IS_XEHPSDV(i915)) { 2898 - /* Wa_1409954639 */ 2899 - wa_mcr_masked_en(wal, 2900 - GEN8_ROW_CHICKEN, 2901 - SYSTOLIC_DOP_CLOCK_GATING_DIS); 2902 - 2903 - /* Wa_1607196519 */ 2904 - wa_mcr_masked_en(wal, 2905 - GEN9_ROW_CHICKEN4, 2906 - GEN12_DISABLE_GRF_CLEAR); 2907 - 2908 - /* Wa_14010449647:xehpsdv */ 2909 - wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1, 2910 - GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); 2911 2974 } 2912 2975 } 2913 2976
+2 -2
drivers/gpu/drm/i915/gt/uc/intel_uc.c
··· 51 51 /* Default: enable HuC authentication and GuC submission */ 52 52 i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION; 53 53 54 - /* XEHPSDV and PVC do not use HuC */ 55 - if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915)) 54 + /* PVC does not use HuC */ 55 + if (IS_PONTEVECCHIO(i915)) 56 56 i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC; 57 57 } 58 58
-4
drivers/gpu/drm/i915/i915_drv.h
··· 544 544 #define IS_DG1(i915) IS_PLATFORM(i915, INTEL_DG1) 545 545 #define IS_ALDERLAKE_S(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_S) 546 546 #define IS_ALDERLAKE_P(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_P) 547 - #define IS_XEHPSDV(i915) IS_PLATFORM(i915, INTEL_XEHPSDV) 548 547 #define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2) 549 548 #define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO) 550 549 #define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE) ··· 619 620 620 621 #define IS_TIGERLAKE_UY(i915) \ 621 622 IS_SUBPLATFORM(i915, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_UY) 622 - 623 - #define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \ 624 - (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until)) 625 623 626 624 #define IS_PVC_BD_STEP(__i915, since, until) \ 627 625 (IS_PONTEVECCHIO(__i915) && \
-6
drivers/gpu/drm/i915/i915_hwmon.c
··· 739 739 hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT; 740 740 hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS; 741 741 hwmon->rg.energy_status_tile = INVALID_MMIO_REG; 742 - } else if (IS_XEHPSDV(i915)) { 743 - hwmon->rg.pkg_power_sku_unit = GT0_PACKAGE_POWER_SKU_UNIT; 744 - hwmon->rg.pkg_power_sku = INVALID_MMIO_REG; 745 - hwmon->rg.pkg_rapl_limit = GT0_PACKAGE_RAPL_LIMIT; 746 - hwmon->rg.energy_status_all = GT0_PLATFORM_ENERGY_STATUS; 747 - hwmon->rg.energy_status_tile = GT0_PACKAGE_ENERGY_STATUS; 748 742 } else { 749 743 hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG; 750 744 hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
-17
drivers/gpu/drm/i915/i915_pci.c
··· 734 734 .__runtime.media.ip.ver = 12, \ 735 735 .__runtime.media.ip.rel = 50 736 736 737 - __maybe_unused 738 - static const struct intel_device_info xehpsdv_info = { 739 - XE_HP_FEATURES, 740 - XE_HPM_FEATURES, 741 - DGFX_FEATURES, 742 - PLATFORM(INTEL_XEHPSDV), 743 - .has_64k_pages = 1, 744 - .has_media_ratio_mode = 1, 745 - .platform_engine_mask = 746 - BIT(RCS0) | BIT(BCS0) | 747 - BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) | 748 - BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) | 749 - BIT(VCS4) | BIT(VCS5) | BIT(VCS6) | BIT(VCS7) | 750 - BIT(CCS0) | BIT(CCS1) | BIT(CCS2) | BIT(CCS3), 751 - .require_force_probe = 1, 752 - }; 753 - 754 737 #define DG2_FEATURES \ 755 738 XE_HP_FEATURES, \ 756 739 XE_HPM_FEATURES, \
+5 -6
drivers/gpu/drm/i915/i915_perf.c
··· 2881 2881 int ret; 2882 2882 2883 2883 /* 2884 - * Wa_1508761755:xehpsdv, dg2 2884 + * Wa_1508761755 2885 2885 * EU NOA signals behave incorrectly if EU clock gating is enabled. 2886 2886 * Disable thread stall DOP gating and EU DOP gating. 2887 2887 */ 2888 - if (IS_XEHPSDV(i915) || IS_DG2(i915)) { 2888 + if (IS_DG2(i915)) { 2889 2889 intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN, 2890 2890 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE)); 2891 2891 intel_uncore_write(uncore, GEN7_ROW_CHICKEN2, ··· 2911 2911 /* 2912 2912 * Initialize Super Queue Internal Cnt Register 2913 2913 * Set PMON Enable in order to collect valid metrics. 2914 - * Enable byets per clock reporting in OA for XEHPSDV onward. 2914 + * Enable bytes per clock reporting in OA. 2915 2915 */ 2916 2916 sqcnt1 = GEN12_SQCNT1_PMON_ENABLE | 2917 2917 (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0); ··· 2971 2971 u32 sqcnt1; 2972 2972 2973 2973 /* 2974 - * Wa_1508761755:xehpsdv, dg2 2975 - * Enable thread stall DOP gating and EU DOP gating. 2974 + * Wa_1508761755: Enable thread stall DOP gating and EU DOP gating. 2976 2975 */ 2977 - if (IS_XEHPSDV(i915) || IS_DG2(i915)) { 2976 + if (IS_DG2(i915)) { 2978 2977 intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN, 2979 2978 _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE)); 2980 2979 intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
+1 -2
drivers/gpu/drm/i915/i915_reg.h
··· 1750 1750 1751 1751 #define BXT_RP_STATE_CAP _MMIO(0x138170) 1752 1752 #define GEN9_RP_STATE_LIMITS _MMIO(0x138148) 1753 - #define XEHPSDV_RP_STATE_CAP _MMIO(0x250014) 1754 1753 #define PVC_RP_STATE_CAP _MMIO(0x281014) 1755 1754 1756 1755 #define MTL_RP_STATE_CAP _MMIO(0x138000) ··· 5400 5401 #define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */ 5401 5402 #define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0) 5402 5403 #define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23 5403 - #define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* xehpsdv, pvc */ 5404 + #define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */ 5404 5405 /* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */ 5405 5406 #define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0 5406 5407 #define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
-10
drivers/gpu/drm/i915/intel_clock_gating.c
··· 343 343 intel_uncore_write(&i915->uncore, GEN7_MISCCPCTL, misccpctl); 344 344 } 345 345 346 - static void xehpsdv_init_clock_gating(struct drm_i915_private *i915) 347 - { 348 - /* Wa_22010146351:xehpsdv */ 349 - if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) 350 - intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS); 351 - } 352 - 353 346 static void dg2_init_clock_gating(struct drm_i915_private *i915) 354 347 { 355 348 /* Wa_22010954014:dg2 */ ··· 724 731 725 732 CG_FUNCS(pvc); 726 733 CG_FUNCS(dg2); 727 - CG_FUNCS(xehpsdv); 728 734 CG_FUNCS(cfl); 729 735 CG_FUNCS(skl); 730 736 CG_FUNCS(kbl); ··· 760 768 i915->clock_gating_funcs = &pvc_clock_gating_funcs; 761 769 else if (IS_DG2(i915)) 762 770 i915->clock_gating_funcs = &dg2_clock_gating_funcs; 763 - else if (IS_XEHPSDV(i915)) 764 - i915->clock_gating_funcs = &xehpsdv_clock_gating_funcs; 765 771 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) 766 772 i915->clock_gating_funcs = &cfl_clock_gating_funcs; 767 773 else if (IS_SKYLAKE(i915))
-1
drivers/gpu/drm/i915/intel_device_info.c
··· 70 70 PLATFORM_NAME(DG1), 71 71 PLATFORM_NAME(ALDERLAKE_S), 72 72 PLATFORM_NAME(ALDERLAKE_P), 73 - PLATFORM_NAME(XEHPSDV), 74 73 PLATFORM_NAME(DG2), 75 74 PLATFORM_NAME(PONTEVECCHIO), 76 75 PLATFORM_NAME(METEORLAKE),
-1
drivers/gpu/drm/i915/intel_device_info.h
··· 87 87 INTEL_DG1, 88 88 INTEL_ALDERLAKE_S, 89 89 INTEL_ALDERLAKE_P, 90 - INTEL_XEHPSDV, 91 90 INTEL_DG2, 92 91 INTEL_PONTEVECCHIO, 93 92 INTEL_METEORLAKE,
-10
drivers/gpu/drm/i915/intel_step.c
··· 102 102 [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 }, 103 103 }; 104 104 105 - static const struct intel_step_info xehpsdv_revids[] = { 106 - [0x0] = { COMMON_GT_MEDIA_STEP(A0) }, 107 - [0x1] = { COMMON_GT_MEDIA_STEP(A1) }, 108 - [0x4] = { COMMON_GT_MEDIA_STEP(B0) }, 109 - [0x8] = { COMMON_GT_MEDIA_STEP(C0) }, 110 - }; 111 - 112 105 static const struct intel_step_info dg2_g10_revid_step_tbl[] = { 113 106 [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, 114 107 [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 }, ··· 183 190 } else if (IS_DG2_G12(i915)) { 184 191 revids = dg2_g12_revid_step_tbl; 185 192 size = ARRAY_SIZE(dg2_g12_revid_step_tbl); 186 - } else if (IS_XEHPSDV(i915)) { 187 - revids = xehpsdv_revids; 188 - size = ARRAY_SIZE(xehpsdv_revids); 189 193 } else if (IS_ALDERLAKE_P_N(i915)) { 190 194 revids = adlp_n_revids; 191 195 size = ARRAY_SIZE(adlp_n_revids);
+5 -18
drivers/gpu/drm/i915/intel_uncore.c
··· 1533 1533 0x12000 - 0x127ff: always on \ 1534 1534 0x12800 - 0x12fff: reserved */ \ 1535 1535 GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */ \ 1536 - GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /* \ 1536 + GEN_FW_RANGE(0x13200, 0x147ff, FORCEWAKE_MEDIA_VDBOX2), /* \ 1537 1537 0x13200 - 0x133ff: VD2 (DG2 only) \ 1538 - 0x13400 - 0x13fff: reserved */ \ 1539 - GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */ \ 1540 - GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */ \ 1541 - GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */ \ 1542 - GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */ \ 1538 + 0x13400 - 0x147ff: reserved */ \ 1543 1539 GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER), \ 1544 1540 GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /* \ 1545 1541 0x15000 - 0x15fff: gt (DG2 only) \ 1546 1542 0x16000 - 0x16dff: reserved */ \ 1547 - GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER), \ 1548 - GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /* \ 1549 - 0x20000 - 0x20fff: VD0 (XEHPSDV only) \ 1550 - 0x21000 - 0x21fff: reserved */ \ 1543 + GEN_FW_RANGE(0x16e00, 0x21fff, FORCEWAKE_RENDER), /* \ 1544 + 0x16e00 - 0x1ffff: render \ 1545 + 0x20000 - 0x21fff: reserved */ \ 1551 1546 GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT), \ 1552 1547 GEN_FW_RANGE(0x24000, 0x2417f, 0), /* \ 1553 1548 0x24000 - 0x2407f: always on \ ··· 1621 1626 0x1f6d00 - 0x1f6dff: VD7 \ 1622 1627 0x1f6e00 - 0x1f7fff: reserved */ \ 1623 1628 GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3), 1624 - 1625 - static const struct intel_forcewake_range __xehp_fw_ranges[] = { 1626 - XEHP_FWRANGES(FORCEWAKE_GT) 1627 - }; 1628 1629 1629 1630 static const struct intel_forcewake_range __dg2_fw_ranges[] = { 1630 1631 XEHP_FWRANGES(FORCEWAKE_RENDER) ··· 2574 2583 } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) { 2575 2584 ASSIGN_FW_DOMAINS_TABLE(uncore, 
__dg2_fw_ranges); 2576 2585 ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs); 2577 - ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); 2578 - } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { 2579 - ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges); 2580 - ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs); 2581 2586 ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); 2582 2587 } else if (GRAPHICS_VER(i915) >= 12) { 2583 2588 ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
-1
drivers/gpu/drm/i915/selftests/intel_uncore.c
··· 119 119 { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true }, 120 120 { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true }, 121 121 { __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true }, 122 - { __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true }, 123 122 { __pvc_fw_ranges, ARRAY_SIZE(__pvc_fw_ranges), true }, 124 123 { __mtl_fw_ranges, ARRAY_SIZE(__mtl_fw_ranges), true }, 125 124 { __xelpmp_fw_ranges, ARRAY_SIZE(__xelpmp_fw_ranges), true },
-2
drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
··· 85 85 #define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1) 86 86 #define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S) 87 87 #define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) 88 - #define IS_XEHPSDV(dev_priv) (dev_priv && 0) 89 88 #define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2) 90 89 #define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, XE_PVC) 91 90 #define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE) ··· 129 130 #define IS_DG2_GRAPHICS_STEP(xe, variant, first, last) \ 130 131 ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_ ## variant && \ 131 132 IS_GRAPHICS_STEP(xe, first, last)) 132 - #define IS_XEHPSDV_GRAPHICS_STEP(xe, first, last) (IS_XEHPSDV(xe) && IS_GRAPHICS_STEP(xe, first, last)) 133 133 134 134 /* XXX: No basedie stepping support yet */ 135 135 #define IS_PVC_BD_STEP(xe, first, last) (!WARN_ON(1) && IS_PONTEVECCHIO(xe))