Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ia64: remove stale paravirt leftovers

Remove the last leftovers from IA64 Xen pv-guest support.

PARAVIRT is long gone from IA64 Kconfig and Xen IA64 support, too.

Due to lack of infrastructure, no testing was done.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Link: https://lore.kernel.org/r/20191021100415.7642-1-jgross@suse.com

Authored by Juergen Gross and committed by Tony Luck
240b62d3 219d5433

+34 -77
-4
arch/ia64/include/asm/irqflags.h
··· 36 36 static inline unsigned long arch_local_save_flags(void) 37 37 { 38 38 ia64_stop(); 39 - #ifdef CONFIG_PARAVIRT 40 - return ia64_get_psr_i(); 41 - #else 42 39 return ia64_getreg(_IA64_REG_PSR); 43 - #endif 44 40 } 45 41 46 42 static inline unsigned long arch_local_irq_save(void)
+12 -12
arch/ia64/include/uapi/asm/gcc_intrin.h
··· 31 31 extern void ia64_bad_param_for_getreg (void); 32 32 33 33 34 - #define ia64_native_setreg(regnum, val) \ 34 + #define ia64_setreg(regnum, val) \ 35 35 ({ \ 36 36 switch (regnum) { \ 37 37 case _IA64_REG_PSR_L: \ ··· 60 60 } \ 61 61 }) 62 62 63 - #define ia64_native_getreg(regnum) \ 63 + #define ia64_getreg(regnum) \ 64 64 ({ \ 65 65 __u64 ia64_intri_res; \ 66 66 \ ··· 384 384 385 385 #define ia64_invala() asm volatile ("invala" ::: "memory") 386 386 387 - #define ia64_native_thash(addr) \ 387 + #define ia64_thash(addr) \ 388 388 ({ \ 389 389 unsigned long ia64_intri_res; \ 390 390 asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ ··· 437 437 #define ia64_set_pmd(index, val) \ 438 438 asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory") 439 439 440 - #define ia64_native_set_rr(index, val) \ 440 + #define ia64_set_rr(index, val) \ 441 441 asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory"); 442 442 443 - #define ia64_native_get_cpuid(index) \ 443 + #define ia64_get_cpuid(index) \ 444 444 ({ \ 445 445 unsigned long ia64_intri_res; \ 446 446 asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \ ··· 476 476 }) 477 477 478 478 479 - #define ia64_native_get_pmd(index) \ 479 + #define ia64_get_pmd(index) \ 480 480 ({ \ 481 481 unsigned long ia64_intri_res; \ 482 482 asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ 483 483 ia64_intri_res; \ 484 484 }) 485 485 486 - #define ia64_native_get_rr(index) \ 486 + #define ia64_get_rr(index) \ 487 487 ({ \ 488 488 unsigned long ia64_intri_res; \ 489 489 asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \ 490 490 ia64_intri_res; \ 491 491 }) 492 492 493 - #define ia64_native_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") 493 + #define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") 494 494 495 495 496 496 #define ia64_sync_i() asm volatile (";; sync.i" ::: "memory") 497 497 498 - #define 
ia64_native_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory") 499 - #define ia64_native_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") 498 + #define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory") 499 + #define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") 500 500 #define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory") 501 501 #define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory") 502 502 503 503 #define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr)) 504 504 505 - #define ia64_native_ptcga(addr, size) \ 505 + #define ia64_ptcga(addr, size) \ 506 506 do { \ 507 507 asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \ 508 508 ia64_dv_serialize_data(); \ ··· 607 607 } \ 608 608 }) 609 609 610 - #define ia64_native_intrin_local_irq_restore(x) \ 610 + #define ia64_intrin_local_irq_restore(x) \ 611 611 do { \ 612 612 asm volatile (";; cmp.ne p6,p7=%0,r0;;" \ 613 613 "(p6) ssm psr.i;" \
+16 -16
arch/ia64/include/uapi/asm/intel_intrin.h
··· 17 17 * intrinsic 18 18 */ 19 19 20 - #define ia64_native_getreg __getReg 21 - #define ia64_native_setreg __setReg 20 + #define ia64_getreg __getReg 21 + #define ia64_setreg __setReg 22 22 23 23 #define ia64_hint __hint 24 24 #define ia64_hint_pause __hint_pause ··· 40 40 #define ia64_invala_fr __invala_fr 41 41 #define ia64_nop __nop 42 42 #define ia64_sum __sum 43 - #define ia64_native_ssm __ssm 43 + #define ia64_ssm __ssm 44 44 #define ia64_rum __rum 45 - #define ia64_native_rsm __rsm 46 - #define ia64_native_fc __fc 45 + #define ia64_rsm __rsm 46 + #define ia64_fc __fc 47 47 48 48 #define ia64_ldfs __ldfs 49 49 #define ia64_ldfd __ldfd ··· 89 89 __setIndReg(_IA64_REG_INDR_PMC, index, val) 90 90 #define ia64_set_pmd(index, val) \ 91 91 __setIndReg(_IA64_REG_INDR_PMD, index, val) 92 - #define ia64_native_set_rr(index, val) \ 92 + #define ia64_set_rr(index, val) \ 93 93 __setIndReg(_IA64_REG_INDR_RR, index, val) 94 94 95 - #define ia64_native_get_cpuid(index) \ 95 + #define ia64_get_cpuid(index) \ 96 96 __getIndReg(_IA64_REG_INDR_CPUID, index) 97 97 #define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index) 98 98 #define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index) 99 99 #define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index) 100 100 #define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index) 101 - #define ia64_native_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index) 102 - #define ia64_native_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index) 101 + #define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index) 102 + #define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index) 103 103 104 104 #define ia64_srlz_d __dsrlz 105 105 #define ia64_srlz_i __isrlz ··· 121 121 #define ia64_ld8_acq __ld8_acq 122 122 123 123 #define ia64_sync_i __synci 124 - #define ia64_native_thash __thash 125 - #define ia64_native_ttag __ttag 124 + #define ia64_thash __thash 125 + #define ia64_ttag __ttag 126 126 #define ia64_itcd 
__itcd 127 127 #define ia64_itci __itci 128 128 #define ia64_itrd __itrd 129 129 #define ia64_itri __itri 130 130 #define ia64_ptce __ptce 131 131 #define ia64_ptcl __ptcl 132 - #define ia64_native_ptcg __ptcg 133 - #define ia64_native_ptcga __ptcga 132 + #define ia64_ptcg __ptcg 133 + #define ia64_ptcga __ptcga 134 134 #define ia64_ptri __ptri 135 135 #define ia64_ptrd __ptrd 136 136 #define ia64_dep_mi _m64_dep_mi ··· 147 147 #define ia64_lfetch_fault __lfetch_fault 148 148 #define ia64_lfetch_fault_excl __lfetch_fault_excl 149 149 150 - #define ia64_native_intrin_local_irq_restore(x) \ 150 + #define ia64_intrin_local_irq_restore(x) \ 151 151 do { \ 152 152 if ((x) != 0) { \ 153 - ia64_native_ssm(IA64_PSR_I); \ 153 + ia64_ssm(IA64_PSR_I); \ 154 154 ia64_srlz_d(); \ 155 155 } else { \ 156 - ia64_native_rsm(IA64_PSR_I); \ 156 + ia64_rsm(IA64_PSR_I); \ 157 157 } \ 158 158 } while (0) 159 159
+6 -45
arch/ia64/include/uapi/asm/intrinsics.h
··· 21 21 #endif 22 22 #include <asm/cmpxchg.h> 23 23 24 - #define ia64_native_get_psr_i() (ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I) 25 - 26 - #define ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4) \ 24 + #define ia64_set_rr0_to_rr4(val0, val1, val2, val3, val4) \ 27 25 do { \ 28 - ia64_native_set_rr(0x0000000000000000UL, (val0)); \ 29 - ia64_native_set_rr(0x2000000000000000UL, (val1)); \ 30 - ia64_native_set_rr(0x4000000000000000UL, (val2)); \ 31 - ia64_native_set_rr(0x6000000000000000UL, (val3)); \ 32 - ia64_native_set_rr(0x8000000000000000UL, (val4)); \ 26 + ia64_set_rr(0x0000000000000000UL, (val0)); \ 27 + ia64_set_rr(0x2000000000000000UL, (val1)); \ 28 + ia64_set_rr(0x4000000000000000UL, (val2)); \ 29 + ia64_set_rr(0x6000000000000000UL, (val3)); \ 30 + ia64_set_rr(0x8000000000000000UL, (val4)); \ 33 31 } while (0) 34 32 35 33 /* ··· 82 84 #define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */ 83 85 84 86 #endif 85 - 86 - 87 - #ifndef __ASSEMBLY__ 88 - 89 - #define IA64_INTRINSIC_API(name) ia64_native_ ## name 90 - #define IA64_INTRINSIC_MACRO(name) ia64_native_ ## name 91 - 92 - 93 - /************************************************/ 94 - /* Instructions paravirtualized for correctness */ 95 - /************************************************/ 96 - /* fc, thash, get_cpuid, get_pmd, get_eflags, set_eflags */ 97 - /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" 98 - * is not currently used (though it may be in a long-format VHPT system!) 
99 - */ 100 - #define ia64_fc IA64_INTRINSIC_API(fc) 101 - #define ia64_thash IA64_INTRINSIC_API(thash) 102 - #define ia64_get_cpuid IA64_INTRINSIC_API(get_cpuid) 103 - #define ia64_get_pmd IA64_INTRINSIC_API(get_pmd) 104 - 105 - 106 - /************************************************/ 107 - /* Instructions paravirtualized for performance */ 108 - /************************************************/ 109 - #define ia64_ssm IA64_INTRINSIC_MACRO(ssm) 110 - #define ia64_rsm IA64_INTRINSIC_MACRO(rsm) 111 - #define ia64_getreg IA64_INTRINSIC_MACRO(getreg) 112 - #define ia64_setreg IA64_INTRINSIC_API(setreg) 113 - #define ia64_set_rr IA64_INTRINSIC_API(set_rr) 114 - #define ia64_get_rr IA64_INTRINSIC_API(get_rr) 115 - #define ia64_ptcga IA64_INTRINSIC_API(ptcga) 116 - #define ia64_get_psr_i IA64_INTRINSIC_API(get_psr_i) 117 - #define ia64_intrin_local_irq_restore \ 118 - IA64_INTRINSIC_API(intrin_local_irq_restore) 119 - #define ia64_set_rr0_to_rr4 IA64_INTRINSIC_API(set_rr0_to_rr4) 120 - 121 - #endif /* !__ASSEMBLY__ */ 122 87 123 88 #endif /* _UAPI_ASM_IA64_INTRINSICS_H */