Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/paravirt: Move the Xen-only pv_cpu_ops under the PARAVIRT_XXL umbrella

Most of the paravirt ops defined in pv_cpu_ops are for Xen PV guests
only. Define them only if CONFIG_PARAVIRT_XXL is set.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel@lists.xenproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: akataria@vmware.com
Cc: rusty@rustcorp.com.au
Cc: boris.ostrovsky@oracle.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180828074026.820-13-jgross@suse.com

Authored by Juergen Gross and committed by Thomas Gleixner.

Commit: 9bad5658 (parent: 40181646)

+78 -22
+1 -1
arch/x86/include/asm/debugreg.h
··· 8 8 9 9 DECLARE_PER_CPU(unsigned long, cpu_dr7); 10 10 11 - #ifndef CONFIG_PARAVIRT 11 + #ifndef CONFIG_PARAVIRT_XXL 12 12 /* 13 13 * These special macros can be used to get or set a debugging register 14 14 */
+2 -2
arch/x86/include/asm/desc.h
··· 108 108 return !(desc[0] | desc[1]); 109 109 } 110 110 111 - #ifdef CONFIG_PARAVIRT 111 + #ifdef CONFIG_PARAVIRT_XXL 112 112 #include <asm/paravirt.h> 113 113 #else 114 114 #define load_TR_desc() native_load_tr_desc() ··· 134 134 static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) 135 135 { 136 136 } 137 - #endif /* CONFIG_PARAVIRT */ 137 + #endif /* CONFIG_PARAVIRT_XXL */ 138 138 139 139 #define store_ldt(ldt) asm("sldt %0" : "=m"(ldt)) 140 140
+11 -5
arch/x86/include/asm/irqflags.h
··· 123 123 #define DISABLE_INTERRUPTS(x) cli 124 124 125 125 #ifdef CONFIG_X86_64 126 + #ifdef CONFIG_DEBUG_ENTRY 127 + #define SAVE_FLAGS(x) pushfq; popq %rax 128 + #endif 129 + #endif 130 + #endif /* __ASSEMBLY__ */ 131 + #endif /* CONFIG_PARAVIRT */ 132 + 133 + #ifndef CONFIG_PARAVIRT_XXL 134 + #ifdef __ASSEMBLY__ 135 + #ifdef CONFIG_X86_64 126 136 #define SWAPGS swapgs 127 137 /* 128 138 * Currently paravirt can't handle swapgs nicely when we ··· 153 143 swapgs; \ 154 144 sysretl 155 145 156 - #ifdef CONFIG_DEBUG_ENTRY 157 - #define SAVE_FLAGS(x) pushfq; popq %rax 158 - #endif 159 146 #else 160 147 #define INTERRUPT_RETURN iret 161 148 #endif 162 149 163 - 164 150 #endif /* __ASSEMBLY__ */ 165 - #endif /* CONFIG_PARAVIRT */ 151 + #endif /* CONFIG_PARAVIRT_XXL */ 166 152 167 153 #ifndef __ASSEMBLY__ 168 154 static inline int arch_irqs_disabled_flags(unsigned long flags)
+2 -2
arch/x86/include/asm/msr.h
··· 242 242 return EAX_EDX_VAL(val, low, high); 243 243 } 244 244 245 - #ifdef CONFIG_PARAVIRT 245 + #ifdef CONFIG_PARAVIRT_XXL 246 246 #include <asm/paravirt.h> 247 247 #else 248 248 #include <linux/errno.h> ··· 305 305 306 306 #define rdpmcl(counter, val) ((val) = native_read_pmc(counter)) 307 307 308 - #endif /* !CONFIG_PARAVIRT */ 308 + #endif /* !CONFIG_PARAVIRT_XXL */ 309 309 310 310 /* 311 311 * 64-bit version of wrmsr_safe():
+17 -2
arch/x86/include/asm/paravirt.h
··· 17 17 #include <linux/cpumask.h> 18 18 #include <asm/frame.h> 19 19 20 + #ifdef CONFIG_PARAVIRT_XXL 20 21 static inline void load_sp0(unsigned long sp0) 21 22 { 22 23 PVOP_VCALL1(cpu.load_sp0, sp0); ··· 52 51 { 53 52 PVOP_VCALL1(cpu.write_cr0, x); 54 53 } 54 + #endif 55 55 56 56 static inline unsigned long read_cr2(void) 57 57 { ··· 74 72 PVOP_VCALL1(mmu.write_cr3, x); 75 73 } 76 74 75 + #ifdef CONFIG_PARAVIRT_XXL 77 76 static inline void __write_cr4(unsigned long x) 78 77 { 79 78 PVOP_VCALL1(cpu.write_cr4, x); ··· 91 88 PVOP_VCALL1(cpu.write_cr8, x); 92 89 } 93 90 #endif 91 + #endif 94 92 95 93 static inline void arch_safe_halt(void) 96 94 { ··· 103 99 PVOP_VCALL0(irq.halt); 104 100 } 105 101 102 + #ifdef CONFIG_PARAVIRT_XXL 106 103 static inline void wbinvd(void) 107 104 { 108 105 PVOP_VCALL0(cpu.wbinvd); 109 106 } 110 107 111 - #ifdef CONFIG_PARAVIRT_XXL 112 108 #define get_kernel_rpl() (pv_info.kernel_rpl) 113 - #endif 114 109 115 110 static inline u64 paravirt_read_msr(unsigned msr) 116 111 { ··· 174 171 *p = paravirt_read_msr_safe(msr, &err); 175 172 return err; 176 173 } 174 + #endif 177 175 178 176 static inline unsigned long long paravirt_sched_clock(void) 179 177 { ··· 190 186 return PVOP_CALL1(u64, time.steal_clock, cpu); 191 187 } 192 188 189 + #ifdef CONFIG_PARAVIRT_XXL 193 190 static inline unsigned long long paravirt_read_pmc(int counter) 194 191 { 195 192 return PVOP_CALL1(u64, cpu.read_pmc, counter); ··· 235 230 { 236 231 return PVOP_CALL0(unsigned long, cpu.store_tr); 237 232 } 233 + 238 234 #define store_tr(tr) ((tr) = paravirt_store_tr()) 239 235 static inline void load_TLS(struct thread_struct *t, unsigned cpu) 240 236 { ··· 269 263 { 270 264 PVOP_VCALL1(cpu.set_iopl_mask, mask); 271 265 } 266 + #endif 272 267 273 268 /* The paravirtualized I/O functions */ 274 269 static inline void slow_down_io(void) ··· 625 618 } 626 619 #endif /* CONFIG_X86_PAE */ 627 620 621 + #ifdef CONFIG_PARAVIRT_XXL 628 622 #define __HAVE_ARCH_START_CONTEXT_SWITCH 
629 623 static inline void arch_start_context_switch(struct task_struct *prev) 630 624 { ··· 636 628 { 637 629 PVOP_VCALL1(cpu.end_context_switch, next); 638 630 } 631 + #endif 639 632 640 633 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 641 634 static inline void arch_enter_lazy_mmu_mode(void) ··· 879 870 #define PARA_INDIRECT(addr) *%cs:addr 880 871 #endif 881 872 873 + #ifdef CONFIG_PARAVIRT_XXL 882 874 #define INTERRUPT_RETURN \ 883 875 PARA_SITE(PARA_PATCH(PV_CPU_iret), \ 884 876 ANNOTATE_RETPOLINE_SAFE; \ 885 877 jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);) 878 + #endif 886 879 887 880 #define DISABLE_INTERRUPTS(clobbers) \ 888 881 PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable), \ ··· 901 890 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);) 902 891 903 892 #ifdef CONFIG_X86_64 893 + #ifdef CONFIG_PARAVIRT_XXL 904 894 /* 905 895 * If swapgs is used while the userspace stack is still current, 906 896 * there's no way to call a pvop. The PV replacement *must* be ··· 921 909 ANNOTATE_RETPOLINE_SAFE; \ 922 910 call PARA_INDIRECT(pv_ops+PV_CPU_swapgs); \ 923 911 ) 912 + #endif 924 913 925 914 #define GET_CR2_INTO_RAX \ 926 915 ANNOTATE_RETPOLINE_SAFE; \ 927 916 call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2); 928 917 918 + #ifdef CONFIG_PARAVIRT_XXL 929 919 #define USERGS_SYSRET64 \ 930 920 PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64), \ 931 921 ANNOTATE_RETPOLINE_SAFE; \ 932 922 jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);) 923 + #endif 933 924 934 925 #ifdef CONFIG_DEBUG_ENTRY 935 926 #define SAVE_FLAGS(clobbers) \
+4 -1
arch/x86/include/asm/paravirt_types.h
··· 106 106 107 107 struct pv_cpu_ops { 108 108 /* hooks for various privileged instructions */ 109 + void (*io_delay)(void); 110 + 111 + #ifdef CONFIG_PARAVIRT_XXL 109 112 unsigned long (*get_debugreg)(int regno); 110 113 void (*set_debugreg)(int regno, unsigned long value); 111 114 ··· 146 143 void (*set_iopl_mask)(unsigned mask); 147 144 148 145 void (*wbinvd)(void); 149 - void (*io_delay)(void); 150 146 151 147 /* cpuid emulation, mostly so that caps bits can be disabled */ 152 148 void (*cpuid)(unsigned int *eax, unsigned int *ebx, ··· 180 178 181 179 void (*start_context_switch)(struct task_struct *prev); 182 180 void (*end_context_switch)(struct task_struct *next); 181 + #endif 183 182 } __no_randomize_layout; 184 183 185 184 struct pv_irq_ops {
+4 -2
arch/x86/include/asm/pgtable.h
··· 111 111 #define pte_val(x) native_pte_val(x) 112 112 #define __pte(x) native_make_pte(x) 113 113 114 - #define arch_end_context_switch(prev) do {} while(0) 115 - 116 114 #endif /* CONFIG_PARAVIRT */ 115 + 116 + #ifndef CONFIG_PARAVIRT_XXL 117 + #define arch_end_context_switch(prev) do {} while(0) 118 + #endif /* CONFIG_PARAVIRT_XXL */ 117 119 118 120 /* 119 121 * The following only work if pte_present() is true.
+2 -2
arch/x86/include/asm/processor.h
··· 578 578 current_stack_pointer) < THREAD_SIZE; 579 579 } 580 580 581 - #ifdef CONFIG_PARAVIRT 581 + #ifdef CONFIG_PARAVIRT_XXL 582 582 #include <asm/paravirt.h> 583 583 #else 584 584 #define __cpuid native_cpuid ··· 589 589 } 590 590 591 591 #define set_iopl_mask native_set_iopl_mask 592 - #endif /* CONFIG_PARAVIRT */ 592 + #endif /* CONFIG_PARAVIRT_XXL */ 593 593 594 594 /* Free all resources held by a thread. */ 595 595 extern void release_thread(struct task_struct *);
+7 -2
arch/x86/include/asm/special_insns.h
··· 143 143 144 144 #ifdef CONFIG_PARAVIRT 145 145 #include <asm/paravirt.h> 146 - #else 146 + #endif 147 147 148 + #ifndef CONFIG_PARAVIRT_XXL 148 149 static inline unsigned long read_cr0(void) 149 150 { 150 151 return native_read_cr0(); ··· 155 154 { 156 155 native_write_cr0(x); 157 156 } 157 + #endif 158 158 159 + #ifndef CONFIG_PARAVIRT 159 160 static inline unsigned long read_cr2(void) 160 161 { 161 162 return native_read_cr2(); ··· 181 178 { 182 179 native_write_cr3(x); 183 180 } 181 + #endif 184 182 183 + #ifndef CONFIG_PARAVIRT_XXL 185 184 static inline void __write_cr4(unsigned long x) 186 185 { 187 186 native_write_cr4(x); ··· 213 208 214 209 #endif 215 210 216 - #endif/* CONFIG_PARAVIRT */ 211 + #endif/* CONFIG_PARAVIRT_XXL */ 217 212 218 213 static inline void clflush(volatile void *__p) 219 214 {
+2
arch/x86/kernel/asm-offsets.c
··· 68 68 BLANK(); 69 69 OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable); 70 70 OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable); 71 + #ifdef CONFIG_PARAVIRT_XXL 71 72 OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret); 73 + #endif 72 74 OFFSET(PV_MMU_read_cr2, paravirt_patch_template, mmu.read_cr2); 73 75 #endif 74 76
+2
arch/x86/kernel/asm-offsets_64.c
··· 21 21 int main(void) 22 22 { 23 23 #ifdef CONFIG_PARAVIRT 24 + #ifdef CONFIG_PARAVIRT_XXL 24 25 OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template, 25 26 cpu.usergs_sysret64); 26 27 OFFSET(PV_CPU_swapgs, paravirt_patch_template, cpu.swapgs); 28 + #endif 27 29 #ifdef CONFIG_DEBUG_ENTRY 28 30 OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl); 29 31 #endif
+1 -1
arch/x86/kernel/cpu/common.c
··· 1240 1240 * ESPFIX issue, we can change this. 1241 1241 */ 1242 1242 #ifdef CONFIG_X86_32 1243 - # ifdef CONFIG_PARAVIRT 1243 + # ifdef CONFIG_PARAVIRT_XXL 1244 1244 do { 1245 1245 extern void native_iret(void); 1246 1246 if (pv_ops.cpu.iret == native_iret)
+2
arch/x86/kernel/head_64.S
··· 31 31 #define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg 32 32 #else 33 33 #define GET_CR2_INTO(reg) movq %cr2, reg 34 + #endif 35 + #ifndef CONFIG_PARAVIRT_XXL 34 36 #define INTERRUPT_RETURN iretq 35 37 #endif 36 38
+12 -1
arch/x86/kernel/paravirt.c
··· 101 101 return 5; 102 102 } 103 103 104 + #ifdef CONFIG_PARAVIRT_XXL 104 105 static unsigned paravirt_patch_jmp(void *insnbuf, const void *target, 105 106 unsigned long addr, unsigned len) 106 107 { ··· 120 119 121 120 return 5; 122 121 } 122 + #endif 123 123 124 124 DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key); 125 125 ··· 152 150 else if (opfunc == _paravirt_ident_64) 153 151 ret = paravirt_patch_ident_64(insnbuf, len); 154 152 153 + #ifdef CONFIG_PARAVIRT_XXL 155 154 else if (type == PARAVIRT_PATCH(cpu.iret) || 156 155 type == PARAVIRT_PATCH(cpu.usergs_sysret64)) 157 156 /* If operation requires a jmp, then jmp */ 158 157 ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len); 158 + #endif 159 159 else 160 160 /* Otherwise call the function. */ 161 161 ret = paravirt_patch_call(insnbuf, opfunc, addr, len); ··· 266 262 preempt_enable(); 267 263 } 268 264 265 + #ifdef CONFIG_PARAVIRT_XXL 269 266 void paravirt_start_context_switch(struct task_struct *prev) 270 267 { 271 268 BUG_ON(preemptible()); ··· 287 282 if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES)) 288 283 arch_enter_lazy_mmu_mode(); 289 284 } 285 + #endif 290 286 291 287 enum paravirt_lazy_mode paravirt_get_lazy_mode(void) 292 288 { ··· 326 320 .time.steal_clock = native_steal_clock, 327 321 328 322 /* Cpu ops. */ 323 + .cpu.io_delay = native_io_delay, 324 + 325 + #ifdef CONFIG_PARAVIRT_XXL 329 326 .cpu.cpuid = native_cpuid, 330 327 .cpu.get_debugreg = native_get_debugreg, 331 328 .cpu.set_debugreg = native_set_debugreg, ··· 370 361 .cpu.swapgs = native_swapgs, 371 362 372 363 .cpu.set_iopl_mask = native_set_iopl_mask, 373 - .cpu.io_delay = native_io_delay, 374 364 375 365 .cpu.start_context_switch = paravirt_nop, 376 366 .cpu.end_context_switch = paravirt_nop, 367 + #endif /* CONFIG_PARAVIRT_XXL */ 377 368 378 369 /* Irq ops. 
*/ 379 370 .irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), ··· 473 464 #endif 474 465 }; 475 466 467 + #ifdef CONFIG_PARAVIRT_XXL 476 468 /* At this point, native_get/set_debugreg has real function entries */ 477 469 NOKPROBE_SYMBOL(native_get_debugreg); 478 470 NOKPROBE_SYMBOL(native_set_debugreg); 479 471 NOKPROBE_SYMBOL(native_load_idt); 472 + #endif 480 473 481 474 EXPORT_SYMBOL_GPL(pv_ops); 482 475 EXPORT_SYMBOL_GPL(pv_info);
+4
arch/x86/kernel/paravirt_patch_32.c
··· 5 5 DEF_NATIVE(irq, irq_enable, "sti"); 6 6 DEF_NATIVE(irq, restore_fl, "push %eax; popf"); 7 7 DEF_NATIVE(irq, save_fl, "pushf; pop %eax"); 8 + #ifdef CONFIG_PARAVIRT_XXL 8 9 DEF_NATIVE(cpu, iret, "iret"); 10 + #endif 9 11 DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax"); 10 12 DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3"); 11 13 DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax"); ··· 47 45 PATCH_SITE(irq, irq_enable); 48 46 PATCH_SITE(irq, restore_fl); 49 47 PATCH_SITE(irq, save_fl); 48 + #ifdef CONFIG_PARAVIRT_XXL 50 49 PATCH_SITE(cpu, iret); 50 + #endif 51 51 PATCH_SITE(mmu, read_cr2); 52 52 PATCH_SITE(mmu, read_cr3); 53 53 PATCH_SITE(mmu, write_cr3);
+5 -1
arch/x86/kernel/paravirt_patch_64.c
··· 10 10 DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax"); 11 11 DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax"); 12 12 DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3"); 13 + #ifdef CONFIG_PARAVIRT_XXL 13 14 DEF_NATIVE(cpu, wbinvd, "wbinvd"); 14 15 15 16 DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq"); 16 17 DEF_NATIVE(cpu, swapgs, "swapgs"); 18 + #endif 17 19 18 20 DEF_NATIVE(, mov32, "mov %edi, %eax"); 19 21 DEF_NATIVE(, mov64, "mov %rdi, %rax"); ··· 55 53 PATCH_SITE(irq, save_fl); 56 54 PATCH_SITE(irq, irq_enable); 57 55 PATCH_SITE(irq, irq_disable); 56 + #ifdef CONFIG_PARAVIRT_XXL 58 57 PATCH_SITE(cpu, usergs_sysret64); 59 58 PATCH_SITE(cpu, swapgs); 59 + PATCH_SITE(cpu, wbinvd); 60 + #endif 60 61 PATCH_SITE(mmu, read_cr2); 61 62 PATCH_SITE(mmu, read_cr3); 62 63 PATCH_SITE(mmu, write_cr3); 63 - PATCH_SITE(cpu, wbinvd); 64 64 #if defined(CONFIG_PARAVIRT_SPINLOCKS) 65 65 case PARAVIRT_PATCH(lock.queued_spin_unlock): 66 66 if (pv_is_native_spin_unlock()) {