Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
"More bugfixes, including a few remaining "make W=1" issues such as too
large frame sizes on some configurations.

On the ARM side, the compiler was messing up shadow stacks between EL1
and EL2 code, which is easily fixed with __always_inline"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: VMX: check descriptor table exits on instruction emulation
kvm: x86: Limit the number of "kvm: disabled by bios" messages
KVM: x86: avoid useless copy of cpufreq policy
KVM: allow disabling -Werror
KVM: x86: allow compiling as non-module with W=1
KVM: Pre-allocate 1 cpumask variable per cpu for both pv tlb and pv ipis
KVM: Introduce pv check helpers
KVM: let declaration of kvm_get_running_vcpus match implementation
KVM: SVM: allocate AVIC data structures based on kvm_amd module parameter
arm64: Ask the compiler to __always_inline functions used by KVM at HYP
KVM: arm64: Define our own swab32() to avoid a uapi static inline
KVM: arm64: Ask the compiler to __always_inline functions used at HYP
kvm: arm/arm64: Fold VHE entry/exit work into kvm_vcpu_run_vhe()
KVM: arm/arm64: Fix up includes for trace.h
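
For context on the arm64 patches below, here is a minimal illustrative sketch (the helper name is made up; it is not part of this merge) of the pattern they apply: helpers called from hyp (EL2) code are marked __always_inline so the compiler cannot emit an out-of-line copy in regular EL1 kernel text.

/* Illustrative sketch only -- hypothetical helper, not from this merge. */
#include <linux/compiler.h>	/* __always_inline */

/*
 * "static inline" is only a hint; under some configurations the compiler
 * emits an out-of-line copy in normal kernel text. If EL2 (hyp) code then
 * calls that copy, it branches into EL1 text -- the shadow-stack confusion
 * the pull message describes. Forcing the inline keeps the body at every
 * call site, in whatever section the caller is built into.
 */
static __always_inline bool example_hyp_flag_set(unsigned long flags)
{
	return flags & 1UL;
}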

Changed files
+171 -107
-3
arch/arm/include/asm/kvm_host.h
···
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}

-static inline void kvm_arm_vhe_guest_enter(void) {}
-static inline void kvm_arm_vhe_guest_exit(void) {}
-
 #define KVM_BP_HARDEN_UNKNOWN -1
 #define KVM_BP_HARDEN_WA_NEEDED 0
 #define KVM_BP_HARDEN_NOT_REQUIRED 1
+1 -1
arch/arm64/include/asm/arch_gicv3.h
···
 	isb();
 }

-static inline void gic_write_dir(u32 irq)
+static __always_inline void gic_write_dir(u32 irq)
 {
 	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
 	isb();
+1 -1
arch/arm64/include/asm/cache.h
···
 	return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }

-static inline int icache_is_vpipt(void)
+static __always_inline int icache_is_vpipt(void)
 {
 	return test_bit(ICACHEF_VPIPT, &__icache_flags);
 }
+1 -1
arch/arm64/include/asm/cacheflush.h
···
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);

-static inline void __flush_icache_all(void)
+static __always_inline void __flush_icache_all(void)
 {
 	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
 		return;
+5 -5
arch/arm64/include/asm/cpufeature.h
···
 	return cpuid_feature_extract_signed_field_width(features, field, 4);
 }

-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
 {
 	return (u64)(features << (64 - width - field)) >> (64 - width);
 }

-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
 cpuid_feature_extract_unsigned_field(u64 features, int field)
 {
 	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
···
 	return val == 0x1;
 }

-static inline bool system_supports_fpsimd(void)
+static __always_inline bool system_supports_fpsimd(void)
 {
 	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
 }
···
 		!cpus_have_const_cap(ARM64_HAS_PAN);
 }

-static inline bool system_supports_sve(void)
+static __always_inline bool system_supports_sve(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SVE) &&
 		cpus_have_const_cap(ARM64_SVE);
 }

-static inline bool system_supports_cnp(void)
+static __always_inline bool system_supports_cnp(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_CNP) &&
 		cpus_have_const_cap(ARM64_HAS_CNP);
+2 -2
arch/arm64/include/asm/io.h
···
 }

 #define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
 }
···
 }

 #define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
 {
 	u32 val;
 	asm volatile(ALTERNATIVE("ldr %w0, [%1]",
+24 -24
arch/arm64/include/asm/kvm_emulate.h
···
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

-static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
···
 	vcpu->arch.vsesr_el2 = vsesr;
 }

-static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
 }
···
 	*__vcpu_elr_el1(vcpu) = v;
 }

-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
 }

-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
 }

-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		return kvm_condition_valid32(vcpu);
···
  * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
  * AArch32 with banked registers.
  */
-static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
 					 u8 reg_num)
 {
 	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }

-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 				unsigned long val)
 {
 	if (reg_num != 31)
···
 	return mode != PSR_MODE_EL0t;
 }

-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }

-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);

···
 	return -1;
 }

-static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.far_el2;
 }

-static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 {
 	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 }
···
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }

-static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }
···
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
 }

-static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
 	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }

-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }

-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
···
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
 }

-static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
 	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }

 /* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
 }

-static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
 	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
 }
···
 	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }

-static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
 }

-static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }

-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
···
 	}
 }

-static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
···
 	return data;	/* Leave LE untouched */
 }

-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
 	if (vcpu_mode_is_32bit(vcpu))
 		kvm_skip_instr32(vcpu, is_wide_instr);
···
  * Skip an instruction which has been emulated at hyp while most guest sysregs
  * are live.
  */
-static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
 	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
-32
arch/arm64/include/asm/kvm_host.h
···
 static inline void kvm_clr_pmu_events(u32 clr) {}
 #endif

-static inline void kvm_arm_vhe_guest_enter(void)
-{
-	local_daif_mask();
-
-	/*
-	 * Having IRQs masked via PMR when entering the guest means the GIC
-	 * will not signal the CPU of interrupts of lower priority, and the
-	 * only way to get out will be via guest exceptions.
-	 * Naturally, we want to avoid this.
-	 *
-	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
-	 * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
-	 */
-	pmr_sync();
-}
-
-static inline void kvm_arm_vhe_guest_exit(void)
-{
-	/*
-	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
-	 * and the GIC PMR if the host is using IRQ priorities.
-	 */
-	local_daif_restore(DAIF_PROCCTX_NOIRQ);
-
-	/*
-	 * When we exit from the guest we change a number of CPU configuration
-	 * parameters, such as traps. Make sure these changes take effect
-	 * before running the host or additional guests.
-	 */
-	isb();
-}
-
 #define KVM_BP_HARDEN_UNKNOWN -1
 #define KVM_BP_HARDEN_WA_NEEDED 0
 #define KVM_BP_HARDEN_NOT_REQUIRED 1
+7
arch/arm64/include/asm/kvm_hyp.h
···
 #define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1)

+/*
+ * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
+ * static inline can allow the compiler to out-of-line this. KVM always wants
+ * the macro version as its always inlined.
+ */
+#define __kvm_swab32(x) ___constant_swab32(x)
+
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+2 -1
arch/arm64/include/asm/kvm_mmu.h
···
 			      __le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);

-static inline unsigned long __kern_hyp_va(unsigned long v)
+static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
 	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
 				    "ror %0, %0, #1\n"
···
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;

+/* This is only called on a VHE system */
 static inline void *kvm_get_hyp_vector(void)
 {
 	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
+1 -1
arch/arm64/include/asm/virt.h
···
 	return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }

-static inline bool has_vhe(void)
+static __always_inline bool has_vhe(void)
 {
 	if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
 		return true;
+37 -2
arch/arm64/kvm/hyp/switch.c
···
 }

 /* Switch to the guest for VHE systems running in EL2 */
-int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_cpu_context *guest_ctxt;
···

 	return exit_code;
 }
-NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
+NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
+
+int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+{
+	int ret;
+
+	local_daif_mask();
+
+	/*
+	 * Having IRQs masked via PMR when entering the guest means the GIC
+	 * will not signal the CPU of interrupts of lower priority, and the
+	 * only way to get out will be via guest exceptions.
+	 * Naturally, we want to avoid this.
+	 *
+	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
+	 * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
+	 */
+	pmr_sync();
+
+	ret = __kvm_vcpu_run_vhe(vcpu);
+
+	/*
+	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
+	 * and the GIC PMR if the host is using IRQ priorities.
+	 */
+	local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+	/*
+	 * When we exit from the guest we change a number of CPU configuration
+	 * parameters, such as traps. Make sure these changes take effect
+	 * before running the host or additional guests.
+	 */
+	isb();
+
+	return ret;
+}

 /* Switch to the guest for legacy non-VHE systems */
 int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
+2 -2
arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
···
 		u32 data = vcpu_get_reg(vcpu, rd);
 		if (__is_be(vcpu)) {
 			/* guest pre-swabbed data, undo this for writel() */
-			data = swab32(data);
+			data = __kvm_swab32(data);
 		}
 		writel_relaxed(data, addr);
 	} else {
 		u32 data = readl_relaxed(addr);
 		if (__is_be(vcpu)) {
 			/* guest expects swabbed data */
-			data = swab32(data);
+			data = __kvm_swab32(data);
 		}
 		vcpu_set_reg(vcpu, rd, data);
 	}
+44 -21
arch/x86/kernel/kvm.c
···
 	}
 }

+static bool pv_tlb_flush_supported(void)
+{
+	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
 #ifdef CONFIG_SMP
+
+static bool pv_ipi_supported(void)
+{
+	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
+}
+
+static bool pv_sched_yield_supported(void)
+{
+	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
+		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
 #define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)

 static void __send_ipi_mask(const struct cpumask *mask, int vector)
···
 static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
 {
 	unsigned int this_cpu = smp_processor_id();
-	struct cpumask new_mask;
+	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
 	const struct cpumask *local_mask;

-	cpumask_copy(&new_mask, mask);
-	cpumask_clear_cpu(this_cpu, &new_mask);
-	local_mask = &new_mask;
+	cpumask_copy(new_mask, mask);
+	cpumask_clear_cpu(this_cpu, new_mask);
+	local_mask = new_mask;
 	__send_ipi_mask(local_mask, vector);
 }
···
 	update_intr_gate(X86_TRAP_PF, async_page_fault);
 }

-static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);

 static void kvm_flush_tlb_others(const struct cpumask *cpumask,
 			const struct flush_tlb_info *info)
···
 	u8 state;
 	int cpu;
 	struct kvm_steal_time *src;
-	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
+	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

 	cpumask_copy(flushmask, cpumask);
 	/*
···
 		pv_ops.time.steal_clock = kvm_steal_clock;
 	}

-	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_tlb_flush_supported()) {
 		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
 		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+		pr_info("KVM setup pv remote TLB flush\n");
 	}

 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
···
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
-	if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_sched_yield_supported()) {
 		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
 		pr_info("KVM setup pv sched yield\n");
 	}
···
 static void __init kvm_apic_init(void)
 {
 #if defined(CONFIG_SMP)
-	if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+	if (pv_ipi_supported())
 		kvm_setup_pv_ipi();
 #endif
 }
···
 }
 arch_initcall(activate_jump_labels);

-static __init int kvm_setup_pv_tlb_flush(void)
+static __init int kvm_alloc_cpumask(void)
 {
 	int cpu;
+	bool alloc = false;

 	if (!kvm_para_available() || nopv)
 		return 0;

-	if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-	    !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
-	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+	if (pv_tlb_flush_supported())
+		alloc = true;
+
+#if defined(CONFIG_SMP)
+	if (pv_ipi_supported())
+		alloc = true;
+#endif
+
+	if (alloc)
 		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
+			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
 				GFP_KERNEL, cpu_to_node(cpu));
 		}
-		pr_info("KVM setup pv remote TLB flush\n");
-	}

 	return 0;
 }
-arch_initcall(kvm_setup_pv_tlb_flush);
+arch_initcall(kvm_alloc_cpumask);

 #ifdef CONFIG_PARAVIRT_SPINLOCKS
+13
arch/x86/kvm/Kconfig
···

 	  If unsure, say N.

+config KVM_WERROR
+	bool "Compile KVM with -Werror"
+	# KASAN may cause the build to fail due to larger frames
+	default y if X86_64 && !KASAN
+	# We use the dependency on !COMPILE_TEST to not be enabled
+	# blindly in allmodconfig or allyesconfig configurations
+	depends on (X86_64 && !KASAN) || !COMPILE_TEST
+	depends on EXPERT
+	help
+	  Add -Werror to the build flags for (and only for) i915.ko.
+
+	  If in doubt, say "N".
+
 config KVM_INTEL
 	tristate "KVM for Intel (and compatible) processors support"
 	depends on KVM && IA32_FEAT_CTL
+1
arch/x86/kvm/Makefile
···
 # SPDX-License-Identifier: GPL-2.0

 ccflags-y += -Iarch/x86/kvm
+ccflags-$(CONFIG_KVM_WERROR) += -Werror

 KVM := ../../../virt/kvm

+4 -1
arch/x86/kvm/svm.c
···
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");

+#ifdef MODULE
 static const struct x86_cpu_id svm_cpu_id[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_SVM),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
+#endif

 #define IOPM_ALLOC_ORDER 2
 #define MSRPM_ALLOC_ORDER 1
···
 static int avic_init_vcpu(struct vcpu_svm *svm)
 {
 	int ret;
+	struct kvm_vcpu *vcpu = &svm->vcpu;

-	if (!kvm_vcpu_apicv_active(&svm->vcpu))
+	if (!avic || !irqchip_in_kernel(vcpu->kvm))
 		return 0;

 	ret = avic_init_backing_page(&svm->vcpu);
+17
arch/x86/kvm/vmx/vmx.c
···
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");

+#ifdef MODULE
 static const struct x86_cpu_id vmx_cpu_id[] = {
 	X86_FEATURE_MATCH(X86_FEATURE_VMX),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+#endif

 bool __read_mostly enable_vpid = 1;
 module_param_named(vpid, enable_vpid, bool, 0444);
···
 	else
 		intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);

+	/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
 	return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
 }

···
 	case x86_intercept_out:
 	case x86_intercept_outs:
 		return vmx_check_intercept_io(vcpu, info);
+
+	case x86_intercept_lgdt:
+	case x86_intercept_lidt:
+	case x86_intercept_lldt:
+	case x86_intercept_ltr:
+	case x86_intercept_sgdt:
+	case x86_intercept_sidt:
+	case x86_intercept_sldt:
+	case x86_intercept_str:
+		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
+			return X86EMUL_CONTINUE;
+
+		/* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
+		break;

 	/* TODO: check more intercepts... */
 	default:
+7 -7
arch/x86/kvm/x86.c
···

 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
 #ifdef CONFIG_CPU_FREQ
-		struct cpufreq_policy policy;
+		struct cpufreq_policy *policy;
 		int cpu;

-		memset(&policy, 0, sizeof(policy));
 		cpu = get_cpu();
-		cpufreq_get_policy(&policy, cpu);
-		if (policy.cpuinfo.max_freq)
-			max_tsc_khz = policy.cpuinfo.max_freq;
+		policy = cpufreq_cpu_get(cpu);
+		if (policy && policy->cpuinfo.max_freq)
+			max_tsc_khz = policy->cpuinfo.max_freq;
 		put_cpu();
+		cpufreq_cpu_put(policy);
 #endif
 		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
 					  CPUFREQ_TRANSITION_NOTIFIER);
···
 	}

 	if (!ops->cpu_has_kvm_support()) {
-		printk(KERN_ERR "kvm: no hardware support\n");
+		pr_err_ratelimited("kvm: no hardware support\n");
 		r = -EOPNOTSUPP;
 		goto out;
 	}
 	if (ops->disabled_by_bios()) {
-		printk(KERN_ERR "kvm: disabled by bios\n");
+		pr_err_ratelimited("kvm: disabled by bios\n");
 		r = -EOPNOTSUPP;
 		goto out;
 	}
+1 -1
include/linux/kvm_host.h
···
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

 struct kvm_vcpu *kvm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
 bool kvm_arch_has_irq_bypass(void);
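
As a hedged illustration of why the annotation placement matters (a sketch of the general shape only, not a verbatim copy of virt/kvm/kvm_main.c): the corrected return type describes a per-cpu slot that holds a vcpu pointer, which is exactly what taking the address of a DEFINE_PER_CPU pointer variable yields.

/* Sketch, for illustration; running_vcpu_on() is a hypothetical helper. */
#include <linux/percpu.h>

struct kvm_vcpu;

static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

/* &kvm_running_vcpu has type "struct kvm_vcpu * __percpu *". */
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{
	return &kvm_running_vcpu;
}

/* A caller reads one CPU's slot through per_cpu_ptr(). */
static struct kvm_vcpu *running_vcpu_on(int cpu)
{
	return *per_cpu_ptr(kvm_get_running_vcpus(), cpu);
}
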
-2
virt/kvm/arm/arm.c
···
 		guest_enter_irqoff();

 		if (has_vhe()) {
-			kvm_arm_vhe_guest_enter();
 			ret = kvm_vcpu_run_vhe(vcpu);
-			kvm_arm_vhe_guest_exit();
 		} else {
 			ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
 		}
+1
virt/kvm/arm/trace.h
···

 #include <kvm/arm_arch_timer.h>
 #include <linux/tracepoint.h>
+#include <asm/kvm_arm.h>

 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm