Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: x86: Unify pr_fmt to use module name for all KVM modules

Define pr_fmt using KBUILD_MODNAME for all KVM x86 code so that printks
use consistent formatting across common x86, Intel, and AMD code. In
addition to providing consistent print formatting, using KBUILD_MODNAME,
e.g. kvm_amd and kvm_intel, allows referencing SVM and VMX (and SEV and
SGX and ...) as technologies without generating weird messages, and
without causing naming conflicts with other kernel code, e.g. "SEV: ",
"tdx: ", "sgx: " etc.. are all used by the kernel for non-KVM subsystems.

Opportunistically move away from printk() for prints that need to be
modified anyway, e.g. to drop a manual "kvm: " prefix.

Opportunistically convert a few SGX WARNs that are similarly modified to
WARN_ONCE; in the very unlikely event that the WARNs fire, odds are good
that they would fire repeatedly and spam the kernel log without providing
unique information in each print.

Note, defining pr_fmt yields undesirable results for code that uses KVM's
printk wrappers, e.g. vcpu_unimpl(). But, that's a pre-existing problem
as SVM/kvm_amd already defines a pr_fmt, and thankfully use of KVM's
wrappers is relatively limited in KVM x86 code.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Message-Id: <20221130230934.1014142-35-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

authored by

Sean Christopherson and committed by
Paolo Bonzini
8d20bd63 08a9d59c

+124 -99
+7 -7
arch/arm64/include/asm/kvm_host.h
··· 66 66 67 67 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); 68 68 69 - extern unsigned int kvm_sve_max_vl; 70 - int kvm_arm_init_sve(void); 69 + extern unsigned int __ro_after_init kvm_sve_max_vl; 70 + int __init kvm_arm_init_sve(void); 71 71 72 72 u32 __attribute_const__ kvm_target_cpu(void); 73 73 int kvm_reset_vcpu(struct kvm_vcpu *vcpu); ··· 877 877 878 878 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu); 879 879 880 - int kvm_sys_reg_table_init(void); 880 + int __init kvm_sys_reg_table_init(void); 881 881 882 882 /* MMIO helpers */ 883 883 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data); ··· 908 908 int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu, 909 909 struct kvm_device_attr *attr); 910 910 911 - extern unsigned int kvm_arm_vmid_bits; 912 - int kvm_arm_vmid_alloc_init(void); 913 - void kvm_arm_vmid_alloc_free(void); 911 + extern unsigned int __ro_after_init kvm_arm_vmid_bits; 912 + int __init kvm_arm_vmid_alloc_init(void); 913 + void __init kvm_arm_vmid_alloc_free(void); 914 914 void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); 915 915 void kvm_arm_vmid_clear_active(void); 916 916 ··· 993 993 void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu); 994 994 void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu); 995 995 996 - int kvm_set_ipa_limit(void); 996 + int __init kvm_set_ipa_limit(void); 997 997 998 998 #define __KVM_HAVE_ARCH_VM_ALLOC 999 999 struct kvm *kvm_arch_alloc_vm(void);
+2 -2
arch/arm64/include/asm/kvm_mmu.h
··· 163 163 void __iomem **haddr); 164 164 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, 165 165 void **haddr); 166 - void free_hyp_pgds(void); 166 + void __init free_hyp_pgds(void); 167 167 168 168 void stage2_unmap_vm(struct kvm *kvm); 169 169 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type); ··· 175 175 176 176 phys_addr_t kvm_mmu_get_httbr(void); 177 177 phys_addr_t kvm_get_idmap_vector(void); 178 - int kvm_mmu_init(u32 *hyp_va_bits); 178 + int __init kvm_mmu_init(u32 *hyp_va_bits); 179 179 180 180 static inline void *__kvm_vector_slot2addr(void *base, 181 181 enum arm64_hyp_spectre_vector slot)
+1 -1
arch/arm64/kvm/arch_timer.c
··· 1113 1113 return 0; 1114 1114 } 1115 1115 1116 - int kvm_timer_hyp_init(bool has_gic) 1116 + int __init kvm_timer_hyp_init(bool has_gic) 1117 1117 { 1118 1118 struct arch_timer_kvm_info *info; 1119 1119 int err;
+6 -6
arch/arm64/kvm/mmu.c
··· 25 25 static struct kvm_pgtable *hyp_pgtable; 26 26 static DEFINE_MUTEX(kvm_hyp_pgd_mutex); 27 27 28 - static unsigned long hyp_idmap_start; 29 - static unsigned long hyp_idmap_end; 30 - static phys_addr_t hyp_idmap_vector; 28 + static unsigned long __ro_after_init hyp_idmap_start; 29 + static unsigned long __ro_after_init hyp_idmap_end; 30 + static phys_addr_t __ro_after_init hyp_idmap_vector; 31 31 32 - static unsigned long io_map_base; 32 + static unsigned long __ro_after_init io_map_base; 33 33 34 34 static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end) 35 35 { ··· 280 280 /** 281 281 * free_hyp_pgds - free Hyp-mode page tables 282 282 */ 283 - void free_hyp_pgds(void) 283 + void __init free_hyp_pgds(void) 284 284 { 285 285 mutex_lock(&kvm_hyp_pgd_mutex); 286 286 if (hyp_pgtable) { ··· 1665 1665 .virt_to_phys = kvm_host_pa, 1666 1666 }; 1667 1667 1668 - int kvm_mmu_init(u32 *hyp_va_bits) 1668 + int __init kvm_mmu_init(u32 *hyp_va_bits) 1669 1669 { 1670 1670 int err; 1671 1671 u32 idmap_bits;
+4 -4
arch/arm64/kvm/reset.c
··· 30 30 #include <asm/virt.h> 31 31 32 32 /* Maximum phys_shift supported for any VM on this host */ 33 - static u32 kvm_ipa_limit; 33 + static u32 __ro_after_init kvm_ipa_limit; 34 34 35 35 /* 36 36 * ARMv8 Reset Values ··· 41 41 #define VCPU_RESET_PSTATE_SVC (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \ 42 42 PSR_AA32_I_BIT | PSR_AA32_F_BIT) 43 43 44 - unsigned int kvm_sve_max_vl; 44 + unsigned int __ro_after_init kvm_sve_max_vl; 45 45 46 - int kvm_arm_init_sve(void) 46 + int __init kvm_arm_init_sve(void) 47 47 { 48 48 if (system_supports_sve()) { 49 49 kvm_sve_max_vl = sve_max_virtualisable_vl(); ··· 352 352 return kvm_ipa_limit; 353 353 } 354 354 355 - int kvm_set_ipa_limit(void) 355 + int __init kvm_set_ipa_limit(void) 356 356 { 357 357 unsigned int parange; 358 358 u64 mmfr0;
+3 -3
arch/arm64/kvm/sys_regs.c
··· 82 82 } 83 83 84 84 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ 85 - static u32 cache_levels; 85 + static u32 __ro_after_init cache_levels; 86 86 87 87 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ 88 88 #define CSSELR_MAX 14 ··· 2733 2733 } 2734 2734 2735 2735 /* ->val is filled in by kvm_sys_reg_table_init() */ 2736 - static struct sys_reg_desc invariant_sys_regs[] = { 2736 + static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = { 2737 2737 { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 }, 2738 2738 { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 }, 2739 2739 { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 }, ··· 3057 3057 return write_demux_regids(uindices); 3058 3058 } 3059 3059 3060 - int kvm_sys_reg_table_init(void) 3060 + int __init kvm_sys_reg_table_init(void) 3061 3061 { 3062 3062 bool valid = true; 3063 3063 unsigned int i;
+3 -3
arch/arm64/kvm/vmid.c
··· 16 16 #include <asm/kvm_asm.h> 17 17 #include <asm/kvm_mmu.h> 18 18 19 - unsigned int kvm_arm_vmid_bits; 19 + unsigned int __ro_after_init kvm_arm_vmid_bits; 20 20 static DEFINE_RAW_SPINLOCK(cpu_vmid_lock); 21 21 22 22 static atomic64_t vmid_generation; ··· 172 172 /* 173 173 * Initialize the VMID allocator 174 174 */ 175 - int kvm_arm_vmid_alloc_init(void) 175 + int __init kvm_arm_vmid_alloc_init(void) 176 176 { 177 177 kvm_arm_vmid_bits = kvm_get_vmid_bits(); 178 178 ··· 190 190 return 0; 191 191 } 192 192 193 - void kvm_arm_vmid_alloc_free(void) 193 + void __init kvm_arm_vmid_alloc_free(void) 194 194 { 195 195 kfree(vmid_map); 196 196 }
+1
arch/x86/kvm/cpuid.c
··· 8 8 * Copyright 2011 Red Hat, Inc. and/or its affiliates. 9 9 * Copyright IBM Corporation, 2008 10 10 */ 11 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 12 12 13 #include <linux/kvm_host.h> 13 14 #include <linux/export.h>
+2
arch/x86/kvm/debugfs.c
··· 4 4 * 5 5 * Copyright 2016 Red Hat, Inc. and/or its affiliates. 6 6 */ 7 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 + 7 9 #include <linux/kvm_host.h> 8 10 #include <linux/debugfs.h> 9 11 #include "lapic.h"
+1
arch/x86/kvm/emulate.c
··· 17 17 * 18 18 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 19 19 */ 20 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20 21 21 22 #include <linux/kvm_host.h> 22 23 #include "kvm_cache_regs.h"
+1
arch/x86/kvm/hyperv.c
··· 17 17 * Ben-Ami Yassour <benami@il.ibm.com> 18 18 * Andrey Smetanin <asmetanin@virtuozzo.com> 19 19 */ 20 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20 21 21 22 #include "x86.h" 22 23 #include "lapic.h"
+2 -2
arch/x86/kvm/i8254.c
··· 30 30 * Based on QEMU and Xen. 31 31 */ 32 32 33 - #define pr_fmt(fmt) "pit: " fmt 33 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 34 34 35 35 #include <linux/kvm_host.h> 36 36 #include <linux/slab.h> ··· 351 351 352 352 if (ps->period < min_period) { 353 353 pr_info_ratelimited( 354 - "kvm: requested %lld ns " 354 + "requested %lld ns " 355 355 "i8254 timer period limited to %lld ns\n", 356 356 ps->period, min_period); 357 357 ps->period = min_period;
+3 -1
arch/x86/kvm/i8259.c
··· 26 26 * Yaozu (Eddie) Dong <Eddie.dong@intel.com> 27 27 * Port from Qemu. 28 28 */ 29 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 + 29 31 #include <linux/mm.h> 30 32 #include <linux/slab.h> 31 33 #include <linux/bitops.h> ··· 37 35 #include "trace.h" 38 36 39 37 #define pr_pic_unimpl(fmt, ...) \ 40 - pr_err_ratelimited("kvm: pic: " fmt, ## __VA_ARGS__) 38 + pr_err_ratelimited("pic: " fmt, ## __VA_ARGS__) 41 39 42 40 static void pic_irq_request(struct kvm *kvm, int level); 43 41
+1
arch/x86/kvm/ioapic.c
··· 26 26 * Yaozu (Eddie) Dong <eddie.dong@intel.com> 27 27 * Based on Xen 3.1 code. 28 28 */ 29 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29 30 30 31 #include <linux/kvm_host.h> 31 32 #include <linux/kvm.h>
+1
arch/x86/kvm/irq.c
··· 7 7 * Authors: 8 8 * Yaozu (Eddie) Dong <Eddie.dong@intel.com> 9 9 */ 10 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 11 12 #include <linux/export.h> 12 13 #include <linux/kvm_host.h>
+4 -3
arch/x86/kvm/irq_comm.c
··· 8 8 * 9 9 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 10 10 */ 11 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 12 12 13 #include <linux/kvm_host.h> 13 14 #include <linux/slab.h> ··· 57 56 58 57 if (irq->dest_mode == APIC_DEST_PHYSICAL && 59 58 irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) { 60 - printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n"); 59 + pr_info("apic: phys broadcast and lowest prio\n"); 61 60 irq->delivery_mode = APIC_DM_FIXED; 62 61 } 63 62 ··· 200 199 irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG); 201 200 202 201 if (irq_source_id >= BITS_PER_LONG) { 203 - printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n"); 202 + pr_warn("exhausted allocatable IRQ sources!\n"); 204 203 irq_source_id = -EFAULT; 205 204 goto unlock; 206 205 } ··· 222 221 mutex_lock(&kvm->irq_lock); 223 222 if (irq_source_id < 0 || 224 223 irq_source_id >= BITS_PER_LONG) { 225 - printk(KERN_ERR "kvm: IRQ source ID out of range!\n"); 224 + pr_err("IRQ source ID out of range!\n"); 226 225 goto unlock; 227 226 } 228 227 clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
+1
arch/x86/kvm/kvm_onhyperv.c
··· 2 2 /* 3 3 * KVM L1 hypervisor optimizations on Hyper-V. 4 4 */ 5 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 5 6 6 7 #include <linux/kvm_host.h> 7 8 #include <asm/mshyperv.h>
+4 -4
arch/x86/kvm/lapic.c
··· 15 15 * 16 16 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation. 17 17 */ 18 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18 19 19 20 #include <linux/kvm_host.h> 20 21 #include <linux/kvm.h> ··· 942 941 { 943 942 if (!kvm->arch.disabled_lapic_found) { 944 943 kvm->arch.disabled_lapic_found = true; 945 - printk(KERN_INFO 946 - "Disabled LAPIC found during irq injection\n"); 944 + pr_info("Disabled LAPIC found during irq injection\n"); 947 945 } 948 946 } 949 947 ··· 1560 1560 1561 1561 if (apic->lapic_timer.period < min_period) { 1562 1562 pr_info_ratelimited( 1563 - "kvm: vcpu %i: requested %lld ns " 1563 + "vcpu %i: requested %lld ns " 1564 1564 "lapic timer period limited to %lld ns\n", 1565 1565 apic->vcpu->vcpu_id, 1566 1566 apic->lapic_timer.period, min_period); ··· 1845 1845 deadline = apic->lapic_timer.period; 1846 1846 else if (unlikely(deadline > apic->lapic_timer.period)) { 1847 1847 pr_info_ratelimited( 1848 - "kvm: vcpu %i: requested lapic timer restore with " 1848 + "vcpu %i: requested lapic timer restore with " 1849 1849 "starting count register %#x=%u (%lld ns) > initial count (%lld ns). " 1850 1850 "Using initial count to start timer.\n", 1851 1851 apic->vcpu->vcpu_id,
+3 -3
arch/x86/kvm/mmu/mmu.c
··· 14 14 * Yaniv Kamay <yaniv@qumranet.com> 15 15 * Avi Kivity <avi@qumranet.com> 16 16 */ 17 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17 18 18 19 #include "irq.h" 19 20 #include "ioapic.h" ··· 3457 3456 } 3458 3457 3459 3458 if (++retry_count > 4) { 3460 - printk_once(KERN_WARNING 3461 - "kvm: Fast #PF retrying more than 4 times.\n"); 3459 + pr_warn_once("Fast #PF retrying more than 4 times.\n"); 3462 3460 break; 3463 3461 } 3464 3462 ··· 6647 6647 * zap all shadow pages. 6648 6648 */ 6649 6649 if (unlikely(gen == 0)) { 6650 - kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n"); 6650 + kvm_debug_ratelimited("zapping shadow pages for mmio generation wraparound\n"); 6651 6651 kvm_mmu_zap_all_fast(kvm); 6652 6652 } 6653 6653 }
+1
arch/x86/kvm/mmu/page_track.c
··· 10 10 * Author: 11 11 * Xiao Guangrong <guangrong.xiao@linux.intel.com> 12 12 */ 13 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13 14 14 15 #include <linux/kvm_host.h> 15 16 #include <linux/rculist.h>
+2 -2
arch/x86/kvm/mmu/spte.c
··· 7 7 * Copyright (C) 2006 Qumranet, Inc. 8 8 * Copyright 2020 Red Hat, Inc. and/or its affiliates. 9 9 */ 10 - 10 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 11 11 12 12 #include <linux/kvm_host.h> 13 13 #include "mmu.h" ··· 352 352 353 353 WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK << 354 354 SHADOW_ACC_TRACK_SAVED_BITS_SHIFT), 355 - "kvm: Access Tracking saved bit locations are not zero\n"); 355 + "Access Tracking saved bit locations are not zero\n"); 356 356 357 357 spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) << 358 358 SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
+2 -2
arch/x86/kvm/mmu/spte.h
··· 435 435 { 436 436 if (spte & shadow_mmu_writable_mask) 437 437 WARN_ONCE(!(spte & shadow_host_writable_mask), 438 - "kvm: MMU-writable SPTE is not Host-writable: %llx", 438 + KBUILD_MODNAME ": MMU-writable SPTE is not Host-writable: %llx", 439 439 spte); 440 440 else 441 441 WARN_ONCE(is_writable_pte(spte), 442 - "kvm: Writable SPTE is not MMU-writable: %llx", spte); 442 + KBUILD_MODNAME ": Writable SPTE is not MMU-writable: %llx", spte); 443 443 } 444 444 445 445 static inline bool is_mmu_writable_spte(u64 spte)
+1
arch/x86/kvm/mmu/tdp_iter.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 2 3 3 4 #include "mmu_internal.h" 4 5 #include "tdp_iter.h"
+1
arch/x86/kvm/mmu/tdp_mmu.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 2 3 3 4 #include "mmu.h" 4 5 #include "mmu_internal.h"
+1
arch/x86/kvm/mtrr.c
··· 13 13 * Paolo Bonzini <pbonzini@redhat.com> 14 14 * Xiao Guangrong <guangrong.xiao@linux.intel.com> 15 15 */ 16 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16 17 17 18 #include <linux/kvm_host.h> 18 19 #include <asm/mtrr.h>
+1
arch/x86/kvm/pmu.c
··· 9 9 * Gleb Natapov <gleb@redhat.com> 10 10 * Wei Huang <wei@redhat.com> 11 11 */ 12 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 13 13 14 #include <linux/types.h> 14 15 #include <linux/kvm_host.h>
+1
arch/x86/kvm/smm.c
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 2 3 3 4 #include <linux/kvm_host.h> 4 5 #include "x86.h"
+1 -1
arch/x86/kvm/svm/avic.c
··· 12 12 * Avi Kivity <avi@qumranet.com> 13 13 */ 14 14 15 - #define pr_fmt(fmt) "SVM: " fmt 15 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16 16 17 17 #include <linux/kvm_types.h> 18 18 #include <linux/hashtable.h>
+1 -1
arch/x86/kvm/svm/nested.c
··· 12 12 * Avi Kivity <avi@qumranet.com> 13 13 */ 14 14 15 - #define pr_fmt(fmt) "SVM: " fmt 15 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 16 16 17 17 #include <linux/kvm_types.h> 18 18 #include <linux/kvm_host.h>
+2
arch/x86/kvm/svm/pmu.c
··· 9 9 * 10 10 * Implementation is based on pmu_intel.c file 11 11 */ 12 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13 + 12 14 #include <linux/types.h> 13 15 #include <linux/kvm_host.h> 14 16 #include <linux/perf_event.h>
+1
arch/x86/kvm/svm/sev.c
··· 6 6 * 7 7 * Copyright 2010 Red Hat, Inc. and/or its affiliates. 8 8 */ 9 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10 10 11 #include <linux/kvm_types.h> 11 12 #include <linux/kvm_host.h>
+5 -5
arch/x86/kvm/svm/svm.c
··· 1 - #define pr_fmt(fmt) "SVM: " fmt 1 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 2 2 3 3 #include <linux/kvm_host.h> 4 4 ··· 2076 2076 * Erratum 383 triggered. Guest state is corrupt so kill the 2077 2077 * guest. 2078 2078 */ 2079 - pr_err("KVM: Guest triggered AMD Erratum 383\n"); 2079 + pr_err("Guest triggered AMD Erratum 383\n"); 2080 2080 2081 2081 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 2082 2082 ··· 4629 4629 smap = cr4 & X86_CR4_SMAP; 4630 4630 is_user = svm_get_cpl(vcpu) == 3; 4631 4631 if (smap && (!smep || is_user)) { 4632 - pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n"); 4632 + pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n"); 4633 4633 4634 4634 /* 4635 4635 * If the fault occurred in userspace, arbitrarily inject #GP ··· 4978 4978 } 4979 4979 4980 4980 if (nested) { 4981 - printk(KERN_INFO "kvm: Nested Virtualization enabled\n"); 4981 + pr_info("Nested Virtualization enabled\n"); 4982 4982 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE); 4983 4983 } 4984 4984 ··· 4996 4996 /* Force VM NPT level equal to the host's paging level */ 4997 4997 kvm_configure_mmu(npt_enabled, get_npt_level(), 4998 4998 get_npt_level(), PG_LEVEL_1G); 4999 - pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis"); 4999 + pr_info("Nested Paging %sabled\n", npt_enabled ? "en" : "dis"); 5000 5000 5001 5001 /* Setup shadow_me_value and shadow_me_mask */ 5002 5002 kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
+1
arch/x86/kvm/svm/svm_onhyperv.c
··· 2 2 /* 3 3 * KVM L1 hypervisor optimizations on Hyper-V for SVM. 4 4 */ 5 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 5 6 6 7 #include <linux/kvm_host.h> 7 8
+2 -2
arch/x86/kvm/svm/svm_onhyperv.h
··· 34 34 { 35 35 if (npt_enabled && 36 36 ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) { 37 - pr_info("kvm: Hyper-V enlightened NPT TLB flush enabled\n"); 37 + pr_info(KBUILD_MODNAME ": Hyper-V enlightened NPT TLB flush enabled\n"); 38 38 svm_x86_ops.tlb_remote_flush = hv_remote_flush_tlb; 39 39 svm_x86_ops.tlb_remote_flush_with_range = 40 40 hv_remote_flush_tlb_with_range; ··· 43 43 if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) { 44 44 int cpu; 45 45 46 - pr_info("kvm: Hyper-V Direct TLB Flush enabled\n"); 46 + pr_info(KBUILD_MODNAME ": Hyper-V Direct TLB Flush enabled\n"); 47 47 for_each_online_cpu(cpu) { 48 48 struct hv_vp_assist_page *vp_ap = 49 49 hv_get_vp_assist_page(cpu);
+1 -2
arch/x86/kvm/vmx/hyperv.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - 3 - #define pr_fmt(fmt) "kvm/hyper-v: " fmt 2 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 4 3 5 4 #include <linux/errno.h> 6 5 #include <linux/smp.h>
+1 -3
arch/x86/kvm/vmx/hyperv.h
··· 179 179 { 180 180 int offset = evmcs_field_offset(field, clean_field); 181 181 182 - WARN_ONCE(offset < 0, "KVM: accessing unsupported EVMCS field %lx\n", 183 - field); 184 - 182 + WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field); 185 183 return offset; 186 184 } 187 185
+2 -1
arch/x86/kvm/vmx/nested.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 2 3 3 4 #include <linux/objtool.h> 4 5 #include <linux/percpu.h> ··· 204 203 { 205 204 /* TODO: not to reset guest simply here. */ 206 205 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); 207 - pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator); 206 + pr_debug_ratelimited("nested vmx abort, indicator %d\n", indicator); 208 207 } 209 208 210 209 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
+3 -2
arch/x86/kvm/vmx/pmu_intel.c
··· 8 8 * Avi Kivity <avi@redhat.com> 9 9 * Gleb Natapov <gleb@redhat.com> 10 10 */ 11 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12 + 11 13 #include <linux/types.h> 12 14 #include <linux/kvm_host.h> 13 15 #include <linux/perf_event.h> ··· 764 762 return; 765 763 766 764 warn: 767 - pr_warn_ratelimited("kvm: vcpu-%d: fail to passthrough LBR.\n", 768 - vcpu->vcpu_id); 765 + pr_warn_ratelimited("vcpu-%d: fail to passthrough LBR.\n", vcpu->vcpu_id); 769 766 } 770 767 771 768 static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
+2
arch/x86/kvm/vmx/posted_intr.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3 + 2 4 #include <linux/kvm_host.h> 3 5 4 6 #include <asm/irq_remapping.h>
+3 -2
arch/x86/kvm/vmx/sgx.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* Copyright(c) 2021 Intel Corporation. */ 3 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 3 4 4 5 #include <asm/sgx.h> 5 6 ··· 165 164 if (!vcpu->kvm->arch.sgx_provisioning_allowed && 166 165 (attributes & SGX_ATTR_PROVISIONKEY)) { 167 166 if (sgx_12_1->eax & SGX_ATTR_PROVISIONKEY) 168 - pr_warn_once("KVM: SGX PROVISIONKEY advertised but not allowed\n"); 167 + pr_warn_once("SGX PROVISIONKEY advertised but not allowed\n"); 169 168 kvm_inject_gp(vcpu, 0); 170 169 return 1; 171 170 } ··· 382 381 return handle_encls_ecreate(vcpu); 383 382 if (leaf == EINIT) 384 383 return handle_encls_einit(vcpu); 385 - WARN(1, "KVM: unexpected exit on ENCLS[%u]", leaf); 384 + WARN_ONCE(1, "unexpected exit on ENCLS[%u]", leaf); 386 385 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; 387 386 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_ENCLS; 388 387 return 0;
+1
arch/x86/kvm/vmx/vmcs12.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 2 3 3 4 #include "vmcs12.h" 4 5
+19 -21
arch/x86/kvm/vmx/vmx.c
··· 12 12 * Avi Kivity <avi@qumranet.com> 13 13 * Yaniv Kamay <yaniv@qumranet.com> 14 14 */ 15 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 16 16 17 #include <linux/highmem.h> 17 18 #include <linux/hrtimer.h> ··· 445 444 if (fault) 446 445 kvm_spurious_fault(); 447 446 else 448 - vmx_insn_failed("kvm: vmread failed: field=%lx\n", field); 447 + vmx_insn_failed("vmread failed: field=%lx\n", field); 449 448 } 450 449 451 450 noinline void vmwrite_error(unsigned long field, unsigned long value) 452 451 { 453 - vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%u\n", 452 + vmx_insn_failed("vmwrite failed: field=%lx val=%lx err=%u\n", 454 453 field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); 455 454 } 456 455 457 456 noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr) 458 457 { 459 - vmx_insn_failed("kvm: vmclear failed: %p/%llx err=%u\n", 458 + vmx_insn_failed("vmclear failed: %p/%llx err=%u\n", 460 459 vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR)); 461 460 } 462 461 463 462 noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr) 464 463 { 465 - vmx_insn_failed("kvm: vmptrld failed: %p/%llx err=%u\n", 464 + vmx_insn_failed("vmptrld failed: %p/%llx err=%u\n", 466 465 vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR)); 467 466 } 468 467 469 468 noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva) 470 469 { 471 - vmx_insn_failed("kvm: invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n", 470 + vmx_insn_failed("invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n", 472 471 ext, vpid, gva); 473 472 } 474 473 475 474 noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa) 476 475 { 477 - vmx_insn_failed("kvm: invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n", 476 + vmx_insn_failed("invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n", 478 477 ext, eptp, gpa); 479 478 } 480 479 ··· 578 577 } 579 578 580 579 if (enlightened_vmcs) { 581 - pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n"); 580 + pr_info("Using Hyper-V Enlightened 
VMCS\n"); 582 581 static_branch_enable(&enable_evmcs); 583 582 } 584 583 ··· 1681 1680 if (!instr_len) 1682 1681 goto rip_updated; 1683 1682 1684 - WARN(exit_reason.enclave_mode, 1685 - "KVM: skipping instruction after SGX enclave VM-Exit"); 1683 + WARN_ONCE(exit_reason.enclave_mode, 1684 + "skipping instruction after SGX enclave VM-Exit"); 1686 1685 1687 1686 orig_rip = kvm_rip_read(vcpu); 1688 1687 rip = orig_rip + instr_len; ··· 3025 3024 var.type = 0x3; 3026 3025 var.avl = 0; 3027 3026 if (save->base & 0xf) 3028 - printk_once(KERN_WARNING "kvm: segment base is not " 3029 - "paragraph aligned when entering " 3030 - "protected mode (seg=%d)", seg); 3027 + pr_warn_once("segment base is not paragraph aligned " 3028 + "when entering protected mode (seg=%d)", seg); 3031 3029 } 3032 3030 3033 3031 vmcs_write16(sf->selector, var.selector); ··· 3056 3056 * vcpu. Warn the user that an update is overdue. 3057 3057 */ 3058 3058 if (!kvm_vmx->tss_addr) 3059 - printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be " 3060 - "called before entering vcpu\n"); 3059 + pr_warn_once("KVM_SET_TSS_ADDR needs to be called before running vCPU\n"); 3061 3060 3062 3061 vmx_segment_cache_clear(vmx); 3063 3062 ··· 6924 6925 gate_desc *desc = (gate_desc *)host_idt_base + vector; 6925 6926 6926 6927 if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm, 6927 - "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info)) 6928 + "unexpected VM-Exit interrupt info: 0x%x", intr_info)) 6928 6929 return; 6929 6930 6930 6931 handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc)); ··· 7529 7530 7530 7531 if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || 7531 7532 !this_cpu_has(X86_FEATURE_VMX)) { 7532 - pr_err("kvm: VMX is disabled on CPU %d\n", smp_processor_id()); 7533 + pr_err("VMX is disabled on CPU %d\n", smp_processor_id()); 7533 7534 return -EIO; 7534 7535 } 7535 7536 ··· 7538 7539 if (nested) 7539 7540 nested_vmx_setup_ctls_msrs(&vmcs_conf, vmx_cap.ept); 7540 7541 if (memcmp(&vmcs_config, 
&vmcs_conf, sizeof(struct vmcs_config)) != 0) { 7541 - printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", 7542 - smp_processor_id()); 7542 + pr_err("CPU %d feature inconsistency!\n", smp_processor_id()); 7543 7543 return -EIO; 7544 7544 } 7545 7545 return 0; ··· 8363 8365 return -EIO; 8364 8366 8365 8367 if (cpu_has_perf_global_ctrl_bug()) 8366 - pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " 8368 + pr_warn_once("VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL " 8367 8369 "does not work properly. Using workaround\n"); 8368 8370 8369 8371 if (boot_cpu_has(X86_FEATURE_NX)) ··· 8371 8373 8372 8374 if (boot_cpu_has(X86_FEATURE_MPX)) { 8373 8375 rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs); 8374 - WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost"); 8376 + WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost"); 8375 8377 } 8376 8378 8377 8379 if (!cpu_has_vmx_mpx()) ··· 8390 8392 8391 8393 /* NX support is required for shadow paging. */ 8392 8394 if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) { 8393 - pr_err_ratelimited("kvm: NX (Execute Disable) not supported\n"); 8395 + pr_err_ratelimited("NX (Execute Disable) not supported\n"); 8394 8396 return -EOPNOTSUPP; 8395 8397 } 8396 8398
+2 -2
arch/x86/kvm/vmx/vmx_ops.h
··· 100 100 return value; 101 101 102 102 do_fail: 103 - WARN_ONCE(1, "kvm: vmread failed: field=%lx\n", field); 104 - pr_warn_ratelimited("kvm: vmread failed: field=%lx\n", field); 103 + WARN_ONCE(1, KBUILD_MODNAME ": vmread failed: field=%lx\n", field); 104 + pr_warn_ratelimited(KBUILD_MODNAME ": vmread failed: field=%lx\n", field); 105 105 return 0; 106 106 107 107 do_exception:
+15 -13
arch/x86/kvm/x86.c
··· 15 15 * Amit Shah <amit.shah@qumranet.com> 16 16 * Ben-Ami Yassour <benami@il.ibm.com> 17 17 */ 18 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18 19 19 20 #include <linux/kvm_host.h> 20 21 #include "irq.h" ··· 2088 2087 !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT)) 2089 2088 return kvm_handle_invalid_op(vcpu); 2090 2089 2091 - pr_warn_once("kvm: %s instruction emulated as NOP!\n", insn); 2090 + pr_warn_once("%s instruction emulated as NOP!\n", insn); 2092 2091 return kvm_emulate_as_nop(vcpu); 2093 2092 } 2094 2093 int kvm_emulate_mwait(struct kvm_vcpu *vcpu) ··· 2435 2434 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); 2436 2435 thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); 2437 2436 if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) { 2438 - pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi); 2437 + pr_debug("requested TSC rate %u falls outside tolerance [%u,%u]\n", 2438 + user_tsc_khz, thresh_lo, thresh_hi); 2439 2439 use_scaling = 1; 2440 2440 } 2441 2441 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); ··· 7704 7702 return X86EMUL_CONTINUE; 7705 7703 7706 7704 emul_write: 7707 - printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); 7705 + pr_warn_once("emulating exchange as write\n"); 7708 7706 7709 7707 return emulator_write_emulated(ctxt, addr, new, bytes, exception); 7710 7708 } ··· 8265 8263 8266 8264 ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT); 8267 8265 if (!ctxt) { 8268 - pr_err("kvm: failed to allocate vcpu's emulator\n"); 8266 + pr_err("failed to allocate vcpu's emulator\n"); 8269 8267 return NULL; 8270 8268 } 8271 8269 ··· 9326 9324 int r, cpu; 9327 9325 9328 9326 if (kvm_x86_ops.hardware_enable) { 9329 - pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name); 9327 + pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name); 9330 9328 return -EEXIST; 9331 9329 } 9332 9330 9333 9331 if 
(!ops->cpu_has_kvm_support()) { 9334 - pr_err_ratelimited("kvm: no hardware support for '%s'\n", 9332 + pr_err_ratelimited("no hardware support for '%s'\n", 9335 9333 ops->runtime_ops->name); 9336 9334 return -EOPNOTSUPP; 9337 9335 } 9338 9336 if (ops->disabled_by_bios()) { 9339 - pr_err_ratelimited("kvm: support for '%s' disabled by bios\n", 9337 + pr_err_ratelimited("support for '%s' disabled by bios\n", 9340 9338 ops->runtime_ops->name); 9341 9339 return -EOPNOTSUPP; 9342 9340 } ··· 9347 9345 * vCPU's FPU state as a fxregs_state struct. 9348 9346 */ 9349 9347 if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) { 9350 - printk(KERN_ERR "kvm: inadequate fpu\n"); 9348 + pr_err("inadequate fpu\n"); 9351 9349 return -EOPNOTSUPP; 9352 9350 } 9353 9351 ··· 9365 9363 */ 9366 9364 if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) || 9367 9365 (host_pat & GENMASK(2, 0)) != 6) { 9368 - pr_err("kvm: host PAT[0] is not WB\n"); 9366 + pr_err("host PAT[0] is not WB\n"); 9369 9367 return -EIO; 9370 9368 } 9371 9369 9372 9370 x86_emulator_cache = kvm_alloc_emulator_cache(); 9373 9371 if (!x86_emulator_cache) { 9374 - pr_err("kvm: failed to allocate cache for x86 emulator\n"); 9372 + pr_err("failed to allocate cache for x86 emulator\n"); 9375 9373 return -ENOMEM; 9376 9374 } 9377 9375 9378 9376 user_return_msrs = alloc_percpu(struct kvm_user_return_msrs); 9379 9377 if (!user_return_msrs) { 9380 - printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n"); 9378 + pr_err("failed to allocate percpu kvm_user_return_msrs\n"); 9381 9379 r = -ENOMEM; 9382 9380 goto out_free_x86_emulator_cache; 9383 9381 } ··· 11649 11647 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) 11650 11648 { 11651 11649 if (kvm_check_tsc_unstable() && kvm->created_vcpus) 11652 - pr_warn_once("kvm: SMP vm created on host with unstable TSC; " 11650 + pr_warn_once("SMP vm created on host with unstable TSC; " 11653 11651 "guest TSC will not be reliable\n"); 11654 11652 11655 
11653 if (!kvm->arch.max_vcpu_ids) ··· 11726 11724 goto free_wbinvd_dirty_mask; 11727 11725 11728 11726 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { 11729 - pr_err("kvm: failed to allocate vcpu's fpu\n"); 11727 + pr_err("failed to allocate vcpu's fpu\n"); 11730 11728 goto free_emulate_ctxt; 11731 11729 } 11732 11730
+1
arch/x86/kvm/xen.c
··· 5 5 * 6 6 * KVM Xen emulation 7 7 */ 8 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 9 10 #include "x86.h" 10 11 #include "xen.h"
+1 -1
include/kvm/arm_arch_timer.h
··· 60 60 bool enabled; 61 61 }; 62 62 63 - int kvm_timer_hyp_init(bool); 63 + int __init kvm_timer_hyp_init(bool has_gic); 64 64 int kvm_timer_enable(struct kvm_vcpu *vcpu); 65 65 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); 66 66 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);