Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 platform updates from Ingo Molnar:
"The main changes in this cycle were:

- a refactoring of the early virt init code by merging 'struct
x86_hyper' into 'struct x86_platform' and 'struct x86_init', which
allows simplifications and also the addition of a new
->guest_late_init() callback. (Juergen Gross)

- timer_setup() conversion of the UV code (Kees Cook)"

* 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/virt/xen: Use guest_late_init to detect Xen PVH guest
x86/virt, x86/platform: Add ->guest_late_init() callback to hypervisor_x86 structure
x86/virt, x86/acpi: Add test for ACPI_FADT_NO_VGA
x86/virt: Add enum for hypervisors to replace x86_hyper
x86/virt, x86/platform: Merge 'struct x86_hyper' into 'struct x86_platform' and 'struct x86_init'
x86/platform/UV: Convert timers to use timer_setup()

+156 -109
+1 -1
arch/x86/hyperv/hv_init.c
··· 113 113 u64 guest_id; 114 114 union hv_x64_msr_hypercall_contents hypercall_msr; 115 115 116 - if (x86_hyper != &x86_hyper_ms_hyperv) 116 + if (x86_hyper_type != X86_HYPER_MS_HYPERV) 117 117 return; 118 118 119 119 /* Allocate percpu VP index */
+18 -28
arch/x86/include/asm/hypervisor.h
··· 23 23 #ifdef CONFIG_HYPERVISOR_GUEST 24 24 25 25 #include <asm/kvm_para.h> 26 + #include <asm/x86_init.h> 26 27 #include <asm/xen/hypervisor.h> 27 28 28 29 /* 29 30 * x86 hypervisor information 30 31 */ 32 + 33 + enum x86_hypervisor_type { 34 + X86_HYPER_NATIVE = 0, 35 + X86_HYPER_VMWARE, 36 + X86_HYPER_MS_HYPERV, 37 + X86_HYPER_XEN_PV, 38 + X86_HYPER_XEN_HVM, 39 + X86_HYPER_KVM, 40 + }; 41 + 31 42 struct hypervisor_x86 { 32 43 /* Hypervisor name */ 33 44 const char *name; ··· 46 35 /* Detection routine */ 47 36 uint32_t (*detect)(void); 48 37 49 - /* Platform setup (run once per boot) */ 50 - void (*init_platform)(void); 38 + /* Hypervisor type */ 39 + enum x86_hypervisor_type type; 51 40 52 - /* X2APIC detection (run once per boot) */ 53 - bool (*x2apic_available)(void); 41 + /* init time callbacks */ 42 + struct x86_hyper_init init; 54 43 55 - /* pin current vcpu to specified physical cpu (run rarely) */ 56 - void (*pin_vcpu)(int); 57 - 58 - /* called during init_mem_mapping() to setup early mappings. 
*/ 59 - void (*init_mem_mapping)(void); 44 + /* runtime callbacks */ 45 + struct x86_hyper_runtime runtime; 60 46 }; 61 47 62 - extern const struct hypervisor_x86 *x86_hyper; 63 - 64 - /* Recognized hypervisors */ 65 - extern const struct hypervisor_x86 x86_hyper_vmware; 66 - extern const struct hypervisor_x86 x86_hyper_ms_hyperv; 67 - extern const struct hypervisor_x86 x86_hyper_xen_pv; 68 - extern const struct hypervisor_x86 x86_hyper_xen_hvm; 69 - extern const struct hypervisor_x86 x86_hyper_kvm; 70 - 48 + extern enum x86_hypervisor_type x86_hyper_type; 71 49 extern void init_hypervisor_platform(void); 72 - extern bool hypervisor_x2apic_available(void); 73 - extern void hypervisor_pin_vcpu(int cpu); 74 - 75 - static inline void hypervisor_init_mem_mapping(void) 76 - { 77 - if (x86_hyper && x86_hyper->init_mem_mapping) 78 - x86_hyper->init_mem_mapping(); 79 - } 80 50 #else 81 51 static inline void init_hypervisor_platform(void) { } 82 - static inline bool hypervisor_x2apic_available(void) { return false; } 83 - static inline void hypervisor_init_mem_mapping(void) { } 84 52 #endif /* CONFIG_HYPERVISOR_GUEST */ 85 53 #endif /* _ASM_X86_HYPERVISOR_H */
-2
arch/x86/include/asm/kvm_para.h
··· 88 88 #ifdef CONFIG_KVM_GUEST 89 89 bool kvm_para_available(void); 90 90 unsigned int kvm_arch_para_features(void); 91 - void __init kvm_guest_init(void); 92 91 void kvm_async_pf_task_wait(u32 token, int interrupt_kernel); 93 92 void kvm_async_pf_task_wake(u32 token); 94 93 u32 kvm_read_and_reset_pf_reason(void); ··· 102 103 #endif /* CONFIG_PARAVIRT_SPINLOCKS */ 103 104 104 105 #else /* CONFIG_KVM_GUEST */ 105 - #define kvm_guest_init() do {} while (0) 106 106 #define kvm_async_pf_task_wait(T, I) do {} while(0) 107 107 #define kvm_async_pf_task_wake(T) do {} while(0) 108 108
+27
arch/x86/include/asm/x86_init.h
··· 115 115 }; 116 116 117 117 /** 118 + * struct x86_hyper_init - x86 hypervisor init functions 119 + * @init_platform: platform setup 120 + * @guest_late_init: guest late init 121 + * @x2apic_available: X2APIC detection 122 + * @init_mem_mapping: setup early mappings during init_mem_mapping() 123 + */ 124 + struct x86_hyper_init { 125 + void (*init_platform)(void); 126 + void (*guest_late_init)(void); 127 + bool (*x2apic_available)(void); 128 + void (*init_mem_mapping)(void); 129 + }; 130 + 131 + /** 118 132 * struct x86_init_ops - functions for platform specific setup 119 133 * 120 134 */ ··· 141 127 struct x86_init_timers timers; 142 128 struct x86_init_iommu iommu; 143 129 struct x86_init_pci pci; 130 + struct x86_hyper_init hyper; 144 131 }; 145 132 146 133 /** ··· 210 195 struct x86_legacy_features { 211 196 enum x86_legacy_i8042_state i8042; 212 197 int rtc; 198 + int no_vga; 213 199 int reserve_bios_regions; 214 200 struct x86_legacy_devices devices; 201 + }; 202 + 203 + /** 204 + * struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks 205 + * 206 + * @pin_vcpu: pin current vcpu to specified physical cpu (run rarely) 207 + */ 208 + struct x86_hyper_runtime { 209 + void (*pin_vcpu)(int cpu); 215 210 }; 216 211 217 212 /** ··· 243 218 * possible in x86_early_init_platform_quirks() by 244 219 * only using the current x86_hardware_subarch 245 220 * semantics. 221 + * @hyper: x86 hypervisor specific runtime callbacks 246 222 */ 247 223 struct x86_platform_ops { 248 224 unsigned long (*calibrate_cpu)(void); ··· 259 233 void (*apic_post_init)(void); 260 234 struct x86_legacy_features legacy; 261 235 void (*set_legacy_features)(void); 236 + struct x86_hyper_runtime hyper; 262 237 }; 263 238 264 239 struct pci_dev;
+5
arch/x86/kernel/acpi/boot.c
··· 961 961 x86_platform.legacy.rtc = 0; 962 962 } 963 963 964 + if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_VGA) { 965 + pr_debug("ACPI: probing for VGA not safe\n"); 966 + x86_platform.legacy.no_vga = 1; 967 + } 968 + 964 969 #ifdef CONFIG_X86_PM_TIMER 965 970 /* detect the location of the ACPI PM Timer */ 966 971 if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) {
+1 -1
arch/x86/kernel/apic/apic.c
··· 1645 1645 * under KVM 1646 1646 */ 1647 1647 if (max_physical_apicid > 255 || 1648 - !hypervisor_x2apic_available()) { 1648 + !x86_init.hyper.x2apic_available()) { 1649 1649 pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n"); 1650 1650 x2apic_disable(); 1651 1651 return;
+2 -3
arch/x86/kernel/apic/x2apic_uv_x.c
··· 920 920 /* 921 921 * percpu heartbeat timer 922 922 */ 923 - static void uv_heartbeat(unsigned long ignored) 923 + static void uv_heartbeat(struct timer_list *timer) 924 924 { 925 - struct timer_list *timer = &uv_scir_info->timer; 926 925 unsigned char bits = uv_scir_info->state; 927 926 928 927 /* Flip heartbeat bit: */ ··· 946 947 struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer; 947 948 948 949 uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY); 949 - setup_pinned_timer(timer, uv_heartbeat, cpu); 950 + timer_setup(timer, uv_heartbeat, TIMER_PINNED); 950 951 timer->expires = jiffies + SCIR_CPU_HB_INTERVAL; 951 952 add_timer_on(timer, cpu); 952 953 uv_cpu_scir_info(cpu)->enabled = 1;
+35 -31
arch/x86/kernel/cpu/hypervisor.c
··· 26 26 #include <asm/processor.h> 27 27 #include <asm/hypervisor.h> 28 28 29 + extern const struct hypervisor_x86 x86_hyper_vmware; 30 + extern const struct hypervisor_x86 x86_hyper_ms_hyperv; 31 + extern const struct hypervisor_x86 x86_hyper_xen_pv; 32 + extern const struct hypervisor_x86 x86_hyper_xen_hvm; 33 + extern const struct hypervisor_x86 x86_hyper_kvm; 34 + 29 35 static const __initconst struct hypervisor_x86 * const hypervisors[] = 30 36 { 31 37 #ifdef CONFIG_XEN_PV ··· 47 41 #endif 48 42 }; 49 43 50 - const struct hypervisor_x86 *x86_hyper; 51 - EXPORT_SYMBOL(x86_hyper); 44 + enum x86_hypervisor_type x86_hyper_type; 45 + EXPORT_SYMBOL(x86_hyper_type); 52 46 53 - static inline void __init 47 + static inline const struct hypervisor_x86 * __init 54 48 detect_hypervisor_vendor(void) 55 49 { 56 - const struct hypervisor_x86 *h, * const *p; 50 + const struct hypervisor_x86 *h = NULL, * const *p; 57 51 uint32_t pri, max_pri = 0; 58 52 59 53 for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) { 60 - h = *p; 61 - pri = h->detect(); 62 - if (pri != 0 && pri > max_pri) { 54 + pri = (*p)->detect(); 55 + if (pri > max_pri) { 63 56 max_pri = pri; 64 - x86_hyper = h; 57 + h = *p; 65 58 } 66 59 } 67 60 68 - if (max_pri) 69 - pr_info("Hypervisor detected: %s\n", x86_hyper->name); 61 + if (h) 62 + pr_info("Hypervisor detected: %s\n", h->name); 63 + 64 + return h; 65 + } 66 + 67 + static void __init copy_array(const void *src, void *target, unsigned int size) 68 + { 69 + unsigned int i, n = size / sizeof(void *); 70 + const void * const *from = (const void * const *)src; 71 + const void **to = (const void **)target; 72 + 73 + for (i = 0; i < n; i++) 74 + if (from[i]) 75 + to[i] = from[i]; 70 76 } 71 77 72 78 void __init init_hypervisor_platform(void) 73 79 { 80 + const struct hypervisor_x86 *h; 74 81 75 - detect_hypervisor_vendor(); 82 + h = detect_hypervisor_vendor(); 76 83 77 - if (!x86_hyper) 84 + if (!h) 78 85 return; 79 86 80 - if (x86_hyper->init_platform) 81 - x86_hyper->init_platform(); 82 - } 87 + copy_array(&h->init, &x86_init.hyper, sizeof(h->init)); 88 + copy_array(&h->runtime, &x86_platform.hyper, sizeof(h->runtime)); 83 89 84 - bool __init hypervisor_x2apic_available(void) 85 - { 86 - return x86_hyper && 87 - x86_hyper->x2apic_available && 88 - x86_hyper->x2apic_available(); 89 - } 90 - 91 - void hypervisor_pin_vcpu(int cpu) 92 - { 93 - if (!x86_hyper) 94 - return; 95 - 96 - if (x86_hyper->pin_vcpu) 97 - x86_hyper->pin_vcpu(cpu); 98 - else 99 - WARN_ONCE(1, "vcpu pinning requested but not supported!\n"); 90 + x86_hyper_type = h->type; 91 + x86_init.hyper.init_platform(); 100 92 }
+3 -3
arch/x86/kernel/cpu/mshyperv.c
··· 254 254 #endif 255 255 } 256 256 257 - const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { 257 + const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = { 258 258 .name = "Microsoft Hyper-V", 259 259 .detect = ms_hyperv_platform, 260 - .init_platform = ms_hyperv_init_platform, 260 + .type = X86_HYPER_MS_HYPERV, 261 + .init.init_platform = ms_hyperv_init_platform, 261 262 }; 262 - EXPORT_SYMBOL(x86_hyper_ms_hyperv);
+4 -4
arch/x86/kernel/cpu/vmware.c
··· 205 205 (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0; 206 206 } 207 207 208 - const __refconst struct hypervisor_x86 x86_hyper_vmware = { 208 + const __initconst struct hypervisor_x86 x86_hyper_vmware = { 209 209 .name = "VMware", 210 210 .detect = vmware_platform, 211 - .init_platform = vmware_platform_setup, 212 - .x2apic_available = vmware_legacy_x2apic_available, 211 + .type = X86_HYPER_VMWARE, 212 + .init.init_platform = vmware_platform_setup, 213 + .init.x2apic_available = vmware_legacy_x2apic_available, 213 214 }; 214 - EXPORT_SYMBOL(x86_hyper_vmware);
+5 -4
arch/x86/kernel/kvm.c
··· 498 498 update_intr_gate(X86_TRAP_PF, async_page_fault); 499 499 } 500 500 501 - void __init kvm_guest_init(void) 501 + static void __init kvm_guest_init(void) 502 502 { 503 503 int i; 504 504 ··· 578 578 return kvm_cpuid_base(); 579 579 } 580 580 581 - const struct hypervisor_x86 x86_hyper_kvm __refconst = { 581 + const __initconst struct hypervisor_x86 x86_hyper_kvm = { 582 582 .name = "KVM", 583 583 .detect = kvm_detect, 584 - .x2apic_available = kvm_para_available, 584 + .type = X86_HYPER_KVM, 585 + .init.guest_late_init = kvm_guest_init, 586 + .init.x2apic_available = kvm_para_available, 585 587 }; 586 - EXPORT_SYMBOL_GPL(x86_hyper_kvm); 587 588 588 589 static __init int activate_jump_labels(void) 589 590 {
+1 -1
arch/x86/kernel/setup.c
··· 1296 1296 1297 1297 io_apic_init_mappings(); 1298 1298 1299 - kvm_guest_init(); 1299 + x86_init.hyper.guest_late_init(); 1300 1300 1301 1301 e820__reserve_resources(); 1302 1302 e820__register_nosave_regions(max_low_pfn);
+10
arch/x86/kernel/x86_init.c
··· 28 28 void __init x86_init_uint_noop(unsigned int unused) { } 29 29 int __init iommu_init_noop(void) { return 0; } 30 30 void iommu_shutdown_noop(void) { } 31 + bool __init bool_x86_init_noop(void) { return false; } 32 + void x86_op_int_noop(int cpu) { } 31 33 32 34 /* 33 35 * The platform setup functions are preset with the default functions ··· 83 81 .init_irq = x86_default_pci_init_irq, 84 82 .fixup_irqs = x86_default_pci_fixup_irqs, 85 83 }, 84 + 85 + .hyper = { 86 + .init_platform = x86_init_noop, 87 + .guest_late_init = x86_init_noop, 88 + .x2apic_available = bool_x86_init_noop, 89 + .init_mem_mapping = x86_init_noop, 90 + }, 86 91 }; 87 92 88 93 struct x86_cpuinit_ops x86_cpuinit = { ··· 110 101 .get_nmi_reason = default_get_nmi_reason, 111 102 .save_sched_clock_state = tsc_save_sched_clock_state, 112 103 .restore_sched_clock_state = tsc_restore_sched_clock_state, 104 + .hyper.pin_vcpu = x86_op_int_noop, 113 105 }; 114 106 115 107 EXPORT_SYMBOL_GPL(x86_platform);
+1 -1
arch/x86/mm/init.c
··· 671 671 load_cr3(swapper_pg_dir); 672 672 __flush_tlb_all(); 673 673 674 - hypervisor_init_mem_mapping(); 674 + x86_init.hyper.init_mem_mapping(); 675 675 676 676 early_memtest(0, max_pfn_mapped << PAGE_SHIFT); 677 677 }
+28 -8
arch/x86/xen/enlighten_hvm.c
··· 1 + #include <linux/acpi.h> 1 2 #include <linux/cpu.h> 2 3 #include <linux/kexec.h> 3 4 #include <linux/memblock.h> ··· 189 188 xen_hvm_init_time_ops(); 190 189 xen_hvm_init_mmu_ops(); 191 190 192 - if (xen_pvh_domain()) 193 - machine_ops.emergency_restart = xen_emergency_restart; 194 191 #ifdef CONFIG_KEXEC_CORE 195 192 machine_ops.shutdown = xen_hvm_shutdown; 196 193 machine_ops.crash_shutdown = xen_hvm_crash_shutdown; ··· 225 226 return xen_cpuid_base(); 226 227 } 227 228 228 - const struct hypervisor_x86 x86_hyper_xen_hvm = { 229 + static __init void xen_hvm_guest_late_init(void) 230 + { 231 + #ifdef CONFIG_XEN_PVH 232 + /* Test for PVH domain (PVH boot path taken overrides ACPI flags). */ 233 + if (!xen_pvh && 234 + (x86_platform.legacy.rtc || !x86_platform.legacy.no_vga)) 235 + return; 236 + 237 + /* PVH detected. */ 238 + xen_pvh = true; 239 + 240 + /* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */ 241 + if (!nr_ioapics && acpi_irq_model == ACPI_IRQ_MODEL_PIC) 242 + acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM; 243 + 244 + machine_ops.emergency_restart = xen_emergency_restart; 245 + pv_info.name = "Xen PVH"; 246 + #endif 247 + } 248 + 249 + const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = { 229 250 .name = "Xen HVM", 230 251 .detect = xen_platform_hvm, 231 - .init_platform = xen_hvm_guest_init, 232 - .pin_vcpu = xen_pin_vcpu, 233 - .x2apic_available = xen_x2apic_para_available, 234 - .init_mem_mapping = xen_hvm_init_mem_mapping, 252 + .type = X86_HYPER_XEN_HVM, 253 + .init.init_platform = xen_hvm_guest_init, 254 + .init.x2apic_available = xen_x2apic_para_available, 255 + .init.init_mem_mapping = xen_hvm_init_mem_mapping, 256 + .init.guest_late_init = xen_hvm_guest_late_init, 257 + .runtime.pin_vcpu = xen_pin_vcpu, 235 258 }; 236 - EXPORT_SYMBOL(x86_hyper_xen_hvm);
+3 -3
arch/x86/xen/enlighten_pv.c
··· 1459 1459 return 0; 1460 1460 } 1461 1461 1462 - const struct hypervisor_x86 x86_hyper_xen_pv = { 1462 + const __initconst struct hypervisor_x86 x86_hyper_xen_pv = { 1463 1463 .name = "Xen PV", 1464 1464 .detect = xen_platform_pv, 1465 - .pin_vcpu = xen_pin_vcpu, 1465 + .type = X86_HYPER_XEN_PV, 1466 + .runtime.pin_vcpu = xen_pin_vcpu, 1466 1467 }; 1467 - EXPORT_SYMBOL(x86_hyper_xen_pv);
-9
arch/x86/xen/enlighten_pvh.c
··· 25 25 struct hvm_start_info pvh_start_info; 26 26 unsigned int pvh_start_info_sz = sizeof(pvh_start_info); 27 27 28 - static void xen_pvh_arch_setup(void) 29 - { 30 - /* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */ 31 - if (nr_ioapics == 0) 32 - acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM; 33 - } 34 - 35 28 static void __init init_pvh_bootparams(void) 36 29 { 37 30 struct xen_memory_map memmap; ··· 95 102 wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32)); 96 103 97 104 init_pvh_bootparams(); 98 - 99 - x86_init.oem.arch_setup = xen_pvh_arch_setup; 100 105 }
+1 -1
drivers/hv/vmbus_drv.c
··· 1534 1534 { 1535 1535 int ret, t; 1536 1536 1537 - if (x86_hyper != &x86_hyper_ms_hyperv) 1537 + if (x86_hyper_type != X86_HYPER_MS_HYPERV) 1538 1538 return -ENODEV; 1539 1539 1540 1540 init_completion(&probe_event);
+4 -6
drivers/input/mouse/vmmouse.c
··· 316 316 /* 317 317 * Array of supported hypervisors. 318 318 */ 319 - static const struct hypervisor_x86 *vmmouse_supported_hypervisors[] = { 320 - &x86_hyper_vmware, 321 - #ifdef CONFIG_KVM_GUEST 322 - &x86_hyper_kvm, 323 - #endif 319 + static enum x86_hypervisor_type vmmouse_supported_hypervisors[] = { 320 + X86_HYPER_VMWARE, 321 + X86_HYPER_KVM, 324 322 }; 325 323 326 324 /** ··· 329 331 int i; 330 332 331 333 for (i = 0; i < ARRAY_SIZE(vmmouse_supported_hypervisors); i++) 332 - if (vmmouse_supported_hypervisors[i] == x86_hyper) 334 + if (vmmouse_supported_hypervisors[i] == x86_hyper_type) 333 335 return true; 334 336 335 337 return false;
+1 -1
drivers/misc/vmw_balloon.c
··· 1271 1271 * Check if we are running on VMware's hypervisor and bail out 1272 1272 * if we are not. 1273 1273 */ 1274 - if (x86_hyper != &x86_hyper_vmware) 1274 + if (x86_hyper_type != X86_HYPER_VMWARE) 1275 1275 return -ENODEV; 1276 1276 1277 1277 for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
+6 -2
include/linux/hypervisor.h
··· 7 7 * Juergen Gross <jgross@suse.com> 8 8 */ 9 9 10 - #ifdef CONFIG_HYPERVISOR_GUEST 11 - #include <asm/hypervisor.h> 10 + #ifdef CONFIG_X86 11 + #include <asm/x86_init.h> 12 + static inline void hypervisor_pin_vcpu(int cpu) 13 + { 14 + x86_platform.hyper.pin_vcpu(cpu); 15 + } 12 16 #else 13 17 static inline void hypervisor_pin_vcpu(int cpu) 14 18 {