Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/paravirt: Switch time pvops functions to use static_call()

The time pvops functions are the only ones left which might be
used in 32-bit mode and which return a 64-bit value.

Switch them to use the static_call() mechanism instead of pvops, as
this allows considerable simplification of the pvops implementation.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210311142319.4723-5-jgross@suse.com

Authored by Juergen Gross and committed by Borislav Petkov.
a0e2bf7c 6ea312d9

+71 -57
+5 -9
arch/arm/include/asm/paravirt.h
··· 3 3 #define _ASM_ARM_PARAVIRT_H 4 4 5 5 #ifdef CONFIG_PARAVIRT 6 + #include <linux/static_call_types.h> 7 + 6 8 struct static_key; 7 9 extern struct static_key paravirt_steal_enabled; 8 10 extern struct static_key paravirt_steal_rq_enabled; 9 11 10 - struct pv_time_ops { 11 - unsigned long long (*steal_clock)(int cpu); 12 - }; 12 + u64 dummy_steal_clock(int cpu); 13 13 14 - struct paravirt_patch_template { 15 - struct pv_time_ops time; 16 - }; 17 - 18 - extern struct paravirt_patch_template pv_ops; 14 + DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); 19 15 20 16 static inline u64 paravirt_steal_clock(int cpu) 21 17 { 22 - return pv_ops.time.steal_clock(cpu); 18 + return static_call(pv_steal_clock)(cpu); 23 19 } 24 20 #endif 25 21
+7 -2
arch/arm/kernel/paravirt.c
··· 9 9 #include <linux/export.h> 10 10 #include <linux/jump_label.h> 11 11 #include <linux/types.h> 12 + #include <linux/static_call.h> 12 13 #include <asm/paravirt.h> 13 14 14 15 struct static_key paravirt_steal_enabled; 15 16 struct static_key paravirt_steal_rq_enabled; 16 17 17 - struct paravirt_patch_template pv_ops; 18 - EXPORT_SYMBOL_GPL(pv_ops); 18 + static u64 native_steal_clock(int cpu) 19 + { 20 + return 0; 21 + } 22 + 23 + DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+5 -9
arch/arm64/include/asm/paravirt.h
··· 3 3 #define _ASM_ARM64_PARAVIRT_H 4 4 5 5 #ifdef CONFIG_PARAVIRT 6 + #include <linux/static_call_types.h> 7 + 6 8 struct static_key; 7 9 extern struct static_key paravirt_steal_enabled; 8 10 extern struct static_key paravirt_steal_rq_enabled; 9 11 10 - struct pv_time_ops { 11 - unsigned long long (*steal_clock)(int cpu); 12 - }; 12 + u64 dummy_steal_clock(int cpu); 13 13 14 - struct paravirt_patch_template { 15 - struct pv_time_ops time; 16 - }; 17 - 18 - extern struct paravirt_patch_template pv_ops; 14 + DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); 19 15 20 16 static inline u64 paravirt_steal_clock(int cpu) 21 17 { 22 - return pv_ops.time.steal_clock(cpu); 18 + return static_call(pv_steal_clock)(cpu); 23 19 } 24 20 25 21 int __init pv_time_init(void);
+9 -4
arch/arm64/kernel/paravirt.c
··· 18 18 #include <linux/reboot.h> 19 19 #include <linux/slab.h> 20 20 #include <linux/types.h> 21 + #include <linux/static_call.h> 21 22 22 23 #include <asm/paravirt.h> 23 24 #include <asm/pvclock-abi.h> ··· 27 26 struct static_key paravirt_steal_enabled; 28 27 struct static_key paravirt_steal_rq_enabled; 29 28 30 - struct paravirt_patch_template pv_ops; 31 - EXPORT_SYMBOL_GPL(pv_ops); 29 + static u64 native_steal_clock(int cpu) 30 + { 31 + return 0; 32 + } 33 + 34 + DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); 32 35 33 36 struct pv_time_stolen_time_region { 34 37 struct pvclock_vcpu_stolen_time *kaddr; ··· 50 45 early_param("no-steal-acc", parse_no_stealacc); 51 46 52 47 /* return stolen time in ns by asking the hypervisor */ 53 - static u64 pv_steal_clock(int cpu) 48 + static u64 para_steal_clock(int cpu) 54 49 { 55 50 struct pv_time_stolen_time_region *reg; 56 51 ··· 155 150 if (ret) 156 151 return ret; 157 152 158 - pv_ops.time.steal_clock = pv_steal_clock; 153 + static_call_update(pv_steal_clock, para_steal_clock); 159 154 160 155 static_key_slow_inc(&paravirt_steal_enabled); 161 156 if (steal_acc)
+1
arch/x86/Kconfig
··· 777 777 778 778 config PARAVIRT 779 779 bool "Enable paravirtualization code" 780 + depends on HAVE_STATIC_CALL 780 781 help 781 782 This changes the kernel so it can modify itself when it is run 782 783 under a hypervisor, potentially improving performance significantly
+1 -1
arch/x86/include/asm/mshyperv.h
··· 63 63 static __always_inline void hv_setup_sched_clock(void *sched_clock) 64 64 { 65 65 #ifdef CONFIG_PARAVIRT 66 - pv_ops.time.sched_clock = sched_clock; 66 + paravirt_set_sched_clock(sched_clock); 67 67 #endif 68 68 } 69 69
+12 -3
arch/x86/include/asm/paravirt.h
··· 15 15 #include <linux/bug.h> 16 16 #include <linux/types.h> 17 17 #include <linux/cpumask.h> 18 + #include <linux/static_call_types.h> 18 19 #include <asm/frame.h> 19 20 20 - static inline unsigned long long paravirt_sched_clock(void) 21 + u64 dummy_steal_clock(int cpu); 22 + u64 dummy_sched_clock(void); 23 + 24 + DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); 25 + DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock); 26 + 27 + void paravirt_set_sched_clock(u64 (*func)(void)); 28 + 29 + static inline u64 paravirt_sched_clock(void) 21 30 { 22 - return PVOP_CALL0(unsigned long long, time.sched_clock); 31 + return static_call(pv_sched_clock)(); 23 32 } 24 33 25 34 struct static_key; ··· 42 33 43 34 static inline u64 paravirt_steal_clock(int cpu) 44 35 { 45 - return PVOP_CALL1(u64, time.steal_clock, cpu); 36 + return static_call(pv_steal_clock)(cpu); 46 37 } 47 38 48 39 /* The paravirtualized I/O functions */
-6
arch/x86/include/asm/paravirt_types.h
··· 95 95 } __no_randomize_layout; 96 96 #endif 97 97 98 - struct pv_time_ops { 99 - unsigned long long (*sched_clock)(void); 100 - unsigned long long (*steal_clock)(int cpu); 101 - } __no_randomize_layout; 102 - 103 98 struct pv_cpu_ops { 104 99 /* hooks for various privileged instructions */ 105 100 void (*io_delay)(void); ··· 286 291 * what to patch. */ 287 292 struct paravirt_patch_template { 288 293 struct pv_init_ops init; 289 - struct pv_time_ops time; 290 294 struct pv_cpu_ops cpu; 291 295 struct pv_irq_ops irq; 292 296 struct pv_mmu_ops mmu;
+3 -2
arch/x86/kernel/cpu/vmware.c
··· 27 27 #include <linux/clocksource.h> 28 28 #include <linux/cpu.h> 29 29 #include <linux/reboot.h> 30 + #include <linux/static_call.h> 30 31 #include <asm/div64.h> 31 32 #include <asm/x86_init.h> 32 33 #include <asm/hypervisor.h> ··· 337 336 vmware_cyc2ns_setup(); 338 337 339 338 if (vmw_sched_clock) 340 - pv_ops.time.sched_clock = vmware_sched_clock; 339 + paravirt_set_sched_clock(vmware_sched_clock); 341 340 342 341 if (vmware_is_stealclock_available()) { 343 342 has_steal_clock = true; 344 - pv_ops.time.steal_clock = vmware_steal_clock; 343 + static_call_update(pv_steal_clock, vmware_steal_clock); 345 344 346 345 /* We use reboot notifier only to disable steal clock */ 347 346 register_reboot_notifier(&vmware_pv_reboot_nb);
+1 -1
arch/x86/kernel/kvm.c
··· 650 650 651 651 if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { 652 652 has_steal_clock = 1; 653 - pv_ops.time.steal_clock = kvm_steal_clock; 653 + static_call_update(pv_steal_clock, kvm_steal_clock); 654 654 } 655 655 656 656 if (pv_tlb_flush_supported()) {
+1 -1
arch/x86/kernel/kvmclock.c
··· 106 106 if (!stable) 107 107 clear_sched_clock_stable(); 108 108 kvm_sched_clock_offset = kvm_clock_read(); 109 - pv_ops.time.sched_clock = kvm_sched_clock_read; 109 + paravirt_set_sched_clock(kvm_sched_clock_read); 110 110 111 111 pr_info("kvm-clock: using sched offset of %llu cycles", 112 112 kvm_sched_clock_offset);
+9 -4
arch/x86/kernel/paravirt.c
··· 14 14 #include <linux/highmem.h> 15 15 #include <linux/kprobes.h> 16 16 #include <linux/pgtable.h> 17 + #include <linux/static_call.h> 17 18 18 19 #include <asm/bug.h> 19 20 #include <asm/paravirt.h> ··· 168 167 return 0; 169 168 } 170 169 170 + DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); 171 + DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock); 172 + 173 + void paravirt_set_sched_clock(u64 (*func)(void)) 174 + { 175 + static_call_update(pv_sched_clock, func); 176 + } 177 + 171 178 /* These are in entry.S */ 172 179 extern void native_iret(void); 173 180 ··· 280 271 struct paravirt_patch_template pv_ops = { 281 272 /* Init ops. */ 282 273 .init.patch = native_patch, 283 - 284 - /* Time ops. */ 285 - .time.sched_clock = native_sched_clock, 286 - .time.steal_clock = native_steal_clock, 287 274 288 275 /* Cpu ops. */ 289 276 .cpu.io_delay = native_io_delay,
+2 -1
arch/x86/kernel/tsc.c
··· 14 14 #include <linux/percpu.h> 15 15 #include <linux/timex.h> 16 16 #include <linux/static_key.h> 17 + #include <linux/static_call.h> 17 18 18 19 #include <asm/hpet.h> 19 20 #include <asm/timer.h> ··· 255 254 256 255 bool using_native_sched_clock(void) 257 256 { 258 - return pv_ops.time.sched_clock == native_sched_clock; 257 + return static_call_query(pv_sched_clock) == native_sched_clock; 259 258 } 260 259 #else 261 260 unsigned long long
+13 -13
arch/x86/xen/time.c
··· 379 379 } 380 380 } 381 381 382 - static const struct pv_time_ops xen_time_ops __initconst = { 383 - .sched_clock = xen_sched_clock, 384 - .steal_clock = xen_steal_clock, 385 - }; 386 - 387 382 static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; 388 383 static u64 xen_clock_value_saved; 389 384 ··· 520 525 pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); 521 526 } 522 527 523 - void __init xen_init_time_ops(void) 528 + static void __init xen_init_time_common(void) 524 529 { 525 530 xen_sched_clock_offset = xen_clocksource_read(); 526 - pv_ops.time = xen_time_ops; 531 + static_call_update(pv_steal_clock, xen_steal_clock); 532 + paravirt_set_sched_clock(xen_sched_clock); 533 + 534 + x86_platform.calibrate_tsc = xen_tsc_khz; 535 + x86_platform.get_wallclock = xen_get_wallclock; 536 + } 537 + 538 + void __init xen_init_time_ops(void) 539 + { 540 + xen_init_time_common(); 527 541 528 542 x86_init.timers.timer_init = xen_time_init; 529 543 x86_init.timers.setup_percpu_clockev = x86_init_noop; 530 544 x86_cpuinit.setup_percpu_clockev = x86_init_noop; 531 545 532 - x86_platform.calibrate_tsc = xen_tsc_khz; 533 - x86_platform.get_wallclock = xen_get_wallclock; 534 546 /* Dom0 uses the native method to set the hardware RTC. */ 535 547 if (!xen_initial_domain()) 536 548 x86_platform.set_wallclock = xen_set_wallclock; ··· 571 569 return; 572 570 } 573 571 574 - xen_sched_clock_offset = xen_clocksource_read(); 575 - pv_ops.time = xen_time_ops; 572 + xen_init_time_common(); 573 + 576 574 x86_init.timers.setup_percpu_clockev = xen_time_init; 577 575 x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents; 578 576 579 - x86_platform.calibrate_tsc = xen_tsc_khz; 580 - x86_platform.get_wallclock = xen_get_wallclock; 581 577 x86_platform.set_wallclock = xen_set_wallclock; 582 578 } 583 579 #endif
+2 -1
drivers/xen/time.c
··· 7 7 #include <linux/math64.h> 8 8 #include <linux/gfp.h> 9 9 #include <linux/slab.h> 10 + #include <linux/static_call.h> 10 11 11 12 #include <asm/paravirt.h> 12 13 #include <asm/xen/hypervisor.h> ··· 176 175 xen_runstate_remote = !HYPERVISOR_vm_assist(VMASST_CMD_enable, 177 176 VMASST_TYPE_runstate_update_flag); 178 177 179 - pv_ops.time.steal_clock = xen_steal_clock; 178 + static_call_update(pv_steal_clock, xen_steal_clock); 180 179 181 180 static_key_slow_inc(&paravirt_steal_enabled); 182 181 if (xen_runstate_remote)