Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'kvm-arm64/ptp' into kvmarm-master/next

Signed-off-by: Marc Zyngier <maz@kernel.org>

+443 -78
+10
Documentation/virt/kvm/api.rst
··· 6737 6737 The KVM_XEN_HVM_CONFIG_RUNSTATE flag indicates that the runstate-related 6738 6738 features KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR/_CURRENT/_DATA/_ADJUST are 6739 6739 supported by the KVM_XEN_VCPU_SET_ATTR/KVM_XEN_VCPU_GET_ATTR ioctls. 6740 + 6741 + 8.31 KVM_CAP_PTP_KVM 6742 + -------------------- 6743 + 6744 + :Architectures: arm64 6745 + 6746 + This capability indicates that the KVM virtual PTP service is 6747 + supported in the host. A VMM can check whether the service is 6748 + available to the guest on migration. 6749 +
+1
Documentation/virt/kvm/arm/index.rst
··· 10 10 hyp-abi 11 11 psci 12 12 pvtime 13 + ptp_kvm
+25
Documentation/virt/kvm/arm/ptp_kvm.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 2 + 3 + PTP_KVM support for arm/arm64 4 + ============================= 5 + 6 + PTP_KVM is used for high precision time sync between host and guests. 7 + It relies on transferring the wall clock and counter value from the 8 + host to the guest using a KVM-specific hypercall. 9 + 10 + * ARM_SMCCC_HYP_KVM_PTP_FUNC_ID: 0x86000001 11 + 12 + This hypercall uses the SMC32/HVC32 calling convention: 13 + 14 + ARM_SMCCC_HYP_KVM_PTP_FUNC_ID 15 + ============== ======== ===================================== 16 + Function ID: (uint32) 0x86000001 17 + Arguments: (uint32) KVM_PTP_VIRT_COUNTER(0) 18 + KVM_PTP_PHYS_COUNTER(1) 19 + Return Values: (int32) NOT_SUPPORTED(-1) on error, or 20 + (uint32) Upper 32 bits of wall clock time (r0) 21 + (uint32) Lower 32 bits of wall clock time (r1) 22 + (uint32) Upper 32 bits of counter (r2) 23 + (uint32) Lower 32 bits of counter (r3) 24 + Endianness: No Restrictions. 25 + ============== ======== =====================================
+3
arch/arm/include/asm/hypervisor.h
··· 4 4 5 5 #include <asm/xen/hypervisor.h> 6 6 7 + void kvm_init_hyp_services(void); 8 + bool kvm_arm_hyp_service_available(u32 func_id); 9 + 7 10 #endif
+3
arch/arm64/include/asm/hypervisor.h
··· 4 4 5 5 #include <asm/xen/hypervisor.h> 6 6 7 + void kvm_init_hyp_services(void); 8 + bool kvm_arm_hyp_service_available(u32 func_id); 9 + 7 10 #endif
+1
arch/arm64/kvm/arm.c
··· 206 206 case KVM_CAP_ARM_INJECT_EXT_DABT: 207 207 case KVM_CAP_SET_GUEST_DEBUG: 208 208 case KVM_CAP_VCPU_ATTRIBUTES: 209 + case KVM_CAP_PTP_KVM: 209 210 r = 1; 210 211 break; 211 212 case KVM_CAP_ARM_SET_DEVICE_ADDR:
+71 -9
arch/arm64/kvm/hypercalls.c
··· 9 9 #include <kvm/arm_hypercalls.h> 10 10 #include <kvm/arm_psci.h> 11 11 12 + static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val) 13 + { 14 + struct system_time_snapshot systime_snapshot; 15 + u64 cycles = ~0UL; 16 + u32 feature; 17 + 18 + /* 19 + * system time and counter value must be captured at the same 20 + * time to keep consistency and precision. 21 + */ 22 + ktime_get_snapshot(&systime_snapshot); 23 + 24 + /* 25 + * This is only valid if the current clocksource is the 26 + * architected counter, as this is the only one the guest 27 + * can see. 28 + */ 29 + if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER) 30 + return; 31 + 32 + /* 33 + * The guest selects one of the two reference counters 34 + * (virtual or physical) with the first argument of the SMCCC 35 + * call. In case the identifier is not supported, error out. 36 + */ 37 + feature = smccc_get_arg1(vcpu); 38 + switch (feature) { 39 + case KVM_PTP_VIRT_COUNTER: 40 + cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2); 41 + break; 42 + case KVM_PTP_PHYS_COUNTER: 43 + cycles = systime_snapshot.cycles; 44 + break; 45 + default: 46 + return; 47 + } 48 + 49 + /* 50 + * This relies on the top bit of val[0] never being set for 51 + * valid values of system time, because that is *really* far 52 + * in the future (about 292 years from 1970, and at that stage 53 + * nobody will give a damn about it). 
54 + */ 55 + val[0] = upper_32_bits(systime_snapshot.real); 56 + val[1] = lower_32_bits(systime_snapshot.real); 57 + val[2] = upper_32_bits(cycles); 58 + val[3] = lower_32_bits(cycles); 59 + } 60 + 12 61 int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) 13 62 { 14 63 u32 func_id = smccc_get_function(vcpu); 15 - long val = SMCCC_RET_NOT_SUPPORTED; 64 + u64 val[4] = {SMCCC_RET_NOT_SUPPORTED}; 16 65 u32 feature; 17 66 gpa_t gpa; 18 67 19 68 switch (func_id) { 20 69 case ARM_SMCCC_VERSION_FUNC_ID: 21 - val = ARM_SMCCC_VERSION_1_1; 70 + val[0] = ARM_SMCCC_VERSION_1_1; 22 71 break; 23 72 case ARM_SMCCC_ARCH_FEATURES_FUNC_ID: 24 73 feature = smccc_get_arg1(vcpu); ··· 77 28 case SPECTRE_VULNERABLE: 78 29 break; 79 30 case SPECTRE_MITIGATED: 80 - val = SMCCC_RET_SUCCESS; 31 + val[0] = SMCCC_RET_SUCCESS; 81 32 break; 82 33 case SPECTRE_UNAFFECTED: 83 - val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED; 34 + val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED; 84 35 break; 85 36 } 86 37 break; ··· 103 54 break; 104 55 fallthrough; 105 56 case SPECTRE_UNAFFECTED: 106 - val = SMCCC_RET_NOT_REQUIRED; 57 + val[0] = SMCCC_RET_NOT_REQUIRED; 107 58 break; 108 59 } 109 60 break; 110 61 case ARM_SMCCC_HV_PV_TIME_FEATURES: 111 - val = SMCCC_RET_SUCCESS; 62 + val[0] = SMCCC_RET_SUCCESS; 112 63 break; 113 64 } 114 65 break; 115 66 case ARM_SMCCC_HV_PV_TIME_FEATURES: 116 - val = kvm_hypercall_pv_features(vcpu); 67 + val[0] = kvm_hypercall_pv_features(vcpu); 117 68 break; 118 69 case ARM_SMCCC_HV_PV_TIME_ST: 119 70 gpa = kvm_init_stolen_time(vcpu); 120 71 if (gpa != GPA_INVALID) 121 - val = gpa; 72 + val[0] = gpa; 73 + break; 74 + case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID: 75 + val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0; 76 + val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1; 77 + val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2; 78 + val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3; 79 + break; 80 + case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID: 81 + val[0] = BIT(ARM_SMCCC_KVM_FUNC_FEATURES); 82 + val[0] |= 
BIT(ARM_SMCCC_KVM_FUNC_PTP); 83 + break; 84 + case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID: 85 + kvm_ptp_get_time(vcpu, val); 122 86 break; 123 87 case ARM_SMCCC_TRNG_VERSION: 124 88 case ARM_SMCCC_TRNG_FEATURES: ··· 143 81 return kvm_psci_call(vcpu); 144 82 } 145 83 146 - smccc_set_retval(vcpu, val, 0, 0, 0); 84 + smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]); 147 85 return 1; 148 86 }
+36
drivers/clocksource/arm_arch_timer.c
··· 16 16 #include <linux/cpu_pm.h> 17 17 #include <linux/clockchips.h> 18 18 #include <linux/clocksource.h> 19 + #include <linux/clocksource_ids.h> 19 20 #include <linux/interrupt.h> 20 21 #include <linux/of_irq.h> 21 22 #include <linux/of_address.h> ··· 25 24 #include <linux/sched/clock.h> 26 25 #include <linux/sched_clock.h> 27 26 #include <linux/acpi.h> 27 + #include <linux/arm-smccc.h> 28 + #include <linux/ptp_kvm.h> 28 29 29 30 #include <asm/arch_timer.h> 30 31 #include <asm/virt.h> ··· 194 191 195 192 static struct clocksource clocksource_counter = { 196 193 .name = "arch_sys_counter", 194 + .id = CSID_ARM_ARCH_COUNTER, 197 195 .rating = 400, 198 196 .read = arch_counter_read, 199 197 .mask = CLOCKSOURCE_MASK(56), ··· 1661 1657 } 1662 1658 TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init); 1663 1659 #endif 1660 + 1661 + int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts, 1662 + struct clocksource **cs) 1663 + { 1664 + struct arm_smccc_res hvc_res; 1665 + u32 ptp_counter; 1666 + ktime_t ktime; 1667 + 1668 + if (!IS_ENABLED(CONFIG_HAVE_ARM_SMCCC_DISCOVERY)) 1669 + return -EOPNOTSUPP; 1670 + 1671 + if (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) 1672 + ptp_counter = KVM_PTP_VIRT_COUNTER; 1673 + else 1674 + ptp_counter = KVM_PTP_PHYS_COUNTER; 1675 + 1676 + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, 1677 + ptp_counter, &hvc_res); 1678 + 1679 + if ((int)(hvc_res.a0) < 0) 1680 + return -EOPNOTSUPP; 1681 + 1682 + ktime = (u64)hvc_res.a0 << 32 | hvc_res.a1; 1683 + *ts = ktime_to_timespec64(ktime); 1684 + if (cycle) 1685 + *cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3; 1686 + if (cs) 1687 + *cs = &clocksource_counter; 1688 + 1689 + return 0; 1690 + } 1691 + EXPORT_SYMBOL_GPL(kvm_arch_ptp_get_crosststamp);
+2
drivers/firmware/psci/psci.c
··· 23 23 24 24 #include <asm/cpuidle.h> 25 25 #include <asm/cputype.h> 26 + #include <asm/hypervisor.h> 26 27 #include <asm/system_misc.h> 27 28 #include <asm/smp_plat.h> 28 29 #include <asm/suspend.h> ··· 499 498 psci_init_cpu_suspend(); 500 499 psci_init_system_suspend(); 501 500 psci_init_system_reset2(); 501 + kvm_init_hyp_services(); 502 502 } 503 503 504 504 return 0;
+1 -1
drivers/firmware/smccc/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 # 3 - obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o 3 + obj-$(CONFIG_HAVE_ARM_SMCCC_DISCOVERY) += smccc.o kvm_guest.o 4 4 obj-$(CONFIG_ARM_SMCCC_SOC_ID) += soc_id.o
+50
drivers/firmware/smccc/kvm_guest.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #define pr_fmt(fmt) "smccc: KVM: " fmt 4 + 5 + #include <linux/arm-smccc.h> 6 + #include <linux/bitmap.h> 7 + #include <linux/kernel.h> 8 + #include <linux/string.h> 9 + 10 + #include <asm/hypervisor.h> 11 + 12 + static DECLARE_BITMAP(__kvm_arm_hyp_services, ARM_SMCCC_KVM_NUM_FUNCS) __ro_after_init = { }; 13 + 14 + void __init kvm_init_hyp_services(void) 15 + { 16 + struct arm_smccc_res res; 17 + u32 val[4]; 18 + 19 + if (arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_HVC) 20 + return; 21 + 22 + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res); 23 + if (res.a0 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 || 24 + res.a1 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 || 25 + res.a2 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 || 26 + res.a3 != ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3) 27 + return; 28 + 29 + memset(&res, 0, sizeof(res)); 30 + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID, &res); 31 + 32 + val[0] = lower_32_bits(res.a0); 33 + val[1] = lower_32_bits(res.a1); 34 + val[2] = lower_32_bits(res.a2); 35 + val[3] = lower_32_bits(res.a3); 36 + 37 + bitmap_from_arr32(__kvm_arm_hyp_services, val, ARM_SMCCC_KVM_NUM_FUNCS); 38 + 39 + pr_info("hypervisor services detected (0x%08lx 0x%08lx 0x%08lx 0x%08lx)\n", 40 + res.a3, res.a2, res.a1, res.a0); 41 + } 42 + 43 + bool kvm_arm_hyp_service_available(u32 func_id) 44 + { 45 + if (func_id >= ARM_SMCCC_KVM_NUM_FUNCS) 46 + return false; 47 + 48 + return test_bit(func_id, __kvm_arm_hyp_services); 49 + } 50 + EXPORT_SYMBOL_GPL(kvm_arm_hyp_service_available);
+1
drivers/firmware/smccc/smccc.c
··· 8 8 #include <linux/cache.h> 9 9 #include <linux/init.h> 10 10 #include <linux/arm-smccc.h> 11 + #include <linux/kernel.h> 11 12 #include <asm/archrandom.h> 12 13 13 14 static u32 smccc_version = ARM_SMCCC_VERSION_1_0;
+1 -1
drivers/ptp/Kconfig
··· 108 108 config PTP_1588_CLOCK_KVM 109 109 tristate "KVM virtual PTP clock" 110 110 depends on PTP_1588_CLOCK 111 - depends on KVM_GUEST && X86 111 + depends on (KVM_GUEST && X86) || (HAVE_ARM_SMCCC_DISCOVERY && ARM_ARCH_TIMER) 112 112 default y 113 113 help 114 114 This driver adds support for using kvm infrastructure as a PTP
+2
drivers/ptp/Makefile
··· 4 4 # 5 5 6 6 ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o 7 + ptp_kvm-$(CONFIG_X86) := ptp_kvm_x86.o ptp_kvm_common.o 8 + ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC) := ptp_kvm_arm.o ptp_kvm_common.o 7 9 obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o 8 10 obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o 9 11 obj-$(CONFIG_PTP_1588_CLOCK_INES) += ptp_ines.o
+22 -62
drivers/ptp/ptp_kvm.c drivers/ptp/ptp_kvm_common.c
··· 8 8 #include <linux/err.h> 9 9 #include <linux/init.h> 10 10 #include <linux/kernel.h> 11 + #include <linux/slab.h> 11 12 #include <linux/module.h> 13 + #include <linux/ptp_kvm.h> 12 14 #include <uapi/linux/kvm_para.h> 13 15 #include <asm/kvm_para.h> 14 - #include <asm/pvclock.h> 15 - #include <asm/kvmclock.h> 16 16 #include <uapi/asm/kvm_para.h> 17 17 18 18 #include <linux/ptp_clock_kernel.h> ··· 24 24 25 25 static DEFINE_SPINLOCK(kvm_ptp_lock); 26 26 27 - static struct pvclock_vsyscall_time_info *hv_clock; 28 - 29 - static struct kvm_clock_pairing clock_pair; 30 - static phys_addr_t clock_pair_gpa; 31 - 32 27 static int ptp_kvm_get_time_fn(ktime_t *device_time, 33 28 struct system_counterval_t *system_counter, 34 29 void *ctx) 35 30 { 36 - unsigned long ret; 31 + long ret; 32 + u64 cycle; 37 33 struct timespec64 tspec; 38 - unsigned version; 39 - int cpu; 40 - struct pvclock_vcpu_time_info *src; 34 + struct clocksource *cs; 41 35 42 36 spin_lock(&kvm_ptp_lock); 43 37 44 38 preempt_disable_notrace(); 45 - cpu = smp_processor_id(); 46 - src = &hv_clock[cpu].pvti; 47 - 48 - do { 49 - /* 50 - * We are using a TSC value read in the hosts 51 - * kvm_hc_clock_pairing handling. 52 - * So any changes to tsc_to_system_mul 53 - * and tsc_shift or any other pvclock 54 - * data invalidate that measurement. 
55 - */ 56 - version = pvclock_read_begin(src); 57 - 58 - ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, 59 - clock_pair_gpa, 60 - KVM_CLOCK_PAIRING_WALLCLOCK); 61 - if (ret != 0) { 62 - pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret); 63 - spin_unlock(&kvm_ptp_lock); 64 - preempt_enable_notrace(); 65 - return -EOPNOTSUPP; 66 - } 67 - 68 - tspec.tv_sec = clock_pair.sec; 69 - tspec.tv_nsec = clock_pair.nsec; 70 - ret = __pvclock_read_cycles(src, clock_pair.tsc); 71 - } while (pvclock_read_retry(src, version)); 39 + ret = kvm_arch_ptp_get_crosststamp(&cycle, &tspec, &cs); 40 + if (ret) { 41 + spin_unlock(&kvm_ptp_lock); 42 + preempt_enable_notrace(); 43 + return ret; 44 + } 72 45 73 46 preempt_enable_notrace(); 74 47 75 - system_counter->cycles = ret; 76 - system_counter->cs = &kvm_clock; 48 + system_counter->cycles = cycle; 49 + system_counter->cs = cs; 77 50 78 51 *device_time = timespec64_to_ktime(tspec); 79 52 ··· 84 111 85 112 static int ptp_kvm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) 86 113 { 87 - unsigned long ret; 114 + long ret; 88 115 struct timespec64 tspec; 89 116 90 117 spin_lock(&kvm_ptp_lock); 91 118 92 - ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, 93 - clock_pair_gpa, 94 - KVM_CLOCK_PAIRING_WALLCLOCK); 95 - if (ret != 0) { 96 - pr_err_ratelimited("clock offset hypercall ret %lu\n", ret); 119 + ret = kvm_arch_ptp_get_clock(&tspec); 120 + if (ret) { 97 121 spin_unlock(&kvm_ptp_lock); 98 - return -EOPNOTSUPP; 122 + return ret; 99 123 } 100 124 101 - tspec.tv_sec = clock_pair.sec; 102 - tspec.tv_nsec = clock_pair.nsec; 103 125 spin_unlock(&kvm_ptp_lock); 104 126 105 127 memcpy(ts, &tspec, sizeof(struct timespec64)); ··· 136 168 { 137 169 long ret; 138 170 139 - if (!kvm_para_available()) 140 - return -ENODEV; 141 - 142 - clock_pair_gpa = slow_virt_to_phys(&clock_pair); 143 - hv_clock = pvclock_get_pvti_cpu0_va(); 144 - 145 - if (!hv_clock) 146 - return -ENODEV; 147 - 148 - ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, 
clock_pair_gpa, 149 - KVM_CLOCK_PAIRING_WALLCLOCK); 150 - if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP) 151 - return -ENODEV; 171 + ret = kvm_arch_ptp_init(); 172 + if (ret) { 173 + pr_err("fail to initialize ptp_kvm"); 174 + return ret; 175 + } 152 176 153 177 kvm_ptp_clock.caps = ptp_kvm_caps; 154 178
+28
drivers/ptp/ptp_kvm_arm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Virtual PTP 1588 clock for use with KVM guests 4 + * Copyright (C) 2019 ARM Ltd. 5 + * All Rights Reserved 6 + */ 7 + 8 + #include <linux/arm-smccc.h> 9 + #include <linux/ptp_kvm.h> 10 + 11 + #include <asm/arch_timer.h> 12 + #include <asm/hypervisor.h> 13 + 14 + int kvm_arch_ptp_init(void) 15 + { 16 + int ret; 17 + 18 + ret = kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_PTP); 19 + if (ret <= 0) 20 + return -EOPNOTSUPP; 21 + 22 + return 0; 23 + } 24 + 25 + int kvm_arch_ptp_get_clock(struct timespec64 *ts) 26 + { 27 + return kvm_arch_ptp_get_crosststamp(NULL, ts, NULL); 28 + }
+97
drivers/ptp/ptp_kvm_x86.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* 3 + * Virtual PTP 1588 clock for use with KVM guests 4 + * 5 + * Copyright (C) 2017 Red Hat Inc. 6 + */ 7 + 8 + #include <linux/device.h> 9 + #include <linux/kernel.h> 10 + #include <asm/pvclock.h> 11 + #include <asm/kvmclock.h> 12 + #include <linux/module.h> 13 + #include <uapi/asm/kvm_para.h> 14 + #include <uapi/linux/kvm_para.h> 15 + #include <linux/ptp_clock_kernel.h> 16 + #include <linux/ptp_kvm.h> 17 + 18 + struct pvclock_vsyscall_time_info *hv_clock; 19 + 20 + static phys_addr_t clock_pair_gpa; 21 + static struct kvm_clock_pairing clock_pair; 22 + 23 + int kvm_arch_ptp_init(void) 24 + { 25 + long ret; 26 + 27 + if (!kvm_para_available()) 28 + return -ENODEV; 29 + 30 + clock_pair_gpa = slow_virt_to_phys(&clock_pair); 31 + hv_clock = pvclock_get_pvti_cpu0_va(); 32 + if (!hv_clock) 33 + return -ENODEV; 34 + 35 + ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa, 36 + KVM_CLOCK_PAIRING_WALLCLOCK); 37 + if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP) 38 + return -ENODEV; 39 + 40 + return 0; 41 + } 42 + 43 + int kvm_arch_ptp_get_clock(struct timespec64 *ts) 44 + { 45 + long ret; 46 + 47 + ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, 48 + clock_pair_gpa, 49 + KVM_CLOCK_PAIRING_WALLCLOCK); 50 + if (ret != 0) { 51 + pr_err_ratelimited("clock offset hypercall ret %lu\n", ret); 52 + return -EOPNOTSUPP; 53 + } 54 + 55 + ts->tv_sec = clock_pair.sec; 56 + ts->tv_nsec = clock_pair.nsec; 57 + 58 + return 0; 59 + } 60 + 61 + int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec, 62 + struct clocksource **cs) 63 + { 64 + struct pvclock_vcpu_time_info *src; 65 + unsigned int version; 66 + long ret; 67 + int cpu; 68 + 69 + cpu = smp_processor_id(); 70 + src = &hv_clock[cpu].pvti; 71 + 72 + do { 73 + /* 74 + * We are using a TSC value read in the hosts 75 + * kvm_hc_clock_pairing handling. 
76 + * So any changes to tsc_to_system_mul 77 + * and tsc_shift or any other pvclock 78 + * data invalidate that measurement. 79 + */ 80 + version = pvclock_read_begin(src); 81 + 82 + ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, 83 + clock_pair_gpa, 84 + KVM_CLOCK_PAIRING_WALLCLOCK); 85 + if (ret != 0) { 86 + pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret); 87 + return -EOPNOTSUPP; 88 + } 89 + tspec->tv_sec = clock_pair.sec; 90 + tspec->tv_nsec = clock_pair.nsec; 91 + *cycle = __pvclock_read_cycles(src, clock_pair.tsc); 92 + } while (pvclock_read_retry(src, version)); 93 + 94 + *cs = &kvm_clock; 95 + 96 + return 0; 97 + }
+41
include/linux/arm-smccc.h
··· 55 55 #define ARM_SMCCC_OWNER_TRUSTED_OS 50 56 56 #define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 57 57 58 + #define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01 59 + 58 60 #define ARM_SMCCC_QUIRK_NONE 0 59 61 #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ 60 62 ··· 89 87 ARM_SMCCC_SMC_32, \ 90 88 0, 0x7fff) 91 89 90 + #define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \ 91 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ 92 + ARM_SMCCC_SMC_32, \ 93 + ARM_SMCCC_OWNER_VENDOR_HYP, \ 94 + ARM_SMCCC_FUNC_QUERY_CALL_UID) 95 + 96 + /* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */ 97 + #define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U 98 + #define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU 99 + #define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U 100 + #define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU 101 + 102 + /* KVM "vendor specific" services */ 103 + #define ARM_SMCCC_KVM_FUNC_FEATURES 0 104 + #define ARM_SMCCC_KVM_FUNC_PTP 1 105 + #define ARM_SMCCC_KVM_FUNC_FEATURES_2 127 106 + #define ARM_SMCCC_KVM_NUM_FUNCS 128 107 + 108 + #define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \ 109 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ 110 + ARM_SMCCC_SMC_32, \ 111 + ARM_SMCCC_OWNER_VENDOR_HYP, \ 112 + ARM_SMCCC_KVM_FUNC_FEATURES) 113 + 92 114 #define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1 115 + 116 + /* 117 + * ptp_kvm is a feature used for time sync between vm and host. 118 + * ptp_kvm module in guest kernel will get service from host using 119 + * this hypercall ID. 120 + */ 121 + #define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \ 122 + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ 123 + ARM_SMCCC_SMC_32, \ 124 + ARM_SMCCC_OWNER_VENDOR_HYP, \ 125 + ARM_SMCCC_KVM_FUNC_PTP) 126 + 127 + /* ptp_kvm counter type ID */ 128 + #define KVM_PTP_VIRT_COUNTER 0 129 + #define KVM_PTP_PHYS_COUNTER 1 93 130 94 131 /* Paravirtualised time calls (defined by ARM DEN0057A) */ 95 132 #define ARM_SMCCC_HV_PV_TIME_FEATURES \
+6
include/linux/clocksource.h
··· 17 17 #include <linux/timer.h> 18 18 #include <linux/init.h> 19 19 #include <linux/of.h> 20 + #include <linux/clocksource_ids.h> 20 21 #include <asm/div64.h> 21 22 #include <asm/io.h> 22 23 ··· 63 62 * 400-499: Perfect 64 63 * The ideal clocksource. A must-use where 65 64 * available. 65 + * @id: Defaults to CSID_GENERIC. The id value is captured 66 + * in certain snapshot functions to allow callers to 67 + * validate the clocksource from which the snapshot was 68 + * taken. 66 69 * @flags: Flags describing special properties 67 70 * @enable: Optional function to enable the clocksource 68 71 * @disable: Optional function to disable the clocksource ··· 105 100 const char *name; 106 101 struct list_head list; 107 102 int rating; 103 + enum clocksource_ids id; 108 104 enum vdso_clock_mode vdso_clock_mode; 109 105 unsigned long flags; 110 106
+12
include/linux/clocksource_ids.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_CLOCKSOURCE_IDS_H 3 + #define _LINUX_CLOCKSOURCE_IDS_H 4 + 5 + /* Enum to give clocksources a unique identifier */ 6 + enum clocksource_ids { 7 + CSID_GENERIC = 0, 8 + CSID_ARM_ARCH_COUNTER, 9 + CSID_MAX, 10 + }; 11 + 12 + #endif
+19
include/linux/ptp_kvm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * Virtual PTP 1588 clock for use with KVM guests 4 + * 5 + * Copyright (C) 2017 Red Hat Inc. 6 + */ 7 + 8 + #ifndef _PTP_KVM_H_ 9 + #define _PTP_KVM_H_ 10 + 11 + struct timespec64; 12 + struct clocksource; 13 + 14 + int kvm_arch_ptp_init(void); 15 + int kvm_arch_ptp_get_clock(struct timespec64 *ts); 16 + int kvm_arch_ptp_get_crosststamp(u64 *cycle, 17 + struct timespec64 *tspec, struct clocksource **cs); 18 + 19 + #endif /* _PTP_KVM_H_ */
+7 -5
include/linux/timekeeping.h
··· 3 3 #define _LINUX_TIMEKEEPING_H 4 4 5 5 #include <linux/errno.h> 6 + #include <linux/clocksource_ids.h> 6 7 7 8 /* Included from linux/ktime.h */ 8 9 ··· 244 243 * @cs_was_changed_seq: The sequence number of clocksource change events 245 244 */ 246 245 struct system_time_snapshot { 247 - u64 cycles; 248 - ktime_t real; 249 - ktime_t raw; 250 - unsigned int clock_was_set_seq; 251 - u8 cs_was_changed_seq; 246 + u64 cycles; 247 + ktime_t real; 248 + ktime_t raw; 249 + enum clocksource_ids cs_id; 250 + unsigned int clock_was_set_seq; 251 + u8 cs_was_changed_seq; 252 252 }; 253 253 254 254 /**
+1
include/uapi/linux/kvm.h
··· 1078 1078 #define KVM_CAP_DIRTY_LOG_RING 192 1079 1079 #define KVM_CAP_X86_BUS_LOCK_EXIT 193 1080 1080 #define KVM_CAP_PPC_DAWR1 194 1081 + #define KVM_CAP_PTP_KVM 195 1081 1082 1082 1083 #ifdef KVM_CAP_IRQ_ROUTING 1083 1084
+2
kernel/time/clocksource.c
··· 920 920 921 921 clocksource_arch_init(cs); 922 922 923 + if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX)) 924 + cs->id = CSID_GENERIC; 923 925 if (cs->vdso_clock_mode < 0 || 924 926 cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) { 925 927 pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
+1
kernel/time/timekeeping.c
··· 1048 1048 do { 1049 1049 seq = read_seqcount_begin(&tk_core.seq); 1050 1050 now = tk_clock_read(&tk->tkr_mono); 1051 + systime_snapshot->cs_id = tk->tkr_mono.clock->id; 1051 1052 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; 1052 1053 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; 1053 1054 base_real = ktime_add(tk->tkr_mono.base,