Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: arm64: Kill leftovers of ad-hoc timer userspace access

Now that the whole timer infrastructure is handled as system register
accesses, get rid of the remaining, unused ad-hoc infrastructure.

Signed-off-by: Marc Zyngier <maz@kernel.org>

-126
-68
arch/arm64/kvm/arch_timer.c
··· 1112 1112 disable_percpu_irq(host_ptimer_irq); 1113 1113 } 1114 1114 1115 - int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) 1116 - { 1117 - struct arch_timer_context *timer; 1118 - 1119 - switch (regid) { 1120 - case KVM_REG_ARM_TIMER_CTL: 1121 - timer = vcpu_vtimer(vcpu); 1122 - kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); 1123 - break; 1124 - case KVM_REG_ARM_TIMER_CNT: 1125 - if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, 1126 - &vcpu->kvm->arch.flags)) { 1127 - timer = vcpu_vtimer(vcpu); 1128 - timer_set_offset(timer, kvm_phys_timer_read() - value); 1129 - } 1130 - break; 1131 - case KVM_REG_ARM_TIMER_CVAL: 1132 - timer = vcpu_vtimer(vcpu); 1133 - kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value); 1134 - break; 1135 - case KVM_REG_ARM_PTIMER_CTL: 1136 - timer = vcpu_ptimer(vcpu); 1137 - kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); 1138 - break; 1139 - case KVM_REG_ARM_PTIMER_CNT: 1140 - if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, 1141 - &vcpu->kvm->arch.flags)) { 1142 - timer = vcpu_ptimer(vcpu); 1143 - timer_set_offset(timer, kvm_phys_timer_read() - value); 1144 - } 1145 - break; 1146 - case KVM_REG_ARM_PTIMER_CVAL: 1147 - timer = vcpu_ptimer(vcpu); 1148 - kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value); 1149 - break; 1150 - 1151 - default: 1152 - return -1; 1153 - } 1154 - 1155 - return 0; 1156 - } 1157 - 1158 1115 static u64 read_timer_ctl(struct arch_timer_context *timer) 1159 1116 { 1160 1117 /* ··· 1126 1169 ctl |= ARCH_TIMER_CTRL_IT_STAT; 1127 1170 1128 1171 return ctl; 1129 - } 1130 - 1131 - u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) 1132 - { 1133 - switch (regid) { 1134 - case KVM_REG_ARM_TIMER_CTL: 1135 - return kvm_arm_timer_read(vcpu, 1136 - vcpu_vtimer(vcpu), TIMER_REG_CTL); 1137 - case KVM_REG_ARM_TIMER_CNT: 1138 - return kvm_arm_timer_read(vcpu, 1139 - vcpu_vtimer(vcpu), TIMER_REG_CNT); 1140 - case KVM_REG_ARM_TIMER_CVAL: 1141 - return kvm_arm_timer_read(vcpu, 1142 
- vcpu_vtimer(vcpu), TIMER_REG_CVAL); 1143 - case KVM_REG_ARM_PTIMER_CTL: 1144 - return kvm_arm_timer_read(vcpu, 1145 - vcpu_ptimer(vcpu), TIMER_REG_CTL); 1146 - case KVM_REG_ARM_PTIMER_CNT: 1147 - return kvm_arm_timer_read(vcpu, 1148 - vcpu_ptimer(vcpu), TIMER_REG_CNT); 1149 - case KVM_REG_ARM_PTIMER_CVAL: 1150 - return kvm_arm_timer_read(vcpu, 1151 - vcpu_ptimer(vcpu), TIMER_REG_CVAL); 1152 - } 1153 - return (u64)-1; 1154 1172 } 1155 1173 1156 1174 static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
-55
arch/arm64/kvm/guest.c
··· 591 591 return copy_core_reg_indices(vcpu, NULL); 592 592 } 593 593 594 - static const u64 timer_reg_list[] = { 595 - }; 596 - 597 - #define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list) 598 - 599 - static bool is_timer_reg(u64 index) 600 - { 601 - return false; 602 - } 603 - 604 - static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) 605 - { 606 - for (int i = 0; i < NUM_TIMER_REGS; i++) { 607 - if (put_user(timer_reg_list[i], uindices)) 608 - return -EFAULT; 609 - uindices++; 610 - } 611 - 612 - return 0; 613 - } 614 - 615 - static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 616 - { 617 - void __user *uaddr = (void __user *)(long)reg->addr; 618 - u64 val; 619 - int ret; 620 - 621 - ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)); 622 - if (ret != 0) 623 - return -EFAULT; 624 - 625 - return kvm_arm_timer_set_reg(vcpu, reg->id, val); 626 - } 627 - 628 - static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 629 - { 630 - void __user *uaddr = (void __user *)(long)reg->addr; 631 - u64 val; 632 - 633 - val = kvm_arm_timer_get_reg(vcpu, reg->id); 634 - return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? 
-EFAULT : 0; 635 - } 636 - 637 594 static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu) 638 595 { 639 596 const unsigned int slices = vcpu_sve_slices(vcpu); ··· 666 709 res += num_sve_regs(vcpu); 667 710 res += kvm_arm_num_sys_reg_descs(vcpu); 668 711 res += kvm_arm_get_fw_num_regs(vcpu); 669 - res += NUM_TIMER_REGS; 670 712 671 713 return res; 672 714 } ··· 696 740 return ret; 697 741 uindices += kvm_arm_get_fw_num_regs(vcpu); 698 742 699 - ret = copy_timer_indices(vcpu, uindices); 700 - if (ret < 0) 701 - return ret; 702 - uindices += NUM_TIMER_REGS; 703 - 704 743 return kvm_arm_copy_sys_reg_indices(vcpu, uindices); 705 744 } 706 745 ··· 713 762 case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg); 714 763 } 715 764 716 - if (is_timer_reg(reg->id)) 717 - return get_timer_reg(vcpu, reg); 718 - 719 765 return kvm_arm_sys_reg_get_reg(vcpu, reg); 720 766 } 721 767 ··· 729 781 return kvm_arm_set_fw_reg(vcpu, reg); 730 782 case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg); 731 783 } 732 - 733 - if (is_timer_reg(reg->id)) 734 - return set_timer_reg(vcpu, reg); 735 784 736 785 return kvm_arm_sys_reg_set_reg(vcpu, reg); 737 786 }
-3
include/kvm/arm_arch_timer.h
··· 107 107 108 108 void kvm_timer_init_vm(struct kvm *kvm); 109 109 110 - u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); 111 - int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); 112 - 113 110 int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); 114 111 int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); 115 112 int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);