Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'kvm-arm-for-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm

KVM/ARM: New features for 3.17 include:
- Fixes and code refactoring for stage2 kvm MMU unmap_range
- Support unmapping IPAs on deleting memslots for arm and arm64
- Support MMIO mappings in stage2 faults
- KVM VGIC v2 emulation on GICv3 hardware
- Big-Endian support for arm/arm64 (guest and host)
- Debug Architecture support for arm64 (arm32 is on Christoffer's todo list)

Conflicts:
virt/kvm/arm/vgic.c [last-minute cherry-pick from 3.17 to 3.16]

+2861 -579
+8
Documentation/arm64/booting.txt
··· 168 168 the kernel image will be entered must be initialised by software at a 169 169 higher exception level to prevent execution in an UNKNOWN state. 170 170 171 + For systems with a GICv3 interrupt controller: 172 + - If EL3 is present: 173 + ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1. 174 + ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1. 175 + - If the kernel is entered at EL1: 176 + ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1. 177 + ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1. 178 + 171 179 The requirements described above for CPU mode, caches, MMUs, architected 172 180 timers, coherency and system registers apply to all CPUs. All CPUs must 173 181 enter the kernel in the same exception level.
+79
Documentation/devicetree/bindings/arm/gic-v3.txt
··· 1 + * ARM Generic Interrupt Controller, version 3 2 + 3 + AArch64 SMP cores are often associated with a GICv3, providing Private 4 + Peripheral Interrupts (PPI), Shared Peripheral Interrupts (SPI), 5 + Software Generated Interrupts (SGI), and Locality-specific Peripheral 6 + Interrupts (LPI). 7 + 8 + Main node required properties: 9 + 10 + - compatible : should at least contain "arm,gic-v3". 11 + - interrupt-controller : Identifies the node as an interrupt controller 12 + - #interrupt-cells : Specifies the number of cells needed to encode an 13 + interrupt source. Must be a single cell with a value of at least 3. 14 + 15 + The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI 16 + interrupts. Other values are reserved for future use. 17 + 18 + The 2nd cell contains the interrupt number for the interrupt type. 19 + SPI interrupts are in the range [0-987]. PPI interrupts are in the 20 + range [0-15]. 21 + 22 + The 3rd cell is the flags, encoded as follows: 23 + bits[3:0] trigger type and level flags. 24 + 1 = edge triggered 25 + 4 = level triggered 26 + 27 + Cells 4 and beyond are reserved for future use. When the 1st cell 28 + has a value of 0 or 1, cells 4 and beyond act as padding, and may be 29 + ignored. It is recommended that padding cells have a value of 0. 30 + 31 + - reg : Specifies base physical address(es) and size of the GIC 32 + registers, in the following order: 33 + - GIC Distributor interface (GICD) 34 + - GIC Redistributors (GICR), one range per redistributor region 35 + - GIC CPU interface (GICC) 36 + - GIC Hypervisor interface (GICH) 37 + - GIC Virtual CPU interface (GICV) 38 + 39 + GICC, GICH and GICV are optional. 40 + 41 + - interrupts : Interrupt source of the VGIC maintenance interrupt. 42 + 43 + Optional properties: 44 + 45 + - redistributor-stride : If using padding pages, specifies the stride 46 + of consecutive redistributors. Must be a multiple of 64kB. 47 + 48 + - #redistributor-regions: The number of independent contiguous regions 49 + occupied by the redistributors. Required if more than one such 50 + region is present. 51 + 52 + Examples: 53 + 54 + gic: interrupt-controller@2f000000 { 55 + compatible = "arm,gic-v3"; 56 + #interrupt-cells = <3>; 57 + interrupt-controller; 58 + reg = <0x0 0x2f000000 0 0x10000>, // GICD 59 + <0x0 0x2f100000 0 0x200000>, // GICR 60 + <0x0 0x2c000000 0 0x2000>, // GICC 61 + <0x0 0x2c010000 0 0x2000>, // GICH 62 + <0x0 0x2c020000 0 0x2000>; // GICV 63 + interrupts = <1 9 4>; 64 + }; 65 + 66 + gic: interrupt-controller@2c010000 { 67 + compatible = "arm,gic-v3"; 68 + #interrupt-cells = <3>; 69 + interrupt-controller; 70 + redistributor-stride = <0x0 0x40000>; // 256kB stride 71 + #redistributor-regions = <2>; 72 + reg = <0x0 0x2c010000 0 0x10000>, // GICD 73 + <0x0 0x2d000000 0 0x800000>, // GICR 1: CPUs 0-31 74 + <0x0 0x2e000000 0 0x800000>, // GICR 2: CPUs 32-63 75 + <0x0 0x2c040000 0 0x2000>, // GICC 76 + <0x0 0x2c060000 0 0x2000>, // GICH 77 + <0x0 0x2c080000 0 0x2000>; // GICV 78 + interrupts = <1 9 4>; 79 + };
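
For illustration only (not part of this series): a driver consuming the two optional properties above could parse them as below. The function name, defaults and error handling are assumptions; only the of_property_read_* calls and the property names come from the binding.

	#include <linux/of.h>
	#include <linux/sizes.h>

	/* Hypothetical sketch: fetch the optional redistributor layout
	 * properties defined by this binding, using its stated defaults. */
	static int gic_parse_redist_props(struct device_node *node,
					  u32 *nr_regions, u64 *stride)
	{
		*nr_regions = 1;	/* property only required for >1 region */
		*stride = 0;		/* 0: no padding pages between frames */

		of_property_read_u32(node, "#redistributor-regions", nr_regions);
		of_property_read_u64(node, "redistributor-stride", stride);

		if (*stride & (SZ_64K - 1))	/* must be a multiple of 64kB */
			return -EINVAL;
		return 0;
	}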
+18
arch/arm/include/asm/kvm_asm.h
··· 61 61 #define ARM_EXCEPTION_FIQ 6 62 62 #define ARM_EXCEPTION_HVC 7 63 63 64 + /* 65 + * The rr_lo_hi macro swaps a pair of registers depending on 66 + * current endianness. It is used in conjunction with ldrd and strd 67 + * instructions that load/store a 64-bit value from/to memory to/from 68 + * a pair of registers which are used with the mrrc and mcrr instructions. 69 + * If used with the ldrd/strd instructions, the a1 parameter is the first 70 + * source/destination register and the a2 parameter is the second 71 + * source/destination register. Note that the ldrd/strd instructions 72 + * already swap the bytes within the words correctly according to the 73 + * endianness setting, but the order of the registers needs to be effectively 74 + * swapped when used with the mrrc/mcrr instructions. 75 + */ 76 + #ifdef CONFIG_CPU_ENDIAN_BE8 77 + #define rr_lo_hi(a1, a2) a2, a1 78 + #else 79 + #define rr_lo_hi(a1, a2) a1, a2 80 + #endif 81 + 64 82 #ifndef __ASSEMBLY__ 65 83 struct kvm; 66 84 struct kvm_vcpu;
+18 -4
arch/arm/include/asm/kvm_emulate.h
··· 185 185 default: 186 186 return be32_to_cpu(data); 187 187 } 188 + } else { 189 + switch (len) { 190 + case 1: 191 + return data & 0xff; 192 + case 2: 193 + return le16_to_cpu(data & 0xffff); 194 + default: 195 + return le32_to_cpu(data); 196 + } 188 197 } 189 - 190 - return data; /* Leave LE untouched */ 191 198 } 192 199 193 200 static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, ··· 210 203 default: 211 204 return cpu_to_be32(data); 212 205 } 206 + } else { 207 + switch (len) { 208 + case 1: 209 + return data & 0xff; 210 + case 2: 211 + return cpu_to_le16(data & 0xffff); 212 + default: 213 + return cpu_to_le32(data); 214 + } 213 215 } 214 - 215 - return data; /* Leave LE untouched */ 216 216 } 217 217 218 218 #endif /* __ARM_KVM_EMULATE_H__ */
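
For context, these helpers sit on the MMIO completion path. A minimal sketch modelled on arch/arm/kvm/mmio.c follows (mmio_read_buf() is the byte-copy helper defined there; treat the exact body as illustrative, not the patch's code):

	/* Sketch: when userspace completes an MMIO read, the data arrives
	 * in host byte order; vcpu_data_host_to_guest() converts it to the
	 * guest's current endianness before the destination GPR is written. */
	int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
	{
		unsigned long data;

		if (!run->mmio.is_write) {
			data = mmio_read_buf(run->mmio.data, run->mmio.len);
			data = vcpu_data_host_to_guest(vcpu, data, run->mmio.len);
			*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
		}
		return 0;
	}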
+5 -3
arch/arm/include/asm/kvm_host.h
··· 225 225 return 0; 226 226 } 227 227 228 + static inline void vgic_arch_setup(const struct vgic_params *vgic) 229 + { 230 + BUG_ON(vgic->type != VGIC_V2); 231 + } 232 + 228 233 int kvm_perf_init(void); 229 234 int kvm_perf_teardown(void); 230 - 231 - u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); 232 - int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); 233 235 234 236 #endif /* __ARM_KVM_HOST_H__ */
+12
arch/arm/include/asm/kvm_mmu.h
··· 127 127 (__boundary - 1 < (end) - 1)? __boundary: (end); \ 128 128 }) 129 129 130 + static inline bool kvm_page_empty(void *ptr) 131 + { 132 + struct page *ptr_page = virt_to_page(ptr); 133 + return page_count(ptr_page) == 1; 134 + } 135 + 136 + 137 + #define kvm_pte_table_empty(ptep) kvm_page_empty(ptep) 138 + #define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp) 139 + #define kvm_pud_table_empty(pudp) (0) 140 + 141 + 130 142 struct kvm; 131 143 132 144 #define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
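
kvm_page_empty() relies on a refcounting convention the stage-2 code already follows: a table page is allocated with page_count == 1, and one extra reference is held per live entry. A simplified sketch (the function name here is hypothetical; the calls mirror arch/arm/kvm/mmu.c):

	/* Sketch: clearing an entry drops its per-entry reference, so a
	 * page_count of 1 again means the table holds no valid entries. */
	static void stage2_clear_pte(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
	{
		kvm_set_pte(pte, __pte(0));	/* invalidate the entry */
		put_page(virt_to_page(pte));	/* drop its per-entry reference */
		kvm_tlb_flush_vmid_ipa(kvm, addr);
		/* kvm_page_empty(pte) is now true iff no other entry is live */
	}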
+7 -7
arch/arm/kernel/asm-offsets.c
··· 182 182 DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); 183 183 #ifdef CONFIG_KVM_ARM_VGIC 184 184 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); 185 - DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); 186 - DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); 187 - DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); 188 - DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); 189 - DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); 190 - DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); 191 - DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); 185 + DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); 186 + DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); 187 + DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr)); 188 + DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr)); 189 + DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr)); 190 + DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr)); 191 + DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr)); 192 192 DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); 193 193 #ifdef CONFIG_KVM_ARM_TIMER 194 194 DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
+1 -3
arch/arm/kernel/hyp-stub.S
··· 134 134 mcr p15, 4, r7, c1, c1, 3 @ HSTR 135 135 136 136 THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE 137 - #ifdef CONFIG_CPU_BIG_ENDIAN 138 - orr r7, #(1 << 9) @ HSCTLR.EE 139 - #endif 137 + ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE 140 138 mcr p15, 4, r7, c1, c0, 0 @ HSCTLR 141 139 142 140 mrc p15, 4, r7, c1, c1, 1 @ HDCR
+1 -1
arch/arm/kvm/Kconfig
··· 23 23 select HAVE_KVM_CPU_RELAX_INTERCEPT 24 24 select KVM_MMIO 25 25 select KVM_ARM_HOST 26 - depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN 26 + depends on ARM_VIRT_EXT && ARM_LPAE 27 27 ---help--- 28 28 Support hosting virtualized guest machines. You will also 29 29 need to select one or more of the processor modules below.
+1
arch/arm/kvm/Makefile
··· 21 21 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o 22 22 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o 23 23 obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o 24 + obj-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o 24 25 obj-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
-37
arch/arm/kvm/arm.c
··· 155 155 return VM_FAULT_SIGBUS; 156 156 } 157 157 158 - void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 159 - struct kvm_memory_slot *dont) 160 - { 161 - } 162 - 163 - int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 164 - unsigned long npages) 165 - { 166 - return 0; 167 - } 168 158 169 159 /** 170 160 * kvm_arch_destroy_vm - destroy the VM data structure ··· 215 225 return -EINVAL; 216 226 } 217 227 218 - void kvm_arch_memslots_updated(struct kvm *kvm) 219 - { 220 - } 221 - 222 - int kvm_arch_prepare_memory_region(struct kvm *kvm, 223 - struct kvm_memory_slot *memslot, 224 - struct kvm_userspace_memory_region *mem, 225 - enum kvm_mr_change change) 226 - { 227 - return 0; 228 - } 229 - 230 - void kvm_arch_commit_memory_region(struct kvm *kvm, 231 - struct kvm_userspace_memory_region *mem, 232 - const struct kvm_memory_slot *old, 233 - enum kvm_mr_change change) 234 - { 235 - } 236 - 237 - void kvm_arch_flush_shadow_all(struct kvm *kvm) 238 - { 239 - } 240 - 241 - void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 242 - struct kvm_memory_slot *slot) 243 - { 244 - } 245 228 246 229 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) 247 230 {
+79 -9
arch/arm/kvm/coproc.c
··· 44 44 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ 45 45 #define CSSELR_MAX 12 46 46 47 + /* 48 + * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some 49 + * of cp15 registers can be viewed either as couple of two u32 registers 50 + * or one u64 register. Current u64 register encoding is that least 51 + * significant u32 word is followed by most significant u32 word. 52 + */ 53 + static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu, 54 + const struct coproc_reg *r, 55 + u64 val) 56 + { 57 + vcpu->arch.cp15[r->reg] = val & 0xffffffff; 58 + vcpu->arch.cp15[r->reg + 1] = val >> 32; 59 + } 60 + 61 + static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, 62 + const struct coproc_reg *r) 63 + { 64 + u64 val; 65 + 66 + val = vcpu->arch.cp15[r->reg + 1]; 67 + val = val << 32; 68 + val = val | vcpu->arch.cp15[r->reg]; 69 + return val; 70 + } 71 + 47 72 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) 48 73 { 49 74 kvm_inject_undefined(vcpu); ··· 707 682 { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, 708 683 }; 709 684 685 + /* 686 + * Reads a register value from a userspace address to a kernel 687 + * variable. Make sure that register size matches sizeof(*__val). 688 + */ 710 689 static int reg_from_user(void *val, const void __user *uaddr, u64 id) 711 690 { 712 - /* This Just Works because we are little endian. */ 713 691 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) 714 692 return -EFAULT; 715 693 return 0; 716 694 } 717 695 696 + /* 697 + * Writes a register value to a userspace address from a kernel variable. 698 + * Make sure that register size matches sizeof(*__val). 699 + */ 718 700 static int reg_to_user(void __user *uaddr, const void *val, u64 id) 719 701 { 720 - /* This Just Works because we are little endian. 
*/ 721 702 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) 722 703 return -EFAULT; 723 704 return 0; ··· 733 702 { 734 703 struct coproc_params params; 735 704 const struct coproc_reg *r; 705 + int ret; 736 706 737 707 if (!index_to_params(id, &params)) 738 708 return -ENOENT; ··· 742 710 if (!r) 743 711 return -ENOENT; 744 712 745 - return reg_to_user(uaddr, &r->val, id); 713 + ret = -ENOENT; 714 + if (KVM_REG_SIZE(id) == 4) { 715 + u32 val = r->val; 716 + 717 + ret = reg_to_user(uaddr, &val, id); 718 + } else if (KVM_REG_SIZE(id) == 8) { 719 + ret = reg_to_user(uaddr, &r->val, id); 720 + } 721 + return ret; 746 722 } 747 723 748 724 static int set_invariant_cp15(u64 id, void __user *uaddr) ··· 758 718 struct coproc_params params; 759 719 const struct coproc_reg *r; 760 720 int err; 761 - u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ 721 + u64 val; 762 722 763 723 if (!index_to_params(id, &params)) 764 724 return -ENOENT; ··· 766 726 if (!r) 767 727 return -ENOENT; 768 728 769 - err = reg_from_user(&val, uaddr, id); 729 + err = -ENOENT; 730 + if (KVM_REG_SIZE(id) == 4) { 731 + u32 val32; 732 + 733 + err = reg_from_user(&val32, uaddr, id); 734 + if (!err) 735 + val = val32; 736 + } else if (KVM_REG_SIZE(id) == 8) { 737 + err = reg_from_user(&val, uaddr, id); 738 + } 770 739 if (err) 771 740 return err; 772 741 ··· 1053 1004 { 1054 1005 const struct coproc_reg *r; 1055 1006 void __user *uaddr = (void __user *)(long)reg->addr; 1007 + int ret; 1056 1008 1057 1009 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 1058 1010 return demux_c15_get(reg->id, uaddr); ··· 1065 1015 if (!r) 1066 1016 return get_invariant_cp15(reg->id, uaddr); 1067 1017 1068 - /* Note: copies two regs if size is 64 bit. */ 1069 - return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); 1018 + ret = -ENOENT; 1019 + if (KVM_REG_SIZE(reg->id) == 8) { 1020 + u64 val; 1021 + 1022 + val = vcpu_cp15_reg64_get(vcpu, r); 1023 + ret = reg_to_user(uaddr, &val, reg->id); 1024 + } else if (KVM_REG_SIZE(reg->id) == 4) { 1025 + ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); 1026 + } 1027 + 1028 + return ret; 1070 1029 } 1071 1030 1072 1031 int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 1073 1032 { 1074 1033 const struct coproc_reg *r; 1075 1034 void __user *uaddr = (void __user *)(long)reg->addr; 1035 + int ret; 1076 1036 1077 1037 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 1078 1038 return demux_c15_set(reg->id, uaddr); ··· 1094 1034 if (!r) 1095 1035 return set_invariant_cp15(reg->id, uaddr); 1096 1036 1097 - /* Note: copies two regs if size is 64 bit */ 1098 - return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); 1037 + ret = -ENOENT; 1038 + if (KVM_REG_SIZE(reg->id) == 8) { 1039 + u64 val; 1040 + 1041 + ret = reg_from_user(&val, uaddr, reg->id); 1042 + if (!ret) 1043 + vcpu_cp15_reg64_set(vcpu, r, val); 1044 + } else if (KVM_REG_SIZE(reg->id) == 4) { 1045 + ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); 1046 + } 1047 + 1048 + return ret; 1099 1049 } 1100 1050 1101 1051 static unsigned int num_demux_regs(void)
-10
arch/arm/kvm/guest.c
··· 124 124 return false; 125 125 } 126 126 127 - int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) 128 - { 129 - return 0; 130 - } 131 - 132 - u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) 133 - { 134 - return 0; 135 - } 136 - 137 127 #else 138 128 139 129 #define NUM_TIMER_REGS 3
+2 -2
arch/arm/kvm/init.S
··· 71 71 bne phase2 @ Yes, second stage init 72 72 73 73 @ Set the HTTBR to point to the hypervisor PGD pointer passed 74 - mcrr p15, 4, r2, r3, c2 74 + mcrr p15, 4, rr_lo_hi(r2, r3), c2 75 75 76 76 @ Set the HTCR and VTCR to the same shareability and cacheability 77 77 @ settings as the non-secure TTBCR and with T0SZ == 0. ··· 137 137 mov pc, r0 138 138 139 139 target: @ We're now in the trampoline code, switch page tables 140 - mcrr p15, 4, r2, r3, c2 140 + mcrr p15, 4, rr_lo_hi(r2, r3), c2 141 141 isb 142 142 143 143 @ Invalidate the old TLBs
+7 -2
arch/arm/kvm/interrupts.S
··· 52 52 dsb ishst 53 53 add r0, r0, #KVM_VTTBR 54 54 ldrd r2, r3, [r0] 55 - mcrr p15, 6, r2, r3, c2 @ Write VTTBR 55 + mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR 56 56 isb 57 57 mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored) 58 58 dsb ish ··· 135 135 ldr r1, [vcpu, #VCPU_KVM] 136 136 add r1, r1, #KVM_VTTBR 137 137 ldrd r2, r3, [r1] 138 - mcrr p15, 6, r2, r3, c2 @ Write VTTBR 138 + mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR 139 139 140 140 @ We're all done, just restore the GPRs and go to the guest 141 141 restore_guest_regs ··· 199 199 200 200 restore_host_regs 201 201 clrex @ Clear exclusive monitor 202 + #ifndef CONFIG_CPU_ENDIAN_BE8 202 203 mov r0, r1 @ Return the return code 203 204 mov r1, #0 @ Clear upper bits in return value 205 + #else 206 + @ r1 already has return code 207 + mov r0, #0 @ Clear upper bits in return value 208 + #endif /* CONFIG_CPU_ENDIAN_BE8 */ 204 209 bx lr @ return to IOCTL 205 210 206 211 /********************************************************************
+30 -16
arch/arm/kvm/interrupts_head.S
··· 1 1 #include <linux/irqchip/arm-gic.h> 2 + #include <asm/assembler.h> 2 3 3 4 #define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) 4 5 #define VCPU_USR_SP (VCPU_USR_REG(13)) ··· 421 420 ldr r8, [r2, #GICH_ELRSR0] 422 421 ldr r9, [r2, #GICH_ELRSR1] 423 422 ldr r10, [r2, #GICH_APR] 423 + ARM_BE8(rev r3, r3 ) 424 + ARM_BE8(rev r4, r4 ) 425 + ARM_BE8(rev r5, r5 ) 426 + ARM_BE8(rev r6, r6 ) 427 + ARM_BE8(rev r7, r7 ) 428 + ARM_BE8(rev r8, r8 ) 429 + ARM_BE8(rev r9, r9 ) 430 + ARM_BE8(rev r10, r10 ) 424 431 425 - str r3, [r11, #VGIC_CPU_HCR] 426 - str r4, [r11, #VGIC_CPU_VMCR] 427 - str r5, [r11, #VGIC_CPU_MISR] 428 - str r6, [r11, #VGIC_CPU_EISR] 429 - str r7, [r11, #(VGIC_CPU_EISR + 4)] 430 - str r8, [r11, #VGIC_CPU_ELRSR] 431 - str r9, [r11, #(VGIC_CPU_ELRSR + 4)] 432 - str r10, [r11, #VGIC_CPU_APR] 432 + str r3, [r11, #VGIC_V2_CPU_HCR] 433 + str r4, [r11, #VGIC_V2_CPU_VMCR] 434 + str r5, [r11, #VGIC_V2_CPU_MISR] 435 + str r6, [r11, #VGIC_V2_CPU_EISR] 436 + str r7, [r11, #(VGIC_V2_CPU_EISR + 4)] 437 + str r8, [r11, #VGIC_V2_CPU_ELRSR] 438 + str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)] 439 + str r10, [r11, #VGIC_V2_CPU_APR] 433 440 434 441 /* Clear GICH_HCR */ 435 442 mov r5, #0 ··· 445 436 446 437 /* Save list registers */ 447 438 add r2, r2, #GICH_LR0 448 - add r3, r11, #VGIC_CPU_LR 439 + add r3, r11, #VGIC_V2_CPU_LR 449 440 ldr r4, [r11, #VGIC_CPU_NR_LR] 450 441 1: ldr r6, [r2], #4 442 + ARM_BE8(rev r6, r6 ) 451 443 str r6, [r3], #4 452 444 subs r4, r4, #1 453 445 bne 1b ··· 473 463 add r11, vcpu, #VCPU_VGIC_CPU 474 464 475 465 /* We only restore a minimal set of registers */ 476 - ldr r3, [r11, #VGIC_CPU_HCR] 477 - ldr r4, [r11, #VGIC_CPU_VMCR] 478 - ldr r8, [r11, #VGIC_CPU_APR] 466 + ldr r3, [r11, #VGIC_V2_CPU_HCR] 467 + ldr r4, [r11, #VGIC_V2_CPU_VMCR] 468 + ldr r8, [r11, #VGIC_V2_CPU_APR] 469 + ARM_BE8(rev r3, r3 ) 470 + ARM_BE8(rev r4, r4 ) 471 + ARM_BE8(rev r8, r8 ) 479 472 480 473 str r3, [r2, #GICH_HCR] 481 474 str r4, [r2, #GICH_VMCR] ··· 486 473 487 474 /* Restore list registers */ 488 475 add r2, r2, #GICH_LR0 489 - add r3, r11, #VGIC_CPU_LR 476 + add r3, r11, #VGIC_V2_CPU_LR 490 477 ldr r4, [r11, #VGIC_CPU_NR_LR] 491 478 1: ldr r6, [r3], #4 479 + ARM_BE8(rev r6, r6 ) 492 480 str r6, [r2], #4 493 481 subs r4, r4, #1 494 482 bne 1b ··· 520 506 mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL 521 507 isb 522 508 523 - mrrc p15, 3, r2, r3, c14 @ CNTV_CVAL 509 + mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL 524 510 ldr r4, =VCPU_TIMER_CNTV_CVAL 525 511 add r5, vcpu, r4 526 512 strd r2, r3, [r5] ··· 560 546 561 547 ldr r2, [r4, #KVM_TIMER_CNTVOFF] 562 548 ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)] 563 - mcrr p15, 4, r2, r3, c14 @ CNTVOFF 549 + mcrr p15, 4, rr_lo_hi(r2, r3), c14 @ CNTVOFF 564 550 565 551 ldr r4, =VCPU_TIMER_CNTV_CVAL 566 552 add r5, vcpu, r4 567 553 ldrd r2, r3, [r5] 568 - mcrr p15, 3, r2, r3, c14 @ CNTV_CVAL 554 + mcrr p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL 569 555 isb 570 556 571 557 ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
+142 -80
arch/arm/kvm/mmu.c
··· 90 90 return p; 91 91 } 92 92 93 - static bool page_empty(void *ptr) 93 + static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) 94 94 { 95 - struct page *ptr_page = virt_to_page(ptr); 96 - return page_count(ptr_page) == 1; 95 + pud_t *pud_table __maybe_unused = pud_offset(pgd, 0); 96 + pgd_clear(pgd); 97 + kvm_tlb_flush_vmid_ipa(kvm, addr); 98 + pud_free(NULL, pud_table); 99 + put_page(virt_to_page(pgd)); 97 100 } 98 101 99 102 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) 100 103 { 101 - if (pud_huge(*pud)) { 102 - pud_clear(pud); 103 - kvm_tlb_flush_vmid_ipa(kvm, addr); 104 - } else { 105 - pmd_t *pmd_table = pmd_offset(pud, 0); 106 - pud_clear(pud); 107 - kvm_tlb_flush_vmid_ipa(kvm, addr); 108 - pmd_free(NULL, pmd_table); 109 - } 104 + pmd_t *pmd_table = pmd_offset(pud, 0); 105 + VM_BUG_ON(pud_huge(*pud)); 106 + pud_clear(pud); 107 + kvm_tlb_flush_vmid_ipa(kvm, addr); 108 + pmd_free(NULL, pmd_table); 110 109 put_page(virt_to_page(pud)); 111 110 } 112 111 113 112 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) 114 113 { 115 - if (kvm_pmd_huge(*pmd)) { 116 - pmd_clear(pmd); 117 - kvm_tlb_flush_vmid_ipa(kvm, addr); 118 - } else { 119 - pte_t *pte_table = pte_offset_kernel(pmd, 0); 120 - pmd_clear(pmd); 121 - kvm_tlb_flush_vmid_ipa(kvm, addr); 122 - pte_free_kernel(NULL, pte_table); 123 - } 114 + pte_t *pte_table = pte_offset_kernel(pmd, 0); 115 + VM_BUG_ON(kvm_pmd_huge(*pmd)); 116 + pmd_clear(pmd); 117 + kvm_tlb_flush_vmid_ipa(kvm, addr); 118 + pte_free_kernel(NULL, pte_table); 124 119 put_page(virt_to_page(pmd)); 125 120 } 126 121 127 - static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) 122 + static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, 123 + phys_addr_t addr, phys_addr_t end) 128 124 { 129 - if (pte_present(*pte)) { 130 - kvm_set_pte(pte, __pte(0)); 131 - put_page(virt_to_page(pte)); 132 - kvm_tlb_flush_vmid_ipa(kvm, addr); 133 - } 125 + phys_addr_t start_addr = addr; 126 + pte_t *pte, *start_pte; 127 + 128 + start_pte = pte = pte_offset_kernel(pmd, addr); 129 + do { 130 + if (!pte_none(*pte)) { 131 + kvm_set_pte(pte, __pte(0)); 132 + put_page(virt_to_page(pte)); 133 + kvm_tlb_flush_vmid_ipa(kvm, addr); 134 + } 135 + } while (pte++, addr += PAGE_SIZE, addr != end); 136 + 137 + if (kvm_pte_table_empty(start_pte)) 138 + clear_pmd_entry(kvm, pmd, start_addr); 134 139 } 135 140 136 - static void unmap_range(struct kvm *kvm, pgd_t *pgdp, 137 - unsigned long long start, u64 size) 141 + static void unmap_pmds(struct kvm *kvm, pud_t *pud, 142 + phys_addr_t addr, phys_addr_t end) 138 143 { 139 - pgd_t *pgd; 140 - pud_t *pud; 141 - pmd_t *pmd; 142 - pte_t *pte; 143 - unsigned long long addr = start, end = start + size; 144 - u64 next; 144 + phys_addr_t next, start_addr = addr; 145 + pmd_t *pmd, *start_pmd; 145 146 146 - while (addr < end) { 147 - pgd = pgdp + pgd_index(addr); 148 - pud = pud_offset(pgd, addr); 149 - pte = NULL; 150 - if (pud_none(*pud)) { 151 - addr = kvm_pud_addr_end(addr, end); 152 - continue; 153 - } 154 - 155 - if (pud_huge(*pud)) { 156 - /* 157 - * If we are dealing with a huge pud, just clear it and 158 - * move on. 
159 - */ 160 - clear_pud_entry(kvm, pud, addr); 161 - addr = kvm_pud_addr_end(addr, end); 162 - continue; 163 - } 164 - 165 - pmd = pmd_offset(pud, addr); 166 - if (pmd_none(*pmd)) { 167 - addr = kvm_pmd_addr_end(addr, end); 168 - continue; 169 - } 170 - 171 - if (!kvm_pmd_huge(*pmd)) { 172 - pte = pte_offset_kernel(pmd, addr); 173 - clear_pte_entry(kvm, pte, addr); 174 - next = addr + PAGE_SIZE; 175 - } 176 - 177 - /* 178 - * If the pmd entry is to be cleared, walk back up the ladder 179 - */ 180 - if (kvm_pmd_huge(*pmd) || (pte && page_empty(pte))) { 181 - clear_pmd_entry(kvm, pmd, addr); 182 - next = kvm_pmd_addr_end(addr, end); 183 - if (page_empty(pmd) && !page_empty(pud)) { 184 - clear_pud_entry(kvm, pud, addr); 185 - next = kvm_pud_addr_end(addr, end); 147 + start_pmd = pmd = pmd_offset(pud, addr); 148 + do { 149 + next = kvm_pmd_addr_end(addr, end); 150 + if (!pmd_none(*pmd)) { 151 + if (kvm_pmd_huge(*pmd)) { 152 + pmd_clear(pmd); 153 + kvm_tlb_flush_vmid_ipa(kvm, addr); 154 + put_page(virt_to_page(pmd)); 155 + } else { 156 + unmap_ptes(kvm, pmd, addr, next); 186 157 } 187 158 } 159 + } while (pmd++, addr = next, addr != end); 188 160 189 - addr = next; 190 - } 161 + if (kvm_pmd_table_empty(start_pmd)) 162 + clear_pud_entry(kvm, pud, start_addr); 163 + } 164 + 165 + static void unmap_puds(struct kvm *kvm, pgd_t *pgd, 166 + phys_addr_t addr, phys_addr_t end) 167 + { 168 + phys_addr_t next, start_addr = addr; 169 + pud_t *pud, *start_pud; 170 + 171 + start_pud = pud = pud_offset(pgd, addr); 172 + do { 173 + next = kvm_pud_addr_end(addr, end); 174 + if (!pud_none(*pud)) { 175 + if (pud_huge(*pud)) { 176 + pud_clear(pud); 177 + kvm_tlb_flush_vmid_ipa(kvm, addr); 178 + put_page(virt_to_page(pud)); 179 + } else { 180 + unmap_pmds(kvm, pud, addr, next); 181 + } 182 + } 183 + } while (pud++, addr = next, addr != end); 184 + 185 + if (kvm_pud_table_empty(start_pud)) 186 + clear_pgd_entry(kvm, pgd, start_addr); 187 + } 188 + 189 + 190 + static void unmap_range(struct kvm *kvm, pgd_t *pgdp, 191 + phys_addr_t start, u64 size) 192 + { 193 + pgd_t *pgd; 194 + phys_addr_t addr = start, end = start + size; 195 + phys_addr_t next; 196 + 197 + pgd = pgdp + pgd_index(addr); 198 + do { 199 + next = kvm_pgd_addr_end(addr, end); 200 + unmap_puds(kvm, pgd, addr, next); 201 + } while (pgd++, addr = next, addr != end); 191 202 } 192 203 193 204 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, ··· 759 748 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; 760 749 struct vm_area_struct *vma; 761 750 pfn_t pfn; 751 + pgprot_t mem_type = PAGE_S2; 762 752 763 753 write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu)); 764 754 if (fault_status == FSC_PERM && !write_fault) { ··· 810 798 if (is_error_pfn(pfn)) 811 799 return -EFAULT; 812 800 801 + if (kvm_is_mmio_pfn(pfn)) 802 + mem_type = PAGE_S2_DEVICE; 803 + 813 804 spin_lock(&kvm->mmu_lock); 814 805 if (mmu_notifier_retry(kvm, mmu_seq)) 815 806 goto out_unlock; ··· 820 805 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); 821 806 822 807 if (hugetlb) { 823 - pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2); 808 + pmd_t new_pmd = pfn_pmd(pfn, mem_type); 824 809 new_pmd = pmd_mkhuge(new_pmd); 825 810 if (writable) { 826 811 kvm_set_s2pmd_writable(&new_pmd); ··· 829 814 coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE); 830 815 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); 831 816 } else { 832 - pte_t new_pte = pfn_pte(pfn, PAGE_S2); 817 + pte_t new_pte = pfn_pte(pfn, mem_type); 833 818 if (writable) { 834 819 
kvm_set_s2pte_writable(&new_pte); 835 820 kvm_set_pfn_dirty(pfn); 836 821 } 837 822 coherent_cache_guest_page(vcpu, hva, PAGE_SIZE); 838 - ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false); 823 + ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, 824 + mem_type == PAGE_S2_DEVICE); 839 825 } 840 826 841 827 ··· 1115 1099 out: 1116 1100 free_hyp_pgds(); 1117 1101 return err; 1102 + } 1103 + 1104 + void kvm_arch_commit_memory_region(struct kvm *kvm, 1105 + struct kvm_userspace_memory_region *mem, 1106 + const struct kvm_memory_slot *old, 1107 + enum kvm_mr_change change) 1108 + { 1109 + gpa_t gpa = old->base_gfn << PAGE_SHIFT; 1110 + phys_addr_t size = old->npages << PAGE_SHIFT; 1111 + if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) { 1112 + spin_lock(&kvm->mmu_lock); 1113 + unmap_stage2_range(kvm, gpa, size); 1114 + spin_unlock(&kvm->mmu_lock); 1115 + } 1116 + } 1117 + 1118 + int kvm_arch_prepare_memory_region(struct kvm *kvm, 1119 + struct kvm_memory_slot *memslot, 1120 + struct kvm_userspace_memory_region *mem, 1121 + enum kvm_mr_change change) 1122 + { 1123 + return 0; 1124 + } 1125 + 1126 + void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 1127 + struct kvm_memory_slot *dont) 1128 + { 1129 + } 1130 + 1131 + int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 1132 + unsigned long npages) 1133 + { 1134 + return 0; 1135 + } 1136 + 1137 + void kvm_arch_memslots_updated(struct kvm *kvm) 1138 + { 1139 + } 1140 + 1141 + void kvm_arch_flush_shadow_all(struct kvm *kvm) 1142 + { 1143 + } 1144 + 1145 + void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 1146 + struct kvm_memory_slot *slot) 1147 + { 1118 1148 }
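
Note that kvm_arch_commit_memory_region() above leans on the pre-existing unmap_stage2_range() wrapper, which this series leaves unchanged; as a sketch, it simply feeds the VM's stage-2 pgd into the rewritten unmap_range():

	static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
	{
		unmap_range(kvm, kvm->arch.pgd, start, size);
	}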
+14 -5
arch/arm64/include/asm/debug-monitors.h
··· 18 18 19 19 #ifdef __KERNEL__ 20 20 21 + /* Low-level stepping controls. */ 22 + #define DBG_MDSCR_SS (1 << 0) 23 + #define DBG_SPSR_SS (1 << 21) 24 + 25 + /* MDSCR_EL1 enabling bits */ 26 + #define DBG_MDSCR_KDE (1 << 13) 27 + #define DBG_MDSCR_MDE (1 << 15) 28 + #define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE) 29 + 21 30 #define DBG_ESR_EVT(x) (((x) >> 27) & 0x7) 22 31 23 32 /* AArch64 */ ··· 82 73 83 74 #define CACHE_FLUSH_IS_SAFE 1 84 75 85 - enum debug_el { 86 - DBG_ACTIVE_EL0 = 0, 87 - DBG_ACTIVE_EL1, 88 - }; 89 - 90 76 /* AArch32 */ 91 77 #define DBG_ESR_EVT_BKPT 0x4 92 78 #define DBG_ESR_EVT_VECC 0x5 ··· 118 114 void unregister_break_hook(struct break_hook *hook); 119 115 120 116 u8 debug_monitors_arch(void); 117 + 118 + enum debug_el { 119 + DBG_ACTIVE_EL0 = 0, 120 + DBG_ACTIVE_EL1, 121 + }; 121 122 122 123 void enable_debug_monitors(enum debug_el el); 123 124 void disable_debug_monitors(enum debug_el el);
+3 -2
arch/arm64/include/asm/kvm_arm.h
··· 76 76 */ 77 77 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \ 78 78 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \ 79 - HCR_AMO | HCR_IMO | HCR_FMO | \ 80 - HCR_SWIO | HCR_TIDCP | HCR_RW) 79 + HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW) 81 80 #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) 81 + #define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO) 82 + 82 83 83 84 /* Hyp System Control Register (SCTLR_EL2) bits */ 84 85 #define SCTLR_EL2_EE (1 << 25)
+43 -10
arch/arm64/include/asm/kvm_asm.h
··· 18 18 #ifndef __ARM_KVM_ASM_H__ 19 19 #define __ARM_KVM_ASM_H__ 20 20 21 + #include <asm/virt.h> 22 + 21 23 /* 22 24 * 0 is reserved as an invalid value. 23 25 * Order *must* be kept in sync with the hyp switch code. ··· 45 43 #define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ 46 44 #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ 47 45 #define PAR_EL1 21 /* Physical Address Register */ 46 + #define MDSCR_EL1 22 /* Monitor Debug System Control Register */ 47 + #define DBGBCR0_EL1 23 /* Debug Breakpoint Control Registers (0-15) */ 48 + #define DBGBCR15_EL1 38 49 + #define DBGBVR0_EL1 39 /* Debug Breakpoint Value Registers (0-15) */ 50 + #define DBGBVR15_EL1 54 51 + #define DBGWCR0_EL1 55 /* Debug Watchpoint Control Registers (0-15) */ 52 + #define DBGWCR15_EL1 70 53 + #define DBGWVR0_EL1 71 /* Debug Watchpoint Value Registers (0-15) */ 54 + #define DBGWVR15_EL1 86 55 + #define MDCCINT_EL1 87 /* Monitor Debug Comms Channel Interrupt Enable Reg */ 56 + 48 57 /* 32bit specific registers. Keep them at the end of the range */ 49 - #define DACR32_EL2 22 /* Domain Access Control Register */ 50 - #define IFSR32_EL2 23 /* Instruction Fault Status Register */ 51 - #define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */ 52 - #define DBGVCR32_EL2 25 /* Debug Vector Catch Register */ 53 - #define TEECR32_EL1 26 /* ThumbEE Configuration Register */ 54 - #define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */ 55 - #define NR_SYS_REGS 28 58 + #define DACR32_EL2 88 /* Domain Access Control Register */ 59 + #define IFSR32_EL2 89 /* Instruction Fault Status Register */ 60 + #define FPEXC32_EL2 90 /* Floating-Point Exception Control Register */ 61 + #define DBGVCR32_EL2 91 /* Debug Vector Catch Register */ 62 + #define TEECR32_EL1 92 /* ThumbEE Configuration Register */ 63 + #define TEEHBR32_EL1 93 /* ThumbEE Handler Base Register */ 64 + #define NR_SYS_REGS 94 56 65 57 66 /* 32bit mapping */ 58 67 #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ ··· 95 82 #define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ 96 83 #define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */ 97 84 #define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ 98 - #define NR_CP15_REGS (NR_SYS_REGS * 2) 85 + 86 + #define cp14_DBGDSCRext (MDSCR_EL1 * 2) 87 + #define cp14_DBGBCR0 (DBGBCR0_EL1 * 2) 88 + #define cp14_DBGBVR0 (DBGBVR0_EL1 * 2) 89 + #define cp14_DBGBXVR0 (cp14_DBGBVR0 + 1) 90 + #define cp14_DBGWCR0 (DBGWCR0_EL1 * 2) 91 + #define cp14_DBGWVR0 (DBGWVR0_EL1 * 2) 92 + #define cp14_DBGDCCINT (MDCCINT_EL1 * 2) 93 + 94 + #define NR_COPRO_REGS (NR_SYS_REGS * 2) 99 95 100 96 #define ARM_EXCEPTION_IRQ 0 101 97 #define ARM_EXCEPTION_TRAP 1 98 + 99 + #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0 100 + #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT) 102 101 103 102 #ifndef __ASSEMBLY__ 104 103 struct kvm; ··· 121 96 122 97 extern char __kvm_hyp_vector[]; 123 98 124 - extern char __kvm_hyp_code_start[]; 125 - extern char __kvm_hyp_code_end[]; 99 + #define __kvm_hyp_code_start __hyp_text_start 100 + #define __kvm_hyp_code_end __hyp_text_end 126 101 127 102 extern void __kvm_flush_vm_context(void); 128 103 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 129 104 130 105 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); 106 + 107 + extern u64 __vgic_v3_get_ich_vtr_el2(void); 108 + 109 + extern char __save_vgic_v2_state[]; 110 + extern char __restore_vgic_v2_state[]; 111 + extern char 
__save_vgic_v3_state[]; 112 + extern char __restore_vgic_v3_state[]; 113 + 131 114 #endif 132 115 133 116 #endif /* __ARM_KVM_ASM_H__ */
+2 -1
arch/arm64/include/asm/kvm_coproc.h
··· 39 39 struct kvm_sys_reg_target_table *table); 40 40 41 41 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); 42 - int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 42 + int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 43 + int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 43 44 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 44 45 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 45 46 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+22
arch/arm64/include/asm/kvm_emulate.h
··· 213 213 default: 214 214 return be64_to_cpu(data); 215 215 } 216 + } else { 217 + switch (len) { 218 + case 1: 219 + return data & 0xff; 220 + case 2: 221 + return le16_to_cpu(data & 0xffff); 222 + case 4: 223 + return le32_to_cpu(data & 0xffffffff); 224 + default: 225 + return le64_to_cpu(data); 226 + } 216 227 } 217 228 218 229 return data; /* Leave LE untouched */ ··· 243 232 return cpu_to_be32(data & 0xffffffff); 244 233 default: 245 234 return cpu_to_be64(data); 235 + } 236 + } else { 237 + switch (len) { 238 + case 1: 239 + return data & 0xff; 240 + case 2: 241 + return cpu_to_le16(data & 0xffff); 242 + case 4: 243 + return cpu_to_le32(data & 0xffffffff); 244 + default: 245 + return cpu_to_le64(data); 246 246 } 247 247 } 248 248
+46 -2
arch/arm64/include/asm/kvm_host.h
··· 86 86 struct kvm_regs gp_regs; 87 87 union { 88 88 u64 sys_regs[NR_SYS_REGS]; 89 - u32 cp15[NR_CP15_REGS]; 89 + u32 copro[NR_COPRO_REGS]; 90 90 }; 91 91 }; 92 92 ··· 100 100 101 101 /* Exception Information */ 102 102 struct kvm_vcpu_fault_info fault; 103 + 104 + /* Debug state */ 105 + u64 debug_flags; 103 106 104 107 /* Pointer to host CPU context */ 105 108 kvm_cpu_context_t *host_cpu_context; ··· 141 138 142 139 #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) 143 140 #define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) 144 - #define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)]) 141 + /* 142 + * CP14 and CP15 live in the same array, as they are backed by the 143 + * same system registers. 144 + */ 145 + #define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) 146 + #define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) 147 + 148 + #ifdef CONFIG_CPU_BIG_ENDIAN 149 + #define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r)) 150 + #define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r) + 1) 151 + #else 152 + #define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r) + 1) 153 + #define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r)) 154 + #endif 145 155 146 156 struct kvm_vm_stat { 147 157 u32 remote_tlb_flush; ··· 214 198 */ 215 199 kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr, 216 200 hyp_stack_ptr, vector_ptr); 201 + } 202 + 203 + struct vgic_sr_vectors { 204 + void *save_vgic; 205 + void *restore_vgic; 206 + }; 207 + 208 + static inline void vgic_arch_setup(const struct vgic_params *vgic) 209 + { 210 + extern struct vgic_sr_vectors __vgic_sr_vectors; 211 + 212 + switch(vgic->type) 213 + { 214 + case VGIC_V2: 215 + __vgic_sr_vectors.save_vgic = __save_vgic_v2_state; 216 + __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state; 217 + break; 218 + 219 + #ifdef CONFIG_ARM_GIC_V3 220 + case VGIC_V3: 221 + __vgic_sr_vectors.save_vgic = __save_vgic_v3_state; 222 + __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state; 223 + break; 224 + #endif 225 + 226 + default: 227 + BUG(); 228 + } 217 229 } 218 230 219 231 #endif /* __ARM64_KVM_HOST_H__ */
+15
arch/arm64/include/asm/kvm_mmu.h
··· 125 125 #define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end) 126 126 #define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end) 127 127 128 + static inline bool kvm_page_empty(void *ptr) 129 + { 130 + struct page *ptr_page = virt_to_page(ptr); 131 + return page_count(ptr_page) == 1; 132 + } 133 + 134 + #define kvm_pte_table_empty(ptep) kvm_page_empty(ptep) 135 + #ifndef CONFIG_ARM64_64K_PAGES 136 + #define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp) 137 + #else 138 + #define kvm_pmd_table_empty(pmdp) (0) 139 + #endif 140 + #define kvm_pud_table_empty(pudp) (0) 141 + 142 + 128 143 struct kvm; 129 144 130 145 #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+4
arch/arm64/include/asm/virt.h
··· 50 50 return __boot_cpu_mode[0] != __boot_cpu_mode[1]; 51 51 } 52 52 53 + /* The section containing the hypervisor text */ 54 + extern char __hyp_text_start[]; 55 + extern char __hyp_text_end[]; 56 + 53 57 #endif /* __ASSEMBLY__ */ 54 58 55 59 #endif /* ! __ASM__VIRT_H */
+19 -7
arch/arm64/kernel/asm-offsets.c
··· 120 120 DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2)); 121 121 DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2)); 122 122 DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2)); 123 + DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags)); 123 124 DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); 124 125 DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); 125 126 DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); ··· 130 129 DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); 131 130 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); 132 131 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); 133 - DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr)); 134 - DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr)); 135 - DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr)); 136 - DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr)); 137 - DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr)); 138 - DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr)); 139 - DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr)); 132 + DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic)); 133 + DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic)); 134 + DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors)); 135 + DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); 136 + DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); 137 + DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr)); 138 + DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr)); 139 + DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr)); 140 + DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr)); 141 + DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr)); 142 + DEFINE(VGIC_V3_CPU_HCR, offsetof(struct vgic_cpu, vgic_v3.vgic_hcr)); 143 + DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr)); 144 + DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr)); 145 + DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr)); 146 + DEFINE(VGIC_V3_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr)); 147 + DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r)); 148 + DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r)); 149 + DEFINE(VGIC_V3_CPU_LR, offsetof(struct vgic_cpu, vgic_v3.vgic_lr)); 140 150 DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); 141 151 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); 142 152 DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
-9
arch/arm64/kernel/debug-monitors.c
··· 30 30 #include <asm/cputype.h> 31 31 #include <asm/system_misc.h> 32 32 33 - /* Low-level stepping controls. */ 34 - #define DBG_MDSCR_SS (1 << 0) 35 - #define DBG_SPSR_SS (1 << 21) 36 - 37 - /* MDSCR_EL1 enabling bits */ 38 - #define DBG_MDSCR_KDE (1 << 13) 39 - #define DBG_MDSCR_MDE (1 << 15) 40 - #define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE) 41 - 42 33 /* Determine debug architecture. */ 43 34 u8 debug_monitors_arch(void) 44 35 {
+4
arch/arm64/kvm/Makefile
··· 20 20 kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o 21 21 22 22 kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic.o 23 + kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v2.o 24 + kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v2-switch.o 25 + kvm-$(CONFIG_KVM_ARM_VGIC) += $(KVM)/arm/vgic-v3.o 26 + kvm-$(CONFIG_KVM_ARM_VGIC) += vgic-v3-switch.o 23 27 kvm-$(CONFIG_KVM_ARM_TIMER) += $(KVM)/arm/arch_timer.o
+67 -1
arch/arm64/kvm/guest.c
··· 136 136 } 137 137 138 138 /** 139 + * ARM64 versions of the TIMER registers, always available on arm64 140 + */ 141 + 142 + #define NUM_TIMER_REGS 3 143 + 144 + static bool is_timer_reg(u64 index) 145 + { 146 + switch (index) { 147 + case KVM_REG_ARM_TIMER_CTL: 148 + case KVM_REG_ARM_TIMER_CNT: 149 + case KVM_REG_ARM_TIMER_CVAL: 150 + return true; 151 + } 152 + return false; 153 + } 154 + 155 + static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) 156 + { 157 + if (put_user(KVM_REG_ARM_TIMER_CTL, uindices)) 158 + return -EFAULT; 159 + uindices++; 160 + if (put_user(KVM_REG_ARM_TIMER_CNT, uindices)) 161 + return -EFAULT; 162 + uindices++; 163 + if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices)) 164 + return -EFAULT; 165 + 166 + return 0; 167 + } 168 + 169 + static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 170 + { 171 + void __user *uaddr = (void __user *)(long)reg->addr; 172 + u64 val; 173 + int ret; 174 + 175 + ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)); 176 + if (ret != 0) 177 + return ret; 178 + 179 + return kvm_arm_timer_set_reg(vcpu, reg->id, val); 180 + } 181 + 182 + static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 183 + { 184 + void __user *uaddr = (void __user *)(long)reg->addr; 185 + u64 val; 186 + 187 + val = kvm_arm_timer_get_reg(vcpu, reg->id); 188 + return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); 189 + } 190 + 191 + /** 139 192 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG 140 193 * 141 194 * This is for all registers. 142 195 */ 143 196 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) 144 197 { 145 - return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu); 198 + return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu) 199 + + NUM_TIMER_REGS; 146 200 } 147 201 148 202 /** ··· 208 154 { 209 155 unsigned int i; 210 156 const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE; 157 + int ret; 211 158 212 159 for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) { 213 160 if (put_user(core_reg | i, uindices)) 214 161 return -EFAULT; 215 162 uindices++; 216 163 } 164 + 165 + ret = copy_timer_indices(vcpu, uindices); 166 + if (ret) 167 + return ret; 168 + uindices += NUM_TIMER_REGS; 217 169 218 170 return kvm_arm_copy_sys_reg_indices(vcpu, uindices); 219 171 } ··· 234 174 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 235 175 return get_core_reg(vcpu, reg); 236 176 177 + if (is_timer_reg(reg->id)) 178 + return get_timer_reg(vcpu, reg); 179 + 237 180 return kvm_arm_sys_reg_get_reg(vcpu, reg); 238 181 } 239 182 ··· 249 186 /* Register group 16 means we set a core register. */ 250 187 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 251 188 return set_core_reg(vcpu, reg); 189 + 190 + if (is_timer_reg(reg->id)) 191 + return set_timer_reg(vcpu, reg); 252 192 253 193 return kvm_arm_sys_reg_set_reg(vcpu, reg); 254 194 }
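
As a userspace-side illustration (not from the patch): once the timer registers are enumerated, they are accessed with the standard KVM_GET_ONE_REG ioctl; vcpu_fd here is an assumed, already-created vcpu file descriptor.

	/* Sketch: read the guest's virtual counter via the new timer regs. */
	struct kvm_one_reg reg;
	__u64 cnt;

	reg.id   = KVM_REG_ARM_TIMER_CNT;
	reg.addr = (__u64)(unsigned long)&cnt;
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		perror("KVM_GET_ONE_REG");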
+2 -2
arch/arm64/kvm/handle_exit.c
··· 73 73 [ESR_EL2_EC_WFI] = kvm_handle_wfx, 74 74 [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32, 75 75 [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64, 76 - [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access, 76 + [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_32, 77 77 [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store, 78 - [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_access, 78 + [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_64, 79 79 [ESR_EL2_EC_HVC32] = handle_hvc, 80 80 [ESR_EL2_EC_SMC32] = handle_smc, 81 81 [ESR_EL2_EC_HVC64] = handle_hvc,
+494 -106
arch/arm64/kvm/hyp.S
··· 16 16 */ 17 17 18 18 #include <linux/linkage.h> 19 - #include <linux/irqchip/arm-gic.h> 20 19 21 20 #include <asm/assembler.h> 22 21 #include <asm/memory.h> 23 22 #include <asm/asm-offsets.h> 23 + #include <asm/debug-monitors.h> 24 24 #include <asm/fpsimdmacros.h> 25 25 #include <asm/kvm.h> 26 26 #include <asm/kvm_asm.h> ··· 35 35 .text 36 36 .pushsection .hyp.text, "ax" 37 37 .align PAGE_SHIFT 38 - 39 - __kvm_hyp_code_start: 40 - .globl __kvm_hyp_code_start 41 38 42 39 .macro save_common_regs 43 40 // x2: base address for cpu context ··· 212 215 mrs x22, amair_el1 213 216 mrs x23, cntkctl_el1 214 217 mrs x24, par_el1 218 + mrs x25, mdscr_el1 215 219 216 220 stp x4, x5, [x3] 217 221 stp x6, x7, [x3, #16] ··· 224 226 stp x18, x19, [x3, #112] 225 227 stp x20, x21, [x3, #128] 226 228 stp x22, x23, [x3, #144] 227 - str x24, [x3, #160] 229 + stp x24, x25, [x3, #160] 230 + .endm 231 + 232 + .macro save_debug 233 + // x2: base address for cpu context 234 + // x3: tmp register 235 + 236 + mrs x26, id_aa64dfr0_el1 237 + ubfx x24, x26, #12, #4 // Extract BRPs 238 + ubfx x25, x26, #20, #4 // Extract WRPs 239 + mov w26, #15 240 + sub w24, w26, w24 // How many BPs to skip 241 + sub w25, w26, w25 // How many WPs to skip 242 + 243 + add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1) 244 + 245 + adr x26, 1f 246 + add x26, x26, x24, lsl #2 247 + br x26 248 + 1: 249 + mrs x20, dbgbcr15_el1 250 + mrs x19, dbgbcr14_el1 251 + mrs x18, dbgbcr13_el1 252 + mrs x17, dbgbcr12_el1 253 + mrs x16, dbgbcr11_el1 254 + mrs x15, dbgbcr10_el1 255 + mrs x14, dbgbcr9_el1 256 + mrs x13, dbgbcr8_el1 257 + mrs x12, dbgbcr7_el1 258 + mrs x11, dbgbcr6_el1 259 + mrs x10, dbgbcr5_el1 260 + mrs x9, dbgbcr4_el1 261 + mrs x8, dbgbcr3_el1 262 + mrs x7, dbgbcr2_el1 263 + mrs x6, dbgbcr1_el1 264 + mrs x5, dbgbcr0_el1 265 + 266 + adr x26, 1f 267 + add x26, x26, x24, lsl #2 268 + br x26 269 + 270 + 1: 271 + str x20, [x3, #(15 * 8)] 272 + str x19, [x3, #(14 * 8)] 273 + str x18, [x3, #(13 * 8)] 274 + str x17, [x3, #(12 * 8)] 275 + str x16, [x3, #(11 * 8)] 276 + str x15, [x3, #(10 * 8)] 277 + str x14, [x3, #(9 * 8)] 278 + str x13, [x3, #(8 * 8)] 279 + str x12, [x3, #(7 * 8)] 280 + str x11, [x3, #(6 * 8)] 281 + str x10, [x3, #(5 * 8)] 282 + str x9, [x3, #(4 * 8)] 283 + str x8, [x3, #(3 * 8)] 284 + str x7, [x3, #(2 * 8)] 285 + str x6, [x3, #(1 * 8)] 286 + str x5, [x3, #(0 * 8)] 287 + 288 + add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1) 289 + 290 + adr x26, 1f 291 + add x26, x26, x24, lsl #2 292 + br x26 293 + 1: 294 + mrs x20, dbgbvr15_el1 295 + mrs x19, dbgbvr14_el1 296 + mrs x18, dbgbvr13_el1 297 + mrs x17, dbgbvr12_el1 298 + mrs x16, dbgbvr11_el1 299 + mrs x15, dbgbvr10_el1 300 + mrs x14, dbgbvr9_el1 301 + mrs x13, dbgbvr8_el1 302 + mrs x12, dbgbvr7_el1 303 + mrs x11, dbgbvr6_el1 304 + mrs x10, dbgbvr5_el1 305 + mrs x9, dbgbvr4_el1 306 + mrs x8, dbgbvr3_el1 307 + mrs x7, dbgbvr2_el1 308 + mrs x6, dbgbvr1_el1 309 + mrs x5, dbgbvr0_el1 310 + 311 + adr x26, 1f 312 + add x26, x26, x24, lsl #2 313 + br x26 314 + 315 + 1: 316 + str x20, [x3, #(15 * 8)] 317 + str x19, [x3, #(14 * 8)] 318 + str x18, [x3, #(13 * 8)] 319 + str x17, [x3, #(12 * 8)] 320 + str x16, [x3, #(11 * 8)] 321 + str x15, [x3, #(10 * 8)] 322 + str x14, [x3, #(9 * 8)] 323 + str x13, [x3, #(8 * 8)] 324 + str x12, [x3, #(7 * 8)] 325 + str x11, [x3, #(6 * 8)] 326 + str x10, [x3, #(5 * 8)] 327 + str x9, [x3, #(4 * 8)] 328 + str x8, [x3, #(3 * 8)] 329 + str x7, [x3, #(2 * 8)] 330 + str x6, [x3, #(1 * 8)] 331 + str x5, [x3, #(0 * 8)] 332 + 333 + add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1) 334 
+ 335 + adr x26, 1f 336 + add x26, x26, x25, lsl #2 337 + br x26 338 + 1: 339 + mrs x20, dbgwcr15_el1 340 + mrs x19, dbgwcr14_el1 341 + mrs x18, dbgwcr13_el1 342 + mrs x17, dbgwcr12_el1 343 + mrs x16, dbgwcr11_el1 344 + mrs x15, dbgwcr10_el1 345 + mrs x14, dbgwcr9_el1 346 + mrs x13, dbgwcr8_el1 347 + mrs x12, dbgwcr7_el1 348 + mrs x11, dbgwcr6_el1 349 + mrs x10, dbgwcr5_el1 350 + mrs x9, dbgwcr4_el1 351 + mrs x8, dbgwcr3_el1 352 + mrs x7, dbgwcr2_el1 353 + mrs x6, dbgwcr1_el1 354 + mrs x5, dbgwcr0_el1 355 + 356 + adr x26, 1f 357 + add x26, x26, x25, lsl #2 358 + br x26 359 + 360 + 1: 361 + str x20, [x3, #(15 * 8)] 362 + str x19, [x3, #(14 * 8)] 363 + str x18, [x3, #(13 * 8)] 364 + str x17, [x3, #(12 * 8)] 365 + str x16, [x3, #(11 * 8)] 366 + str x15, [x3, #(10 * 8)] 367 + str x14, [x3, #(9 * 8)] 368 + str x13, [x3, #(8 * 8)] 369 + str x12, [x3, #(7 * 8)] 370 + str x11, [x3, #(6 * 8)] 371 + str x10, [x3, #(5 * 8)] 372 + str x9, [x3, #(4 * 8)] 373 + str x8, [x3, #(3 * 8)] 374 + str x7, [x3, #(2 * 8)] 375 + str x6, [x3, #(1 * 8)] 376 + str x5, [x3, #(0 * 8)] 377 + 378 + add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1) 379 + 380 + adr x26, 1f 381 + add x26, x26, x25, lsl #2 382 + br x26 383 + 1: 384 + mrs x20, dbgwvr15_el1 385 + mrs x19, dbgwvr14_el1 386 + mrs x18, dbgwvr13_el1 387 + mrs x17, dbgwvr12_el1 388 + mrs x16, dbgwvr11_el1 389 + mrs x15, dbgwvr10_el1 390 + mrs x14, dbgwvr9_el1 391 + mrs x13, dbgwvr8_el1 392 + mrs x12, dbgwvr7_el1 393 + mrs x11, dbgwvr6_el1 394 + mrs x10, dbgwvr5_el1 395 + mrs x9, dbgwvr4_el1 396 + mrs x8, dbgwvr3_el1 397 + mrs x7, dbgwvr2_el1 398 + mrs x6, dbgwvr1_el1 399 + mrs x5, dbgwvr0_el1 400 + 401 + adr x26, 1f 402 + add x26, x26, x25, lsl #2 403 + br x26 404 + 405 + 1: 406 + str x20, [x3, #(15 * 8)] 407 + str x19, [x3, #(14 * 8)] 408 + str x18, [x3, #(13 * 8)] 409 + str x17, [x3, #(12 * 8)] 410 + str x16, [x3, #(11 * 8)] 411 + str x15, [x3, #(10 * 8)] 412 + str x14, [x3, #(9 * 8)] 413 + str x13, [x3, #(8 * 8)] 414 + str x12, [x3, #(7 * 8)] 415 + str x11, [x3, #(6 * 8)] 416 + str x10, [x3, #(5 * 8)] 417 + str x9, [x3, #(4 * 8)] 418 + str x8, [x3, #(3 * 8)] 419 + str x7, [x3, #(2 * 8)] 420 + str x6, [x3, #(1 * 8)] 421 + str x5, [x3, #(0 * 8)] 422 + 423 + mrs x21, mdccint_el1 424 + str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)] 228 425 .endm 229 426 230 427 .macro restore_sysregs ··· 438 245 ldp x18, x19, [x3, #112] 439 246 ldp x20, x21, [x3, #128] 440 247 ldp x22, x23, [x3, #144] 441 - ldr x24, [x3, #160] 248 + ldp x24, x25, [x3, #160] 442 249 443 250 msr vmpidr_el2, x4 444 251 msr csselr_el1, x5 ··· 461 268 msr amair_el1, x22 462 269 msr cntkctl_el1, x23 463 270 msr par_el1, x24 271 + msr mdscr_el1, x25 272 + .endm 273 + 274 + .macro restore_debug 275 + // x2: base address for cpu context 276 + // x3: tmp register 277 + 278 + mrs x26, id_aa64dfr0_el1 279 + ubfx x24, x26, #12, #4 // Extract BRPs 280 + ubfx x25, x26, #20, #4 // Extract WRPs 281 + mov w26, #15 282 + sub w24, w26, w24 // How many BPs to skip 283 + sub w25, w26, w25 // How many WPs to skip 284 + 285 + add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1) 286 + 287 + adr x26, 1f 288 + add x26, x26, x24, lsl #2 289 + br x26 290 + 1: 291 + ldr x20, [x3, #(15 * 8)] 292 + ldr x19, [x3, #(14 * 8)] 293 + ldr x18, [x3, #(13 * 8)] 294 + ldr x17, [x3, #(12 * 8)] 295 + ldr x16, [x3, #(11 * 8)] 296 + ldr x15, [x3, #(10 * 8)] 297 + ldr x14, [x3, #(9 * 8)] 298 + ldr x13, [x3, #(8 * 8)] 299 + ldr x12, [x3, #(7 * 8)] 300 + ldr x11, [x3, #(6 * 8)] 301 + ldr x10, [x3, #(5 * 8)] 302 + ldr x9, [x3, #(4 * 8)] 303 + ldr x8, [x3, #(3 * 
8)] 304 + ldr x7, [x3, #(2 * 8)] 305 + ldr x6, [x3, #(1 * 8)] 306 + ldr x5, [x3, #(0 * 8)] 307 + 308 + adr x26, 1f 309 + add x26, x26, x24, lsl #2 310 + br x26 311 + 1: 312 + msr dbgbcr15_el1, x20 313 + msr dbgbcr14_el1, x19 314 + msr dbgbcr13_el1, x18 315 + msr dbgbcr12_el1, x17 316 + msr dbgbcr11_el1, x16 317 + msr dbgbcr10_el1, x15 318 + msr dbgbcr9_el1, x14 319 + msr dbgbcr8_el1, x13 320 + msr dbgbcr7_el1, x12 321 + msr dbgbcr6_el1, x11 322 + msr dbgbcr5_el1, x10 323 + msr dbgbcr4_el1, x9 324 + msr dbgbcr3_el1, x8 325 + msr dbgbcr2_el1, x7 326 + msr dbgbcr1_el1, x6 327 + msr dbgbcr0_el1, x5 328 + 329 + add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1) 330 + 331 + adr x26, 1f 332 + add x26, x26, x24, lsl #2 333 + br x26 334 + 1: 335 + ldr x20, [x3, #(15 * 8)] 336 + ldr x19, [x3, #(14 * 8)] 337 + ldr x18, [x3, #(13 * 8)] 338 + ldr x17, [x3, #(12 * 8)] 339 + ldr x16, [x3, #(11 * 8)] 340 + ldr x15, [x3, #(10 * 8)] 341 + ldr x14, [x3, #(9 * 8)] 342 + ldr x13, [x3, #(8 * 8)] 343 + ldr x12, [x3, #(7 * 8)] 344 + ldr x11, [x3, #(6 * 8)] 345 + ldr x10, [x3, #(5 * 8)] 346 + ldr x9, [x3, #(4 * 8)] 347 + ldr x8, [x3, #(3 * 8)] 348 + ldr x7, [x3, #(2 * 8)] 349 + ldr x6, [x3, #(1 * 8)] 350 + ldr x5, [x3, #(0 * 8)] 351 + 352 + adr x26, 1f 353 + add x26, x26, x24, lsl #2 354 + br x26 355 + 1: 356 + msr dbgbvr15_el1, x20 357 + msr dbgbvr14_el1, x19 358 + msr dbgbvr13_el1, x18 359 + msr dbgbvr12_el1, x17 360 + msr dbgbvr11_el1, x16 361 + msr dbgbvr10_el1, x15 362 + msr dbgbvr9_el1, x14 363 + msr dbgbvr8_el1, x13 364 + msr dbgbvr7_el1, x12 365 + msr dbgbvr6_el1, x11 366 + msr dbgbvr5_el1, x10 367 + msr dbgbvr4_el1, x9 368 + msr dbgbvr3_el1, x8 369 + msr dbgbvr2_el1, x7 370 + msr dbgbvr1_el1, x6 371 + msr dbgbvr0_el1, x5 372 + 373 + add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1) 374 + 375 + adr x26, 1f 376 + add x26, x26, x25, lsl #2 377 + br x26 378 + 1: 379 + ldr x20, [x3, #(15 * 8)] 380 + ldr x19, [x3, #(14 * 8)] 381 + ldr x18, [x3, #(13 * 8)] 382 + ldr x17, [x3, #(12 * 8)] 383 + ldr x16, [x3, #(11 * 8)] 384 + ldr x15, [x3, #(10 * 8)] 385 + ldr x14, [x3, #(9 * 8)] 386 + ldr x13, [x3, #(8 * 8)] 387 + ldr x12, [x3, #(7 * 8)] 388 + ldr x11, [x3, #(6 * 8)] 389 + ldr x10, [x3, #(5 * 8)] 390 + ldr x9, [x3, #(4 * 8)] 391 + ldr x8, [x3, #(3 * 8)] 392 + ldr x7, [x3, #(2 * 8)] 393 + ldr x6, [x3, #(1 * 8)] 394 + ldr x5, [x3, #(0 * 8)] 395 + 396 + adr x26, 1f 397 + add x26, x26, x25, lsl #2 398 + br x26 399 + 1: 400 + msr dbgwcr15_el1, x20 401 + msr dbgwcr14_el1, x19 402 + msr dbgwcr13_el1, x18 403 + msr dbgwcr12_el1, x17 404 + msr dbgwcr11_el1, x16 405 + msr dbgwcr10_el1, x15 406 + msr dbgwcr9_el1, x14 407 + msr dbgwcr8_el1, x13 408 + msr dbgwcr7_el1, x12 409 + msr dbgwcr6_el1, x11 410 + msr dbgwcr5_el1, x10 411 + msr dbgwcr4_el1, x9 412 + msr dbgwcr3_el1, x8 413 + msr dbgwcr2_el1, x7 414 + msr dbgwcr1_el1, x6 415 + msr dbgwcr0_el1, x5 416 + 417 + add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1) 418 + 419 + adr x26, 1f 420 + add x26, x26, x25, lsl #2 421 + br x26 422 + 1: 423 + ldr x20, [x3, #(15 * 8)] 424 + ldr x19, [x3, #(14 * 8)] 425 + ldr x18, [x3, #(13 * 8)] 426 + ldr x17, [x3, #(12 * 8)] 427 + ldr x16, [x3, #(11 * 8)] 428 + ldr x15, [x3, #(10 * 8)] 429 + ldr x14, [x3, #(9 * 8)] 430 + ldr x13, [x3, #(8 * 8)] 431 + ldr x12, [x3, #(7 * 8)] 432 + ldr x11, [x3, #(6 * 8)] 433 + ldr x10, [x3, #(5 * 8)] 434 + ldr x9, [x3, #(4 * 8)] 435 + ldr x8, [x3, #(3 * 8)] 436 + ldr x7, [x3, #(2 * 8)] 437 + ldr x6, [x3, #(1 * 8)] 438 + ldr x5, [x3, #(0 * 8)] 439 + 440 + adr x26, 1f 441 + add x26, x26, x25, lsl #2 442 + br x26 443 + 1: 444 + 
msr dbgwvr15_el1, x20
445 + msr dbgwvr14_el1, x19
446 + msr dbgwvr13_el1, x18
447 + msr dbgwvr12_el1, x17
448 + msr dbgwvr11_el1, x16
449 + msr dbgwvr10_el1, x15
450 + msr dbgwvr9_el1, x14
451 + msr dbgwvr8_el1, x13
452 + msr dbgwvr7_el1, x12
453 + msr dbgwvr6_el1, x11
454 + msr dbgwvr5_el1, x10
455 + msr dbgwvr4_el1, x9
456 + msr dbgwvr3_el1, x8
457 + msr dbgwvr2_el1, x7
458 + msr dbgwvr1_el1, x6
459 + msr dbgwvr0_el1, x5
460 +
461 + ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
462 + msr mdccint_el1, x21
464 463 .endm
465 464
466 465 .macro skip_32bit_state tmp, target
··· 665 280 // Skip ThumbEE state if not needed
666 281 mrs \tmp, id_pfr0_el1
667 282 tbz \tmp, #12, \target
283 + .endm
284 +
285 + .macro skip_debug_state tmp, target
286 + ldr \tmp, [x0, #VCPU_DEBUG_FLAGS]
287 + tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
288 + .endm
289 +
290 + .macro compute_debug_state target
291 + // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
292 + // is set, we do a full save/restore cycle and disable trapping.
293 + add x25, x0, #VCPU_CONTEXT
294 +
295 + // Check the state of MDSCR_EL1
296 + ldr x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
297 + and x26, x25, #DBG_MDSCR_KDE
298 + and x25, x25, #DBG_MDSCR_MDE
299 + adds xzr, x25, x26
300 + b.eq 9998f // Nothing to see there
301 +
302 + // If any interesting bits were set, we must set the flag
303 + mov x26, #KVM_ARM64_DEBUG_DIRTY
304 + str x26, [x0, #VCPU_DEBUG_FLAGS]
305 + b 9999f // Don't skip restore
306 +
307 + 9998:
308 + // Otherwise load the flags from memory in case we recently
309 + // trapped
310 + skip_debug_state x25, \target
311 + 9999:
668 312 .endm
669 313
670 314 .macro save_guest_32bit_state
··· 711 297 mrs x4, dacr32_el2
712 298 mrs x5, ifsr32_el2
713 299 mrs x6, fpexc32_el2
714 - mrs x7, dbgvcr32_el2
715 300 stp x4, x5, [x3]
716 - stp x6, x7, [x3, #16]
301 + str x6, [x3, #16]
717 302
303 + skip_debug_state x8, 2f
304 + mrs x7, dbgvcr32_el2
305 + str x7, [x3, #24]
306 + 2:
718 307 skip_tee_state x8, 1f
719 308
720 309 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
··· 740 323
741 324 add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
742 325 ldp x4, x5, [x3]
743 - ldp x6, x7, [x3, #16]
326 + ldr x6, [x3, #16]
744 327 msr dacr32_el2, x4
745 328 msr ifsr32_el2, x5
746 329 msr fpexc32_el2, x6
747 - msr dbgvcr32_el2, x7
748 330
331 + skip_debug_state x8, 2f
332 + ldr x7, [x3, #24]
333 + msr dbgvcr32_el2, x7
334 + 2:
749 335 skip_tee_state x8, 1f
750 336
751 337 add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
··· 759 339 .endm
760 340
761 341 .macro activate_traps
762 - ldr x2, [x0, #VCPU_IRQ_LINES]
763 - ldr x1, [x0, #VCPU_HCR_EL2]
764 - orr x2, x2, x1
765 - msr hcr_el2, x2
766 -
342 + ldr x2, [x0, #VCPU_HCR_EL2]
343 + msr hcr_el2, x2
767 344 ldr x2, =(CPTR_EL2_TTA)
768 345 msr cptr_el2, x2
769 346
··· 770 353 mrs x2, mdcr_el2
771 354 and x2, x2, #MDCR_EL2_HPMN_MASK
772 355 orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
356 + orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
357 +
358 + // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
359 + // if not dirty.
360 + ldr x3, [x0, #VCPU_DEBUG_FLAGS]
361 + tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
362 + orr x2, x2, #MDCR_EL2_TDA
363 + 1:
773 364 msr mdcr_el2, x2
774 365 .endm
775 366
··· 804 379 .endm
805 380
806 381 /*
807 - * Save the VGIC CPU state into memory
808 - * x0: Register pointing to VCPU struct
809 - * Do not corrupt x1!!!
382 + * Call into the vgic backend for state saving 810 383 */ 811 384 .macro save_vgic_state 812 - /* Get VGIC VCTRL base into x2 */ 813 - ldr x2, [x0, #VCPU_KVM] 814 - kern_hyp_va x2 815 - ldr x2, [x2, #KVM_VGIC_VCTRL] 816 - kern_hyp_va x2 817 - cbz x2, 2f // disabled 818 - 819 - /* Compute the address of struct vgic_cpu */ 820 - add x3, x0, #VCPU_VGIC_CPU 821 - 822 - /* Save all interesting registers */ 823 - ldr w4, [x2, #GICH_HCR] 824 - ldr w5, [x2, #GICH_VMCR] 825 - ldr w6, [x2, #GICH_MISR] 826 - ldr w7, [x2, #GICH_EISR0] 827 - ldr w8, [x2, #GICH_EISR1] 828 - ldr w9, [x2, #GICH_ELRSR0] 829 - ldr w10, [x2, #GICH_ELRSR1] 830 - ldr w11, [x2, #GICH_APR] 831 - CPU_BE( rev w4, w4 ) 832 - CPU_BE( rev w5, w5 ) 833 - CPU_BE( rev w6, w6 ) 834 - CPU_BE( rev w7, w7 ) 835 - CPU_BE( rev w8, w8 ) 836 - CPU_BE( rev w9, w9 ) 837 - CPU_BE( rev w10, w10 ) 838 - CPU_BE( rev w11, w11 ) 839 - 840 - str w4, [x3, #VGIC_CPU_HCR] 841 - str w5, [x3, #VGIC_CPU_VMCR] 842 - str w6, [x3, #VGIC_CPU_MISR] 843 - str w7, [x3, #VGIC_CPU_EISR] 844 - str w8, [x3, #(VGIC_CPU_EISR + 4)] 845 - str w9, [x3, #VGIC_CPU_ELRSR] 846 - str w10, [x3, #(VGIC_CPU_ELRSR + 4)] 847 - str w11, [x3, #VGIC_CPU_APR] 848 - 849 - /* Clear GICH_HCR */ 850 - str wzr, [x2, #GICH_HCR] 851 - 852 - /* Save list registers */ 853 - add x2, x2, #GICH_LR0 854 - ldr w4, [x3, #VGIC_CPU_NR_LR] 855 - add x3, x3, #VGIC_CPU_LR 856 - 1: ldr w5, [x2], #4 857 - CPU_BE( rev w5, w5 ) 858 - str w5, [x3], #4 859 - sub w4, w4, #1 860 - cbnz w4, 1b 861 - 2: 385 + adr x24, __vgic_sr_vectors 386 + ldr x24, [x24, VGIC_SAVE_FN] 387 + kern_hyp_va x24 388 + blr x24 389 + mrs x24, hcr_el2 390 + mov x25, #HCR_INT_OVERRIDE 391 + neg x25, x25 392 + and x24, x24, x25 393 + msr hcr_el2, x24 862 394 .endm 863 395 864 396 /* 865 - * Restore the VGIC CPU state from memory 866 - * x0: Register pointing to VCPU struct 397 + * Call into the vgic backend for state restoring 867 398 */ 868 399 .macro restore_vgic_state 869 - /* Get VGIC VCTRL base into x2 */ 870 - ldr x2, [x0, #VCPU_KVM] 871 - kern_hyp_va x2 872 - ldr x2, [x2, #KVM_VGIC_VCTRL] 873 - kern_hyp_va x2 874 - cbz x2, 2f // disabled 875 - 876 - /* Compute the address of struct vgic_cpu */ 877 - add x3, x0, #VCPU_VGIC_CPU 878 - 879 - /* We only restore a minimal set of registers */ 880 - ldr w4, [x3, #VGIC_CPU_HCR] 881 - ldr w5, [x3, #VGIC_CPU_VMCR] 882 - ldr w6, [x3, #VGIC_CPU_APR] 883 - CPU_BE( rev w4, w4 ) 884 - CPU_BE( rev w5, w5 ) 885 - CPU_BE( rev w6, w6 ) 886 - 887 - str w4, [x2, #GICH_HCR] 888 - str w5, [x2, #GICH_VMCR] 889 - str w6, [x2, #GICH_APR] 890 - 891 - /* Restore list registers */ 892 - add x2, x2, #GICH_LR0 893 - ldr w4, [x3, #VGIC_CPU_NR_LR] 894 - add x3, x3, #VGIC_CPU_LR 895 - 1: ldr w5, [x3], #4 896 - CPU_BE( rev w5, w5 ) 897 - str w5, [x2], #4 898 - sub w4, w4, #1 899 - cbnz w4, 1b 900 - 2: 400 + mrs x24, hcr_el2 401 + ldr x25, [x0, #VCPU_IRQ_LINES] 402 + orr x24, x24, #HCR_INT_OVERRIDE 403 + orr x24, x24, x25 404 + msr hcr_el2, x24 405 + adr x24, __vgic_sr_vectors 406 + ldr x24, [x24, #VGIC_RESTORE_FN] 407 + kern_hyp_va x24 408 + blr x24 901 409 .endm 902 410 903 411 .macro save_timer_state ··· 895 537 restore_sysregs 896 538 ret 897 539 540 + __save_debug: 541 + save_debug 542 + ret 543 + 544 + __restore_debug: 545 + restore_debug 546 + ret 547 + 898 548 __save_fpsimd: 899 549 save_fpsimd 900 550 ret ··· 934 568 bl __save_fpsimd 935 569 bl __save_sysregs 936 570 571 + compute_debug_state 1f 572 + bl __save_debug 573 + 1: 937 574 activate_traps 938 575 activate_vm 939 576 ··· 948 579 949 580 bl 
__restore_sysregs
950 581 bl __restore_fpsimd
582 +
583 + skip_debug_state x3, 1f
584 + bl __restore_debug
585 + 1:
951 586 restore_guest_32bit_state
952 587 restore_guest_regs
953 588
··· 968 595 save_guest_regs
969 596 bl __save_fpsimd
970 597 bl __save_sysregs
598 +
599 + skip_debug_state x3, 1f
600 + bl __save_debug
601 + 1:
971 602 save_guest_32bit_state
972 603
973 604 save_timer_state
··· 986 609
987 610 bl __restore_sysregs
988 611 bl __restore_fpsimd
612 +
613 + skip_debug_state x3, 1f
614 + // Clear the dirty flag for the next run, as all the state has
615 + // already been saved. Note that we nuke the whole 64bit word.
616 + // If we ever add more flags, we'll have to be more careful...
617 + str xzr, [x0, #VCPU_DEBUG_FLAGS]
618 + bl __restore_debug
619 + 1:
989 620 restore_host_regs
990 621
991 622 mov x0, x1
··· 1037 652 dsb ish
1038 653 ret
1039 654 ENDPROC(__kvm_flush_vm_context)
655 +
656 + // struct vgic_sr_vectors __vgic_sr_vectors;
657 + .align 3
658 + ENTRY(__vgic_sr_vectors)
659 + .skip VGIC_SR_VECTOR_SZ
660 + ENDPROC(__vgic_sr_vectors)
1040 661
1041 662 __kvm_hyp_panic:
1042 663 // Guess the context by looking at VTTBR:
··· 1221 830 mrs x2, far_el2
1222 831
1223 832 2: mrs x0, tpidr_el2
1224 - str x1, [x0, #VCPU_ESR_EL2]
833 + str w1, [x0, #VCPU_ESR_EL2]
1225 834 str x2, [x0, #VCPU_FAR_EL2]
1226 835 str x3, [x0, #VCPU_HPFAR_EL2]
1227 836
··· 1270 879 ventry el1_fiq_invalid // FIQ 32-bit EL1
1271 880 ventry el1_error_invalid // Error 32-bit EL1
1272 881 ENDPROC(__kvm_hyp_vector)
1273 -
1274 - __kvm_hyp_code_end:
1275 - .globl __kvm_hyp_code_end
1276 882
1277 883 .popsection
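
The save_debug/restore_debug paths above lean on two tricks: a computed branch (the adr/add/br sequences index a table of 4-byte instructions, so CPUs implementing fewer than 16 break-/watchpoints simply skip the unused accessors) and the KVM_ARM64_DEBUG_DIRTY flag, which lets the world switch bypass the debug registers entirely when the guest is not using them. As a rough C sketch of the entry-time policy that compute_debug_state implements in asm (need_debug_switch() is a hypothetical wrapper; the DBG_MDSCR_* masks come from asm/debug-monitors.h):

	/* Sketch only: mirrors compute_debug_state/skip_debug_state. */
	static bool need_debug_switch(struct kvm_vcpu *vcpu)
	{
		u64 mdscr = vcpu_sys_reg(vcpu, MDSCR_EL1);

		/* KDE or MDE set: debug is live, force a full save/restore */
		if (mdscr & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
			vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;

		/* otherwise honour whatever a trapped access left behind */
		return vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY;
	}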
+473 -73
arch/arm64/kvm/sys_regs.c
··· 30 30 #include <asm/kvm_mmu.h>
31 31 #include <asm/cacheflush.h>
32 32 #include <asm/cputype.h>
33 + #include <asm/debug-monitors.h>
33 34 #include <trace/events/kvm.h>
34 35
35 36 #include "sys_regs.h"
··· 138 137 if (!p->is_aarch32) {
139 138 vcpu_sys_reg(vcpu, r->reg) = val;
140 139 } else {
141 - vcpu_cp15(vcpu, r->reg) = val & 0xffffffffUL;
142 140 if (!p->is_32bit)
143 - vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
141 + vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
142 + vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
144 143 }
144 +
145 145 return true;
146 146 }
147 147
··· 165 163 return true;
166 164 }
167 165
168 - /*
169 - * We could trap ID_DFR0 and tell the guest we don't support performance
170 - * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
171 - * NAKed, so it will read the PMCR anyway.
172 - *
173 - * Therefore we tell the guest we have 0 counters. Unfortunately, we
174 - * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
175 - * all PM registers, which doesn't crash the guest kernel at least.
176 - */
177 - static bool pm_fake(struct kvm_vcpu *vcpu,
178 - const struct sys_reg_params *p,
179 - const struct sys_reg_desc *r)
166 + static bool trap_raz_wi(struct kvm_vcpu *vcpu,
167 + const struct sys_reg_params *p,
168 + const struct sys_reg_desc *r)
180 169 {
181 170 if (p->is_write)
182 171 return ignore_write(vcpu, p);
183 172 else
184 173 return read_zero(vcpu, p);
174 + }
175 +
176 + static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
177 + const struct sys_reg_params *p,
178 + const struct sys_reg_desc *r)
179 + {
180 + if (p->is_write) {
181 + return ignore_write(vcpu, p);
182 + } else {
183 + *vcpu_reg(vcpu, p->Rt) = (1 << 3);
184 + return true;
185 + }
186 + }
187 +
188 + static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
189 + const struct sys_reg_params *p,
190 + const struct sys_reg_desc *r)
191 + {
192 + if (p->is_write) {
193 + return ignore_write(vcpu, p);
194 + } else {
195 + u32 val;
196 + asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
197 + *vcpu_reg(vcpu, p->Rt) = val;
198 + return true;
199 + }
200 + }
201 +
202 + /*
203 + * We want to avoid world-switching all the DBG registers all the
204 + * time:
205 + *
206 + * - If we've touched any debug register, it is likely that we're
207 + * going to touch more of them. It then makes sense to disable the
208 + * traps and start doing the save/restore dance
209 + * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
210 + * then mandatory to save/restore the registers, as the guest
211 + * depends on them.
212 + *
213 + * For this, we use a DIRTY bit, indicating the guest has modified the
214 + * debug registers, used as follows:
215 + *
216 + * On guest entry:
217 + * - If the dirty bit is set (because we're coming back from trapping),
218 + * disable the traps, save host registers, restore guest registers.
219 + * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
220 + * set the dirty bit, disable the traps, save host registers,
221 + * restore guest registers.
222 + * - Otherwise, enable the traps
223 + *
224 + * On guest exit:
225 + * - If the dirty bit is set, save guest registers, restore host
226 + * registers and clear the dirty bit. This ensures that the host can
227 + * now use the debug registers.
228 + */
229 + static bool trap_debug_regs(struct kvm_vcpu *vcpu,
230 + const struct sys_reg_params *p,
231 + const struct sys_reg_desc *r)
232 + {
233 + if (p->is_write) {
234 + vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
235 + vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
236 + } else {
237 + *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
238 + }
239 +
240 + return true;
185 241 }
186 242
187 243 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
··· 258 198 vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
259 199 }
260 200
201 + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
202 + #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
203 + /* DBGBVRn_EL1 */ \
204 + { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
205 + trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \
206 + /* DBGBCRn_EL1 */ \
207 + { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
208 + trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \
209 + /* DBGWVRn_EL1 */ \
210 + { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
211 + trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \
212 + /* DBGWCRn_EL1 */ \
213 + { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
214 + trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
215 +
261 216 /*
262 217 * Architected system registers.
263 218 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
219 + *
220 + * We could trap ID_DFR0 and tell the guest we don't support performance
221 + * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
222 + * NAKed, so it will read the PMCR anyway.
223 + *
224 + * Therefore we tell the guest we have 0 counters. Unfortunately, we
225 + * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
226 + * all PM registers, which doesn't crash the guest kernel at least.
227 + *
228 + * Debug handling: We do trap most, if not all, debug-related system
229 + * registers. The implementation is good enough to ensure that a guest
230 + * can use these with minimal performance degradation. The drawback is
231 + * that we don't implement any of the external debug support, nor the
232 + * OSlock protocol. This should be revisited if we ever encounter a
233 + * more demanding guest...
264 234 */ 265 235 static const struct sys_reg_desc sys_reg_descs[] = { 266 236 /* DC ISW */ ··· 303 213 { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010), 304 214 access_dcsw }, 305 215 216 + DBG_BCR_BVR_WCR_WVR_EL1(0), 217 + DBG_BCR_BVR_WCR_WVR_EL1(1), 218 + /* MDCCINT_EL1 */ 219 + { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000), 220 + trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, 221 + /* MDSCR_EL1 */ 222 + { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), 223 + trap_debug_regs, reset_val, MDSCR_EL1, 0 }, 224 + DBG_BCR_BVR_WCR_WVR_EL1(2), 225 + DBG_BCR_BVR_WCR_WVR_EL1(3), 226 + DBG_BCR_BVR_WCR_WVR_EL1(4), 227 + DBG_BCR_BVR_WCR_WVR_EL1(5), 228 + DBG_BCR_BVR_WCR_WVR_EL1(6), 229 + DBG_BCR_BVR_WCR_WVR_EL1(7), 230 + DBG_BCR_BVR_WCR_WVR_EL1(8), 231 + DBG_BCR_BVR_WCR_WVR_EL1(9), 232 + DBG_BCR_BVR_WCR_WVR_EL1(10), 233 + DBG_BCR_BVR_WCR_WVR_EL1(11), 234 + DBG_BCR_BVR_WCR_WVR_EL1(12), 235 + DBG_BCR_BVR_WCR_WVR_EL1(13), 236 + DBG_BCR_BVR_WCR_WVR_EL1(14), 237 + DBG_BCR_BVR_WCR_WVR_EL1(15), 238 + 239 + /* MDRAR_EL1 */ 240 + { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), 241 + trap_raz_wi }, 242 + /* OSLAR_EL1 */ 243 + { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100), 244 + trap_raz_wi }, 245 + /* OSLSR_EL1 */ 246 + { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100), 247 + trap_oslsr_el1 }, 248 + /* OSDLR_EL1 */ 249 + { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100), 250 + trap_raz_wi }, 251 + /* DBGPRCR_EL1 */ 252 + { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100), 253 + trap_raz_wi }, 254 + /* DBGCLAIMSET_EL1 */ 255 + { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110), 256 + trap_raz_wi }, 257 + /* DBGCLAIMCLR_EL1 */ 258 + { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110), 259 + trap_raz_wi }, 260 + /* DBGAUTHSTATUS_EL1 */ 261 + { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110), 262 + trap_dbgauthstatus_el1 }, 263 + 306 264 /* TEECR32_EL1 */ 307 265 { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), 308 266 NULL, reset_val, TEECR32_EL1, 0 }, 309 267 /* TEEHBR32_EL1 */ 310 268 { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000), 311 269 NULL, reset_val, TEEHBR32_EL1, 0 }, 270 + 271 + /* MDCCSR_EL1 */ 272 + { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000), 273 + trap_raz_wi }, 274 + /* DBGDTR_EL0 */ 275 + { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000), 276 + trap_raz_wi }, 277 + /* DBGDTR[TR]X_EL0 */ 278 + { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000), 279 + trap_raz_wi }, 280 + 312 281 /* DBGVCR32_EL2 */ 313 282 { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000), 314 283 NULL, reset_val, DBGVCR32_EL2, 0 }, ··· 409 260 410 261 /* PMINTENSET_EL1 */ 411 262 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), 412 - pm_fake }, 263 + trap_raz_wi }, 413 264 /* PMINTENCLR_EL1 */ 414 265 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), 415 - pm_fake }, 266 + trap_raz_wi }, 416 267 417 268 /* MAIR_EL1 */ 418 269 { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), ··· 441 292 442 293 /* PMCR_EL0 */ 443 294 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), 444 - pm_fake }, 295 + trap_raz_wi }, 445 296 /* PMCNTENSET_EL0 */ 446 297 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), 447 - pm_fake }, 298 + trap_raz_wi }, 448 299 /* PMCNTENCLR_EL0 */ 449 300 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), 450 - pm_fake }, 301 + 
trap_raz_wi },
451 302 /* PMOVSCLR_EL0 */
452 303 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
453 - pm_fake },
304 + trap_raz_wi },
454 305 /* PMSWINC_EL0 */
455 306 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
456 - pm_fake },
307 + trap_raz_wi },
457 308 /* PMSELR_EL0 */
458 309 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
459 - pm_fake },
310 + trap_raz_wi },
460 311 /* PMCEID0_EL0 */
461 312 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
462 - pm_fake },
313 + trap_raz_wi },
463 314 /* PMCEID1_EL0 */
464 315 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
465 - pm_fake },
316 + trap_raz_wi },
466 317 /* PMCCNTR_EL0 */
467 318 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
468 - pm_fake },
319 + trap_raz_wi },
469 320 /* PMXEVTYPER_EL0 */
470 321 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
471 - pm_fake },
322 + trap_raz_wi },
472 323 /* PMXEVCNTR_EL0 */
473 324 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
474 - pm_fake },
325 + trap_raz_wi },
475 326 /* PMUSERENR_EL0 */
476 327 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
477 - pm_fake },
328 + trap_raz_wi },
478 329 /* PMOVSSET_EL0 */
479 330 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
480 - pm_fake },
331 + trap_raz_wi },
481 332
482 333 /* TPIDR_EL0 */
483 334 { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
··· 497 348 NULL, reset_val, FPEXC32_EL2, 0x70 },
498 349 };
499 350
351 + static bool trap_dbgidr(struct kvm_vcpu *vcpu,
352 + const struct sys_reg_params *p,
353 + const struct sys_reg_desc *r)
354 + {
355 + if (p->is_write) {
356 + return ignore_write(vcpu, p);
357 + } else {
358 + u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
359 + u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
360 + u32 el3 = !!((pfr >> 12) & 0xf);
361 +
362 + *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
363 + (((dfr >> 12) & 0xf) << 24) |
364 + (((dfr >> 28) & 0xf) << 20) |
365 + (6 << 16) | (el3 << 14) | (el3 << 12));
366 + return true;
367 + }
368 + }
369 +
370 + static bool trap_debug32(struct kvm_vcpu *vcpu,
371 + const struct sys_reg_params *p,
372 + const struct sys_reg_desc *r)
373 + {
374 + if (p->is_write) {
375 + vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
376 + vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
377 + } else {
378 + *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
379 + }
380 +
381 + return true;
382 + }
383 +
384 + #define DBG_BCR_BVR_WCR_WVR(n) \
385 + /* DBGBVRn */ \
386 + { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \
387 + NULL, (cp14_DBGBVR0 + (n) * 2) }, \
388 + /* DBGBCRn */ \
389 + { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \
390 + NULL, (cp14_DBGBCR0 + (n) * 2) }, \
391 + /* DBGWVRn */ \
392 + { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \
393 + NULL, (cp14_DBGWVR0 + (n) * 2) }, \
394 + /* DBGWCRn */ \
395 + { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \
396 + NULL, (cp14_DBGWCR0 + (n) * 2) }
397 +
398 + #define DBGBXVR(n) \
399 + { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \
400 + NULL, (cp14_DBGBXVR0 + (n) * 2) }
401 +
402 + /*
403 + * Trapped cp14 registers. We generally ignore most of the external
404 + * debug, on the principle that they don't really make sense to a
405 + * guest. Revisit this one day, should this principle change.
406 + */ 407 + static const struct sys_reg_desc cp14_regs[] = { 408 + /* DBGIDR */ 409 + { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr }, 410 + /* DBGDTRRXext */ 411 + { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, 412 + 413 + DBG_BCR_BVR_WCR_WVR(0), 414 + /* DBGDSCRint */ 415 + { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, 416 + DBG_BCR_BVR_WCR_WVR(1), 417 + /* DBGDCCINT */ 418 + { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, 419 + /* DBGDSCRext */ 420 + { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, 421 + DBG_BCR_BVR_WCR_WVR(2), 422 + /* DBGDTR[RT]Xint */ 423 + { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, 424 + /* DBGDTR[RT]Xext */ 425 + { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi }, 426 + DBG_BCR_BVR_WCR_WVR(3), 427 + DBG_BCR_BVR_WCR_WVR(4), 428 + DBG_BCR_BVR_WCR_WVR(5), 429 + /* DBGWFAR */ 430 + { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi }, 431 + /* DBGOSECCR */ 432 + { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, 433 + DBG_BCR_BVR_WCR_WVR(6), 434 + /* DBGVCR */ 435 + { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, 436 + DBG_BCR_BVR_WCR_WVR(7), 437 + DBG_BCR_BVR_WCR_WVR(8), 438 + DBG_BCR_BVR_WCR_WVR(9), 439 + DBG_BCR_BVR_WCR_WVR(10), 440 + DBG_BCR_BVR_WCR_WVR(11), 441 + DBG_BCR_BVR_WCR_WVR(12), 442 + DBG_BCR_BVR_WCR_WVR(13), 443 + DBG_BCR_BVR_WCR_WVR(14), 444 + DBG_BCR_BVR_WCR_WVR(15), 445 + 446 + /* DBGDRAR (32bit) */ 447 + { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi }, 448 + 449 + DBGBXVR(0), 450 + /* DBGOSLAR */ 451 + { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi }, 452 + DBGBXVR(1), 453 + /* DBGOSLSR */ 454 + { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 }, 455 + DBGBXVR(2), 456 + DBGBXVR(3), 457 + /* DBGOSDLR */ 458 + { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi }, 459 + DBGBXVR(4), 460 + /* DBGPRCR */ 461 + { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi }, 462 + DBGBXVR(5), 463 + DBGBXVR(6), 464 + DBGBXVR(7), 465 + DBGBXVR(8), 466 + DBGBXVR(9), 467 + DBGBXVR(10), 468 + DBGBXVR(11), 469 + DBGBXVR(12), 470 + DBGBXVR(13), 471 + DBGBXVR(14), 472 + DBGBXVR(15), 473 + 474 + /* DBGDSAR (32bit) */ 475 + { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi }, 476 + 477 + /* DBGDEVID2 */ 478 + { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi }, 479 + /* DBGDEVID1 */ 480 + { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi }, 481 + /* DBGDEVID */ 482 + { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi }, 483 + /* DBGCLAIMSET */ 484 + { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi }, 485 + /* DBGCLAIMCLR */ 486 + { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi }, 487 + /* DBGAUTHSTATUS */ 488 + { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 }, 489 + }; 490 + 491 + /* Trapped cp14 64bit registers */ 492 + static const struct sys_reg_desc cp14_64_regs[] = { 493 + /* DBGDRAR (64bit) */ 494 + { Op1( 0), CRm( 1), .access = trap_raz_wi }, 495 + 496 + /* DBGDSAR (64bit) */ 497 + { Op1( 0), CRm( 2), .access = trap_raz_wi }, 498 + }; 499 + 500 500 /* 501 501 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, 502 502 * depending on the way they are accessed (as a 32bit or a 64bit 503 503 * register). 
504 504 */ 505 505 static const struct sys_reg_desc cp15_regs[] = { 506 - { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, 507 506 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR }, 508 507 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, 509 508 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, ··· 671 374 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, 672 375 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, 673 376 674 - { Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake }, 675 - { Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake }, 676 - { Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake }, 677 - { Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake }, 678 - { Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake }, 679 - { Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake }, 680 - { Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake }, 681 - { Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake }, 682 - { Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake }, 683 - { Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake }, 684 - { Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake }, 685 - { Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake }, 686 - { Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake }, 377 + /* PMU */ 378 + { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi }, 379 + { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi }, 380 + { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, 381 + { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, 382 + { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi }, 383 + { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi }, 384 + { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi }, 385 + { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi }, 386 + { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi }, 387 + { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi }, 388 + { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, 389 + { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi }, 390 + { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi }, 687 391 688 392 { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, 689 393 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, 690 394 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, 691 395 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, 692 396 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, 397 + }; 693 398 399 + static const struct sys_reg_desc cp15_64_regs[] = { 400 + { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, 694 401 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, 695 402 }; 696 403 ··· 755 454 return 1; 756 455 } 757 456 758 - int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) 457 + /* 458 + * emulate_cp -- tries to match a sys_reg access in a handling table, and 459 + * call the corresponding trap handler. 460 + * 461 + * @params: pointer to the descriptor of the access 462 + * @table: array of trap descriptors 463 + * @num: size of the trap descriptor array 464 + * 465 + * Return 0 if the access has been handled, and -1 if not. 
466 + */ 467 + static int emulate_cp(struct kvm_vcpu *vcpu, 468 + const struct sys_reg_params *params, 469 + const struct sys_reg_desc *table, 470 + size_t num) 759 471 { 760 - kvm_inject_undefined(vcpu); 761 - return 1; 762 - } 472 + const struct sys_reg_desc *r; 763 473 764 - static void emulate_cp15(struct kvm_vcpu *vcpu, 765 - const struct sys_reg_params *params) 766 - { 767 - size_t num; 768 - const struct sys_reg_desc *table, *r; 474 + if (!table) 475 + return -1; /* Not handled */ 769 476 770 - table = get_target_table(vcpu->arch.target, false, &num); 771 - 772 - /* Search target-specific then generic table. */ 773 477 r = find_reg(params, table, num); 774 - if (!r) 775 - r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); 776 478 777 - if (likely(r)) { 479 + if (r) { 778 480 /* 779 481 * Not having an accessor means that we have 780 482 * configured a trap that we don't know how to ··· 789 485 if (likely(r->access(vcpu, params, r))) { 790 486 /* Skip instruction, since it was emulated */ 791 487 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 792 - return; 793 488 } 794 - /* If access function fails, it should complain. */ 489 + 490 + /* Handled */ 491 + return 0; 795 492 } 796 493 797 - kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu)); 494 + /* Not handled */ 495 + return -1; 496 + } 497 + 498 + static void unhandled_cp_access(struct kvm_vcpu *vcpu, 499 + struct sys_reg_params *params) 500 + { 501 + u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); 502 + int cp; 503 + 504 + switch(hsr_ec) { 505 + case ESR_EL2_EC_CP15_32: 506 + case ESR_EL2_EC_CP15_64: 507 + cp = 15; 508 + break; 509 + case ESR_EL2_EC_CP14_MR: 510 + case ESR_EL2_EC_CP14_64: 511 + cp = 14; 512 + break; 513 + default: 514 + WARN_ON((cp = -1)); 515 + } 516 + 517 + kvm_err("Unsupported guest CP%d access at: %08lx\n", 518 + cp, *vcpu_pc(vcpu)); 798 519 print_sys_reg_instr(params); 799 520 kvm_inject_undefined(vcpu); 800 521 } 801 522 802 523 /** 803 - * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access 524 + * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access 804 525 * @vcpu: The VCPU pointer 805 526 * @run: The kvm_run struct 806 527 */ 807 - int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 528 + static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, 529 + const struct sys_reg_desc *global, 530 + size_t nr_global, 531 + const struct sys_reg_desc *target_specific, 532 + size_t nr_specific) 808 533 { 809 534 struct sys_reg_params params; 810 535 u32 hsr = kvm_vcpu_get_hsr(vcpu); ··· 862 529 *vcpu_reg(vcpu, params.Rt) = val; 863 530 } 864 531 865 - emulate_cp15(vcpu, &params); 532 + if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) 533 + goto out; 534 + if (!emulate_cp(vcpu, &params, global, nr_global)) 535 + goto out; 866 536 537 + unhandled_cp_access(vcpu, &params); 538 + 539 + out: 867 540 /* Do the opposite hack for the read side */ 868 541 if (!params.is_write) { 869 542 u64 val = *vcpu_reg(vcpu, params.Rt); ··· 885 546 * @vcpu: The VCPU pointer 886 547 * @run: The kvm_run struct 887 548 */ 888 - int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 549 + static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, 550 + const struct sys_reg_desc *global, 551 + size_t nr_global, 552 + const struct sys_reg_desc *target_specific, 553 + size_t nr_specific) 889 554 { 890 555 struct sys_reg_params params; 891 556 u32 hsr = kvm_vcpu_get_hsr(vcpu); ··· 904 561 params.Op1 = (hsr >> 14) & 0x7; 905 562 params.Op2 = (hsr >> 17) & 0x7; 906 
563 907 - emulate_cp15(vcpu, &params); 564 + if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) 565 + return 1; 566 + if (!emulate_cp(vcpu, &params, global, nr_global)) 567 + return 1; 568 + 569 + unhandled_cp_access(vcpu, &params); 908 570 return 1; 571 + } 572 + 573 + int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 574 + { 575 + const struct sys_reg_desc *target_specific; 576 + size_t num; 577 + 578 + target_specific = get_target_table(vcpu->arch.target, false, &num); 579 + return kvm_handle_cp_64(vcpu, 580 + cp15_64_regs, ARRAY_SIZE(cp15_64_regs), 581 + target_specific, num); 582 + } 583 + 584 + int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 585 + { 586 + const struct sys_reg_desc *target_specific; 587 + size_t num; 588 + 589 + target_specific = get_target_table(vcpu->arch.target, false, &num); 590 + return kvm_handle_cp_32(vcpu, 591 + cp15_regs, ARRAY_SIZE(cp15_regs), 592 + target_specific, num); 593 + } 594 + 595 + int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 596 + { 597 + return kvm_handle_cp_64(vcpu, 598 + cp14_64_regs, ARRAY_SIZE(cp14_64_regs), 599 + NULL, 0); 600 + } 601 + 602 + int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 603 + { 604 + return kvm_handle_cp_32(vcpu, 605 + cp14_regs, ARRAY_SIZE(cp14_regs), 606 + NULL, 0); 909 607 } 910 608 911 609 static int emulate_sys_reg(struct kvm_vcpu *vcpu, ··· 1160 776 NULL, get_ctr_el0 }, 1161 777 }; 1162 778 1163 - static int reg_from_user(void *val, const void __user *uaddr, u64 id) 779 + static int reg_from_user(u64 *val, const void __user *uaddr, u64 id) 1164 780 { 1165 - /* This Just Works because we are little endian. */ 1166 781 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) 1167 782 return -EFAULT; 1168 783 return 0; 1169 784 } 1170 785 1171 - static int reg_to_user(void __user *uaddr, const void *val, u64 id) 786 + static int reg_to_user(void __user *uaddr, const u64 *val, u64 id) 1172 787 { 1173 - /* This Just Works because we are little endian. */ 1174 788 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) 1175 789 return -EFAULT; 1176 790 return 0; ··· 1344 962 1345 963 static int write_demux_regids(u64 __user *uindices) 1346 964 { 1347 - u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; 965 + u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; 1348 966 unsigned int i; 1349 967 1350 968 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; ··· 1451 1069 return write_demux_regids(uindices); 1452 1070 } 1453 1071 1072 + static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n) 1073 + { 1074 + unsigned int i; 1075 + 1076 + for (i = 1; i < n; i++) { 1077 + if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) { 1078 + kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1); 1079 + return 1; 1080 + } 1081 + } 1082 + 1083 + return 0; 1084 + } 1085 + 1454 1086 void kvm_sys_reg_table_init(void) 1455 1087 { 1456 1088 unsigned int i; 1457 1089 struct sys_reg_desc clidr; 1458 1090 1459 1091 /* Make sure tables are unique and in order. 
*/ 1460 - for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++) 1461 - BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0); 1092 + BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs))); 1093 + BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs))); 1094 + BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs))); 1095 + BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs))); 1096 + BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs))); 1097 + BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs))); 1462 1098 1463 1099 /* We abuse the reset function to overwrite the table itself. */ 1464 1100 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
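
For reference, each DBG_BCR_BVR_WCR_WVR_EL1(n) use in the table above expands to four sys_reg_desc entries routed through trap_debug_regs; with n = 1, for instance (whitespace tidied):

	/* DBGBVR1_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((1)), Op2(0b100),
	  trap_debug_regs, reset_val, (DBGBVR0_EL1 + (1)), 0 },
	/* DBGBCR1_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((1)), Op2(0b101),
	  trap_debug_regs, reset_val, (DBGBCR0_EL1 + (1)), 0 },
	/* DBGWVR1_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((1)), Op2(0b110),
	  trap_debug_regs, reset_val, (DBGWVR0_EL1 + (1)), 0 },
	/* DBGWCR1_EL1 */
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((1)), Op2(0b111),
	  trap_debug_regs, reset_val, (DBGWCR0_EL1 + (1)), 0 },

This also explains the interleaving in sys_reg_descs: the table must stay sorted by encoding, so MDCCINT_EL1 and MDSCR_EL1 (CRm 0b0010) have to sit between the n = 1 and n = 2 expansions.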
+133
arch/arm64/kvm/vgic-v2-switch.S
··· 1 + /* 2 + * Copyright (C) 2012,2013 - ARM Ltd 3 + * Author: Marc Zyngier <marc.zyngier@arm.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 + */ 17 + 18 + #include <linux/linkage.h> 19 + #include <linux/irqchip/arm-gic.h> 20 + 21 + #include <asm/assembler.h> 22 + #include <asm/memory.h> 23 + #include <asm/asm-offsets.h> 24 + #include <asm/kvm.h> 25 + #include <asm/kvm_asm.h> 26 + #include <asm/kvm_arm.h> 27 + #include <asm/kvm_mmu.h> 28 + 29 + .text 30 + .pushsection .hyp.text, "ax" 31 + 32 + /* 33 + * Save the VGIC CPU state into memory 34 + * x0: Register pointing to VCPU struct 35 + * Do not corrupt x1!!! 36 + */ 37 + ENTRY(__save_vgic_v2_state) 38 + __save_vgic_v2_state: 39 + /* Get VGIC VCTRL base into x2 */ 40 + ldr x2, [x0, #VCPU_KVM] 41 + kern_hyp_va x2 42 + ldr x2, [x2, #KVM_VGIC_VCTRL] 43 + kern_hyp_va x2 44 + cbz x2, 2f // disabled 45 + 46 + /* Compute the address of struct vgic_cpu */ 47 + add x3, x0, #VCPU_VGIC_CPU 48 + 49 + /* Save all interesting registers */ 50 + ldr w4, [x2, #GICH_HCR] 51 + ldr w5, [x2, #GICH_VMCR] 52 + ldr w6, [x2, #GICH_MISR] 53 + ldr w7, [x2, #GICH_EISR0] 54 + ldr w8, [x2, #GICH_EISR1] 55 + ldr w9, [x2, #GICH_ELRSR0] 56 + ldr w10, [x2, #GICH_ELRSR1] 57 + ldr w11, [x2, #GICH_APR] 58 + CPU_BE( rev w4, w4 ) 59 + CPU_BE( rev w5, w5 ) 60 + CPU_BE( rev w6, w6 ) 61 + CPU_BE( rev w7, w7 ) 62 + CPU_BE( rev w8, w8 ) 63 + CPU_BE( rev w9, w9 ) 64 + CPU_BE( rev w10, w10 ) 65 + CPU_BE( rev w11, w11 ) 66 + 67 + str w4, [x3, #VGIC_V2_CPU_HCR] 68 + str w5, [x3, #VGIC_V2_CPU_VMCR] 69 + str w6, [x3, #VGIC_V2_CPU_MISR] 70 + str w7, [x3, #VGIC_V2_CPU_EISR] 71 + str w8, [x3, #(VGIC_V2_CPU_EISR + 4)] 72 + str w9, [x3, #VGIC_V2_CPU_ELRSR] 73 + str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] 74 + str w11, [x3, #VGIC_V2_CPU_APR] 75 + 76 + /* Clear GICH_HCR */ 77 + str wzr, [x2, #GICH_HCR] 78 + 79 + /* Save list registers */ 80 + add x2, x2, #GICH_LR0 81 + ldr w4, [x3, #VGIC_CPU_NR_LR] 82 + add x3, x3, #VGIC_V2_CPU_LR 83 + 1: ldr w5, [x2], #4 84 + CPU_BE( rev w5, w5 ) 85 + str w5, [x3], #4 86 + sub w4, w4, #1 87 + cbnz w4, 1b 88 + 2: 89 + ret 90 + ENDPROC(__save_vgic_v2_state) 91 + 92 + /* 93 + * Restore the VGIC CPU state from memory 94 + * x0: Register pointing to VCPU struct 95 + */ 96 + ENTRY(__restore_vgic_v2_state) 97 + __restore_vgic_v2_state: 98 + /* Get VGIC VCTRL base into x2 */ 99 + ldr x2, [x0, #VCPU_KVM] 100 + kern_hyp_va x2 101 + ldr x2, [x2, #KVM_VGIC_VCTRL] 102 + kern_hyp_va x2 103 + cbz x2, 2f // disabled 104 + 105 + /* Compute the address of struct vgic_cpu */ 106 + add x3, x0, #VCPU_VGIC_CPU 107 + 108 + /* We only restore a minimal set of registers */ 109 + ldr w4, [x3, #VGIC_V2_CPU_HCR] 110 + ldr w5, [x3, #VGIC_V2_CPU_VMCR] 111 + ldr w6, [x3, #VGIC_V2_CPU_APR] 112 + CPU_BE( rev w4, w4 ) 113 + CPU_BE( rev w5, w5 ) 114 + CPU_BE( rev w6, w6 ) 115 + 116 + str w4, [x2, #GICH_HCR] 117 + str w5, [x2, #GICH_VMCR] 118 + str w6, [x2, #GICH_APR] 119 + 120 + /* Restore list registers */ 121 + add x2, 
x2, #GICH_LR0 122 + ldr w4, [x3, #VGIC_CPU_NR_LR] 123 + add x3, x3, #VGIC_V2_CPU_LR 124 + 1: ldr w5, [x3], #4 125 + CPU_BE( rev w5, w5 ) 126 + str w5, [x2], #4 127 + sub w4, w4, #1 128 + cbnz w4, 1b 129 + 2: 130 + ret 131 + ENDPROC(__restore_vgic_v2_state) 132 + 133 + .popsection
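
The list-register loop at label 1: in __save_vgic_v2_state corresponds, roughly, to the following C; readl_relaxed() stands in for the CPU_BE(rev) byte-swap fixups (a sketch, not code from this patch):

	/* Sketch of the GICv2 LR save loop. */
	void __iomem *lr_base = vctrl_base + GICH_LR0;
	int i;

	for (i = 0; i < vgic_cpu->nr_lr; i++)
		vgic_cpu->vgic_v2.vgic_lr[i] = readl_relaxed(lr_base + i * 4);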
+267
arch/arm64/kvm/vgic-v3-switch.S
··· 1 + /* 2 + * Copyright (C) 2012,2013 - ARM Ltd 3 + * Author: Marc Zyngier <marc.zyngier@arm.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 + */ 17 + 18 + #include <linux/linkage.h> 19 + #include <linux/irqchip/arm-gic-v3.h> 20 + 21 + #include <asm/assembler.h> 22 + #include <asm/memory.h> 23 + #include <asm/asm-offsets.h> 24 + #include <asm/kvm.h> 25 + #include <asm/kvm_asm.h> 26 + #include <asm/kvm_arm.h> 27 + 28 + .text 29 + .pushsection .hyp.text, "ax" 30 + 31 + /* 32 + * We store LRs in reverse order to let the CPU deal with streaming 33 + * access. Use this macro to make it look saner... 34 + */ 35 + #define LR_OFFSET(n) (VGIC_V3_CPU_LR + (15 - n) * 8) 36 + 37 + /* 38 + * Save the VGIC CPU state into memory 39 + * x0: Register pointing to VCPU struct 40 + * Do not corrupt x1!!! 41 + */ 42 + .macro save_vgic_v3_state 43 + // Compute the address of struct vgic_cpu 44 + add x3, x0, #VCPU_VGIC_CPU 45 + 46 + // Make sure stores to the GIC via the memory mapped interface 47 + // are now visible to the system register interface 48 + dsb st 49 + 50 + // Save all interesting registers 51 + mrs_s x4, ICH_HCR_EL2 52 + mrs_s x5, ICH_VMCR_EL2 53 + mrs_s x6, ICH_MISR_EL2 54 + mrs_s x7, ICH_EISR_EL2 55 + mrs_s x8, ICH_ELSR_EL2 56 + 57 + str w4, [x3, #VGIC_V3_CPU_HCR] 58 + str w5, [x3, #VGIC_V3_CPU_VMCR] 59 + str w6, [x3, #VGIC_V3_CPU_MISR] 60 + str w7, [x3, #VGIC_V3_CPU_EISR] 61 + str w8, [x3, #VGIC_V3_CPU_ELRSR] 62 + 63 + msr_s ICH_HCR_EL2, xzr 64 + 65 + mrs_s x21, ICH_VTR_EL2 66 + mvn w22, w21 67 + ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4 68 + 69 + adr x24, 1f 70 + add x24, x24, x23 71 + br x24 72 + 73 + 1: 74 + mrs_s x20, ICH_LR15_EL2 75 + mrs_s x19, ICH_LR14_EL2 76 + mrs_s x18, ICH_LR13_EL2 77 + mrs_s x17, ICH_LR12_EL2 78 + mrs_s x16, ICH_LR11_EL2 79 + mrs_s x15, ICH_LR10_EL2 80 + mrs_s x14, ICH_LR9_EL2 81 + mrs_s x13, ICH_LR8_EL2 82 + mrs_s x12, ICH_LR7_EL2 83 + mrs_s x11, ICH_LR6_EL2 84 + mrs_s x10, ICH_LR5_EL2 85 + mrs_s x9, ICH_LR4_EL2 86 + mrs_s x8, ICH_LR3_EL2 87 + mrs_s x7, ICH_LR2_EL2 88 + mrs_s x6, ICH_LR1_EL2 89 + mrs_s x5, ICH_LR0_EL2 90 + 91 + adr x24, 1f 92 + add x24, x24, x23 93 + br x24 94 + 95 + 1: 96 + str x20, [x3, #LR_OFFSET(15)] 97 + str x19, [x3, #LR_OFFSET(14)] 98 + str x18, [x3, #LR_OFFSET(13)] 99 + str x17, [x3, #LR_OFFSET(12)] 100 + str x16, [x3, #LR_OFFSET(11)] 101 + str x15, [x3, #LR_OFFSET(10)] 102 + str x14, [x3, #LR_OFFSET(9)] 103 + str x13, [x3, #LR_OFFSET(8)] 104 + str x12, [x3, #LR_OFFSET(7)] 105 + str x11, [x3, #LR_OFFSET(6)] 106 + str x10, [x3, #LR_OFFSET(5)] 107 + str x9, [x3, #LR_OFFSET(4)] 108 + str x8, [x3, #LR_OFFSET(3)] 109 + str x7, [x3, #LR_OFFSET(2)] 110 + str x6, [x3, #LR_OFFSET(1)] 111 + str x5, [x3, #LR_OFFSET(0)] 112 + 113 + tbnz w21, #29, 6f // 6 bits 114 + tbz w21, #30, 5f // 5 bits 115 + // 7 bits 116 + mrs_s x20, ICH_AP0R3_EL2 117 + str w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)] 118 + mrs_s x19, ICH_AP0R2_EL2 119 + str w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)] 120 + 6: 
mrs_s x18, ICH_AP0R1_EL2 121 + str w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)] 122 + 5: mrs_s x17, ICH_AP0R0_EL2 123 + str w17, [x3, #VGIC_V3_CPU_AP0R] 124 + 125 + tbnz w21, #29, 6f // 6 bits 126 + tbz w21, #30, 5f // 5 bits 127 + // 7 bits 128 + mrs_s x20, ICH_AP1R3_EL2 129 + str w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)] 130 + mrs_s x19, ICH_AP1R2_EL2 131 + str w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)] 132 + 6: mrs_s x18, ICH_AP1R1_EL2 133 + str w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)] 134 + 5: mrs_s x17, ICH_AP1R0_EL2 135 + str w17, [x3, #VGIC_V3_CPU_AP1R] 136 + 137 + // Restore SRE_EL1 access and re-enable SRE at EL1. 138 + mrs_s x5, ICC_SRE_EL2 139 + orr x5, x5, #ICC_SRE_EL2_ENABLE 140 + msr_s ICC_SRE_EL2, x5 141 + isb 142 + mov x5, #1 143 + msr_s ICC_SRE_EL1, x5 144 + .endm 145 + 146 + /* 147 + * Restore the VGIC CPU state from memory 148 + * x0: Register pointing to VCPU struct 149 + */ 150 + .macro restore_vgic_v3_state 151 + // Disable SRE_EL1 access. Necessary, otherwise 152 + // ICH_VMCR_EL2.VFIQEn becomes one, and FIQ happens... 153 + msr_s ICC_SRE_EL1, xzr 154 + isb 155 + 156 + // Compute the address of struct vgic_cpu 157 + add x3, x0, #VCPU_VGIC_CPU 158 + 159 + // Restore all interesting registers 160 + ldr w4, [x3, #VGIC_V3_CPU_HCR] 161 + ldr w5, [x3, #VGIC_V3_CPU_VMCR] 162 + 163 + msr_s ICH_HCR_EL2, x4 164 + msr_s ICH_VMCR_EL2, x5 165 + 166 + mrs_s x21, ICH_VTR_EL2 167 + 168 + tbnz w21, #29, 6f // 6 bits 169 + tbz w21, #30, 5f // 5 bits 170 + // 7 bits 171 + ldr w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)] 172 + msr_s ICH_AP1R3_EL2, x20 173 + ldr w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)] 174 + msr_s ICH_AP1R2_EL2, x19 175 + 6: ldr w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)] 176 + msr_s ICH_AP1R1_EL2, x18 177 + 5: ldr w17, [x3, #VGIC_V3_CPU_AP1R] 178 + msr_s ICH_AP1R0_EL2, x17 179 + 180 + tbnz w21, #29, 6f // 6 bits 181 + tbz w21, #30, 5f // 5 bits 182 + // 7 bits 183 + ldr w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)] 184 + msr_s ICH_AP0R3_EL2, x20 185 + ldr w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)] 186 + msr_s ICH_AP0R2_EL2, x19 187 + 6: ldr w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)] 188 + msr_s ICH_AP0R1_EL2, x18 189 + 5: ldr w17, [x3, #VGIC_V3_CPU_AP0R] 190 + msr_s ICH_AP0R0_EL2, x17 191 + 192 + and w22, w21, #0xf 193 + mvn w22, w21 194 + ubfiz w23, w22, 2, 4 // w23 = (15 - ListRegs) * 4 195 + 196 + adr x24, 1f 197 + add x24, x24, x23 198 + br x24 199 + 200 + 1: 201 + ldr x20, [x3, #LR_OFFSET(15)] 202 + ldr x19, [x3, #LR_OFFSET(14)] 203 + ldr x18, [x3, #LR_OFFSET(13)] 204 + ldr x17, [x3, #LR_OFFSET(12)] 205 + ldr x16, [x3, #LR_OFFSET(11)] 206 + ldr x15, [x3, #LR_OFFSET(10)] 207 + ldr x14, [x3, #LR_OFFSET(9)] 208 + ldr x13, [x3, #LR_OFFSET(8)] 209 + ldr x12, [x3, #LR_OFFSET(7)] 210 + ldr x11, [x3, #LR_OFFSET(6)] 211 + ldr x10, [x3, #LR_OFFSET(5)] 212 + ldr x9, [x3, #LR_OFFSET(4)] 213 + ldr x8, [x3, #LR_OFFSET(3)] 214 + ldr x7, [x3, #LR_OFFSET(2)] 215 + ldr x6, [x3, #LR_OFFSET(1)] 216 + ldr x5, [x3, #LR_OFFSET(0)] 217 + 218 + adr x24, 1f 219 + add x24, x24, x23 220 + br x24 221 + 222 + 1: 223 + msr_s ICH_LR15_EL2, x20 224 + msr_s ICH_LR14_EL2, x19 225 + msr_s ICH_LR13_EL2, x18 226 + msr_s ICH_LR12_EL2, x17 227 + msr_s ICH_LR11_EL2, x16 228 + msr_s ICH_LR10_EL2, x15 229 + msr_s ICH_LR9_EL2, x14 230 + msr_s ICH_LR8_EL2, x13 231 + msr_s ICH_LR7_EL2, x12 232 + msr_s ICH_LR6_EL2, x11 233 + msr_s ICH_LR5_EL2, x10 234 + msr_s ICH_LR4_EL2, x9 235 + msr_s ICH_LR3_EL2, x8 236 + msr_s ICH_LR2_EL2, x7 237 + msr_s ICH_LR1_EL2, x6 238 + msr_s ICH_LR0_EL2, x5 239 + 240 + // Ensure that the above will have reached the 241 + // (re)distributors. 
This ensures the guest will read
242 + // the correct values from the memory-mapped interface.
243 + isb
244 + dsb sy
245 +
246 + // Prevent the guest from touching the GIC system registers
247 + mrs_s x5, ICC_SRE_EL2
248 + and x5, x5, #~ICC_SRE_EL2_ENABLE
249 + msr_s ICC_SRE_EL2, x5
250 + .endm
251 +
252 + ENTRY(__save_vgic_v3_state)
253 + save_vgic_v3_state
254 + ret
255 + ENDPROC(__save_vgic_v3_state)
256 +
257 + ENTRY(__restore_vgic_v3_state)
258 + restore_vgic_v3_state
259 + ret
260 + ENDPROC(__restore_vgic_v3_state)
261 +
262 + ENTRY(__vgic_v3_get_ich_vtr_el2)
263 + mrs_s x0, ICH_VTR_EL2
264 + ret
265 + ENDPROC(__vgic_v3_get_ich_vtr_el2)
266 +
267 + .popsection
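
The offset arithmetic around ICH_VTR_EL2 in both macros is compact: mvn/ubfiz turn the ListRegs field (bits [3:0], number of implemented LRs minus one) into the byte offset of the first mrs_s/str pair to execute. In C terms (a sketch of the computation only):

	/* ICH_VTR_EL2[3:0] == ListRegs == number of LRs - 1 */
	u32 list_regs = ich_vtr & 0xf;
	u32 skip_bytes = (15 - list_regs) * 4;	/* one 4-byte insn per LR */
	/* branch target = label "1:" + skip_bytes, i.e. start at the
	   highest implemented list register */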
+14
include/kvm/arm_arch_timer.h
··· 67 67 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); 68 68 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); 69 69 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); 70 + 71 + u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); 72 + int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); 73 + 70 74 #else 71 75 static inline int kvm_timer_hyp_init(void) 72 76 { ··· 88 84 static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {} 89 85 static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {} 90 86 static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {} 87 + 88 + static inline int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) 89 + { 90 + return 0; 91 + } 92 + 93 + static inline u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) 94 + { 95 + return 0; 96 + } 91 97 #endif 92 98 93 99 #endif
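
The stub side of arm_arch_timer.h keeps callers free of config #ifdefs: generic code can call the accessors unconditionally and gets a harmless 0 when the timer is not built in. A hypothetical caller-side sketch (get_timer_reg() and its put_user() plumbing are illustrative, not from this patch):

	static int get_timer_reg(struct kvm_vcpu *vcpu, u64 regid,
				 u64 __user *uaddr)
	{
		u64 val = kvm_arm_timer_get_reg(vcpu, regid);

		return put_user(val, uaddr);
	}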
+104 -11
include/kvm/arm_vgic.h
··· 24 24 #include <linux/irqreturn.h> 25 25 #include <linux/spinlock.h> 26 26 #include <linux/types.h> 27 - #include <linux/irqchip/arm-gic.h> 28 27 29 28 #define VGIC_NR_IRQS 256 30 29 #define VGIC_NR_SGIS 16 ··· 31 32 #define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) 32 33 #define VGIC_NR_SHARED_IRQS (VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS) 33 34 #define VGIC_MAX_CPUS KVM_MAX_VCPUS 34 - #define VGIC_MAX_LRS (1 << 6) 35 + 36 + #define VGIC_V2_MAX_LRS (1 << 6) 37 + #define VGIC_V3_MAX_LRS 16 35 38 36 39 /* Sanity checks... */ 37 40 #if (VGIC_MAX_CPUS > 8) ··· 69 68 u32 shared[VGIC_NR_SHARED_IRQS / 4]; 70 69 }; 71 70 71 + struct kvm_vcpu; 72 + 73 + enum vgic_type { 74 + VGIC_V2, /* Good ol' GICv2 */ 75 + VGIC_V3, /* New fancy GICv3 */ 76 + }; 77 + 78 + #define LR_STATE_PENDING (1 << 0) 79 + #define LR_STATE_ACTIVE (1 << 1) 80 + #define LR_STATE_MASK (3 << 0) 81 + #define LR_EOI_INT (1 << 2) 82 + 83 + struct vgic_lr { 84 + u16 irq; 85 + u8 source; 86 + u8 state; 87 + }; 88 + 89 + struct vgic_vmcr { 90 + u32 ctlr; 91 + u32 abpr; 92 + u32 bpr; 93 + u32 pmr; 94 + }; 95 + 96 + struct vgic_ops { 97 + struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int); 98 + void (*set_lr)(struct kvm_vcpu *, int, struct vgic_lr); 99 + void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr); 100 + u64 (*get_elrsr)(const struct kvm_vcpu *vcpu); 101 + u64 (*get_eisr)(const struct kvm_vcpu *vcpu); 102 + u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu); 103 + void (*enable_underflow)(struct kvm_vcpu *vcpu); 104 + void (*disable_underflow)(struct kvm_vcpu *vcpu); 105 + void (*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 106 + void (*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 107 + void (*enable)(struct kvm_vcpu *vcpu); 108 + }; 109 + 110 + struct vgic_params { 111 + /* vgic type */ 112 + enum vgic_type type; 113 + /* Physical address of vgic virtual cpu interface */ 114 + phys_addr_t vcpu_base; 115 + /* Number of list registers */ 116 + u32 nr_lr; 117 + /* Interrupt number */ 118 + unsigned int maint_irq; 119 + /* Virtual control interface base address */ 120 + void __iomem *vctrl_base; 121 + }; 122 + 72 123 struct vgic_dist { 73 124 #ifdef CONFIG_KVM_ARM_VGIC 74 125 spinlock_t lock; 126 + bool in_kernel; 75 127 bool ready; 76 128 77 129 /* Virtual control interface mapping */ ··· 164 110 #endif 165 111 }; 166 112 113 + struct vgic_v2_cpu_if { 114 + u32 vgic_hcr; 115 + u32 vgic_vmcr; 116 + u32 vgic_misr; /* Saved only */ 117 + u32 vgic_eisr[2]; /* Saved only */ 118 + u32 vgic_elrsr[2]; /* Saved only */ 119 + u32 vgic_apr; 120 + u32 vgic_lr[VGIC_V2_MAX_LRS]; 121 + }; 122 + 123 + struct vgic_v3_cpu_if { 124 + #ifdef CONFIG_ARM_GIC_V3 125 + u32 vgic_hcr; 126 + u32 vgic_vmcr; 127 + u32 vgic_misr; /* Saved only */ 128 + u32 vgic_eisr; /* Saved only */ 129 + u32 vgic_elrsr; /* Saved only */ 130 + u32 vgic_ap0r[4]; 131 + u32 vgic_ap1r[4]; 132 + u64 vgic_lr[VGIC_V3_MAX_LRS]; 133 + #endif 134 + }; 135 + 167 136 struct vgic_cpu { 168 137 #ifdef CONFIG_KVM_ARM_VGIC 169 138 /* per IRQ to LR mapping */ ··· 197 120 DECLARE_BITMAP( pending_shared, VGIC_NR_SHARED_IRQS); 198 121 199 122 /* Bitmap of used/free list registers */ 200 - DECLARE_BITMAP( lr_used, VGIC_MAX_LRS); 123 + DECLARE_BITMAP( lr_used, VGIC_V2_MAX_LRS); 201 124 202 125 /* Number of list registers on this CPU */ 203 126 int nr_lr; 204 127 205 128 /* CPU vif control registers for world switch */ 206 - u32 vgic_hcr; 207 - u32 vgic_vmcr; 208 - u32 vgic_misr; /* Saved only */ 209 - u32 vgic_eisr[2]; /* Saved only */ 210 - 
u32 vgic_elrsr[2]; /* Saved only */ 211 - u32 vgic_apr; 212 - u32 vgic_lr[VGIC_MAX_LRS]; 129 + union { 130 + struct vgic_v2_cpu_if vgic_v2; 131 + struct vgic_v3_cpu_if vgic_v3; 132 + }; 213 133 #endif 214 134 }; 215 135 216 136 #define LR_EMPTY 0xff 137 + 138 + #define INT_STATUS_EOI (1 << 0) 139 + #define INT_STATUS_UNDERFLOW (1 << 1) 217 140 218 141 struct kvm; 219 142 struct kvm_vcpu; ··· 234 157 bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, 235 158 struct kvm_exit_mmio *mmio); 236 159 237 - #define irqchip_in_kernel(k) (!!((k)->arch.vgic.vctrl_base)) 160 + #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) 238 161 #define vgic_initialized(k) ((k)->arch.vgic.ready) 162 + 163 + int vgic_v2_probe(struct device_node *vgic_node, 164 + const struct vgic_ops **ops, 165 + const struct vgic_params **params); 166 + #ifdef CONFIG_ARM_GIC_V3 167 + int vgic_v3_probe(struct device_node *vgic_node, 168 + const struct vgic_ops **ops, 169 + const struct vgic_params **params); 170 + #else 171 + static inline int vgic_v3_probe(struct device_node *vgic_node, 172 + const struct vgic_ops **ops, 173 + const struct vgic_params **params) 174 + { 175 + return -ENODEV; 176 + } 177 + #endif 239 178 240 179 #else 241 180 static inline int kvm_vgic_hyp_init(void)
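
With struct vgic_ops in place, the generic VGIC code can stay hardware-agnostic: it keeps the ops/params pointers filled in by whichever probe succeeded and dispatches through them. A minimal sketch of that indirection (the wrapper name and static variables are hypothetical):

	/* Set once at init time from vgic_v2_probe()/vgic_v3_probe(). */
	static const struct vgic_ops *vgic_ops;
	static const struct vgic_params *vgic;

	static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
	{
		return vgic_ops->get_lr(vcpu, lr);	/* v2 or v3 backend */
	}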
+265
virt/kvm/arm/vgic-v2.c
··· 1 + /* 2 + * Copyright (C) 2012,2013 ARM Limited, All Rights Reserved. 3 + * Author: Marc Zyngier <marc.zyngier@arm.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 + */ 17 + 18 + #include <linux/cpu.h> 19 + #include <linux/kvm.h> 20 + #include <linux/kvm_host.h> 21 + #include <linux/interrupt.h> 22 + #include <linux/io.h> 23 + #include <linux/of.h> 24 + #include <linux/of_address.h> 25 + #include <linux/of_irq.h> 26 + 27 + #include <linux/irqchip/arm-gic.h> 28 + 29 + #include <asm/kvm_emulate.h> 30 + #include <asm/kvm_arm.h> 31 + #include <asm/kvm_mmu.h> 32 + 33 + static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr) 34 + { 35 + struct vgic_lr lr_desc; 36 + u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr]; 37 + 38 + lr_desc.irq = val & GICH_LR_VIRTUALID; 39 + if (lr_desc.irq <= 15) 40 + lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7; 41 + else 42 + lr_desc.source = 0; 43 + lr_desc.state = 0; 44 + 45 + if (val & GICH_LR_PENDING_BIT) 46 + lr_desc.state |= LR_STATE_PENDING; 47 + if (val & GICH_LR_ACTIVE_BIT) 48 + lr_desc.state |= LR_STATE_ACTIVE; 49 + if (val & GICH_LR_EOI) 50 + lr_desc.state |= LR_EOI_INT; 51 + 52 + return lr_desc; 53 + } 54 + 55 + static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr, 56 + struct vgic_lr lr_desc) 57 + { 58 + u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq; 59 + 60 + if (lr_desc.state & LR_STATE_PENDING) 61 + lr_val |= GICH_LR_PENDING_BIT; 62 + if (lr_desc.state & LR_STATE_ACTIVE) 63 + lr_val |= GICH_LR_ACTIVE_BIT; 64 + if (lr_desc.state & LR_EOI_INT) 65 + lr_val |= GICH_LR_EOI; 66 + 67 + vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val; 68 + } 69 + 70 + static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, 71 + struct vgic_lr lr_desc) 72 + { 73 + if (!(lr_desc.state & LR_STATE_MASK)) 74 + set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr); 75 + } 76 + 77 + static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu) 78 + { 79 + u64 val; 80 + 81 + #if BITS_PER_LONG == 64 82 + val = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1]; 83 + val <<= 32; 84 + val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0]; 85 + #else 86 + val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr; 87 + #endif 88 + return val; 89 + } 90 + 91 + static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu) 92 + { 93 + u64 val; 94 + 95 + #if BITS_PER_LONG == 64 96 + val = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1]; 97 + val <<= 32; 98 + val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0]; 99 + #else 100 + val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr; 101 + #endif 102 + return val; 103 + } 104 + 105 + static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu) 106 + { 107 + u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr; 108 + u32 ret = 0; 109 + 110 + if (misr & GICH_MISR_EOI) 111 + ret |= INT_STATUS_EOI; 112 + if (misr & GICH_MISR_U) 113 + ret |= INT_STATUS_UNDERFLOW; 114 + 115 + return ret; 116 + } 117 + 118 + static void 
vgic_v2_enable_underflow(struct kvm_vcpu *vcpu) 119 + { 120 + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE; 121 + } 122 + 123 + static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu) 124 + { 125 + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE; 126 + } 127 + 128 + static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) 129 + { 130 + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr; 131 + 132 + vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT; 133 + vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT; 134 + vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT; 135 + vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT; 136 + } 137 + 138 + static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) 139 + { 140 + u32 vmcr; 141 + 142 + vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK; 143 + vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK; 144 + vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK; 145 + vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK; 146 + 147 + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr; 148 + } 149 + 150 + static void vgic_v2_enable(struct kvm_vcpu *vcpu) 151 + { 152 + /* 153 + * By forcing VMCR to zero, the GIC will restore the binary 154 + * points to their reset values. Anything else resets to zero 155 + * anyway. 156 + */ 157 + vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0; 158 + 159 + /* Get the show on the road... */ 160 + vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN; 161 + } 162 + 163 + static const struct vgic_ops vgic_v2_ops = { 164 + .get_lr = vgic_v2_get_lr, 165 + .set_lr = vgic_v2_set_lr, 166 + .sync_lr_elrsr = vgic_v2_sync_lr_elrsr, 167 + .get_elrsr = vgic_v2_get_elrsr, 168 + .get_eisr = vgic_v2_get_eisr, 169 + .get_interrupt_status = vgic_v2_get_interrupt_status, 170 + .enable_underflow = vgic_v2_enable_underflow, 171 + .disable_underflow = vgic_v2_disable_underflow, 172 + .get_vmcr = vgic_v2_get_vmcr, 173 + .set_vmcr = vgic_v2_set_vmcr, 174 + .enable = vgic_v2_enable, 175 + }; 176 + 177 + static struct vgic_params vgic_v2_params; 178 + 179 + /** 180 + * vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT 181 + * @node: pointer to the DT node 182 + * @ops: address of a pointer to the GICv2 operations 183 + * @params: address of a pointer to HW-specific parameters 184 + * 185 + * Returns 0 if a GICv2 has been found, with the low level operations 186 + * in *ops and the HW parameters in *params. Returns an error code 187 + * otherwise. 
188 + */ 189 + int vgic_v2_probe(struct device_node *vgic_node, 190 + const struct vgic_ops **ops, 191 + const struct vgic_params **params) 192 + { 193 + int ret; 194 + struct resource vctrl_res; 195 + struct resource vcpu_res; 196 + struct vgic_params *vgic = &vgic_v2_params; 197 + 198 + vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0); 199 + if (!vgic->maint_irq) { 200 + kvm_err("error getting vgic maintenance irq from DT\n"); 201 + ret = -ENXIO; 202 + goto out; 203 + } 204 + 205 + ret = of_address_to_resource(vgic_node, 2, &vctrl_res); 206 + if (ret) { 207 + kvm_err("Cannot obtain GICH resource\n"); 208 + goto out; 209 + } 210 + 211 + vgic->vctrl_base = of_iomap(vgic_node, 2); 212 + if (!vgic->vctrl_base) { 213 + kvm_err("Cannot ioremap GICH\n"); 214 + ret = -ENOMEM; 215 + goto out; 216 + } 217 + 218 + vgic->nr_lr = readl_relaxed(vgic->vctrl_base + GICH_VTR); 219 + vgic->nr_lr = (vgic->nr_lr & 0x3f) + 1; 220 + 221 + ret = create_hyp_io_mappings(vgic->vctrl_base, 222 + vgic->vctrl_base + resource_size(&vctrl_res), 223 + vctrl_res.start); 224 + if (ret) { 225 + kvm_err("Cannot map VCTRL into hyp\n"); 226 + goto out_unmap; 227 + } 228 + 229 + if (of_address_to_resource(vgic_node, 3, &vcpu_res)) { 230 + kvm_err("Cannot obtain GICV resource\n"); 231 + ret = -ENXIO; 232 + goto out_unmap; 233 + } 234 + 235 + if (!PAGE_ALIGNED(vcpu_res.start)) { 236 + kvm_err("GICV physical address 0x%llx not page aligned\n", 237 + (unsigned long long)vcpu_res.start); 238 + ret = -ENXIO; 239 + goto out_unmap; 240 + } 241 + 242 + if (!PAGE_ALIGNED(resource_size(&vcpu_res))) { 243 + kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n", 244 + (unsigned long long)resource_size(&vcpu_res), 245 + PAGE_SIZE); 246 + ret = -ENXIO; 247 + goto out_unmap; 248 + } 249 + 250 + vgic->vcpu_base = vcpu_res.start; 251 + 252 + kvm_info("%s@%llx IRQ%d\n", vgic_node->name, 253 + vctrl_res.start, vgic->maint_irq); 254 + 255 + vgic->type = VGIC_V2; 256 + *ops = &vgic_v2_ops; 257 + *params = vgic; 258 + goto out; 259 + 260 + out_unmap: 261 + iounmap(vgic->vctrl_base); 262 + out: 263 + of_node_put(vgic_node); 264 + return ret; 265 + }
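
Review note: vgic_v2_get_lr()/vgic_v2_set_lr() above are a pure translation between the hardware GICH_LR_* layout and the hardware-independent struct vgic_lr that the common vgic code now manipulates. The stand-alone sketch below mirrors that translation so the round trip can be compiled and tested outside the kernel; the sw_lr/pack_lr/unpack_lr names are illustrative only, while the bit positions (virtual ID in bits [9:0], SGI source CPU in bits [12:10], EOI maintenance request in bit 19, pending/active in bits 28/29) follow the GICv2 constants used above.

/*
 * Minimal user-space model of the GICv2 list register encoding.
 * Kernel types are replaced by stdint equivalents; build with any
 * C compiler and run: a packed LR must survive the round trip.
 */
#include <assert.h>
#include <stdint.h>

#define LR_VIRTUALID	0x3ffU		/* bits [9:0]: virtual IRQ */
#define LR_CPUID_SHIFT	10		/* bits [12:10]: SGI source */
#define LR_EOI		(1U << 19)	/* maintenance IRQ on EOI */
#define LR_PENDING	(1U << 28)
#define LR_ACTIVE	(1U << 29)

struct sw_lr {
	uint16_t irq;
	uint8_t source;			/* only meaningful for SGIs */
	uint8_t pending, active, eoi_int;
};

static uint32_t pack_lr(struct sw_lr lr)
{
	uint32_t val = ((uint32_t)lr.source << LR_CPUID_SHIFT) | lr.irq;

	if (lr.pending)
		val |= LR_PENDING;
	if (lr.active)
		val |= LR_ACTIVE;
	if (lr.eoi_int)
		val |= LR_EOI;
	return val;
}

static struct sw_lr unpack_lr(uint32_t val)
{
	struct sw_lr lr = {
		.irq = val & LR_VIRTUALID,
		.pending = !!(val & LR_PENDING),
		.active = !!(val & LR_ACTIVE),
		.eoi_int = !!(val & LR_EOI),
	};

	/* The source field is only defined for SGIs (irq <= 15). */
	lr.source = lr.irq <= 15 ? (val >> LR_CPUID_SHIFT) & 0x7 : 0;
	return lr;
}

int main(void)
{
	/* SGI 3 from vCPU 2, pending. */
	struct sw_lr out = unpack_lr(pack_lr((struct sw_lr){
			.irq = 3, .source = 2, .pending = 1 }));

	assert(out.irq == 3 && out.source == 2 && out.pending);
	return 0;
}

Preserving the source CPU across this round trip is what lets the common code in vgic.c piggyback a second SGI from the same source onto an already-allocated LR.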
+247
virt/kvm/arm/vgic-v3.c
··· 1 + /* 2 + * Copyright (C) 2013 ARM Limited, All Rights Reserved. 3 + * Author: Marc Zyngier <marc.zyngier@arm.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 + */ 17 + 18 + #include <linux/cpu.h> 19 + #include <linux/kvm.h> 20 + #include <linux/kvm_host.h> 21 + #include <linux/interrupt.h> 22 + #include <linux/io.h> 23 + #include <linux/of.h> 24 + #include <linux/of_address.h> 25 + #include <linux/of_irq.h> 26 + 27 + #include <linux/irqchip/arm-gic-v3.h> 28 + 29 + #include <asm/kvm_emulate.h> 30 + #include <asm/kvm_arm.h> 31 + #include <asm/kvm_mmu.h> 32 + 33 + /* These are for GICv2 emulation only */ 34 + #define GICH_LR_VIRTUALID (0x3ffUL << 0) 35 + #define GICH_LR_PHYSID_CPUID_SHIFT (10) 36 + #define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) 37 + 38 + /* 39 + * LRs are stored in reverse order in memory. make sure we index them 40 + * correctly. 41 + */ 42 + #define LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) 43 + 44 + static u32 ich_vtr_el2; 45 + 46 + static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr) 47 + { 48 + struct vgic_lr lr_desc; 49 + u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)]; 50 + 51 + lr_desc.irq = val & GICH_LR_VIRTUALID; 52 + if (lr_desc.irq <= 15) 53 + lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7; 54 + else 55 + lr_desc.source = 0; 56 + lr_desc.state = 0; 57 + 58 + if (val & ICH_LR_PENDING_BIT) 59 + lr_desc.state |= LR_STATE_PENDING; 60 + if (val & ICH_LR_ACTIVE_BIT) 61 + lr_desc.state |= LR_STATE_ACTIVE; 62 + if (val & ICH_LR_EOI) 63 + lr_desc.state |= LR_EOI_INT; 64 + 65 + return lr_desc; 66 + } 67 + 68 + static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr, 69 + struct vgic_lr lr_desc) 70 + { 71 + u64 lr_val = (((u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | 72 + lr_desc.irq); 73 + 74 + if (lr_desc.state & LR_STATE_PENDING) 75 + lr_val |= ICH_LR_PENDING_BIT; 76 + if (lr_desc.state & LR_STATE_ACTIVE) 77 + lr_val |= ICH_LR_ACTIVE_BIT; 78 + if (lr_desc.state & LR_EOI_INT) 79 + lr_val |= ICH_LR_EOI; 80 + 81 + vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val; 82 + } 83 + 84 + static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, 85 + struct vgic_lr lr_desc) 86 + { 87 + if (!(lr_desc.state & LR_STATE_MASK)) 88 + vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr); 89 + } 90 + 91 + static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu) 92 + { 93 + return vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr; 94 + } 95 + 96 + static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu) 97 + { 98 + return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr; 99 + } 100 + 101 + static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu) 102 + { 103 + u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr; 104 + u32 ret = 0; 105 + 106 + if (misr & ICH_MISR_EOI) 107 + ret |= INT_STATUS_EOI; 108 + if (misr & ICH_MISR_U) 109 + ret |= INT_STATUS_UNDERFLOW; 110 + 111 + return ret; 112 + } 113 + 114 + static void vgic_v3_get_vmcr(struct 
kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) 115 + { 116 + u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr; 117 + 118 + vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT; 119 + vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; 120 + vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; 121 + vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT; 122 + } 123 + 124 + static void vgic_v3_enable_underflow(struct kvm_vcpu *vcpu) 125 + { 126 + vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr |= ICH_HCR_UIE; 127 + } 128 + 129 + static void vgic_v3_disable_underflow(struct kvm_vcpu *vcpu) 130 + { 131 + vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr &= ~ICH_HCR_UIE; 132 + } 133 + 134 + static void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) 135 + { 136 + u32 vmcr; 137 + 138 + vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK; 139 + vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK; 140 + vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK; 141 + vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK; 142 + 143 + vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr; 144 + } 145 + 146 + static void vgic_v3_enable(struct kvm_vcpu *vcpu) 147 + { 148 + /* 149 + * By forcing VMCR to zero, the GIC will restore the binary 150 + * points to their reset values. Anything else resets to zero 151 + * anyway. 152 + */ 153 + vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = 0; 154 + 155 + /* Get the show on the road... */ 156 + vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr = ICH_HCR_EN; 157 + } 158 + 159 + static const struct vgic_ops vgic_v3_ops = { 160 + .get_lr = vgic_v3_get_lr, 161 + .set_lr = vgic_v3_set_lr, 162 + .sync_lr_elrsr = vgic_v3_sync_lr_elrsr, 163 + .get_elrsr = vgic_v3_get_elrsr, 164 + .get_eisr = vgic_v3_get_eisr, 165 + .get_interrupt_status = vgic_v3_get_interrupt_status, 166 + .enable_underflow = vgic_v3_enable_underflow, 167 + .disable_underflow = vgic_v3_disable_underflow, 168 + .get_vmcr = vgic_v3_get_vmcr, 169 + .set_vmcr = vgic_v3_set_vmcr, 170 + .enable = vgic_v3_enable, 171 + }; 172 + 173 + static struct vgic_params vgic_v3_params; 174 + 175 + /** 176 + * vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT 177 + * @vgic_node: pointer to the DT node 178 + * @ops: address of a pointer to the GICv3 operations 179 + * @params: address of a pointer to HW-specific parameters 180 + * 181 + * Returns 0 if a GICv3 has been found, with the low level operations 182 + * in *ops and the HW parameters in *params. Returns an error code 183 + * otherwise. 184 + */ 185 + int vgic_v3_probe(struct device_node *vgic_node, 186 + const struct vgic_ops **ops, 187 + const struct vgic_params **params) 188 + { 189 + int ret = 0; 190 + u32 gicv_idx; 191 + struct resource vcpu_res; 192 + struct vgic_params *vgic = &vgic_v3_params; 193 + 194 + vgic->maint_irq = irq_of_parse_and_map(vgic_node, 0); 195 + if (!vgic->maint_irq) { 196 + kvm_err("error getting vgic maintenance irq from DT\n"); 197 + ret = -ENXIO; 198 + goto out; 199 + } 200 + 201 + ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2); 202 + 203 + /* 204 + * The ListRegs field is 5 bits, but there is an architectural 205 + * maximum of 16 list registers. Just ignore bit 4... 
206 + */ 207 + vgic->nr_lr = (ich_vtr_el2 & 0xf) + 1; 208 + 209 + if (of_property_read_u32(vgic_node, "#redistributor-regions", &gicv_idx)) 210 + gicv_idx = 1; 211 + 212 + gicv_idx += 3; /* Also skip GICD, GICC, GICH */ 213 + if (of_address_to_resource(vgic_node, gicv_idx, &vcpu_res)) { 214 + kvm_err("Cannot obtain GICV region\n"); 215 + ret = -ENXIO; 216 + goto out; 217 + } 218 + 219 + if (!PAGE_ALIGNED(vcpu_res.start)) { 220 + kvm_err("GICV physical address 0x%llx not page aligned\n", 221 + (unsigned long long)vcpu_res.start); 222 + ret = -ENXIO; 223 + goto out; 224 + } 225 + 226 + if (!PAGE_ALIGNED(resource_size(&vcpu_res))) { 227 + kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n", 228 + (unsigned long long)resource_size(&vcpu_res), 229 + PAGE_SIZE); 230 + ret = -ENXIO; 231 + goto out; 232 + } 233 + 234 + vgic->vcpu_base = vcpu_res.start; 235 + vgic->vctrl_base = NULL; 236 + vgic->type = VGIC_V3; 237 + 238 + kvm_info("%s@%llx IRQ%d\n", vgic_node->name, 239 + vcpu_res.start, vgic->maint_irq); 240 + 241 + *ops = &vgic_v3_ops; 242 + *params = vgic; 243 + 244 + out: 245 + of_node_put(vgic_node); 246 + return ret; 247 + }
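
Review note: the LR_INDEX() macro above encodes a real trap for reviewers: the hyp save/restore code stores ICH_LR<n>_EL2 starting from the highest-numbered register, so logical LR 0 lives at the end of the vgic_lr[] array. A minimal sketch of the indexing, assuming the architectural maximum of 16 list registers mentioned in the probe comment (the names here are illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

#define MAX_LRS		16			/* stands in for VGIC_V3_MAX_LRS */
#define LR_INDEX(lr)	(MAX_LRS - 1 - (lr))	/* reverse storage order */

int main(void)
{
	uint64_t vgic_lr[MAX_LRS] = { 0 };

	vgic_lr[LR_INDEX(0)] = 0xaa;	/* arbitrary sentinel values */
	vgic_lr[LR_INDEX(15)] = 0xbb;

	assert(vgic_lr[15] == 0xaa);	/* logical LR 0 is stored last */
	assert(vgic_lr[0] == 0xbb);	/* logical LR 15 is stored first */
	return 0;
}

One nit worth noting: the in-kernel macro expands lr without inner parentheses, which is harmless for the plain loop indices it is given today, but would mis-associate if it were ever passed an additive expression.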
+213 -176
virt/kvm/arm/vgic.c
··· 76 76 #define IMPLEMENTER_ARM 0x43b 77 77 #define GICC_ARCH_VERSION_V2 0x2 78 78 79 - /* Physical address of vgic virtual cpu interface */ 80 - static phys_addr_t vgic_vcpu_base; 81 - 82 - /* Virtual control interface base address */ 83 - static void __iomem *vgic_vctrl_base; 84 - 85 - static struct device_node *vgic_node; 86 - 87 79 #define ACCESS_READ_VALUE (1 << 0) 88 80 #define ACCESS_READ_RAZ (0 << 0) 89 81 #define ACCESS_READ_MASK(x) ((x) & (1 << 0)) ··· 86 94 #define ACCESS_WRITE_MASK(x) ((x) & (3 << 1)) 87 95 88 96 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); 97 + static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); 89 98 static void vgic_update_state(struct kvm *kvm); 90 99 static void vgic_kick_vcpus(struct kvm *kvm); 91 100 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); 92 - static u32 vgic_nr_lr; 101 + static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); 102 + static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); 103 + static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 104 + static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); 93 105 94 - static unsigned int vgic_maint_irq; 106 + static const struct vgic_ops *vgic_ops; 107 + static const struct vgic_params *vgic; 108 + 109 + /* 110 + * struct vgic_bitmap contains unions that provide two views of 111 + * the same data. In one case it is an array of registers of 112 + * u32's, and in the other case it is a bitmap of unsigned 113 + * longs. 114 + * 115 + * This does not work on 64-bit BE systems, because the bitmap access 116 + * will store two consecutive 32-bit words with the higher-addressed 117 + * register's bits at the lower index and the lower-addressed register's 118 + * bits at the higher index. 119 + * 120 + * Therefore, swizzle the register index when accessing the 32-bit word 121 + * registers to access the right register's value. 
122 + */ 123 + #if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 64 124 + #define REG_OFFSET_SWIZZLE 1 125 + #else 126 + #define REG_OFFSET_SWIZZLE 0 127 + #endif 95 128 96 129 static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, 97 130 int cpuid, u32 offset) 98 131 { 99 132 offset >>= 2; 100 133 if (!offset) 101 - return x->percpu[cpuid].reg; 134 + return x->percpu[cpuid].reg + (offset ^ REG_OFFSET_SWIZZLE); 102 135 else 103 - return x->shared.reg + offset - 1; 136 + return x->shared.reg + ((offset - 1) ^ REG_OFFSET_SWIZZLE); 104 137 } 105 138 106 139 static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x, ··· 258 241 259 242 static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) 260 243 { 261 - return *((u32 *)mmio->data) & mask; 244 + return le32_to_cpu(*((u32 *)mmio->data)) & mask; 262 245 } 263 246 264 247 static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value) 265 248 { 266 - *((u32 *)mmio->data) = value & mask; 249 + *((u32 *)mmio->data) = cpu_to_le32(value) & mask; 267 250 } 268 251 269 252 /** ··· 610 593 return false; 611 594 } 612 595 613 - #define LR_CPUID(lr) \ 614 - (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT) 615 - #define LR_IRQID(lr) \ 616 - ((lr) & GICH_LR_VIRTUALID) 617 - 618 - static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu) 619 - { 620 - clear_bit(lr_nr, vgic_cpu->lr_used); 621 - vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE; 622 - vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; 623 - } 624 - 625 596 /** 626 597 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor 627 598 * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs ··· 627 622 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 628 623 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 629 624 int vcpu_id = vcpu->vcpu_id; 630 - int i, irq, source_cpu; 631 - u32 *lr; 625 + int i; 632 626 633 627 for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) { 634 - lr = &vgic_cpu->vgic_lr[i]; 635 - irq = LR_IRQID(*lr); 636 - source_cpu = LR_CPUID(*lr); 628 + struct vgic_lr lr = vgic_get_lr(vcpu, i); 637 629 638 630 /* 639 631 * There are three options for the state bits: ··· 642 640 * If the LR holds only an active interrupt (not pending) then 643 641 * just leave it alone. 644 642 */ 645 - if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT) 643 + if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE) 646 644 continue; 647 645 648 646 /* ··· 651 649 * is fine, then we are only setting a few bits that were 652 650 * already set. 653 651 */ 654 - vgic_dist_irq_set(vcpu, irq); 655 - if (irq < VGIC_NR_SGIS) 656 - dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu; 657 - *lr &= ~GICH_LR_PENDING_BIT; 652 + vgic_dist_irq_set(vcpu, lr.irq); 653 + if (lr.irq < VGIC_NR_SGIS) 654 + dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source; 655 + lr.state &= ~LR_STATE_PENDING; 656 + vgic_set_lr(vcpu, i, lr); 658 657 659 658 /* 660 659 * If there's no state left on the LR (it could still be 661 660 * active), then the LR does not hold any useful info and can 662 661 * be marked as free for other use. 663 662 */ 664 - if (!(*lr & GICH_LR_STATE)) 665 - vgic_retire_lr(i, irq, vgic_cpu); 663 + if (!(lr.state & LR_STATE_MASK)) 664 + vgic_retire_lr(i, lr.irq, vcpu); 666 665 667 666 /* Finally update the VGIC state. 
*/ 668 667 vgic_update_state(vcpu->kvm); ··· 992 989 } 993 990 } 994 991 995 - #define MK_LR_PEND(src, irq) \ 996 - (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq)) 992 + static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr) 993 + { 994 + return vgic_ops->get_lr(vcpu, lr); 995 + } 996 + 997 + static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, 998 + struct vgic_lr vlr) 999 + { 1000 + vgic_ops->set_lr(vcpu, lr, vlr); 1001 + } 1002 + 1003 + static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr, 1004 + struct vgic_lr vlr) 1005 + { 1006 + vgic_ops->sync_lr_elrsr(vcpu, lr, vlr); 1007 + } 1008 + 1009 + static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu) 1010 + { 1011 + return vgic_ops->get_elrsr(vcpu); 1012 + } 1013 + 1014 + static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu) 1015 + { 1016 + return vgic_ops->get_eisr(vcpu); 1017 + } 1018 + 1019 + static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu) 1020 + { 1021 + return vgic_ops->get_interrupt_status(vcpu); 1022 + } 1023 + 1024 + static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu) 1025 + { 1026 + vgic_ops->enable_underflow(vcpu); 1027 + } 1028 + 1029 + static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu) 1030 + { 1031 + vgic_ops->disable_underflow(vcpu); 1032 + } 1033 + 1034 + static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) 1035 + { 1036 + vgic_ops->get_vmcr(vcpu, vmcr); 1037 + } 1038 + 1039 + static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) 1040 + { 1041 + vgic_ops->set_vmcr(vcpu, vmcr); 1042 + } 1043 + 1044 + static inline void vgic_enable(struct kvm_vcpu *vcpu) 1045 + { 1046 + vgic_ops->enable(vcpu); 1047 + } 1048 + 1049 + static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) 1050 + { 1051 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1052 + struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr); 1053 + 1054 + vlr.state = 0; 1055 + vgic_set_lr(vcpu, lr_nr, vlr); 1056 + clear_bit(lr_nr, vgic_cpu->lr_used); 1057 + vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; 1058 + } 997 1059 998 1060 /* 999 1061 * An interrupt may have been disabled after being made pending on the ··· 1074 1006 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1075 1007 int lr; 1076 1008 1077 - for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) { 1078 - int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID; 1009 + for_each_set_bit(lr, vgic_cpu->lr_used, vgic->nr_lr) { 1010 + struct vgic_lr vlr = vgic_get_lr(vcpu, lr); 1079 1011 1080 - if (!vgic_irq_is_enabled(vcpu, irq)) { 1081 - vgic_retire_lr(lr, irq, vgic_cpu); 1082 - if (vgic_irq_is_active(vcpu, irq)) 1083 - vgic_irq_clear_active(vcpu, irq); 1012 + if (!vgic_irq_is_enabled(vcpu, vlr.irq)) { 1013 + vgic_retire_lr(lr, vlr.irq, vcpu); 1014 + if (vgic_irq_is_active(vcpu, vlr.irq)) 1015 + vgic_irq_clear_active(vcpu, vlr.irq); 1084 1016 } 1085 1017 } 1086 1018 } ··· 1092 1024 static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) 1093 1025 { 1094 1026 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1027 + struct vgic_lr vlr; 1095 1028 int lr; 1096 1029 1097 1030 /* Sanitize the input... */ ··· 1105 1036 lr = vgic_cpu->vgic_irq_lr_map[irq]; 1106 1037 1107 1038 /* Do we have an active interrupt for the same CPUID? 
*/ 1108 - if (lr != LR_EMPTY && 1109 - (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) { 1110 - kvm_debug("LR%d piggyback for IRQ%d %x\n", 1111 - lr, irq, vgic_cpu->vgic_lr[lr]); 1112 - BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); 1113 - vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; 1114 - return true; 1039 + if (lr != LR_EMPTY) { 1040 + vlr = vgic_get_lr(vcpu, lr); 1041 + if (vlr.source == sgi_source_id) { 1042 + kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq); 1043 + BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); 1044 + vlr.state |= LR_STATE_PENDING; 1045 + vgic_set_lr(vcpu, lr, vlr); 1046 + return true; 1047 + } 1115 1048 } 1116 1049 1117 1050 /* Try to use another LR for this interrupt */ 1118 1051 lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used, 1119 - vgic_cpu->nr_lr); 1120 - if (lr >= vgic_cpu->nr_lr) 1052 + vgic->nr_lr); 1053 + if (lr >= vgic->nr_lr) 1121 1054 return false; 1122 1055 1123 1056 kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id); 1124 - vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq); 1125 1057 vgic_cpu->vgic_irq_lr_map[irq] = lr; 1126 1058 set_bit(lr, vgic_cpu->lr_used); 1127 1059 1060 + vlr.irq = irq; 1061 + vlr.source = sgi_source_id; 1062 + vlr.state = LR_STATE_PENDING; 1128 1063 if (!vgic_irq_is_edge(vcpu, irq)) 1129 - vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; 1064 + vlr.state |= LR_EOI_INT; 1065 + 1066 + vgic_set_lr(vcpu, lr, vlr); 1130 1067 1131 1068 return true; 1132 1069 } ··· 1230 1155 1231 1156 epilog: 1232 1157 if (overflow) { 1233 - vgic_cpu->vgic_hcr |= GICH_HCR_UIE; 1158 + vgic_enable_underflow(vcpu); 1234 1159 } else { 1235 - vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE; 1160 + vgic_disable_underflow(vcpu); 1236 1161 /* 1237 1162 * We're about to run this VCPU, and we've consumed 1238 1163 * everything the distributor had in store for ··· 1245 1170 1246 1171 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) 1247 1172 { 1248 - struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1173 + u32 status = vgic_get_interrupt_status(vcpu); 1249 1174 bool level_pending = false; 1250 1175 1251 - kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); 1176 + kvm_debug("STATUS = %08x\n", status); 1252 1177 1253 - if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { 1178 + if (status & INT_STATUS_EOI) { 1254 1179 /* 1255 1180 * Some level interrupts have been EOIed. Clear their 1256 1181 * active bit. 1257 1182 */ 1258 - int lr, irq; 1183 + u64 eisr = vgic_get_eisr(vcpu); 1184 + unsigned long *eisr_ptr = (unsigned long *)&eisr; 1185 + int lr; 1259 1186 1260 - for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr, 1261 - vgic_cpu->nr_lr) { 1262 - irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID; 1187 + for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) { 1188 + struct vgic_lr vlr = vgic_get_lr(vcpu, lr); 1263 1189 1264 - vgic_irq_clear_active(vcpu, irq); 1265 - vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI; 1190 + vgic_irq_clear_active(vcpu, vlr.irq); 1191 + WARN_ON(vlr.state & LR_STATE_MASK); 1192 + vlr.state = 0; 1193 + vgic_set_lr(vcpu, lr, vlr); 1266 1194 1267 1195 /* Any additional pending interrupt? */ 1268 - if (vgic_dist_irq_is_pending(vcpu, irq)) { 1269 - vgic_cpu_irq_set(vcpu, irq); 1196 + if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) { 1197 + vgic_cpu_irq_set(vcpu, vlr.irq); 1270 1198 level_pending = true; 1271 1199 } else { 1272 - vgic_cpu_irq_clear(vcpu, irq); 1200 + vgic_cpu_irq_clear(vcpu, vlr.irq); 1273 1201 } 1274 1202 1275 1203 /* 1276 1204 * Despite being EOIed, the LR may not have 1277 1205 * been marked as empty. 
1278 1206 */ 1279 - set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); 1280 - vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT; 1207 + vgic_sync_lr_elrsr(vcpu, lr, vlr); 1281 1208 } 1282 1209 } 1283 1210 1284 - if (vgic_cpu->vgic_misr & GICH_MISR_U) 1285 - vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE; 1211 + if (status & INT_STATUS_UNDERFLOW) 1212 + vgic_disable_underflow(vcpu); 1286 1213 1287 1214 return level_pending; 1288 1215 } ··· 1297 1220 { 1298 1221 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1299 1222 struct vgic_dist *dist = &vcpu->kvm->arch.vgic; 1223 + u64 elrsr; 1224 + unsigned long *elrsr_ptr; 1300 1225 int lr, pending; 1301 1226 bool level_pending; 1302 1227 1303 1228 level_pending = vgic_process_maintenance(vcpu); 1229 + elrsr = vgic_get_elrsr(vcpu); 1230 + elrsr_ptr = (unsigned long *)&elrsr; 1304 1231 1305 1232 /* Clear mappings for empty LRs */ 1306 - for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr, 1307 - vgic_cpu->nr_lr) { 1308 - int irq; 1233 + for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) { 1234 + struct vgic_lr vlr; 1309 1235 1310 1236 if (!test_and_clear_bit(lr, vgic_cpu->lr_used)) 1311 1237 continue; 1312 1238 1313 - irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID; 1239 + vlr = vgic_get_lr(vcpu, lr); 1314 1240 1315 - BUG_ON(irq >= VGIC_NR_IRQS); 1316 - vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; 1241 + BUG_ON(vlr.irq >= VGIC_NR_IRQS); 1242 + vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY; 1317 1243 } 1318 1244 1319 1245 /* Check if we still have something up our sleeve... */ 1320 - pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr, 1321 - vgic_cpu->nr_lr); 1322 - if (level_pending || pending < vgic_cpu->nr_lr) 1246 + pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr); 1247 + if (level_pending || pending < vgic->nr_lr) 1323 1248 set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu); 1324 1249 } 1325 1250 ··· 1511 1432 } 1512 1433 1513 1434 /* 1514 - * By forcing VMCR to zero, the GIC will restore the binary 1515 - * points to their reset values. Anything else resets to zero 1516 - * anyway. 1435 + * Store the number of LRs per vcpu, so we don't have to go 1436 + * all the way to the distributor structure to find out. Only 1437 + * assembly code should use this one. 1517 1438 */ 1518 - vgic_cpu->vgic_vmcr = 0; 1439 + vgic_cpu->nr_lr = vgic->nr_lr; 1519 1440 1520 - vgic_cpu->nr_lr = vgic_nr_lr; 1521 - vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... 
*/ 1441 + vgic_enable(vcpu); 1522 1442 1523 1443 return 0; 1524 1444 } 1525 1445 1526 1446 static void vgic_init_maintenance_interrupt(void *info) 1527 1447 { 1528 - enable_percpu_irq(vgic_maint_irq, 0); 1448 + enable_percpu_irq(vgic->maint_irq, 0); 1529 1449 } 1530 1450 1531 1451 static int vgic_cpu_notify(struct notifier_block *self, ··· 1537 1459 break; 1538 1460 case CPU_DYING: 1539 1461 case CPU_DYING_FROZEN: 1540 - disable_percpu_irq(vgic_maint_irq); 1462 + disable_percpu_irq(vgic->maint_irq); 1541 1463 break; 1542 1464 } 1543 1465 ··· 1548 1470 .notifier_call = vgic_cpu_notify, 1549 1471 }; 1550 1472 1473 + static const struct of_device_id vgic_ids[] = { 1474 + { .compatible = "arm,cortex-a15-gic", .data = vgic_v2_probe, }, 1475 + { .compatible = "arm,gic-v3", .data = vgic_v3_probe, }, 1476 + {}, 1477 + }; 1478 + 1551 1479 int kvm_vgic_hyp_init(void) 1552 1480 { 1481 + const struct of_device_id *matched_id; 1482 + int (*vgic_probe)(struct device_node *, const struct vgic_ops **, 1483 + const struct vgic_params **); 1484 + struct device_node *vgic_node; 1553 1485 int ret; 1554 - struct resource vctrl_res; 1555 - struct resource vcpu_res; 1556 1486 1557 - vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic"); 1487 + vgic_node = of_find_matching_node_and_match(NULL, 1488 + vgic_ids, &matched_id); 1558 1489 if (!vgic_node) { 1559 - kvm_err("error: no compatible vgic node in DT\n"); 1490 + kvm_err("error: no compatible GIC node found\n"); 1560 1491 return -ENODEV; 1561 1492 } 1562 1493 1563 - vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0); 1564 - if (!vgic_maint_irq) { 1565 - kvm_err("error getting vgic maintenance irq from DT\n"); 1566 - ret = -ENXIO; 1567 - goto out; 1568 - } 1494 + vgic_probe = matched_id->data; 1495 + ret = vgic_probe(vgic_node, &vgic_ops, &vgic); 1496 + if (ret) 1497 + return ret; 1569 1498 1570 - ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler, 1499 + ret = request_percpu_irq(vgic->maint_irq, vgic_maintenance_handler, 1571 1500 "vgic", kvm_get_running_vcpus()); 1572 1501 if (ret) { 1573 - kvm_err("Cannot register interrupt %d\n", vgic_maint_irq); 1574 - goto out; 1502 + kvm_err("Cannot register interrupt %d\n", vgic->maint_irq); 1503 + return ret; 1575 1504 } 1576 1505 1577 1506 ret = __register_cpu_notifier(&vgic_cpu_nb); ··· 1587 1502 goto out_free_irq; 1588 1503 } 1589 1504 1590 - ret = of_address_to_resource(vgic_node, 2, &vctrl_res); 1591 - if (ret) { 1592 - kvm_err("Cannot obtain VCTRL resource\n"); 1593 - goto out_free_irq; 1594 - } 1505 + /* Callback into arch code for setup */ 1506 + vgic_arch_setup(vgic); 1595 1507 1596 - vgic_vctrl_base = of_iomap(vgic_node, 2); 1597 - if (!vgic_vctrl_base) { 1598 - kvm_err("Cannot ioremap VCTRL\n"); 1599 - ret = -ENOMEM; 1600 - goto out_free_irq; 1601 - } 1602 - 1603 - vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR); 1604 - vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1; 1605 - 1606 - ret = create_hyp_io_mappings(vgic_vctrl_base, 1607 - vgic_vctrl_base + resource_size(&vctrl_res), 1608 - vctrl_res.start); 1609 - if (ret) { 1610 - kvm_err("Cannot map VCTRL into hyp\n"); 1611 - goto out_unmap; 1612 - } 1613 - 1614 - if (of_address_to_resource(vgic_node, 3, &vcpu_res)) { 1615 - kvm_err("Cannot obtain VCPU resource\n"); 1616 - ret = -ENXIO; 1617 - goto out_unmap; 1618 - } 1619 - 1620 - if (!PAGE_ALIGNED(vcpu_res.start)) { 1621 - kvm_err("GICV physical address 0x%llx not page aligned\n", 1622 - (unsigned long long)vcpu_res.start); 1623 - ret = -ENXIO; 1624 - goto out_unmap; 1625 - }
1626 - 1627 - if (!PAGE_ALIGNED(resource_size(&vcpu_res))) { 1628 - kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n", 1629 - (unsigned long long)resource_size(&vcpu_res), 1630 - PAGE_SIZE); 1631 - ret = -ENXIO; 1632 - goto out_unmap; 1633 - } 1634 - 1635 - vgic_vcpu_base = vcpu_res.start; 1636 - 1637 - kvm_info("%s@%llx IRQ%d\n", vgic_node->name, 1638 - vctrl_res.start, vgic_maint_irq); 1639 1508 on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); 1640 1509 1641 - goto out; 1510 + return 0; 1642 1511 1643 - out_unmap: 1644 - iounmap(vgic_vctrl_base); 1645 1512 out_free_irq: 1646 - free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus()); 1647 - out: 1648 - of_node_put(vgic_node); 1513 + free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus()); 1649 1514 return ret; 1650 1515 } 1651 1516 ··· 1628 1593 } 1629 1594 1630 1595 ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base, 1631 - vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE); 1596 + vgic->vcpu_base, KVM_VGIC_V2_CPU_SIZE); 1632 1597 if (ret) { 1633 1598 kvm_err("Unable to remap VGIC CPU to VCPU\n"); 1634 1599 goto out; ··· 1674 1639 } 1675 1640 1676 1641 spin_lock_init(&kvm->arch.vgic.lock); 1677 - kvm->arch.vgic.vctrl_base = vgic_vctrl_base; 1642 + kvm->arch.vgic.in_kernel = true; 1643 + kvm->arch.vgic.vctrl_base = vgic->vctrl_base; 1678 1644 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; 1679 1645 kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF; 1680 1646 ··· 1774 1738 static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu, 1775 1739 struct kvm_exit_mmio *mmio, phys_addr_t offset) 1776 1740 { 1777 - struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 1778 - u32 reg, mask = 0, shift = 0; 1779 1741 bool updated = false; 1742 + struct vgic_vmcr vmcr; 1743 + u32 *vmcr_field; 1744 + u32 reg; 1745 + 1746 + vgic_get_vmcr(vcpu, &vmcr); 1780 1747 1781 1748 switch (offset & ~0x3) { 1782 1749 case GIC_CPU_CTRL: 1783 - mask = GICH_VMCR_CTRL_MASK; 1784 - shift = GICH_VMCR_CTRL_SHIFT; 1750 + vmcr_field = &vmcr.ctlr; 1785 1751 break; 1786 1752 case GIC_CPU_PRIMASK: 1787 - mask = GICH_VMCR_PRIMASK_MASK; 1788 - shift = GICH_VMCR_PRIMASK_SHIFT; 1753 + vmcr_field = &vmcr.pmr; 1789 1754 break; 1790 1755 case GIC_CPU_BINPOINT: 1791 - mask = GICH_VMCR_BINPOINT_MASK; 1792 - shift = GICH_VMCR_BINPOINT_SHIFT; 1756 + vmcr_field = &vmcr.bpr; 1793 1757 break; 1794 1758 case GIC_CPU_ALIAS_BINPOINT: 1795 - mask = GICH_VMCR_ALIAS_BINPOINT_MASK; 1796 - shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT; 1759 + vmcr_field = &vmcr.abpr; 1797 1760 break; 1761 + default: 1762 + BUG(); 1798 1763 } 1799 1764 1800 1765 if (!mmio->is_write) { 1801 - reg = (vgic_cpu->vgic_vmcr & mask) >> shift; 1766 + reg = *vmcr_field; 1802 1767 mmio_data_write(mmio, ~0, reg); 1803 1768 } else { 1804 1769 reg = mmio_data_read(mmio, ~0); 1805 - reg = (reg << shift) & mask; 1806 - if (reg != (vgic_cpu->vgic_vmcr & mask)) 1770 + if (reg != *vmcr_field) { 1771 + *vmcr_field = reg; 1772 + vgic_set_vmcr(vcpu, &vmcr); 1807 1773 updated = true; 1808 - vgic_cpu->vgic_vmcr &= ~mask; 1809 - vgic_cpu->vgic_vmcr |= reg; 1774 + } 1810 1775 } 1811 1776 return updated; 1812 1777 }
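
Review note: the REG_OFFSET_SWIZZLE logic near the top of this file is the subtlest piece of the big-endian work. On a 64-bit BE host, the u32 word that overlays bits [31:0] of an unsigned long is the second 32-bit word in memory, so vgic_bitmap_get_reg() XORs the word index with 1 to reach the half that actually holds a given register's bits. A compilable user-space illustration of the same aliasing (the reg_view union and the runtime endianness probe are hypothetical, not kernel code):

#include <assert.h>
#include <stdint.h>

union reg_view {
	uint64_t bits[1];	/* bitmap view: one 64-bit word */
	uint32_t reg[2];	/* register view: two u32 windows */
};

int main(void)
{
	/* swizzle is 1 on hosts that store the high half of a u64 first. */
	union reg_view probe = { .bits = { 1 } };
	int swizzle = (probe.reg[0] == 0);

	union reg_view x = { .bits = { 0 } };

	/* Set "bit 0 of register 0" through the bitmap view... */
	x.bits[0] |= 1ULL;

	/* ...and read it back through the swizzled register view. */
	assert((x.reg[0 ^ swizzle] & 1U) == 1U);
	return 0;
}

The mmio_data_read()/mmio_data_write() change earlier in this diff attacks the same problem from the other direction: by converting through le32_to_cpu()/cpu_to_le32(), the emulated distributor keeps presenting a little-endian register file to the guest regardless of host byte order.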