Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm/arm64: KVM: rework MPIDR assignment and add accessors

The virtual MPIDR registers (containing topology information) for the
guest are currently mapped linearly to the vcpu_id. Improve this
mapping for arm64 by using three levels to not artificially limit the
number of vCPUs.
To help this, change and rename the kvm_vcpu_get_mpidr() function to
mask off the non-affinity bits in the MPIDR register.
Also add an accessor to later allow easier access to a vCPU with a
given MPIDR. Use this new accessor in the PSCI emulation.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>

authored by

Andre Przywara and committed by
Christoffer Dall
4429fc64 7276030a

+39 -18
+3 -2
arch/arm/include/asm/kvm_emulate.h
··· 23 23 #include <asm/kvm_asm.h> 24 24 #include <asm/kvm_mmio.h> 25 25 #include <asm/kvm_arm.h> 26 + #include <asm/cputype.h> 26 27 27 28 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); 28 29 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); ··· 168 167 return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK; 169 168 } 170 169 171 - static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) 170 + static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) 172 171 { 173 - return vcpu->arch.cp15[c0_MPIDR]; 172 + return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK; 174 173 } 175 174 176 175 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+2
arch/arm/include/asm/kvm_host.h
··· 236 236 237 237 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot); 238 238 239 + struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); 240 + 239 241 static inline void kvm_arch_hardware_disable(void) {} 240 242 static inline void kvm_arch_hardware_unsetup(void) {} 241 243 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+13
arch/arm/kvm/arm.c
··· 1075 1075 *(int *)ret = kvm_target_cpu(); 1076 1076 } 1077 1077 1078 + struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) 1079 + { 1080 + struct kvm_vcpu *vcpu; 1081 + int i; 1082 + 1083 + mpidr &= MPIDR_HWID_BITMASK; 1084 + kvm_for_each_vcpu(i, vcpu, kvm) { 1085 + if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu)) 1086 + return vcpu; 1087 + } 1088 + return NULL; 1089 + } 1090 + 1078 1091 /** 1079 1092 * Initialize Hyp-mode and memory mappings on all CPUs. 1080 1093 */
+5 -12
arch/arm/kvm/psci.c
··· 22 22 #include <asm/cputype.h> 23 23 #include <asm/kvm_emulate.h> 24 24 #include <asm/kvm_psci.h> 25 + #include <asm/kvm_host.h> 25 26 26 27 /* 27 28 * This is an implementation of the Power State Coordination Interface ··· 67 66 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) 68 67 { 69 68 struct kvm *kvm = source_vcpu->kvm; 70 - struct kvm_vcpu *vcpu = NULL, *tmp; 69 + struct kvm_vcpu *vcpu = NULL; 71 70 wait_queue_head_t *wq; 72 71 unsigned long cpu_id; 73 72 unsigned long context_id; 74 - unsigned long mpidr; 75 73 phys_addr_t target_pc; 76 - int i; 77 74 78 - cpu_id = *vcpu_reg(source_vcpu, 1); 75 + cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK; 79 76 if (vcpu_mode_is_32bit(source_vcpu)) 80 77 cpu_id &= ~((u32) 0); 81 78 82 - kvm_for_each_vcpu(i, tmp, kvm) { 83 - mpidr = kvm_vcpu_get_mpidr(tmp); 84 - if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) { 85 - vcpu = tmp; 86 - break; 87 - } 88 - } 79 + vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id); 89 80 90 81 /* 91 82 * Make sure the caller requested a valid CPU and that the CPU is ··· 148 155 * then ON else OFF 149 156 */ 150 157 kvm_for_each_vcpu(i, tmp, kvm) { 151 - mpidr = kvm_vcpu_get_mpidr(tmp); 158 + mpidr = kvm_vcpu_get_mpidr_aff(tmp); 152 159 if (((mpidr & target_affinity_mask) == target_affinity) && 153 160 !tmp->arch.pause) { 154 161 return PSCI_0_2_AFFINITY_LEVEL_ON;
+3 -2
arch/arm64/include/asm/kvm_emulate.h
··· 27 27 #include <asm/kvm_arm.h> 28 28 #include <asm/kvm_mmio.h> 29 29 #include <asm/ptrace.h> 30 + #include <asm/cputype.h> 30 31 31 32 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num); 32 33 unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu); ··· 193 192 return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; 194 193 } 195 194 196 - static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) 195 + static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) 197 196 { 198 - return vcpu_sys_reg(vcpu, MPIDR_EL1); 197 + return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK; 199 198 } 200 199 201 200 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
+2
arch/arm64/include/asm/kvm_host.h
··· 207 207 int kvm_perf_init(void); 208 208 int kvm_perf_teardown(void); 209 209 210 + struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); 211 + 210 212 static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, 211 213 phys_addr_t pgd_ptr, 212 214 unsigned long hyp_stack_ptr,
+11 -2
arch/arm64/kvm/sys_regs.c
··· 252 252 253 253 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) 254 254 { 255 + u64 mpidr; 256 + 255 257 /* 256 - * Simply map the vcpu_id into the Aff0 field of the MPIDR. 258 + * Map the vcpu_id into the first three affinity level fields of 259 + * the MPIDR. We limit the number of VCPUs in level 0 due to a 260 + * limitation to 16 CPUs in that level in the ICC_SGIxR registers 261 + * of the GICv3 to be able to address each CPU directly when 262 + * sending IPIs. 257 263 */ 258 - vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff); 264 + mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); 265 + mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); 266 + mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); 267 + vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr; 259 268 } 260 269 261 270 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */