Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.7 · 455 lines · 14 kB
/*
 * VGICv3 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>

#include "vgic.h"
#include "vgic-mmio.h"

/* extract @num bytes at @offset bytes offset in data */
static unsigned long extract_bytes(unsigned long data, unsigned int offset,
				   unsigned int num)
{
	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}

static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 value = 0;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		if (vcpu->kvm->arch.vgic.enabled)
			value |= GICD_CTLR_ENABLE_SS_G1;
		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
		break;
	case GICD_TYPER:
		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
		break;
	case GICD_IIDR:
		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GICD_TYPER:
	case GICD_IIDR:
		return;
	}
}

static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);

	if (!irq)
		return 0;

	/* The upper word is RAZ for us. */
	if (addr & 4)
		return 0;

	return extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);
}

static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);

	if (!irq)
		return;

	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
		return;

	spin_lock(&irq->irq_lock);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

	spin_unlock(&irq->irq_lock);
}

static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
	int target_vcpu_id = vcpu->vcpu_id;
	u64 value;

	value = (mpidr & GENMASK(23, 0)) << 32;
	value |= ((target_vcpu_id & 0xffff) << 8);
	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
		value |= GICR_TYPER_LAST;

	return extract_bytes(value, addr & 7, len);
}

static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}

static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GICD_PIDR2:
		/* report a GICv3 compliant implementation */
		return 0x3b;
	}

	return 0;
}

/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Trying to set private IRQs in this block gets ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, bpi, acc)	\
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
		.access_flags = acc,					\
		.read = vgic_mmio_read_raz,				\
		.write = vgic_mmio_write_wi,				\
	}, {								\
		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
		.bits_per_irq = bpi,					\
		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
	}

static const struct vgic_register_region vgic_v3_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
		vgic_mmio_read_config, vgic_mmio_write_config, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
		vgic_mmio_read_irouter, vgic_mmio_write_irouter, 64,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};

static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};

static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICR_IGROUPR0,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ISENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ISPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_spending, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_cpending, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ISACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_sactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_cactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICFGR0,
		vgic_mmio_read_config, vgic_mmio_write_config, 8,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IGRPMODR0,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_NSACR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
};

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v3_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_64K;
}

int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t redist_base_address)
{
	int nr_vcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu;
	struct vgic_io_device *devices;
	int c, ret = 0;

	devices = kmalloc(sizeof(struct vgic_io_device) * nr_vcpus * 2,
			  GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		gpa_t rd_base = redist_base_address + c * SZ_64K * 2;
		gpa_t sgi_base = rd_base + SZ_64K;
		struct vgic_io_device *rd_dev = &devices[c * 2];
		struct vgic_io_device *sgi_dev = &devices[c * 2 + 1];

		kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
		rd_dev->base_addr = rd_base;
		rd_dev->regions = vgic_v3_rdbase_registers;
		rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
		rd_dev->redist_vcpu = vcpu;

		mutex_lock(&kvm->slots_lock);
		ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
					      SZ_64K, &rd_dev->dev);
		mutex_unlock(&kvm->slots_lock);

		if (ret)
			break;

		kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops);
		sgi_dev->base_addr = sgi_base;
		sgi_dev->regions = vgic_v3_sgibase_registers;
		sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers);
		sgi_dev->redist_vcpu = vcpu;

		mutex_lock(&kvm->slots_lock);
		ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
					      SZ_64K, &sgi_dev->dev);
		mutex_unlock(&kvm->slots_lock);
		if (ret) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
						  &rd_dev->dev);
			break;
		}
	}

	if (ret) {
		/* The current c failed, so we start with the previous one. */
		for (c--; c >= 0; c--) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
						  &devices[c * 2].dev);
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
						  &devices[c * 2 + 1].dev);
		}
		kfree(devices);
	} else {
		kvm->arch.vgic.redist_iodevs = devices;
	}

	return ret;
}

/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
	unsigned long affinity;
	int level0;

	/*
	 * Split the current VCPU's MPIDR into affinity level 0 and the
	 * rest as this is what we have to compare against.
	 */
	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
	affinity &= ~MPIDR_LEVEL_MASK;

	/* bail out if the upper three levels don't match */
	if (sgi_aff != affinity)
		return -1;

	/* Is this VCPU's bit set in the mask ? */
	if (!(sgi_cpu_mask & BIT(level0)))
		return -1;

	return level0;
}

/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))

/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * If we have handled one CPU, we clear its bit to detect early
	 * if we are already finished. This avoids iterating through all
	 * VCPUs when most of the times we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {
		struct vgic_irq *irq;

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			int level0;

			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

		spin_lock(&irq->irq_lock);
		irq->pending = true;

		vgic_queue_irq_unlock(vcpu->kvm, irq);
	}
}
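A note on the arithmetic used by the MMIO handlers above: extract_bytes() shifts the 64-bit register value right by the byte offset of the access and masks off len bytes, and the GICD_TYPER read reports ITLinesNumber as (nr_irqs / 32) - 1. The following is a minimal standalone sketch of that arithmetic, not part of the kernel file; GENMASK_ULL and extract_bytes are re-defined locally for illustration.

#include <stdio.h>
#include <stdint.h>

/* local stand-in for the kernel's GENMASK_ULL(h, l) */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* mirrors extract_bytes(): take @num bytes at byte @offset of @data */
static unsigned long long extract_bytes(unsigned long long data,
					unsigned int offset, unsigned int num)
{
	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}

int main(void)
{
	/* a 32-bit read at byte offset 4 of a 64-bit GICR_TYPER-like value */
	unsigned long long typer = 0x000000af00000010ULL;
	printf("upper word: 0x%llx\n", extract_bytes(typer, 4, 4)); /* 0xaf */

	/* GICD_TYPER.ITLinesNumber for 224 SPIs plus the 32 private IRQs */
	unsigned int nr_irqs = 224 + 32;	   /* nr_spis + VGIC_NR_PRIVATE_IRQS */
	unsigned int itlines = (nr_irqs >> 5) - 1; /* 7, i.e. 8 * 32 = 256 IRQ IDs */
	printf("ITLinesNumber: %u\n", itlines);
	return 0;
}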
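Similarly, REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED() splits each distributor per-IRQ register range into a RAZ/WI window covering the 32 private interrupts and a live window covering the shared ones, with the byte offsets following directly from the bits-per-IRQ value. A quick standalone check of that arithmetic (again not kernel code, just the macro's length formulas replayed in userspace):

#include <stdio.h>

#define VGIC_NR_PRIVATE_IRQS 32	/* SGIs + PPIs, as in the vgic headers */

int main(void)
{
	/* bits-per-IRQ values used by the distributor table above */
	unsigned int bpi[] = { 1, 2, 8, 64 };
	unsigned int i;

	for (i = 0; i < sizeof(bpi) / sizeof(bpi[0]); i++) {
		unsigned int b = bpi[i];
		/* RAZ/WI window covering the private IRQs */
		unsigned int priv_len = (b * VGIC_NR_PRIVATE_IRQS) / 8;
		/* live window covering the remaining (shared) IRQs */
		unsigned int shared_len = (b * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8;

		printf("bpi=%2u: private %3u bytes, shared starts at +0x%x and spans %u bytes\n",
		       b, priv_len, priv_len, shared_len);
	}
	return 0;
}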