
arm/arm64: KVM: refactor MMIO accessors

The MMIO accessors for GICD_I[CS]ENABLER, GICD_I[CS]PENDR and
GICD_ICFGR behave very similarly for GICv2 and GICv3, although the way
the affected VCPU is determined differs.
Since we will need to access these registers from three different
places in the future, we factor out a generic, backend-facing
implementation and use small wrappers in the current GICv2 emulation.
This will ease adding GICv3 accessors later.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>

Authored by Andre Przywara, committed by Christoffer Dall
d97f683d 2f5fa41a

+74 -52
virt/kvm/arm/vgic.c
···
 	return false;
 }
 
-static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
-				       struct kvm_exit_mmio *mmio,
-				       phys_addr_t offset)
+static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
+				   phys_addr_t offset, int vcpu_id, int access)
 {
-	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
-				       vcpu->vcpu_id, offset);
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+	u32 *reg;
+	int mode = ACCESS_READ_VALUE | access;
+	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(kvm, vcpu_id);
+
+	reg = vgic_bitmap_get_reg(&kvm->arch.vgic.irq_enabled, vcpu_id, offset);
+	vgic_reg_access(mmio, reg, offset, mode);
 	if (mmio->is_write) {
-		vgic_update_state(vcpu->kvm);
+		if (access & ACCESS_WRITE_CLEARBIT) {
+			if (offset < 4) /* Force SGI enabled */
+				*reg |= 0xffff;
+			vgic_retire_disabled_irqs(target_vcpu);
+		}
+		vgic_update_state(kvm);
 		return true;
 	}
 
 	return false;
+}
+
+static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
+				       struct kvm_exit_mmio *mmio,
+				       phys_addr_t offset)
+{
+	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+				      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
 }
 
 static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
 					 struct kvm_exit_mmio *mmio,
 					 phys_addr_t offset)
 {
-	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
-				       vcpu->vcpu_id, offset);
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
-	if (mmio->is_write) {
-		if (offset < 4) /* Force SGI enabled */
-			*reg |= 0xffff;
-		vgic_retire_disabled_irqs(vcpu);
-		vgic_update_state(vcpu->kvm);
-		return true;
-	}
-
-	return false;
+	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
+				      vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT);
 }
 
-static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
+static bool vgic_handle_set_pending_reg(struct kvm *kvm,
 					struct kvm_exit_mmio *mmio,
-					phys_addr_t offset)
+					phys_addr_t offset, int vcpu_id)
 {
 	u32 *reg, orig;
 	u32 level_mask;
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT;
+	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu->vcpu_id, offset);
+	reg = vgic_bitmap_get_reg(&dist->irq_cfg, vcpu_id, offset);
 	level_mask = (~(*reg));
 
 	/* Mark both level and edge triggered irqs as pending */
-	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
+	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
 	orig = *reg;
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+	vgic_reg_access(mmio, reg, offset, mode);
 
 	if (mmio->is_write) {
 		/* Set the soft-pending flag only for level-triggered irqs */
 		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
-					  vcpu->vcpu_id, offset);
-		vgic_reg_access(mmio, reg, offset,
-				ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
+					  vcpu_id, offset);
+		vgic_reg_access(mmio, reg, offset, mode);
 		*reg &= level_mask;
 
 		/* Ignore writes to SGIs */
···
 			*reg |= orig & 0xffff;
 		}
 
-		vgic_update_state(vcpu->kvm);
+		vgic_update_state(kvm);
 		return true;
 	}
 
 	return false;
 }
 
-static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
+static bool vgic_handle_clear_pending_reg(struct kvm *kvm,
 					  struct kvm_exit_mmio *mmio,
-					  phys_addr_t offset)
+					  phys_addr_t offset, int vcpu_id)
 {
 	u32 *level_active;
 	u32 *reg, orig;
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+	int mode = ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT;
+	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu->vcpu_id, offset);
+	reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
 	orig = *reg;
-	vgic_reg_access(mmio, reg, offset,
-			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+	vgic_reg_access(mmio, reg, offset, mode);
 	if (mmio->is_write) {
 		/* Re-set level triggered level-active interrupts */
 		level_active = vgic_bitmap_get_reg(&dist->irq_level,
-						   vcpu->vcpu_id, offset);
-		reg = vgic_bitmap_get_reg(&dist->irq_pending,
-					  vcpu->vcpu_id, offset);
+						   vcpu_id, offset);
+		reg = vgic_bitmap_get_reg(&dist->irq_pending, vcpu_id, offset);
 		*reg |= *level_active;
 
 		/* Ignore writes to SGIs */
···
 
 		/* Clear soft-pending flags */
 		reg = vgic_bitmap_get_reg(&dist->irq_soft_pend,
-					  vcpu->vcpu_id, offset);
-		vgic_reg_access(mmio, reg, offset,
-				ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
+					  vcpu_id, offset);
+		vgic_reg_access(mmio, reg, offset, mode);
 
-		vgic_update_state(vcpu->kvm);
+		vgic_update_state(kvm);
 		return true;
 	}
-
 	return false;
+}
+
+static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
+					struct kvm_exit_mmio *mmio,
+					phys_addr_t offset)
+{
+	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
+					   vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
+					  struct kvm_exit_mmio *mmio,
+					  phys_addr_t offset)
+{
+	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
+					     vcpu->vcpu_id);
 }
 
 static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
···
  * LSB is always 0. As such, we only keep the upper bit, and use the
  * two above functions to compress/expand the bits
  */
-static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
-				struct kvm_exit_mmio *mmio, phys_addr_t offset)
+static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
+				phys_addr_t offset)
 {
 	u32 val;
-	u32 *reg;
-
-	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
-				  vcpu->vcpu_id, offset >> 1);
 
 	if (offset & 4)
 		val = *reg >> 16;
···
 	}
 
 	return false;
+}
+
+static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
+				struct kvm_exit_mmio *mmio, phys_addr_t offset)
+{
+	u32 *reg;
+
+	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+				  vcpu->vcpu_id, offset >> 1);
+
+	return vgic_handle_cfg_reg(reg, mmio, offset);
 }
 
 static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
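
To make the commit message's last point concrete, here is a minimal sketch of
how a later GICv3 emulation could reuse the factored-out helpers. This is an
illustration under assumptions, not code from this commit or from the actual
GICv3 patches: the *_dist handler names are invented here, and the real GICv3
code may determine the affected VCPU differently (GICv3 redistributor
registers are per-VCPU by MMIO address rather than banked by the trapping
VCPU).

/*
 * Hypothetical GICv3 wrappers (illustration only, not part of this
 * commit): each handler picks the affected VCPU and access mode, then
 * defers to the shared, backend-facing helpers introduced above.
 */
static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
					    struct kvm_exit_mmio *mmio,
					    phys_addr_t offset)
{
	/* GICD_ISENABLER: set-bit semantics on behalf of the trapping VCPU */
	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      vcpu->vcpu_id, ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	/*
	 * vgic_handle_cfg_reg() now takes the register pointer itself,
	 * so the caller can hand in a register from any bitmap.
	 */
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				       vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}

The design choice that enables this reuse is visible in the new signatures:
the backends take a struct kvm plus an explicit vcpu_id (or, for
vgic_handle_cfg_reg, a pre-looked-up register pointer) instead of the
trapping kvm_vcpu, so callers that compute the affected VCPU from the MMIO
address can call them unchanged.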