Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: KVM: common infrastructure for handling AArch32 CP14/CP15

As we're about to trap a bunch of CP14 registers, let's rework
the CP15 handling so it can be generalized and work with multiple
tables.

Reviewed-by: Anup Patel <anup.patel@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>

authored by

Marc Zyngier and committed by
Christoffer Dall
72564016 0c557ed4

+124 -31
+1 -1
arch/arm64/include/asm/kvm_asm.h
··· 95 95 #define c10_AMAIR0 (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */ 96 96 #define c10_AMAIR1 (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */ 97 97 #define c14_CNTKCTL (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */ 98 - #define NR_CP15_REGS (NR_SYS_REGS * 2) 98 + #define NR_COPRO_REGS (NR_SYS_REGS * 2) 99 99 100 100 #define ARM_EXCEPTION_IRQ 0 101 101 #define ARM_EXCEPTION_TRAP 1
+2 -1
arch/arm64/include/asm/kvm_coproc.h
··· 39 39 struct kvm_sys_reg_target_table *table); 40 40 41 41 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); 42 - int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 42 + int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 43 + int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 43 44 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 44 45 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 45 46 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+9 -4
arch/arm64/include/asm/kvm_host.h
··· 86 86 struct kvm_regs gp_regs; 87 87 union { 88 88 u64 sys_regs[NR_SYS_REGS]; 89 - u32 cp15[NR_CP15_REGS]; 89 + u32 copro[NR_COPRO_REGS]; 90 90 }; 91 91 }; 92 92 ··· 141 141 142 142 #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) 143 143 #define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) 144 - #define vcpu_cp15(v,r) ((v)->arch.ctxt.cp15[(r)]) 144 + /* 145 + * CP14 and CP15 live in the same array, as they are backed by the 146 + * same system registers. 147 + */ 148 + #define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) 149 + #define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) 145 150 146 151 #ifdef CONFIG_CPU_BIG_ENDIAN 147 - #define vcpu_cp15_64_low(v,r) ((v)->arch.ctxt.cp15[((r) + 1)]) 152 + #define vcpu_cp15_64_low(v,r) ((v)->arch.ctxt.copro[((r) + 1)]) 148 153 #else 149 - #define vcpu_cp15_64_low(v,r) ((v)->arch.ctxt.cp15[((r) + 0)]) 154 + #define vcpu_cp15_64_low(v,r) ((v)->arch.ctxt.copro[((r) + 0)]) 150 155 #endif 151 156 152 157 struct kvm_vm_stat {
+2 -2
arch/arm64/kvm/handle_exit.c
··· 73 73 [ESR_EL2_EC_WFI] = kvm_handle_wfx, 74 74 [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32, 75 75 [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64, 76 - [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_access, 76 + [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_32, 77 77 [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store, 78 - [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_access, 78 + [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_64, 79 79 [ESR_EL2_EC_HVC32] = handle_hvc, 80 80 [ESR_EL2_EC_SMC32] = handle_smc, 81 81 [ESR_EL2_EC_HVC64] = handle_hvc,
+110 -23
arch/arm64/kvm/sys_regs.c
··· 494 494 NULL, reset_val, FPEXC32_EL2, 0x70 }, 495 495 }; 496 496 497 + /* Trapped cp14 registers */ 498 + static const struct sys_reg_desc cp14_regs[] = { 499 + }; 500 + 497 501 /* 498 502 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, 499 503 * depending on the way they are accessed (as a 32bit or a 64bit ··· 605 601 return 1; 606 602 } 607 603 608 - int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) 604 + /* 605 + * emulate_cp -- tries to match a sys_reg access in a handling table, and 606 + * call the corresponding trap handler. 607 + * 608 + * @params: pointer to the descriptor of the access 609 + * @table: array of trap descriptors 610 + * @num: size of the trap descriptor array 611 + * 612 + * Return 0 if the access has been handled, and -1 if not. 613 + */ 614 + static int emulate_cp(struct kvm_vcpu *vcpu, 615 + const struct sys_reg_params *params, 616 + const struct sys_reg_desc *table, 617 + size_t num) 609 618 { 610 - kvm_inject_undefined(vcpu); 611 - return 1; 612 - } 619 + const struct sys_reg_desc *r; 613 620 614 - static void emulate_cp15(struct kvm_vcpu *vcpu, 615 - const struct sys_reg_params *params) 616 - { 617 - size_t num; 618 - const struct sys_reg_desc *table, *r; 621 + if (!table) 622 + return -1; /* Not handled */ 619 623 620 - table = get_target_table(vcpu->arch.target, false, &num); 621 - 622 - /* Search target-specific then generic table. */ 623 624 r = find_reg(params, table, num); 624 - if (!r) 625 - r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); 626 625 627 - if (likely(r)) { 626 + if (r) { 628 627 /* 629 628 * Not having an accessor means that we have 630 629 * configured a trap that we don't know how to ··· 639 632 if (likely(r->access(vcpu, params, r))) { 640 633 /* Skip instruction, since it was emulated */ 641 634 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 642 - return; 643 635 } 644 - /* If access function fails, it should complain. */
636 + 637 + /* Handled */ 638 + return 0; 645 639 } 646 640 647 - kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu)); 641 + /* Not handled */ 642 + return -1; 643 + } 644 + 645 + static void unhandled_cp_access(struct kvm_vcpu *vcpu, 646 + struct sys_reg_params *params) 647 + { 648 + u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); 649 + int cp; 650 + 651 + switch(hsr_ec) { 652 + case ESR_EL2_EC_CP15_32: 653 + case ESR_EL2_EC_CP15_64: 654 + cp = 15; 655 + break; 656 + case ESR_EL2_EC_CP14_MR: 657 + case ESR_EL2_EC_CP14_64: 658 + cp = 14; 659 + break; 660 + default: 661 + WARN_ON((cp = -1)); 662 + } 663 + 664 + kvm_err("Unsupported guest CP%d access at: %08lx\n", 665 + cp, *vcpu_pc(vcpu)); 648 666 print_sys_reg_instr(params); 649 667 kvm_inject_undefined(vcpu); 650 668 } 651 669 652 670 /** 653 - * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access 671 + * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access 654 672 * @vcpu: The VCPU pointer 655 673 * @run: The kvm_run struct 656 674 */ 657 - int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 675 + static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, 676 + const struct sys_reg_desc *global, 677 + size_t nr_global, 678 + const struct sys_reg_desc *target_specific, 679 + size_t nr_specific) 658 680 { 659 681 struct sys_reg_params params; 660 682 u32 hsr = kvm_vcpu_get_hsr(vcpu); ··· 712 676 *vcpu_reg(vcpu, params.Rt) = val; 713 677 } 714 678 715 - emulate_cp15(vcpu, &params); 679 + if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) 680 + goto out; 681 + if (!emulate_cp(vcpu, &params, global, nr_global)) 682 + goto out; 716 683 684 + unhandled_cp_access(vcpu, &params); 685 + 686 + out: 717 687 /* Do the opposite hack for the read side */ 718 688 if (!params.is_write) { 719 689 u64 val = *vcpu_reg(vcpu, params.Rt); ··· 735 693 * @vcpu: The VCPU pointer 736 694 * @run: The kvm_run struct 737 695 */ 738 - int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
696 + static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, 697 + const struct sys_reg_desc *global, 698 + size_t nr_global, 699 + const struct sys_reg_desc *target_specific, 700 + size_t nr_specific) 739 701 { 740 702 struct sys_reg_params params; 741 703 u32 hsr = kvm_vcpu_get_hsr(vcpu); ··· 754 708 params.Op1 = (hsr >> 14) & 0x7; 755 709 params.Op2 = (hsr >> 17) & 0x7; 756 710 757 - emulate_cp15(vcpu, &params); 711 + if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) 712 + return 1; 713 + if (!emulate_cp(vcpu, &params, global, nr_global)) 714 + return 1; 715 + 716 + unhandled_cp_access(vcpu, &params); 758 717 return 1; 718 + } 719 + 720 + int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 721 + { 722 + const struct sys_reg_desc *target_specific; 723 + size_t num; 724 + 725 + target_specific = get_target_table(vcpu->arch.target, false, &num); 726 + return kvm_handle_cp_64(vcpu, 727 + cp15_regs, ARRAY_SIZE(cp15_regs), 728 + target_specific, num); 729 + } 730 + 731 + int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 732 + { 733 + const struct sys_reg_desc *target_specific; 734 + size_t num; 735 + 736 + target_specific = get_target_table(vcpu->arch.target, false, &num); 737 + return kvm_handle_cp_32(vcpu, 738 + cp15_regs, ARRAY_SIZE(cp15_regs), 739 + target_specific, num); 740 + } 741 + 742 + int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 743 + { 744 + return kvm_handle_cp_64(vcpu, 745 + cp14_regs, ARRAY_SIZE(cp14_regs), 746 + NULL, 0); 747 + } 748 + 749 + int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 750 + { 751 + return kvm_handle_cp_32(vcpu, 752 + cp14_regs, ARRAY_SIZE(cp14_regs), 753 + NULL, 0); 759 754 } 760 755 761 756 static int emulate_sys_reg(struct kvm_vcpu *vcpu,