Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

KVM: PPC: Clean up redundant kvm_run parameters in assembly

In the current KVM code, 'kvm_run' is already reachable from the 'kvm_vcpu'
structure (vcpu->run). For historical reasons, many KVM-related functions
still take both a 'kvm_run' and a 'kvm_vcpu' parameter. This patch cleans up
these remaining redundant parameters.

[paulus@ozlabs.org - Fixed places that were missed in book3s_interrupts.S]

Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
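
The shape of the cleanup in C terms, as a minimal sketch: the prototype and the
'run' local are taken from the book3s_pr.c hunk below, while the body here is
only illustrative, not the full handler.

/* Before: both pointers were threaded through each call. */
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr);

/* After: only the vcpu is passed; kvm_run is reached through vcpu->run. */
int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	struct kvm_run *run = vcpu->run;

	/* ... exit handling uses 'run' and 'vcpu' exactly as before ... */
	return RESUME_HOST;
}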

Authored by Tianjia Zhang, committed by Paul Mackerras
7ec21d9d 1508c22f

6 files changed: +45 -50
arch/powerpc/include/asm/kvm_ppc.h (+1 -1)

···
 };
 
 extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
 extern void kvmppc_handler_highmem(void);
 
 extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
arch/powerpc/kvm/book3s_interrupts.S (+27 -29)

···
 ****************************************************************************/
 
 /* Registers:
- * r3: kvm_run pointer
- * r4: vcpu pointer
+ * r3: vcpu pointer
  */
 _GLOBAL(__kvmppc_vcpu_run)
 
···
 	/* Save host state to the stack */
 	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
 
-	/* Save r3 (kvm_run) and r4 (vcpu) */
-	SAVE_2GPRS(3, r1)
+	/* Save r3 (vcpu) */
+	SAVE_GPR(3, r1)
 
 	/* Save non-volatile registers (r14 - r31) */
 	SAVE_NVGPRS(r1)
···
 	PPC_STL	r0, _LINK(r1)
 
 	/* Load non-volatile guest state from the vcpu */
-	VCPU_LOAD_NVGPRS(r4)
+	VCPU_LOAD_NVGPRS(r3)
 
 kvm_start_lightweight:
 	/* Copy registers into shadow vcpu so we can access them in real mode */
-	mr	r3, r4
 	bl	FUNC(kvmppc_copy_to_svcpu)
 	nop
-	REST_GPR(4, r1)
+	REST_GPR(3, r1)
 
 #ifdef CONFIG_PPC_BOOK3S_64
 	/* Get the dcbz32 flag */
-	PPC_LL	r3, VCPU_HFLAGS(r4)
-	rldicl	r3, r3, 0, 63		/* r3 &= 1 */
-	stb	r3, HSTATE_RESTORE_HID5(r13)
+	PPC_LL	r0, VCPU_HFLAGS(r3)
+	rldicl	r0, r0, 0, 63		/* r3 &= 1 */
+	stb	r0, HSTATE_RESTORE_HID5(r13)
 
 	/* Load up guest SPRG3 value, since it's user readable */
-	lwz	r3, VCPU_SHAREDBE(r4)
-	cmpwi	r3, 0
-	ld	r5, VCPU_SHARED(r4)
+	lbz	r4, VCPU_SHAREDBE(r3)
+	cmpwi	r4, 0
+	ld	r5, VCPU_SHARED(r3)
 	beq	sprg3_little_endian
 sprg3_big_endian:
 #ifdef __BIG_ENDIAN__
-	ld	r3, VCPU_SHARED_SPRG3(r5)
+	ld	r4, VCPU_SHARED_SPRG3(r5)
 #else
 	addi	r5, r5, VCPU_SHARED_SPRG3
-	ldbrx	r3, 0, r5
+	ldbrx	r4, 0, r5
 #endif
 	b	after_sprg3_load
 sprg3_little_endian:
 #ifdef __LITTLE_ENDIAN__
-	ld	r3, VCPU_SHARED_SPRG3(r5)
+	ld	r4, VCPU_SHARED_SPRG3(r5)
 #else
 	addi	r5, r5, VCPU_SHARED_SPRG3
-	ldbrx	r3, 0, r5
+	ldbrx	r4, 0, r5
 #endif
 
 after_sprg3_load:
-	mtspr	SPRN_SPRG3, r3
+	mtspr	SPRN_SPRG3, r4
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
-	PPC_LL	r4, VCPU_SHADOW_MSR(r4)	/* get shadow_msr */
+	PPC_LL	r4, VCPU_SHADOW_MSR(r3)	/* get shadow_msr */
 
 	/* Jump to segment patching handler and into our guest */
 	bl	FUNC(kvmppc_entry_trampoline)
···
 	 *
 	 */
 
-	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+	PPC_LL	r3, GPR3(r1)		/* vcpu pointer */
 
 	/*
 	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
···
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 	/* R7 = vcpu */
-	PPC_LL	r7, GPR4(r1)
+	PPC_LL	r7, GPR3(r1)
 
 	PPC_STL	r14, VCPU_GPR(R14)(r7)
 	PPC_STL	r15, VCPU_GPR(R15)(r7)
···
 	PPC_STL	r30, VCPU_GPR(R30)(r7)
 	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
-	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	lwz	r5, VCPU_TRAP(r7)
+	/* Pass the exit number as 2nd argument to kvmppc_handle_exit */
+	lwz	r4, VCPU_TRAP(r7)
 
-	/* Restore r3 (kvm_run) and r4 (vcpu) */
-	REST_2GPRS(3, r1)
+	/* Restore r3 (vcpu) */
+	REST_GPR(3, r1)
 	bl	FUNC(kvmppc_handle_exit_pr)
 
 	/* If RESUME_GUEST, get back in the loop */
···
 	PPC_LL	r4, _LINK(r1)
 	PPC_STL	r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
 
-	/* Load vcpu and cpu_run */
-	REST_2GPRS(3, r1)
+	/* Load vcpu */
+	REST_GPR(3, r1)
 
 	/* Load non-volatile guest state from the vcpu */
-	VCPU_LOAD_NVGPRS(r4)
+	VCPU_LOAD_NVGPRS(r3)
 
 	/* Jump back into the beginning of this function */
 	b	kvm_start_lightweight
···
 kvm_loop_lightweight:
 
 	/* We'll need the vcpu pointer */
-	REST_GPR(4, r1)
+	REST_GPR(3, r1)
 
 	/* Jump back into the beginning of this function */
 	b	kvm_start_lightweight
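
Why the register numbers shift in book3s_interrupts.S: with kvm_run gone, the
vcpu pointer is the first (r3) argument both on entry to __kvmppc_vcpu_run and
when calling back into C, the trap number moves from the third (r5) to the
second (r4) argument, and the dcbz32/SPRG3 code switches its temporaries to
r0/r4 so the vcpu pointer held in r3 is not clobbered. The C entry point the
exit path now branches to, with the register mapping spelled out (prototype
taken from the book3s_pr.c hunk below; the mapping is just the standard
PowerPC ELF calling convention, noted here for clarity):

/* New calling convention used by the assembly above (standard ELF ABI):
 *   r3 = struct kvm_vcpu *vcpu   (first argument)
 *   r4 = unsigned int exit_nr    (second argument)
 */
int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr);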
arch/powerpc/kvm/book3s_pr.c (+4 -5)

···
 	return r;
 }
 
-int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
-			  unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 {
+	struct kvm_run *run = vcpu->run;
 	int r = RESUME_HOST;
 	int s;
 
···
 
 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	int ret;
 #ifdef CONFIG_ALTIVEC
 	unsigned long uninitialized_var(vrsave);
···
 
 	/* Check if we can run the vcpu at all */
 	if (!vcpu->arch.sane) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		ret = -EINVAL;
 		goto out;
 	}
···
 
 	kvmppc_fix_ee_before_entry();
 
-	ret = __kvmppc_vcpu_run(run, vcpu);
+	ret = __kvmppc_vcpu_run(vcpu);
 
 	kvmppc_clear_debug(vcpu);
 
arch/powerpc/kvm/booke.c (+4 -5)

···
 
 int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
 {
-	struct kvm_run *run = vcpu->run;
 	int ret, s;
 	struct debug_reg debug;
 
 	if (!vcpu->arch.sane) {
-		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		return -EINVAL;
 	}
 
···
 	vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
 	kvmppc_fix_ee_before_entry();
 
-	ret = __kvmppc_vcpu_run(run, vcpu);
+	ret = __kvmppc_vcpu_run(vcpu);
 
 	/* No need for guest_exit. It's done in handle_exit.
 	   We also get here with interrupts enabled. */
···
  *
  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
  */
-int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
-		       unsigned int exit_nr)
+int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 {
+	struct kvm_run *run = vcpu->run;
 	int r = RESUME_HOST;
 	int s;
 	int idx;
arch/powerpc/kvm/booke_interrupts.S (+4 -5)

···
 	/* Switch to kernel stack and jump to handler. */
 	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
 	mtctr	r3
-	lwz	r3, HOST_RUN(r1)
+	mr	r3, r4
 	lwz	r2, HOST_R2(r1)
 	mr	r14, r4 /* Save vcpu pointer. */
 
···
 
 
 /* Registers:
- * r3: kvm_run pointer
- * r4: vcpu pointer
+ * r3: vcpu pointer
  */
 _GLOBAL(__kvmppc_vcpu_run)
 	stwu	r1, -HOST_STACK_SIZE(r1)
-	stw	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */
+	stw	r1, VCPU_HOST_STACK(r3)	/* Save stack pointer to vcpu. */
 
 	/* Save host state to stack. */
-	stw	r3, HOST_RUN(r1)
+	mr	r4, r3
 	mflr	r3
 	stw	r3, HOST_STACK_LR(r1)
 	mfcr	r5
arch/powerpc/kvm/bookehv_interrupts.S (+5 -5)

···
 #endif
 
 	/* Switch to kernel stack and jump to handler. */
-	PPC_LL	r3, HOST_RUN(r1)
+	mr	r3, r4
 	mr	r5, r14 /* intno */
 	mr	r14, r4 /* Save vcpu pointer. */
+	mr	r4, r5
 	bl	kvmppc_handle_exit
 
 	/* Restore vcpu pointer and the nonvolatiles we used. */
···
 	blr
 
 /* Registers:
- * r3: kvm_run pointer
- * r4: vcpu pointer
+ * r3: vcpu pointer
  */
 _GLOBAL(__kvmppc_vcpu_run)
 	stwu	r1, -HOST_STACK_SIZE(r1)
-	PPC_STL	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */
+	PPC_STL	r1, VCPU_HOST_STACK(r3)	/* Save stack pointer to vcpu. */
 
 	/* Save host state to stack. */
-	PPC_STL	r3, HOST_RUN(r1)
+	mr	r4, r3
 	mflr	r3
 	mfcr	r5
 	PPC_STL	r3, HOST_STACK_LR(r1)
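
The register shuffle in the two booke entry/exit paths, annotated: the
instructions are the ones from the hunks above, while the comments are an
interpretation added here and are not part of the patch.

	/* __kvmppc_vcpu_run entry: the vcpu pointer now arrives in r3 */
	mr	r4, r3		/* keep a copy in r4, where the unchanged entry
				 * code still expects the vcpu; the old
				 * HOST_RUN(r1) save of kvm_run is gone */

	/* exit path (bookehv): set up kvmppc_handle_exit(vcpu, exit_nr) */
	mr	r3, r4		/* r3 = vcpu, the first C argument */
	mr	r4, r5		/* r4 = interrupt number, the second argument */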