Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

powerpc: Fix usage of register macros getting ready for %r0 change

Anything that uses a constructed instruction (i.e. one built via the macros in
ppc-opcode.h) needs to use the new uppercase register macros (R0, R1, ...),
as %r0 is not going to work there.
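
For context, a minimal sketch of why this matters. The definitions below are
simplified and illustrative, not the kernel's verbatim ppc-opcode.h text:

    /* Illustrative only: a constructed instruction ORs the register
     * number into the encoding, so its argument must be a plain integer. */
    #define __PPC_RA(a)       (((a) & 0x1f) << 16)    /* RA field */
    #define __PPC_RS(s)       (((s) & 0x1f) << 21)    /* RS field */
    #define PPC_INST_POPCNTB  0x7c0000f4
    #define PPC_POPCNTB(a, s) \
            .long (PPC_INST_POPCNTB | __PPC_RA(a) | __PPC_RS(s))

    /* With "#define R3 3", PPC_POPCNTB(R3,R3) evaluates to a constant and
     * assembles fine.  Once r3 expands to %r3, "((%r3) & 0x1f) << 16" is
     * no longer an integer expression, so PPC_POPCNTB(r3,r3) breaks. */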

Also convert usages of macros where we are just determining an offset
(usually for a load/store), like:

	std	r14,STK_REG(r14)(r1)

STK_REG(r14) can't be used here, because %r14 doesn't work inside the STK_REG
macro: the macro is just calculating an offset, so it needs the plain register
number, i.e. STK_REG(R14).
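
A rough sketch of the offset macros involved (illustrative shapes, not the
exact ppc_asm.h definitions):

    /* Offset macros do plain arithmetic on the register number. */
    #define STK_REG(i)   (112 + ((i) - 14) * 8)   /* stack save slot for GPR i */
    #define STK_PARM(i)  ( 48 + ((i) -  3) * 8)   /* parameter save slot       */

    /* STK_REG(R14) must evaluate to a constant (112 here).  That only works
     * while the argument is a bare number; once r14 expands to %r14, the
     * arithmetic can no longer be evaluated, so the R14 form is required. */

Note that the register operands of the instructions themselves (r14 and r1 in
the example above) keep their lowercase names; only arguments to offset- and
instruction-building macros change.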

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Authored by Michael Neuling; committed by Benjamin Herrenschmidt.
c75df6f9 564aa5cf

+657 -657
+3 -3
arch/powerpc/kernel/cpu_setup_a2.S
··· 100 100 lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h 101 101 mtspr SPRN_MMUCR0, r4 102 102 li r4,A2_IERAT_SIZE-1 103 - PPC_ERATWE(r4,r4,3) 103 + PPC_ERATWE(R4,R4,3) 104 104 105 105 /* Now set the D-ERAT watermark to 31 */ 106 106 lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h 107 107 mtspr SPRN_MMUCR0, r4 108 108 li r4,A2_DERAT_SIZE-1 109 - PPC_ERATWE(r4,r4,3) 109 + PPC_ERATWE(R4,R4,3) 110 110 111 111 /* And invalidate the beast just in case. That won't get rid of 112 112 * a bolted entry though it will be in LRU and so will go away eventually 113 113 * but let's not bother for now 114 114 */ 115 - PPC_ERATILX(0,0,0) 115 + PPC_ERATILX(0,R0,R0) 116 116 1: 117 117 blr 118 118
+2 -2
arch/powerpc/kernel/fpu.S
··· 106 106 #endif 107 107 lfd fr0,THREAD_FPSCR(r5) 108 108 MTFSF_L(fr0) 109 - REST_32FPVSRS(0, r4, r5) 109 + REST_32FPVSRS(0, R4, R5) 110 110 #ifndef CONFIG_SMP 111 111 subi r4,r5,THREAD 112 112 fromreal(r4) ··· 140 140 addi r3,r3,THREAD /* want THREAD of task */ 141 141 PPC_LL r5,PT_REGS(r3) 142 142 PPC_LCMPI 0,r5,0 143 - SAVE_32FPVSRS(0, r4 ,r3) 143 + SAVE_32FPVSRS(0, R4 ,R3) 144 144 mffs fr0 145 145 stfd fr0,THREAD_FPSCR(r3) 146 146 beq 1f
+1 -1
arch/powerpc/kernel/kvm.c
··· 302 302 303 303 if (imm_one) { 304 304 p[kvm_emulate_wrtee_reg_offs] = 305 - KVM_INST_LI | __PPC_RT(30) | MSR_EE; 305 + KVM_INST_LI | __PPC_RT(R30) | MSR_EE; 306 306 } else { 307 307 /* Make clobbered registers work too */ 308 308 switch (get_rt(rt)) {
+2 -2
arch/powerpc/kernel/misc_64.S
··· 314 314 mtmsrd r0 315 315 sync 316 316 isync 317 - LBZCIX(r3,0,r3) 317 + LBZCIX(R3,0,R3) 318 318 isync 319 319 mtmsrd r7 320 320 sync ··· 329 329 mtmsrd r0 330 330 sync 331 331 isync 332 - STBCIX(r3,0,r4) 332 + STBCIX(R3,0,R4) 333 333 isync 334 334 mtmsrd r7 335 335 sync
+109 -109
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 206 206 /* Load up FP, VMX and VSX registers */ 207 207 bl kvmppc_load_fp 208 208 209 - ld r14, VCPU_GPR(r14)(r4) 210 - ld r15, VCPU_GPR(r15)(r4) 211 - ld r16, VCPU_GPR(r16)(r4) 212 - ld r17, VCPU_GPR(r17)(r4) 213 - ld r18, VCPU_GPR(r18)(r4) 214 - ld r19, VCPU_GPR(r19)(r4) 215 - ld r20, VCPU_GPR(r20)(r4) 216 - ld r21, VCPU_GPR(r21)(r4) 217 - ld r22, VCPU_GPR(r22)(r4) 218 - ld r23, VCPU_GPR(r23)(r4) 219 - ld r24, VCPU_GPR(r24)(r4) 220 - ld r25, VCPU_GPR(r25)(r4) 221 - ld r26, VCPU_GPR(r26)(r4) 222 - ld r27, VCPU_GPR(r27)(r4) 223 - ld r28, VCPU_GPR(r28)(r4) 224 - ld r29, VCPU_GPR(r29)(r4) 225 - ld r30, VCPU_GPR(r30)(r4) 226 - ld r31, VCPU_GPR(r31)(r4) 209 + ld r14, VCPU_GPR(R14)(r4) 210 + ld r15, VCPU_GPR(R15)(r4) 211 + ld r16, VCPU_GPR(R16)(r4) 212 + ld r17, VCPU_GPR(R17)(r4) 213 + ld r18, VCPU_GPR(R18)(r4) 214 + ld r19, VCPU_GPR(R19)(r4) 215 + ld r20, VCPU_GPR(R20)(r4) 216 + ld r21, VCPU_GPR(R21)(r4) 217 + ld r22, VCPU_GPR(R22)(r4) 218 + ld r23, VCPU_GPR(R23)(r4) 219 + ld r24, VCPU_GPR(R24)(r4) 220 + ld r25, VCPU_GPR(R25)(r4) 221 + ld r26, VCPU_GPR(R26)(r4) 222 + ld r27, VCPU_GPR(R27)(r4) 223 + ld r28, VCPU_GPR(R28)(r4) 224 + ld r29, VCPU_GPR(R29)(r4) 225 + ld r30, VCPU_GPR(R30)(r4) 226 + ld r31, VCPU_GPR(R31)(r4) 227 227 228 228 BEGIN_FTR_SECTION 229 229 /* Switch DSCR to guest value */ ··· 547 547 mtlr r5 548 548 mtcr r6 549 549 550 - ld r0, VCPU_GPR(r0)(r4) 551 - ld r1, VCPU_GPR(r1)(r4) 552 - ld r2, VCPU_GPR(r2)(r4) 553 - ld r3, VCPU_GPR(r3)(r4) 554 - ld r5, VCPU_GPR(r5)(r4) 555 - ld r6, VCPU_GPR(r6)(r4) 556 - ld r7, VCPU_GPR(r7)(r4) 557 - ld r8, VCPU_GPR(r8)(r4) 558 - ld r9, VCPU_GPR(r9)(r4) 559 - ld r10, VCPU_GPR(r10)(r4) 560 - ld r11, VCPU_GPR(r11)(r4) 561 - ld r12, VCPU_GPR(r12)(r4) 562 - ld r13, VCPU_GPR(r13)(r4) 550 + ld r0, VCPU_GPR(R0)(r4) 551 + ld r1, VCPU_GPR(R1)(r4) 552 + ld r2, VCPU_GPR(R2)(r4) 553 + ld r3, VCPU_GPR(R3)(r4) 554 + ld r5, VCPU_GPR(R5)(r4) 555 + ld r6, VCPU_GPR(R6)(r4) 556 + ld r7, VCPU_GPR(R7)(r4) 557 + ld r8, VCPU_GPR(R8)(r4) 558 + ld r9, VCPU_GPR(R9)(r4) 559 + ld r10, VCPU_GPR(R10)(r4) 560 + ld r11, VCPU_GPR(R11)(r4) 561 + ld r12, VCPU_GPR(R12)(r4) 562 + ld r13, VCPU_GPR(R13)(r4) 563 563 564 - ld r4, VCPU_GPR(r4)(r4) 564 + ld r4, VCPU_GPR(R4)(r4) 565 565 566 566 hrfid 567 567 b . 
··· 590 590 591 591 /* Save registers */ 592 592 593 - std r0, VCPU_GPR(r0)(r9) 594 - std r1, VCPU_GPR(r1)(r9) 595 - std r2, VCPU_GPR(r2)(r9) 596 - std r3, VCPU_GPR(r3)(r9) 597 - std r4, VCPU_GPR(r4)(r9) 598 - std r5, VCPU_GPR(r5)(r9) 599 - std r6, VCPU_GPR(r6)(r9) 600 - std r7, VCPU_GPR(r7)(r9) 601 - std r8, VCPU_GPR(r8)(r9) 593 + std r0, VCPU_GPR(R0)(r9) 594 + std r1, VCPU_GPR(R1)(r9) 595 + std r2, VCPU_GPR(R2)(r9) 596 + std r3, VCPU_GPR(R3)(r9) 597 + std r4, VCPU_GPR(R4)(r9) 598 + std r5, VCPU_GPR(R5)(r9) 599 + std r6, VCPU_GPR(R6)(r9) 600 + std r7, VCPU_GPR(R7)(r9) 601 + std r8, VCPU_GPR(R8)(r9) 602 602 ld r0, HSTATE_HOST_R2(r13) 603 - std r0, VCPU_GPR(r9)(r9) 604 - std r10, VCPU_GPR(r10)(r9) 605 - std r11, VCPU_GPR(r11)(r9) 603 + std r0, VCPU_GPR(R9)(r9) 604 + std r10, VCPU_GPR(R10)(r9) 605 + std r11, VCPU_GPR(R11)(r9) 606 606 ld r3, HSTATE_SCRATCH0(r13) 607 607 lwz r4, HSTATE_SCRATCH1(r13) 608 - std r3, VCPU_GPR(r12)(r9) 608 + std r3, VCPU_GPR(R12)(r9) 609 609 stw r4, VCPU_CR(r9) 610 610 611 611 /* Restore R1/R2 so we can handle faults */ ··· 626 626 627 627 GET_SCRATCH0(r3) 628 628 mflr r4 629 - std r3, VCPU_GPR(r13)(r9) 629 + std r3, VCPU_GPR(R13)(r9) 630 630 std r4, VCPU_LR(r9) 631 631 632 632 /* Unset guest mode */ ··· 968 968 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 969 969 970 970 /* Save non-volatile GPRs */ 971 - std r14, VCPU_GPR(r14)(r9) 972 - std r15, VCPU_GPR(r15)(r9) 973 - std r16, VCPU_GPR(r16)(r9) 974 - std r17, VCPU_GPR(r17)(r9) 975 - std r18, VCPU_GPR(r18)(r9) 976 - std r19, VCPU_GPR(r19)(r9) 977 - std r20, VCPU_GPR(r20)(r9) 978 - std r21, VCPU_GPR(r21)(r9) 979 - std r22, VCPU_GPR(r22)(r9) 980 - std r23, VCPU_GPR(r23)(r9) 981 - std r24, VCPU_GPR(r24)(r9) 982 - std r25, VCPU_GPR(r25)(r9) 983 - std r26, VCPU_GPR(r26)(r9) 984 - std r27, VCPU_GPR(r27)(r9) 985 - std r28, VCPU_GPR(r28)(r9) 986 - std r29, VCPU_GPR(r29)(r9) 987 - std r30, VCPU_GPR(r30)(r9) 988 - std r31, VCPU_GPR(r31)(r9) 971 + std r14, VCPU_GPR(R14)(r9) 972 + std r15, VCPU_GPR(R15)(r9) 973 + std r16, VCPU_GPR(R16)(r9) 974 + std r17, VCPU_GPR(R17)(r9) 975 + std r18, VCPU_GPR(R18)(r9) 976 + std r19, VCPU_GPR(R19)(r9) 977 + std r20, VCPU_GPR(R20)(r9) 978 + std r21, VCPU_GPR(R21)(r9) 979 + std r22, VCPU_GPR(R22)(r9) 980 + std r23, VCPU_GPR(R23)(r9) 981 + std r24, VCPU_GPR(R24)(r9) 982 + std r25, VCPU_GPR(R25)(r9) 983 + std r26, VCPU_GPR(R26)(r9) 984 + std r27, VCPU_GPR(R27)(r9) 985 + std r28, VCPU_GPR(R28)(r9) 986 + std r29, VCPU_GPR(R29)(r9) 987 + std r30, VCPU_GPR(R30)(r9) 988 + std r31, VCPU_GPR(R31)(r9) 989 989 990 990 /* Save SPRGs */ 991 991 mfspr r3, SPRN_SPRG0 ··· 1160 1160 andi. r0, r11, MSR_DR /* data relocation enabled? */ 1161 1161 beq 3f 1162 1162 clrrdi r0, r4, 28 1163 - PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */ 1163 + PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 1164 1164 bne 1f /* if no SLB entry found */ 1165 1165 4: std r4, VCPU_FAULT_DAR(r9) 1166 1166 stw r6, VCPU_FAULT_DSISR(r9) ··· 1234 1234 andi. r0, r11, MSR_IR /* instruction relocation enabled? */ 1235 1235 beq 3f 1236 1236 clrrdi r0, r10, 28 1237 - PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */ 1237 + PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 1238 1238 bne 1f /* if no SLB entry found */ 1239 1239 4: 1240 1240 /* Search the hash table. */ ··· 1278 1278 */ 1279 1279 .globl hcall_try_real_mode 1280 1280 hcall_try_real_mode: 1281 - ld r3,VCPU_GPR(r3)(r9) 1281 + ld r3,VCPU_GPR(R3)(r9) 1282 1282 andi. 
r0,r11,MSR_PR 1283 1283 bne hcall_real_cont 1284 1284 clrrdi r3,r3,2 ··· 1291 1291 add r3,r3,r4 1292 1292 mtctr r3 1293 1293 mr r3,r9 /* get vcpu pointer */ 1294 - ld r4,VCPU_GPR(r4)(r9) 1294 + ld r4,VCPU_GPR(R4)(r9) 1295 1295 bctrl 1296 1296 cmpdi r3,H_TOO_HARD 1297 1297 beq hcall_real_fallback 1298 1298 ld r4,HSTATE_KVM_VCPU(r13) 1299 - std r3,VCPU_GPR(r3)(r4) 1299 + std r3,VCPU_GPR(R3)(r4) 1300 1300 ld r10,VCPU_PC(r4) 1301 1301 ld r11,VCPU_MSR(r4) 1302 1302 b fast_guest_return ··· 1424 1424 li r0,0 /* set trap to 0 to say hcall is handled */ 1425 1425 stw r0,VCPU_TRAP(r3) 1426 1426 li r0,H_SUCCESS 1427 - std r0,VCPU_GPR(r3)(r3) 1427 + std r0,VCPU_GPR(R3)(r3) 1428 1428 BEGIN_FTR_SECTION 1429 1429 b 2f /* just send it up to host on 970 */ 1430 1430 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) ··· 1443 1443 addi r6,r5,VCORE_NAPPING_THREADS 1444 1444 31: lwarx r4,0,r6 1445 1445 or r4,r4,r0 1446 - PPC_POPCNTW(r7,r4) 1446 + PPC_POPCNTW(R7,R4) 1447 1447 cmpw r7,r8 1448 1448 bge 2f 1449 1449 stwcx. r4,0,r6 ··· 1464 1464 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. 1465 1465 */ 1466 1466 /* Save non-volatile GPRs */ 1467 - std r14, VCPU_GPR(r14)(r3) 1468 - std r15, VCPU_GPR(r15)(r3) 1469 - std r16, VCPU_GPR(r16)(r3) 1470 - std r17, VCPU_GPR(r17)(r3) 1471 - std r18, VCPU_GPR(r18)(r3) 1472 - std r19, VCPU_GPR(r19)(r3) 1473 - std r20, VCPU_GPR(r20)(r3) 1474 - std r21, VCPU_GPR(r21)(r3) 1475 - std r22, VCPU_GPR(r22)(r3) 1476 - std r23, VCPU_GPR(r23)(r3) 1477 - std r24, VCPU_GPR(r24)(r3) 1478 - std r25, VCPU_GPR(r25)(r3) 1479 - std r26, VCPU_GPR(r26)(r3) 1480 - std r27, VCPU_GPR(r27)(r3) 1481 - std r28, VCPU_GPR(r28)(r3) 1482 - std r29, VCPU_GPR(r29)(r3) 1483 - std r30, VCPU_GPR(r30)(r3) 1484 - std r31, VCPU_GPR(r31)(r3) 1467 + std r14, VCPU_GPR(R14)(r3) 1468 + std r15, VCPU_GPR(R15)(r3) 1469 + std r16, VCPU_GPR(R16)(r3) 1470 + std r17, VCPU_GPR(R17)(r3) 1471 + std r18, VCPU_GPR(R18)(r3) 1472 + std r19, VCPU_GPR(R19)(r3) 1473 + std r20, VCPU_GPR(R20)(r3) 1474 + std r21, VCPU_GPR(R21)(r3) 1475 + std r22, VCPU_GPR(R22)(r3) 1476 + std r23, VCPU_GPR(R23)(r3) 1477 + std r24, VCPU_GPR(R24)(r3) 1478 + std r25, VCPU_GPR(R25)(r3) 1479 + std r26, VCPU_GPR(R26)(r3) 1480 + std r27, VCPU_GPR(R27)(r3) 1481 + std r28, VCPU_GPR(R28)(r3) 1482 + std r29, VCPU_GPR(R29)(r3) 1483 + std r30, VCPU_GPR(R30)(r3) 1484 + std r31, VCPU_GPR(R31)(r3) 1485 1485 1486 1486 /* save FP state */ 1487 1487 bl .kvmppc_save_fp ··· 1513 1513 bl kvmppc_load_fp 1514 1514 1515 1515 /* Load NV GPRS */ 1516 - ld r14, VCPU_GPR(r14)(r4) 1517 - ld r15, VCPU_GPR(r15)(r4) 1518 - ld r16, VCPU_GPR(r16)(r4) 1519 - ld r17, VCPU_GPR(r17)(r4) 1520 - ld r18, VCPU_GPR(r18)(r4) 1521 - ld r19, VCPU_GPR(r19)(r4) 1522 - ld r20, VCPU_GPR(r20)(r4) 1523 - ld r21, VCPU_GPR(r21)(r4) 1524 - ld r22, VCPU_GPR(r22)(r4) 1525 - ld r23, VCPU_GPR(r23)(r4) 1526 - ld r24, VCPU_GPR(r24)(r4) 1527 - ld r25, VCPU_GPR(r25)(r4) 1528 - ld r26, VCPU_GPR(r26)(r4) 1529 - ld r27, VCPU_GPR(r27)(r4) 1530 - ld r28, VCPU_GPR(r28)(r4) 1531 - ld r29, VCPU_GPR(r29)(r4) 1532 - ld r30, VCPU_GPR(r30)(r4) 1533 - ld r31, VCPU_GPR(r31)(r4) 1516 + ld r14, VCPU_GPR(R14)(r4) 1517 + ld r15, VCPU_GPR(R15)(r4) 1518 + ld r16, VCPU_GPR(R16)(r4) 1519 + ld r17, VCPU_GPR(R17)(r4) 1520 + ld r18, VCPU_GPR(R18)(r4) 1521 + ld r19, VCPU_GPR(R19)(r4) 1522 + ld r20, VCPU_GPR(R20)(r4) 1523 + ld r21, VCPU_GPR(R21)(r4) 1524 + ld r22, VCPU_GPR(R22)(r4) 1525 + ld r23, VCPU_GPR(R23)(r4) 1526 + ld r24, VCPU_GPR(R24)(r4) 1527 + ld r25, VCPU_GPR(R25)(r4) 1528 + ld r26, VCPU_GPR(R26)(r4) 1529 + ld r27, 
VCPU_GPR(R27)(r4) 1530 + ld r28, VCPU_GPR(R28)(r4) 1531 + ld r29, VCPU_GPR(R29)(r4) 1532 + ld r30, VCPU_GPR(R30)(r4) 1533 + ld r31, VCPU_GPR(R31)(r4) 1534 1534 1535 1535 /* clear our bit in vcore->napping_threads */ 1536 1536 33: ld r5,HSTATE_KVM_VCORE(r13) ··· 1649 1649 reg = 0 1650 1650 .rept 32 1651 1651 li r6,reg*16+VCPU_VSRS 1652 - STXVD2X(reg,r6,r3) 1652 + STXVD2X(reg,R6,R3) 1653 1653 reg = reg + 1 1654 1654 .endr 1655 1655 FTR_SECTION_ELSE ··· 1711 1711 reg = 0 1712 1712 .rept 32 1713 1713 li r7,reg*16+VCPU_VSRS 1714 - LXVD2X(reg,r7,r4) 1714 + LXVD2X(reg,R7,R4) 1715 1715 reg = reg + 1 1716 1716 .endr 1717 1717 FTR_SECTION_ELSE
+36 -36
arch/powerpc/kvm/book3s_interrupts.S
··· 39 39 40 40 #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) 41 41 #define VCPU_LOAD_NVGPRS(vcpu) \ 42 - PPC_LL r14, VCPU_GPR(r14)(vcpu); \ 43 - PPC_LL r15, VCPU_GPR(r15)(vcpu); \ 44 - PPC_LL r16, VCPU_GPR(r16)(vcpu); \ 45 - PPC_LL r17, VCPU_GPR(r17)(vcpu); \ 46 - PPC_LL r18, VCPU_GPR(r18)(vcpu); \ 47 - PPC_LL r19, VCPU_GPR(r19)(vcpu); \ 48 - PPC_LL r20, VCPU_GPR(r20)(vcpu); \ 49 - PPC_LL r21, VCPU_GPR(r21)(vcpu); \ 50 - PPC_LL r22, VCPU_GPR(r22)(vcpu); \ 51 - PPC_LL r23, VCPU_GPR(r23)(vcpu); \ 52 - PPC_LL r24, VCPU_GPR(r24)(vcpu); \ 53 - PPC_LL r25, VCPU_GPR(r25)(vcpu); \ 54 - PPC_LL r26, VCPU_GPR(r26)(vcpu); \ 55 - PPC_LL r27, VCPU_GPR(r27)(vcpu); \ 56 - PPC_LL r28, VCPU_GPR(r28)(vcpu); \ 57 - PPC_LL r29, VCPU_GPR(r29)(vcpu); \ 58 - PPC_LL r30, VCPU_GPR(r30)(vcpu); \ 59 - PPC_LL r31, VCPU_GPR(r31)(vcpu); \ 42 + PPC_LL r14, VCPU_GPR(R14)(vcpu); \ 43 + PPC_LL r15, VCPU_GPR(R15)(vcpu); \ 44 + PPC_LL r16, VCPU_GPR(R16)(vcpu); \ 45 + PPC_LL r17, VCPU_GPR(R17)(vcpu); \ 46 + PPC_LL r18, VCPU_GPR(R18)(vcpu); \ 47 + PPC_LL r19, VCPU_GPR(R19)(vcpu); \ 48 + PPC_LL r20, VCPU_GPR(R20)(vcpu); \ 49 + PPC_LL r21, VCPU_GPR(R21)(vcpu); \ 50 + PPC_LL r22, VCPU_GPR(R22)(vcpu); \ 51 + PPC_LL r23, VCPU_GPR(R23)(vcpu); \ 52 + PPC_LL r24, VCPU_GPR(R24)(vcpu); \ 53 + PPC_LL r25, VCPU_GPR(R25)(vcpu); \ 54 + PPC_LL r26, VCPU_GPR(R26)(vcpu); \ 55 + PPC_LL r27, VCPU_GPR(R27)(vcpu); \ 56 + PPC_LL r28, VCPU_GPR(R28)(vcpu); \ 57 + PPC_LL r29, VCPU_GPR(R29)(vcpu); \ 58 + PPC_LL r30, VCPU_GPR(R30)(vcpu); \ 59 + PPC_LL r31, VCPU_GPR(R31)(vcpu); \ 60 60 61 61 /***************************************************************************** 62 62 * * ··· 131 131 /* R7 = vcpu */ 132 132 PPC_LL r7, GPR4(r1) 133 133 134 - PPC_STL r14, VCPU_GPR(r14)(r7) 135 - PPC_STL r15, VCPU_GPR(r15)(r7) 136 - PPC_STL r16, VCPU_GPR(r16)(r7) 137 - PPC_STL r17, VCPU_GPR(r17)(r7) 138 - PPC_STL r18, VCPU_GPR(r18)(r7) 139 - PPC_STL r19, VCPU_GPR(r19)(r7) 140 - PPC_STL r20, VCPU_GPR(r20)(r7) 141 - PPC_STL r21, VCPU_GPR(r21)(r7) 142 - PPC_STL r22, VCPU_GPR(r22)(r7) 143 - PPC_STL r23, VCPU_GPR(r23)(r7) 144 - PPC_STL r24, VCPU_GPR(r24)(r7) 145 - PPC_STL r25, VCPU_GPR(r25)(r7) 146 - PPC_STL r26, VCPU_GPR(r26)(r7) 147 - PPC_STL r27, VCPU_GPR(r27)(r7) 148 - PPC_STL r28, VCPU_GPR(r28)(r7) 149 - PPC_STL r29, VCPU_GPR(r29)(r7) 150 - PPC_STL r30, VCPU_GPR(r30)(r7) 151 - PPC_STL r31, VCPU_GPR(r31)(r7) 134 + PPC_STL r14, VCPU_GPR(R14)(r7) 135 + PPC_STL r15, VCPU_GPR(R15)(r7) 136 + PPC_STL r16, VCPU_GPR(R16)(r7) 137 + PPC_STL r17, VCPU_GPR(R17)(r7) 138 + PPC_STL r18, VCPU_GPR(R18)(r7) 139 + PPC_STL r19, VCPU_GPR(R19)(r7) 140 + PPC_STL r20, VCPU_GPR(R20)(r7) 141 + PPC_STL r21, VCPU_GPR(R21)(r7) 142 + PPC_STL r22, VCPU_GPR(R22)(r7) 143 + PPC_STL r23, VCPU_GPR(R23)(r7) 144 + PPC_STL r24, VCPU_GPR(R24)(r7) 145 + PPC_STL r25, VCPU_GPR(R25)(r7) 146 + PPC_STL r26, VCPU_GPR(R26)(r7) 147 + PPC_STL r27, VCPU_GPR(R27)(r7) 148 + PPC_STL r28, VCPU_GPR(R28)(r7) 149 + PPC_STL r29, VCPU_GPR(R29)(r7) 150 + PPC_STL r30, VCPU_GPR(R30)(r7) 151 + PPC_STL r31, VCPU_GPR(R31)(r7) 152 152 153 153 /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ 154 154 mr r5, r12
+136 -136
arch/powerpc/kvm/booke_interrupts.S
··· 37 37 #define HOST_CR 16 38 38 #define HOST_NV_GPRS 20 39 39 #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4)) 40 - #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4) 40 + #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4) 41 41 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */ 42 42 #define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */ 43 43 ··· 58 58 /* Get pointer to vcpu and record exit number. */ 59 59 mtspr SPRN_SPRG_WSCRATCH0, r4 60 60 mfspr r4, SPRN_SPRG_RVCPU 61 - stw r5, VCPU_GPR(r5)(r4) 62 - stw r6, VCPU_GPR(r6)(r4) 61 + stw r5, VCPU_GPR(R5)(r4) 62 + stw r6, VCPU_GPR(R6)(r4) 63 63 mfctr r5 64 64 lis r6, kvmppc_resume_host@h 65 65 stw r5, VCPU_CTR(r4) ··· 100 100 * r5: KVM exit number 101 101 */ 102 102 _GLOBAL(kvmppc_resume_host) 103 - stw r3, VCPU_GPR(r3)(r4) 103 + stw r3, VCPU_GPR(R3)(r4) 104 104 mfcr r3 105 105 stw r3, VCPU_CR(r4) 106 - stw r7, VCPU_GPR(r7)(r4) 107 - stw r8, VCPU_GPR(r8)(r4) 108 - stw r9, VCPU_GPR(r9)(r4) 106 + stw r7, VCPU_GPR(R7)(r4) 107 + stw r8, VCPU_GPR(R8)(r4) 108 + stw r9, VCPU_GPR(R9)(r4) 109 109 110 110 li r6, 1 111 111 slw r6, r6, r5 ··· 135 135 isync 136 136 stw r9, VCPU_LAST_INST(r4) 137 137 138 - stw r15, VCPU_GPR(r15)(r4) 139 - stw r16, VCPU_GPR(r16)(r4) 140 - stw r17, VCPU_GPR(r17)(r4) 141 - stw r18, VCPU_GPR(r18)(r4) 142 - stw r19, VCPU_GPR(r19)(r4) 143 - stw r20, VCPU_GPR(r20)(r4) 144 - stw r21, VCPU_GPR(r21)(r4) 145 - stw r22, VCPU_GPR(r22)(r4) 146 - stw r23, VCPU_GPR(r23)(r4) 147 - stw r24, VCPU_GPR(r24)(r4) 148 - stw r25, VCPU_GPR(r25)(r4) 149 - stw r26, VCPU_GPR(r26)(r4) 150 - stw r27, VCPU_GPR(r27)(r4) 151 - stw r28, VCPU_GPR(r28)(r4) 152 - stw r29, VCPU_GPR(r29)(r4) 153 - stw r30, VCPU_GPR(r30)(r4) 154 - stw r31, VCPU_GPR(r31)(r4) 138 + stw r15, VCPU_GPR(R15)(r4) 139 + stw r16, VCPU_GPR(R16)(r4) 140 + stw r17, VCPU_GPR(R17)(r4) 141 + stw r18, VCPU_GPR(R18)(r4) 142 + stw r19, VCPU_GPR(R19)(r4) 143 + stw r20, VCPU_GPR(R20)(r4) 144 + stw r21, VCPU_GPR(R21)(r4) 145 + stw r22, VCPU_GPR(R22)(r4) 146 + stw r23, VCPU_GPR(R23)(r4) 147 + stw r24, VCPU_GPR(R24)(r4) 148 + stw r25, VCPU_GPR(R25)(r4) 149 + stw r26, VCPU_GPR(R26)(r4) 150 + stw r27, VCPU_GPR(R27)(r4) 151 + stw r28, VCPU_GPR(R28)(r4) 152 + stw r29, VCPU_GPR(R29)(r4) 153 + stw r30, VCPU_GPR(R30)(r4) 154 + stw r31, VCPU_GPR(R31)(r4) 155 155 ..skip_inst_copy: 156 156 157 157 /* Also grab DEAR and ESR before the host can clobber them. */ ··· 169 169 ..skip_esr: 170 170 171 171 /* Save remaining volatile guest register state to vcpu. */ 172 - stw r0, VCPU_GPR(r0)(r4) 173 - stw r1, VCPU_GPR(r1)(r4) 174 - stw r2, VCPU_GPR(r2)(r4) 175 - stw r10, VCPU_GPR(r10)(r4) 176 - stw r11, VCPU_GPR(r11)(r4) 177 - stw r12, VCPU_GPR(r12)(r4) 178 - stw r13, VCPU_GPR(r13)(r4) 179 - stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */ 172 + stw r0, VCPU_GPR(R0)(r4) 173 + stw r1, VCPU_GPR(R1)(r4) 174 + stw r2, VCPU_GPR(R2)(r4) 175 + stw r10, VCPU_GPR(R10)(r4) 176 + stw r11, VCPU_GPR(R11)(r4) 177 + stw r12, VCPU_GPR(R12)(r4) 178 + stw r13, VCPU_GPR(R13)(r4) 179 + stw r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */ 180 180 mflr r3 181 181 stw r3, VCPU_LR(r4) 182 182 mfxer r3 183 183 stw r3, VCPU_XER(r4) 184 184 mfspr r3, SPRN_SPRG_RSCRATCH0 185 - stw r3, VCPU_GPR(r4)(r4) 185 + stw r3, VCPU_GPR(R4)(r4) 186 186 mfspr r3, SPRN_SRR0 187 187 stw r3, VCPU_PC(r4) 188 188 ··· 214 214 215 215 /* Restore vcpu pointer and the nonvolatiles we used. 
*/ 216 216 mr r4, r14 217 - lwz r14, VCPU_GPR(r14)(r4) 217 + lwz r14, VCPU_GPR(R14)(r4) 218 218 219 219 /* Sometimes instruction emulation must restore complete GPR state. */ 220 220 andi. r5, r3, RESUME_FLAG_NV 221 221 beq ..skip_nv_load 222 - lwz r15, VCPU_GPR(r15)(r4) 223 - lwz r16, VCPU_GPR(r16)(r4) 224 - lwz r17, VCPU_GPR(r17)(r4) 225 - lwz r18, VCPU_GPR(r18)(r4) 226 - lwz r19, VCPU_GPR(r19)(r4) 227 - lwz r20, VCPU_GPR(r20)(r4) 228 - lwz r21, VCPU_GPR(r21)(r4) 229 - lwz r22, VCPU_GPR(r22)(r4) 230 - lwz r23, VCPU_GPR(r23)(r4) 231 - lwz r24, VCPU_GPR(r24)(r4) 232 - lwz r25, VCPU_GPR(r25)(r4) 233 - lwz r26, VCPU_GPR(r26)(r4) 234 - lwz r27, VCPU_GPR(r27)(r4) 235 - lwz r28, VCPU_GPR(r28)(r4) 236 - lwz r29, VCPU_GPR(r29)(r4) 237 - lwz r30, VCPU_GPR(r30)(r4) 238 - lwz r31, VCPU_GPR(r31)(r4) 222 + lwz r15, VCPU_GPR(R15)(r4) 223 + lwz r16, VCPU_GPR(R16)(r4) 224 + lwz r17, VCPU_GPR(R17)(r4) 225 + lwz r18, VCPU_GPR(R18)(r4) 226 + lwz r19, VCPU_GPR(R19)(r4) 227 + lwz r20, VCPU_GPR(R20)(r4) 228 + lwz r21, VCPU_GPR(R21)(r4) 229 + lwz r22, VCPU_GPR(R22)(r4) 230 + lwz r23, VCPU_GPR(R23)(r4) 231 + lwz r24, VCPU_GPR(R24)(r4) 232 + lwz r25, VCPU_GPR(R25)(r4) 233 + lwz r26, VCPU_GPR(R26)(r4) 234 + lwz r27, VCPU_GPR(R27)(r4) 235 + lwz r28, VCPU_GPR(R28)(r4) 236 + lwz r29, VCPU_GPR(R29)(r4) 237 + lwz r30, VCPU_GPR(R30)(r4) 238 + lwz r31, VCPU_GPR(R31)(r4) 239 239 ..skip_nv_load: 240 240 241 241 /* Should we return to the guest? */ ··· 257 257 258 258 /* We already saved guest volatile register state; now save the 259 259 * non-volatiles. */ 260 - stw r15, VCPU_GPR(r15)(r4) 261 - stw r16, VCPU_GPR(r16)(r4) 262 - stw r17, VCPU_GPR(r17)(r4) 263 - stw r18, VCPU_GPR(r18)(r4) 264 - stw r19, VCPU_GPR(r19)(r4) 265 - stw r20, VCPU_GPR(r20)(r4) 266 - stw r21, VCPU_GPR(r21)(r4) 267 - stw r22, VCPU_GPR(r22)(r4) 268 - stw r23, VCPU_GPR(r23)(r4) 269 - stw r24, VCPU_GPR(r24)(r4) 270 - stw r25, VCPU_GPR(r25)(r4) 271 - stw r26, VCPU_GPR(r26)(r4) 272 - stw r27, VCPU_GPR(r27)(r4) 273 - stw r28, VCPU_GPR(r28)(r4) 274 - stw r29, VCPU_GPR(r29)(r4) 275 - stw r30, VCPU_GPR(r30)(r4) 276 - stw r31, VCPU_GPR(r31)(r4) 260 + stw r15, VCPU_GPR(R15)(r4) 261 + stw r16, VCPU_GPR(R16)(r4) 262 + stw r17, VCPU_GPR(R17)(r4) 263 + stw r18, VCPU_GPR(R18)(r4) 264 + stw r19, VCPU_GPR(R19)(r4) 265 + stw r20, VCPU_GPR(R20)(r4) 266 + stw r21, VCPU_GPR(R21)(r4) 267 + stw r22, VCPU_GPR(R22)(r4) 268 + stw r23, VCPU_GPR(R23)(r4) 269 + stw r24, VCPU_GPR(R24)(r4) 270 + stw r25, VCPU_GPR(R25)(r4) 271 + stw r26, VCPU_GPR(R26)(r4) 272 + stw r27, VCPU_GPR(R27)(r4) 273 + stw r28, VCPU_GPR(R28)(r4) 274 + stw r29, VCPU_GPR(R29)(r4) 275 + stw r30, VCPU_GPR(R30)(r4) 276 + stw r31, VCPU_GPR(R31)(r4) 277 277 278 278 /* Load host non-volatile register state from host stack. 
*/ 279 - lwz r14, HOST_NV_GPR(r14)(r1) 280 - lwz r15, HOST_NV_GPR(r15)(r1) 281 - lwz r16, HOST_NV_GPR(r16)(r1) 282 - lwz r17, HOST_NV_GPR(r17)(r1) 283 - lwz r18, HOST_NV_GPR(r18)(r1) 284 - lwz r19, HOST_NV_GPR(r19)(r1) 285 - lwz r20, HOST_NV_GPR(r20)(r1) 286 - lwz r21, HOST_NV_GPR(r21)(r1) 287 - lwz r22, HOST_NV_GPR(r22)(r1) 288 - lwz r23, HOST_NV_GPR(r23)(r1) 289 - lwz r24, HOST_NV_GPR(r24)(r1) 290 - lwz r25, HOST_NV_GPR(r25)(r1) 291 - lwz r26, HOST_NV_GPR(r26)(r1) 292 - lwz r27, HOST_NV_GPR(r27)(r1) 293 - lwz r28, HOST_NV_GPR(r28)(r1) 294 - lwz r29, HOST_NV_GPR(r29)(r1) 295 - lwz r30, HOST_NV_GPR(r30)(r1) 296 - lwz r31, HOST_NV_GPR(r31)(r1) 279 + lwz r14, HOST_NV_GPR(R14)(r1) 280 + lwz r15, HOST_NV_GPR(R15)(r1) 281 + lwz r16, HOST_NV_GPR(R16)(r1) 282 + lwz r17, HOST_NV_GPR(R17)(r1) 283 + lwz r18, HOST_NV_GPR(R18)(r1) 284 + lwz r19, HOST_NV_GPR(R19)(r1) 285 + lwz r20, HOST_NV_GPR(R20)(r1) 286 + lwz r21, HOST_NV_GPR(R21)(r1) 287 + lwz r22, HOST_NV_GPR(R22)(r1) 288 + lwz r23, HOST_NV_GPR(R23)(r1) 289 + lwz r24, HOST_NV_GPR(R24)(r1) 290 + lwz r25, HOST_NV_GPR(R25)(r1) 291 + lwz r26, HOST_NV_GPR(R26)(r1) 292 + lwz r27, HOST_NV_GPR(R27)(r1) 293 + lwz r28, HOST_NV_GPR(R28)(r1) 294 + lwz r29, HOST_NV_GPR(R29)(r1) 295 + lwz r30, HOST_NV_GPR(R30)(r1) 296 + lwz r31, HOST_NV_GPR(R31)(r1) 297 297 298 298 /* Return to kvm_vcpu_run(). */ 299 299 lwz r4, HOST_STACK_LR(r1) ··· 321 321 stw r5, HOST_CR(r1) 322 322 323 323 /* Save host non-volatile register state to stack. */ 324 - stw r14, HOST_NV_GPR(r14)(r1) 325 - stw r15, HOST_NV_GPR(r15)(r1) 326 - stw r16, HOST_NV_GPR(r16)(r1) 327 - stw r17, HOST_NV_GPR(r17)(r1) 328 - stw r18, HOST_NV_GPR(r18)(r1) 329 - stw r19, HOST_NV_GPR(r19)(r1) 330 - stw r20, HOST_NV_GPR(r20)(r1) 331 - stw r21, HOST_NV_GPR(r21)(r1) 332 - stw r22, HOST_NV_GPR(r22)(r1) 333 - stw r23, HOST_NV_GPR(r23)(r1) 334 - stw r24, HOST_NV_GPR(r24)(r1) 335 - stw r25, HOST_NV_GPR(r25)(r1) 336 - stw r26, HOST_NV_GPR(r26)(r1) 337 - stw r27, HOST_NV_GPR(r27)(r1) 338 - stw r28, HOST_NV_GPR(r28)(r1) 339 - stw r29, HOST_NV_GPR(r29)(r1) 340 - stw r30, HOST_NV_GPR(r30)(r1) 341 - stw r31, HOST_NV_GPR(r31)(r1) 324 + stw r14, HOST_NV_GPR(R14)(r1) 325 + stw r15, HOST_NV_GPR(R15)(r1) 326 + stw r16, HOST_NV_GPR(R16)(r1) 327 + stw r17, HOST_NV_GPR(R17)(r1) 328 + stw r18, HOST_NV_GPR(R18)(r1) 329 + stw r19, HOST_NV_GPR(R19)(r1) 330 + stw r20, HOST_NV_GPR(R20)(r1) 331 + stw r21, HOST_NV_GPR(R21)(r1) 332 + stw r22, HOST_NV_GPR(R22)(r1) 333 + stw r23, HOST_NV_GPR(R23)(r1) 334 + stw r24, HOST_NV_GPR(R24)(r1) 335 + stw r25, HOST_NV_GPR(R25)(r1) 336 + stw r26, HOST_NV_GPR(R26)(r1) 337 + stw r27, HOST_NV_GPR(R27)(r1) 338 + stw r28, HOST_NV_GPR(R28)(r1) 339 + stw r29, HOST_NV_GPR(R29)(r1) 340 + stw r30, HOST_NV_GPR(R30)(r1) 341 + stw r31, HOST_NV_GPR(R31)(r1) 342 342 343 343 /* Load guest non-volatiles. 
*/ 344 - lwz r14, VCPU_GPR(r14)(r4) 345 - lwz r15, VCPU_GPR(r15)(r4) 346 - lwz r16, VCPU_GPR(r16)(r4) 347 - lwz r17, VCPU_GPR(r17)(r4) 348 - lwz r18, VCPU_GPR(r18)(r4) 349 - lwz r19, VCPU_GPR(r19)(r4) 350 - lwz r20, VCPU_GPR(r20)(r4) 351 - lwz r21, VCPU_GPR(r21)(r4) 352 - lwz r22, VCPU_GPR(r22)(r4) 353 - lwz r23, VCPU_GPR(r23)(r4) 354 - lwz r24, VCPU_GPR(r24)(r4) 355 - lwz r25, VCPU_GPR(r25)(r4) 356 - lwz r26, VCPU_GPR(r26)(r4) 357 - lwz r27, VCPU_GPR(r27)(r4) 358 - lwz r28, VCPU_GPR(r28)(r4) 359 - lwz r29, VCPU_GPR(r29)(r4) 360 - lwz r30, VCPU_GPR(r30)(r4) 361 - lwz r31, VCPU_GPR(r31)(r4) 344 + lwz r14, VCPU_GPR(R14)(r4) 345 + lwz r15, VCPU_GPR(R15)(r4) 346 + lwz r16, VCPU_GPR(R16)(r4) 347 + lwz r17, VCPU_GPR(R17)(r4) 348 + lwz r18, VCPU_GPR(R18)(r4) 349 + lwz r19, VCPU_GPR(R19)(r4) 350 + lwz r20, VCPU_GPR(R20)(r4) 351 + lwz r21, VCPU_GPR(R21)(r4) 352 + lwz r22, VCPU_GPR(R22)(r4) 353 + lwz r23, VCPU_GPR(R23)(r4) 354 + lwz r24, VCPU_GPR(R24)(r4) 355 + lwz r25, VCPU_GPR(R25)(r4) 356 + lwz r26, VCPU_GPR(R26)(r4) 357 + lwz r27, VCPU_GPR(R27)(r4) 358 + lwz r28, VCPU_GPR(R28)(r4) 359 + lwz r29, VCPU_GPR(R29)(r4) 360 + lwz r30, VCPU_GPR(R30)(r4) 361 + lwz r31, VCPU_GPR(R31)(r4) 362 362 363 363 #ifdef CONFIG_SPE 364 364 /* save host SPEFSCR and load guest SPEFSCR */ ··· 386 386 #endif 387 387 388 388 /* Load some guest volatiles. */ 389 - lwz r0, VCPU_GPR(r0)(r4) 390 - lwz r2, VCPU_GPR(r2)(r4) 391 - lwz r9, VCPU_GPR(r9)(r4) 392 - lwz r10, VCPU_GPR(r10)(r4) 393 - lwz r11, VCPU_GPR(r11)(r4) 394 - lwz r12, VCPU_GPR(r12)(r4) 395 - lwz r13, VCPU_GPR(r13)(r4) 389 + lwz r0, VCPU_GPR(R0)(r4) 390 + lwz r2, VCPU_GPR(R2)(r4) 391 + lwz r9, VCPU_GPR(R9)(r4) 392 + lwz r10, VCPU_GPR(R10)(r4) 393 + lwz r11, VCPU_GPR(R11)(r4) 394 + lwz r12, VCPU_GPR(R12)(r4) 395 + lwz r13, VCPU_GPR(R13)(r4) 396 396 lwz r3, VCPU_LR(r4) 397 397 mtlr r3 398 398 lwz r3, VCPU_XER(r4) ··· 411 411 412 412 /* Can't switch the stack pointer until after IVPR is switched, 413 413 * because host interrupt handlers would get confused. */ 414 - lwz r1, VCPU_GPR(r1)(r4) 414 + lwz r1, VCPU_GPR(R1)(r4) 415 415 416 416 /* 417 417 * Host interrupt handlers may have clobbered these ··· 449 449 mtcr r5 450 450 mtsrr0 r6 451 451 mtsrr1 r7 452 - lwz r5, VCPU_GPR(r5)(r4) 453 - lwz r6, VCPU_GPR(r6)(r4) 454 - lwz r7, VCPU_GPR(r7)(r4) 455 - lwz r8, VCPU_GPR(r8)(r4) 452 + lwz r5, VCPU_GPR(R5)(r4) 453 + lwz r6, VCPU_GPR(R6)(r4) 454 + lwz r7, VCPU_GPR(R7)(r4) 455 + lwz r8, VCPU_GPR(R8)(r4) 456 456 457 457 /* Clear any debug events which occurred since we disabled MSR[DE]. 458 458 * XXX This gives us a 3-instruction window in which a breakpoint ··· 461 461 ori r3, r3, 0xffff 462 462 mtspr SPRN_DBSR, r3 463 463 464 - lwz r3, VCPU_GPR(r3)(r4) 465 - lwz r4, VCPU_GPR(r4)(r4) 464 + lwz r3, VCPU_GPR(R3)(r4) 465 + lwz r4, VCPU_GPR(R4)(r4) 466 466 rfi 467 467 468 468 #ifdef CONFIG_SPE
+110 -110
arch/powerpc/kvm/bookehv_interrupts.S
··· 67 67 */ 68 68 .macro kvm_handler_common intno, srr0, flags 69 69 /* Restore host stack pointer */ 70 - PPC_STL r1, VCPU_GPR(r1)(r4) 71 - PPC_STL r2, VCPU_GPR(r2)(r4) 70 + PPC_STL r1, VCPU_GPR(R1)(r4) 71 + PPC_STL r2, VCPU_GPR(R2)(r4) 72 72 PPC_LL r1, VCPU_HOST_STACK(r4) 73 73 PPC_LL r2, HOST_R2(r1) 74 74 75 75 mfspr r10, SPRN_PID 76 76 lwz r8, VCPU_HOST_PID(r4) 77 77 PPC_LL r11, VCPU_SHARED(r4) 78 - PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */ 78 + PPC_STL r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */ 79 79 li r14, \intno 80 80 81 81 stw r10, VCPU_GUEST_PID(r4) ··· 137 137 */ 138 138 139 139 mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */ 140 - PPC_STL r15, VCPU_GPR(r15)(r4) 141 - PPC_STL r16, VCPU_GPR(r16)(r4) 142 - PPC_STL r17, VCPU_GPR(r17)(r4) 143 - PPC_STL r18, VCPU_GPR(r18)(r4) 144 - PPC_STL r19, VCPU_GPR(r19)(r4) 140 + PPC_STL r15, VCPU_GPR(R15)(r4) 141 + PPC_STL r16, VCPU_GPR(R16)(r4) 142 + PPC_STL r17, VCPU_GPR(R17)(r4) 143 + PPC_STL r18, VCPU_GPR(R18)(r4) 144 + PPC_STL r19, VCPU_GPR(R19)(r4) 145 145 mr r8, r3 146 - PPC_STL r20, VCPU_GPR(r20)(r4) 146 + PPC_STL r20, VCPU_GPR(R20)(r4) 147 147 rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS 148 - PPC_STL r21, VCPU_GPR(r21)(r4) 148 + PPC_STL r21, VCPU_GPR(R21)(r4) 149 149 rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR 150 - PPC_STL r22, VCPU_GPR(r22)(r4) 150 + PPC_STL r22, VCPU_GPR(R22)(r4) 151 151 rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID 152 - PPC_STL r23, VCPU_GPR(r23)(r4) 153 - PPC_STL r24, VCPU_GPR(r24)(r4) 154 - PPC_STL r25, VCPU_GPR(r25)(r4) 155 - PPC_STL r26, VCPU_GPR(r26)(r4) 156 - PPC_STL r27, VCPU_GPR(r27)(r4) 157 - PPC_STL r28, VCPU_GPR(r28)(r4) 158 - PPC_STL r29, VCPU_GPR(r29)(r4) 159 - PPC_STL r30, VCPU_GPR(r30)(r4) 160 - PPC_STL r31, VCPU_GPR(r31)(r4) 152 + PPC_STL r23, VCPU_GPR(R23)(r4) 153 + PPC_STL r24, VCPU_GPR(R24)(r4) 154 + PPC_STL r25, VCPU_GPR(R25)(r4) 155 + PPC_STL r26, VCPU_GPR(R26)(r4) 156 + PPC_STL r27, VCPU_GPR(R27)(r4) 157 + PPC_STL r28, VCPU_GPR(R28)(r4) 158 + PPC_STL r29, VCPU_GPR(R29)(r4) 159 + PPC_STL r30, VCPU_GPR(R30)(r4) 160 + PPC_STL r31, VCPU_GPR(R31)(r4) 161 161 mtspr SPRN_EPLC, r8 162 162 163 163 /* disable preemption, so we are sure we hit the fixup handler */ ··· 211 211 .macro kvm_handler intno srr0, srr1, flags 212 212 _GLOBAL(kvmppc_handler_\intno\()_\srr1) 213 213 GET_VCPU(r11, r10) 214 - PPC_STL r3, VCPU_GPR(r3)(r11) 214 + PPC_STL r3, VCPU_GPR(R3)(r11) 215 215 mfspr r3, SPRN_SPRG_RSCRATCH0 216 - PPC_STL r4, VCPU_GPR(r4)(r11) 216 + PPC_STL r4, VCPU_GPR(R4)(r11) 217 217 PPC_LL r4, THREAD_NORMSAVE(0)(r10) 218 - PPC_STL r5, VCPU_GPR(r5)(r11) 218 + PPC_STL r5, VCPU_GPR(R5)(r11) 219 219 stw r13, VCPU_CR(r11) 220 220 mfspr r5, \srr0 221 - PPC_STL r3, VCPU_GPR(r10)(r11) 221 + PPC_STL r3, VCPU_GPR(R10)(r11) 222 222 PPC_LL r3, THREAD_NORMSAVE(2)(r10) 223 - PPC_STL r6, VCPU_GPR(r6)(r11) 224 - PPC_STL r4, VCPU_GPR(r11)(r11) 223 + PPC_STL r6, VCPU_GPR(R6)(r11) 224 + PPC_STL r4, VCPU_GPR(R11)(r11) 225 225 mfspr r6, \srr1 226 - PPC_STL r7, VCPU_GPR(r7)(r11) 227 - PPC_STL r8, VCPU_GPR(r8)(r11) 228 - PPC_STL r9, VCPU_GPR(r9)(r11) 229 - PPC_STL r3, VCPU_GPR(r13)(r11) 226 + PPC_STL r7, VCPU_GPR(R7)(r11) 227 + PPC_STL r8, VCPU_GPR(R8)(r11) 228 + PPC_STL r9, VCPU_GPR(R9)(r11) 229 + PPC_STL r3, VCPU_GPR(R13)(r11) 230 230 mfctr r7 231 - PPC_STL r12, VCPU_GPR(r12)(r11) 231 + PPC_STL r12, VCPU_GPR(R12)(r11) 232 232 PPC_STL r7, VCPU_CTR(r11) 233 233 mr r4, r11 234 234 kvm_handler_common \intno, \srr0, \flags ··· 238 238 
_GLOBAL(kvmppc_handler_\intno\()_\srr1) 239 239 mfspr r10, SPRN_SPRG_THREAD 240 240 GET_VCPU(r11, r10) 241 - PPC_STL r3, VCPU_GPR(r3)(r11) 241 + PPC_STL r3, VCPU_GPR(R3)(r11) 242 242 mfspr r3, \scratch 243 - PPC_STL r4, VCPU_GPR(r4)(r11) 243 + PPC_STL r4, VCPU_GPR(R4)(r11) 244 244 PPC_LL r4, GPR9(r8) 245 - PPC_STL r5, VCPU_GPR(r5)(r11) 245 + PPC_STL r5, VCPU_GPR(R5)(r11) 246 246 stw r9, VCPU_CR(r11) 247 247 mfspr r5, \srr0 248 - PPC_STL r3, VCPU_GPR(r8)(r11) 248 + PPC_STL r3, VCPU_GPR(R8)(r11) 249 249 PPC_LL r3, GPR10(r8) 250 - PPC_STL r6, VCPU_GPR(r6)(r11) 251 - PPC_STL r4, VCPU_GPR(r9)(r11) 250 + PPC_STL r6, VCPU_GPR(R6)(r11) 251 + PPC_STL r4, VCPU_GPR(R9)(r11) 252 252 mfspr r6, \srr1 253 253 PPC_LL r4, GPR11(r8) 254 - PPC_STL r7, VCPU_GPR(r7)(r11) 255 - PPC_STL r3, VCPU_GPR(r10)(r11) 254 + PPC_STL r7, VCPU_GPR(R7)(r11) 255 + PPC_STL r3, VCPU_GPR(R10)(r11) 256 256 mfctr r7 257 - PPC_STL r12, VCPU_GPR(r12)(r11) 258 - PPC_STL r13, VCPU_GPR(r13)(r11) 259 - PPC_STL r4, VCPU_GPR(r11)(r11) 257 + PPC_STL r12, VCPU_GPR(R12)(r11) 258 + PPC_STL r13, VCPU_GPR(R13)(r11) 259 + PPC_STL r4, VCPU_GPR(R11)(r11) 260 260 PPC_STL r7, VCPU_CTR(r11) 261 261 mr r4, r11 262 262 kvm_handler_common \intno, \srr0, \flags ··· 310 310 _GLOBAL(kvmppc_resume_host) 311 311 /* Save remaining volatile guest register state to vcpu. */ 312 312 mfspr r3, SPRN_VRSAVE 313 - PPC_STL r0, VCPU_GPR(r0)(r4) 313 + PPC_STL r0, VCPU_GPR(R0)(r4) 314 314 mflr r5 315 315 mfspr r6, SPRN_SPRG4 316 316 PPC_STL r5, VCPU_LR(r4) ··· 358 358 359 359 /* Restore vcpu pointer and the nonvolatiles we used. */ 360 360 mr r4, r14 361 - PPC_LL r14, VCPU_GPR(r14)(r4) 361 + PPC_LL r14, VCPU_GPR(R14)(r4) 362 362 363 363 andi. r5, r3, RESUME_FLAG_NV 364 364 beq skip_nv_load 365 - PPC_LL r15, VCPU_GPR(r15)(r4) 366 - PPC_LL r16, VCPU_GPR(r16)(r4) 367 - PPC_LL r17, VCPU_GPR(r17)(r4) 368 - PPC_LL r18, VCPU_GPR(r18)(r4) 369 - PPC_LL r19, VCPU_GPR(r19)(r4) 370 - PPC_LL r20, VCPU_GPR(r20)(r4) 371 - PPC_LL r21, VCPU_GPR(r21)(r4) 372 - PPC_LL r22, VCPU_GPR(r22)(r4) 373 - PPC_LL r23, VCPU_GPR(r23)(r4) 374 - PPC_LL r24, VCPU_GPR(r24)(r4) 375 - PPC_LL r25, VCPU_GPR(r25)(r4) 376 - PPC_LL r26, VCPU_GPR(r26)(r4) 377 - PPC_LL r27, VCPU_GPR(r27)(r4) 378 - PPC_LL r28, VCPU_GPR(r28)(r4) 379 - PPC_LL r29, VCPU_GPR(r29)(r4) 380 - PPC_LL r30, VCPU_GPR(r30)(r4) 381 - PPC_LL r31, VCPU_GPR(r31)(r4) 365 + PPC_LL r15, VCPU_GPR(R15)(r4) 366 + PPC_LL r16, VCPU_GPR(R16)(r4) 367 + PPC_LL r17, VCPU_GPR(R17)(r4) 368 + PPC_LL r18, VCPU_GPR(R18)(r4) 369 + PPC_LL r19, VCPU_GPR(R19)(r4) 370 + PPC_LL r20, VCPU_GPR(R20)(r4) 371 + PPC_LL r21, VCPU_GPR(R21)(r4) 372 + PPC_LL r22, VCPU_GPR(R22)(r4) 373 + PPC_LL r23, VCPU_GPR(R23)(r4) 374 + PPC_LL r24, VCPU_GPR(R24)(r4) 375 + PPC_LL r25, VCPU_GPR(R25)(r4) 376 + PPC_LL r26, VCPU_GPR(R26)(r4) 377 + PPC_LL r27, VCPU_GPR(R27)(r4) 378 + PPC_LL r28, VCPU_GPR(R28)(r4) 379 + PPC_LL r29, VCPU_GPR(R29)(r4) 380 + PPC_LL r30, VCPU_GPR(R30)(r4) 381 + PPC_LL r31, VCPU_GPR(R31)(r4) 382 382 skip_nv_load: 383 383 /* Should we return to the guest? */ 384 384 andi. r5, r3, RESUME_FLAG_HOST ··· 396 396 * non-volatiles. 
397 397 */ 398 398 399 - PPC_STL r15, VCPU_GPR(r15)(r4) 400 - PPC_STL r16, VCPU_GPR(r16)(r4) 401 - PPC_STL r17, VCPU_GPR(r17)(r4) 402 - PPC_STL r18, VCPU_GPR(r18)(r4) 403 - PPC_STL r19, VCPU_GPR(r19)(r4) 404 - PPC_STL r20, VCPU_GPR(r20)(r4) 405 - PPC_STL r21, VCPU_GPR(r21)(r4) 406 - PPC_STL r22, VCPU_GPR(r22)(r4) 407 - PPC_STL r23, VCPU_GPR(r23)(r4) 408 - PPC_STL r24, VCPU_GPR(r24)(r4) 409 - PPC_STL r25, VCPU_GPR(r25)(r4) 410 - PPC_STL r26, VCPU_GPR(r26)(r4) 411 - PPC_STL r27, VCPU_GPR(r27)(r4) 412 - PPC_STL r28, VCPU_GPR(r28)(r4) 413 - PPC_STL r29, VCPU_GPR(r29)(r4) 414 - PPC_STL r30, VCPU_GPR(r30)(r4) 415 - PPC_STL r31, VCPU_GPR(r31)(r4) 399 + PPC_STL r15, VCPU_GPR(R15)(r4) 400 + PPC_STL r16, VCPU_GPR(R16)(r4) 401 + PPC_STL r17, VCPU_GPR(R17)(r4) 402 + PPC_STL r18, VCPU_GPR(R18)(r4) 403 + PPC_STL r19, VCPU_GPR(R19)(r4) 404 + PPC_STL r20, VCPU_GPR(R20)(r4) 405 + PPC_STL r21, VCPU_GPR(R21)(r4) 406 + PPC_STL r22, VCPU_GPR(R22)(r4) 407 + PPC_STL r23, VCPU_GPR(R23)(r4) 408 + PPC_STL r24, VCPU_GPR(R24)(r4) 409 + PPC_STL r25, VCPU_GPR(R25)(r4) 410 + PPC_STL r26, VCPU_GPR(R26)(r4) 411 + PPC_STL r27, VCPU_GPR(R27)(r4) 412 + PPC_STL r28, VCPU_GPR(R28)(r4) 413 + PPC_STL r29, VCPU_GPR(R29)(r4) 414 + PPC_STL r30, VCPU_GPR(R30)(r4) 415 + PPC_STL r31, VCPU_GPR(R31)(r4) 416 416 417 417 /* Load host non-volatile register state from host stack. */ 418 418 PPC_LL r14, HOST_NV_GPR(r14)(r1) ··· 478 478 PPC_STL r31, HOST_NV_GPR(r31)(r1) 479 479 480 480 /* Load guest non-volatiles. */ 481 - PPC_LL r14, VCPU_GPR(r14)(r4) 482 - PPC_LL r15, VCPU_GPR(r15)(r4) 483 - PPC_LL r16, VCPU_GPR(r16)(r4) 484 - PPC_LL r17, VCPU_GPR(r17)(r4) 485 - PPC_LL r18, VCPU_GPR(r18)(r4) 486 - PPC_LL r19, VCPU_GPR(r19)(r4) 487 - PPC_LL r20, VCPU_GPR(r20)(r4) 488 - PPC_LL r21, VCPU_GPR(r21)(r4) 489 - PPC_LL r22, VCPU_GPR(r22)(r4) 490 - PPC_LL r23, VCPU_GPR(r23)(r4) 491 - PPC_LL r24, VCPU_GPR(r24)(r4) 492 - PPC_LL r25, VCPU_GPR(r25)(r4) 493 - PPC_LL r26, VCPU_GPR(r26)(r4) 494 - PPC_LL r27, VCPU_GPR(r27)(r4) 495 - PPC_LL r28, VCPU_GPR(r28)(r4) 496 - PPC_LL r29, VCPU_GPR(r29)(r4) 497 - PPC_LL r30, VCPU_GPR(r30)(r4) 498 - PPC_LL r31, VCPU_GPR(r31)(r4) 481 + PPC_LL r14, VCPU_GPR(R14)(r4) 482 + PPC_LL r15, VCPU_GPR(R15)(r4) 483 + PPC_LL r16, VCPU_GPR(R16)(r4) 484 + PPC_LL r17, VCPU_GPR(R17)(r4) 485 + PPC_LL r18, VCPU_GPR(R18)(r4) 486 + PPC_LL r19, VCPU_GPR(R19)(r4) 487 + PPC_LL r20, VCPU_GPR(R20)(r4) 488 + PPC_LL r21, VCPU_GPR(R21)(r4) 489 + PPC_LL r22, VCPU_GPR(R22)(r4) 490 + PPC_LL r23, VCPU_GPR(R23)(r4) 491 + PPC_LL r24, VCPU_GPR(R24)(r4) 492 + PPC_LL r25, VCPU_GPR(R25)(r4) 493 + PPC_LL r26, VCPU_GPR(R26)(r4) 494 + PPC_LL r27, VCPU_GPR(R27)(r4) 495 + PPC_LL r28, VCPU_GPR(R28)(r4) 496 + PPC_LL r29, VCPU_GPR(R29)(r4) 497 + PPC_LL r30, VCPU_GPR(R30)(r4) 498 + PPC_LL r31, VCPU_GPR(R31)(r4) 499 499 500 500 501 501 lightweight_exit: ··· 554 554 lwz r7, VCPU_CR(r4) 555 555 PPC_LL r8, VCPU_PC(r4) 556 556 PPC_LD(r9, VCPU_SHARED_MSR, r11) 557 - PPC_LL r0, VCPU_GPR(r0)(r4) 558 - PPC_LL r1, VCPU_GPR(r1)(r4) 559 - PPC_LL r2, VCPU_GPR(r2)(r4) 560 - PPC_LL r10, VCPU_GPR(r10)(r4) 561 - PPC_LL r11, VCPU_GPR(r11)(r4) 562 - PPC_LL r12, VCPU_GPR(r12)(r4) 563 - PPC_LL r13, VCPU_GPR(r13)(r4) 557 + PPC_LL r0, VCPU_GPR(R0)(r4) 558 + PPC_LL r1, VCPU_GPR(R1)(r4) 559 + PPC_LL r2, VCPU_GPR(R2)(r4) 560 + PPC_LL r10, VCPU_GPR(R10)(r4) 561 + PPC_LL r11, VCPU_GPR(R11)(r4) 562 + PPC_LL r12, VCPU_GPR(R12)(r4) 563 + PPC_LL r13, VCPU_GPR(R13)(r4) 564 564 mtlr r3 565 565 mtxer r5 566 566 mtctr r6 ··· 586 586 mtcr r7 587 587 588 588 /* Finish loading guest volatiles and jump to 
guest. */ 589 - PPC_LL r5, VCPU_GPR(r5)(r4) 590 - PPC_LL r6, VCPU_GPR(r6)(r4) 591 - PPC_LL r7, VCPU_GPR(r7)(r4) 592 - PPC_LL r8, VCPU_GPR(r8)(r4) 593 - PPC_LL r9, VCPU_GPR(r9)(r4) 589 + PPC_LL r5, VCPU_GPR(R5)(r4) 590 + PPC_LL r6, VCPU_GPR(R6)(r4) 591 + PPC_LL r7, VCPU_GPR(R7)(r4) 592 + PPC_LL r8, VCPU_GPR(R8)(r4) 593 + PPC_LL r9, VCPU_GPR(R9)(r4) 594 594 595 - PPC_LL r3, VCPU_GPR(r3)(r4) 596 - PPC_LL r4, VCPU_GPR(r4)(r4) 595 + PPC_LL r3, VCPU_GPR(R3)(r4) 596 + PPC_LL r4, VCPU_GPR(R4)(r4) 597 597 rfi
+12 -12
arch/powerpc/lib/checksum_64.S
··· 114 114 mtctr r6 115 115 116 116 stdu r1,-STACKFRAMESIZE(r1) 117 - std r14,STK_REG(r14)(r1) 118 - std r15,STK_REG(r15)(r1) 119 - std r16,STK_REG(r16)(r1) 117 + std r14,STK_REG(R14)(r1) 118 + std r15,STK_REG(R15)(r1) 119 + std r16,STK_REG(R16)(r1) 120 120 121 121 ld r6,0(r3) 122 122 ld r9,8(r3) ··· 175 175 adde r0,r0,r15 176 176 adde r0,r0,r16 177 177 178 - ld r14,STK_REG(r14)(r1) 179 - ld r15,STK_REG(r15)(r1) 180 - ld r16,STK_REG(r16)(r1) 178 + ld r14,STK_REG(R14)(r1) 179 + ld r15,STK_REG(R15)(r1) 180 + ld r16,STK_REG(R16)(r1) 181 181 addi r1,r1,STACKFRAMESIZE 182 182 183 183 andi. r4,r4,63 ··· 299 299 mtctr r6 300 300 301 301 stdu r1,-STACKFRAMESIZE(r1) 302 - std r14,STK_REG(r14)(r1) 303 - std r15,STK_REG(r15)(r1) 304 - std r16,STK_REG(r16)(r1) 302 + std r14,STK_REG(R14)(r1) 303 + std r15,STK_REG(R15)(r1) 304 + std r16,STK_REG(R16)(r1) 305 305 306 306 source; ld r6,0(r3) 307 307 source; ld r9,8(r3) ··· 382 382 adde r0,r0,r15 383 383 adde r0,r0,r16 384 384 385 - ld r14,STK_REG(r14)(r1) 386 - ld r15,STK_REG(r15)(r1) 387 - ld r16,STK_REG(r16)(r1) 385 + ld r14,STK_REG(R14)(r1) 386 + ld r15,STK_REG(R15)(r1) 387 + ld r16,STK_REG(R16)(r1) 388 388 addi r1,r1,STACKFRAMESIZE 389 389 390 390 andi. r5,r5,63
+14 -14
arch/powerpc/lib/copypage_power7.S
··· 113 113 #endif 114 114 115 115 .Lnonvmx_copy: 116 - std r14,STK_REG(r14)(r1) 117 - std r15,STK_REG(r15)(r1) 118 - std r16,STK_REG(r16)(r1) 119 - std r17,STK_REG(r17)(r1) 120 - std r18,STK_REG(r18)(r1) 121 - std r19,STK_REG(r19)(r1) 122 - std r20,STK_REG(r20)(r1) 116 + std r14,STK_REG(R14)(r1) 117 + std r15,STK_REG(R15)(r1) 118 + std r16,STK_REG(R16)(r1) 119 + std r17,STK_REG(R17)(r1) 120 + std r18,STK_REG(R18)(r1) 121 + std r19,STK_REG(R19)(r1) 122 + std r20,STK_REG(R20)(r1) 123 123 124 124 1: ld r0,0(r4) 125 125 ld r5,8(r4) ··· 157 157 addi r3,r3,128 158 158 bdnz 1b 159 159 160 - ld r14,STK_REG(r14)(r1) 161 - ld r15,STK_REG(r15)(r1) 162 - ld r16,STK_REG(r16)(r1) 163 - ld r17,STK_REG(r17)(r1) 164 - ld r18,STK_REG(r18)(r1) 165 - ld r19,STK_REG(r19)(r1) 166 - ld r20,STK_REG(r20)(r1) 160 + ld r14,STK_REG(R14)(r1) 161 + ld r15,STK_REG(R15)(r1) 162 + ld r16,STK_REG(R16)(r1) 163 + ld r17,STK_REG(R17)(r1) 164 + ld r18,STK_REG(R18)(r1) 165 + ld r19,STK_REG(R19)(r1) 166 + ld r20,STK_REG(R20)(r1) 167 167 addi r1,r1,STACKFRAMESIZE 168 168 blr
+3 -3
arch/powerpc/lib/copyuser_64.S
··· 30 30 dcbt 0,r4 31 31 beq .Lcopy_page_4K 32 32 andi. r6,r6,7 33 - PPC_MTOCRF(0x01,r5) 33 + PPC_MTOCRF(0x01,R5) 34 34 blt cr1,.Lshort_copy 35 35 /* Below we want to nop out the bne if we're on a CPU that has the 36 36 * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit ··· 186 186 blr 187 187 188 188 .Ldst_unaligned: 189 - PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */ 189 + PPC_MTOCRF(0x01,R6) /* put #bytes to 8B bdry into cr7 */ 190 190 subf r5,r6,r5 191 191 li r7,0 192 192 cmpldi cr1,r5,16 ··· 201 201 2: bf cr7*4+1,3f 202 202 37: lwzx r0,r7,r4 203 203 83: stwx r0,r7,r3 204 - 3: PPC_MTOCRF(0x01,r5) 204 + 3: PPC_MTOCRF(0x01,R5) 205 205 add r4,r6,r4 206 206 add r3,r6,r3 207 207 b .Ldst_aligned
+42 -42
arch/powerpc/lib/copyuser_power7.S
··· 57 57 58 58 59 59 .Ldo_err4: 60 - ld r16,STK_REG(r16)(r1) 61 - ld r15,STK_REG(r15)(r1) 62 - ld r14,STK_REG(r14)(r1) 60 + ld r16,STK_REG(R16)(r1) 61 + ld r15,STK_REG(R15)(r1) 62 + ld r14,STK_REG(R14)(r1) 63 63 .Ldo_err3: 64 64 bl .exit_vmx_usercopy 65 65 ld r0,STACKFRAMESIZE+16(r1) ··· 68 68 #endif /* CONFIG_ALTIVEC */ 69 69 70 70 .Ldo_err2: 71 - ld r22,STK_REG(r22)(r1) 72 - ld r21,STK_REG(r21)(r1) 73 - ld r20,STK_REG(r20)(r1) 74 - ld r19,STK_REG(r19)(r1) 75 - ld r18,STK_REG(r18)(r1) 76 - ld r17,STK_REG(r17)(r1) 77 - ld r16,STK_REG(r16)(r1) 78 - ld r15,STK_REG(r15)(r1) 79 - ld r14,STK_REG(r14)(r1) 71 + ld r22,STK_REG(R22)(r1) 72 + ld r21,STK_REG(R21)(r1) 73 + ld r20,STK_REG(R20)(r1) 74 + ld r19,STK_REG(R19)(r1) 75 + ld r18,STK_REG(R18)(r1) 76 + ld r17,STK_REG(R17)(r1) 77 + ld r16,STK_REG(R16)(r1) 78 + ld r15,STK_REG(R15)(r1) 79 + ld r14,STK_REG(R14)(r1) 80 80 .Lexit: 81 81 addi r1,r1,STACKFRAMESIZE 82 82 .Ldo_err1: ··· 137 137 138 138 mflr r0 139 139 stdu r1,-STACKFRAMESIZE(r1) 140 - std r14,STK_REG(r14)(r1) 141 - std r15,STK_REG(r15)(r1) 142 - std r16,STK_REG(r16)(r1) 143 - std r17,STK_REG(r17)(r1) 144 - std r18,STK_REG(r18)(r1) 145 - std r19,STK_REG(r19)(r1) 146 - std r20,STK_REG(r20)(r1) 147 - std r21,STK_REG(r21)(r1) 148 - std r22,STK_REG(r22)(r1) 140 + std r14,STK_REG(R14)(r1) 141 + std r15,STK_REG(R15)(r1) 142 + std r16,STK_REG(R16)(r1) 143 + std r17,STK_REG(R17)(r1) 144 + std r18,STK_REG(R18)(r1) 145 + std r19,STK_REG(R19)(r1) 146 + std r20,STK_REG(R20)(r1) 147 + std r21,STK_REG(R21)(r1) 148 + std r22,STK_REG(R22)(r1) 149 149 std r0,STACKFRAMESIZE+16(r1) 150 150 151 151 srdi r6,r5,7 ··· 192 192 193 193 clrldi r5,r5,(64-7) 194 194 195 - ld r14,STK_REG(r14)(r1) 196 - ld r15,STK_REG(r15)(r1) 197 - ld r16,STK_REG(r16)(r1) 198 - ld r17,STK_REG(r17)(r1) 199 - ld r18,STK_REG(r18)(r1) 200 - ld r19,STK_REG(r19)(r1) 201 - ld r20,STK_REG(r20)(r1) 202 - ld r21,STK_REG(r21)(r1) 203 - ld r22,STK_REG(r22)(r1) 195 + ld r14,STK_REG(R14)(r1) 196 + ld r15,STK_REG(R15)(r1) 197 + ld r16,STK_REG(R16)(r1) 198 + ld r17,STK_REG(R17)(r1) 199 + ld r18,STK_REG(R18)(r1) 200 + ld r19,STK_REG(R19)(r1) 201 + ld r20,STK_REG(R20)(r1) 202 + ld r21,STK_REG(R21)(r1) 203 + ld r22,STK_REG(R22)(r1) 204 204 addi r1,r1,STACKFRAMESIZE 205 205 206 206 /* Up to 127B to go */ ··· 440 440 7: sub r5,r5,r6 441 441 srdi r6,r5,7 442 442 443 - std r14,STK_REG(r14)(r1) 444 - std r15,STK_REG(r15)(r1) 445 - std r16,STK_REG(r16)(r1) 443 + std r14,STK_REG(R14)(r1) 444 + std r15,STK_REG(R15)(r1) 445 + std r16,STK_REG(R16)(r1) 446 446 447 447 li r12,64 448 448 li r14,80 ··· 477 477 addi r3,r3,128 478 478 bdnz 8b 479 479 480 - ld r14,STK_REG(r14)(r1) 481 - ld r15,STK_REG(r15)(r1) 482 - ld r16,STK_REG(r16)(r1) 480 + ld r14,STK_REG(R14)(r1) 481 + ld r15,STK_REG(R15)(r1) 482 + ld r16,STK_REG(R16)(r1) 483 483 484 484 /* Up to 127B to go */ 485 485 clrldi r5,r5,(64-7) ··· 625 625 7: sub r5,r5,r6 626 626 srdi r6,r5,7 627 627 628 - std r14,STK_REG(r14)(r1) 629 - std r15,STK_REG(r15)(r1) 630 - std r16,STK_REG(r16)(r1) 628 + std r14,STK_REG(R14)(r1) 629 + std r15,STK_REG(R15)(r1) 630 + std r16,STK_REG(R16)(r1) 631 631 632 632 li r12,64 633 633 li r14,80 ··· 670 670 addi r3,r3,128 671 671 bdnz 8b 672 672 673 - ld r14,STK_REG(r14)(r1) 674 - ld r15,STK_REG(r15)(r1) 675 - ld r16,STK_REG(r16)(r1) 673 + ld r14,STK_REG(R14)(r1) 674 + ld r15,STK_REG(R15)(r1) 675 + ld r16,STK_REG(R16)(r1) 676 676 677 677 /* Up to 127B to go */ 678 678 clrldi r5,r5,(64-7)
+7 -7
arch/powerpc/lib/hweight_64.S
··· 28 28 nop 29 29 nop 30 30 FTR_SECTION_ELSE 31 - PPC_POPCNTB(r3,r3) 31 + PPC_POPCNTB(R3,R3) 32 32 clrldi r3,r3,64-8 33 33 blr 34 34 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) ··· 42 42 nop 43 43 FTR_SECTION_ELSE 44 44 BEGIN_FTR_SECTION_NESTED(50) 45 - PPC_POPCNTB(r3,r3) 45 + PPC_POPCNTB(R3,R3) 46 46 srdi r4,r3,8 47 47 add r3,r4,r3 48 48 clrldi r3,r3,64-8 49 49 blr 50 50 FTR_SECTION_ELSE_NESTED(50) 51 51 clrlwi r3,r3,16 52 - PPC_POPCNTW(r3,r3) 52 + PPC_POPCNTW(R3,R3) 53 53 clrldi r3,r3,64-8 54 54 blr 55 55 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50) ··· 66 66 nop 67 67 FTR_SECTION_ELSE 68 68 BEGIN_FTR_SECTION_NESTED(51) 69 - PPC_POPCNTB(r3,r3) 69 + PPC_POPCNTB(R3,R3) 70 70 srdi r4,r3,16 71 71 add r3,r4,r3 72 72 srdi r4,r3,8 ··· 74 74 clrldi r3,r3,64-8 75 75 blr 76 76 FTR_SECTION_ELSE_NESTED(51) 77 - PPC_POPCNTW(r3,r3) 77 + PPC_POPCNTW(R3,R3) 78 78 clrldi r3,r3,64-8 79 79 blr 80 80 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51) ··· 93 93 nop 94 94 FTR_SECTION_ELSE 95 95 BEGIN_FTR_SECTION_NESTED(52) 96 - PPC_POPCNTB(r3,r3) 96 + PPC_POPCNTB(R3,R3) 97 97 srdi r4,r3,32 98 98 add r3,r4,r3 99 99 srdi r4,r3,16 ··· 103 103 clrldi r3,r3,64-8 104 104 blr 105 105 FTR_SECTION_ELSE_NESTED(52) 106 - PPC_POPCNTD(r3,r3) 106 + PPC_POPCNTD(R3,R3) 107 107 clrldi r3,r3,64-8 108 108 blr 109 109 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
+6 -6
arch/powerpc/lib/ldstfp.S
··· 330 330 MTMSRD(r7) 331 331 isync 332 332 beq cr7,1f 333 - STXVD2X(0,r1,r8) 333 + STXVD2X(0,R1,R8) 334 334 1: li r9,-EFAULT 335 - 2: LXVD2X(0,0,r4) 335 + 2: LXVD2X(0,0,R4) 336 336 li r9,0 337 337 3: beq cr7,4f 338 338 bl put_vsr 339 - LXVD2X(0,r1,r8) 339 + LXVD2X(0,R1,R8) 340 340 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) 341 341 mtlr r0 342 342 MTMSRD(r6) ··· 358 358 MTMSRD(r7) 359 359 isync 360 360 beq cr7,1f 361 - STXVD2X(0,r1,r8) 361 + STXVD2X(0,R1,R8) 362 362 bl get_vsr 363 363 1: li r9,-EFAULT 364 - 2: STXVD2X(0,0,r4) 364 + 2: STXVD2X(0,0,R4) 365 365 li r9,0 366 366 3: beq cr7,4f 367 - LXVD2X(0,r1,r8) 367 + LXVD2X(0,R1,R8) 368 368 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) 369 369 mtlr r0 370 370 MTMSRD(r6)
+3 -3
arch/powerpc/lib/mem_64.S
··· 19 19 rlwimi r4,r4,16,0,15 20 20 cmplw cr1,r5,r0 /* do we get that far? */ 21 21 rldimi r4,r4,32,0 22 - PPC_MTOCRF(1,r0) 22 + PPC_MTOCRF(1,R0) 23 23 mr r6,r3 24 24 blt cr1,8f 25 25 beq+ 3f /* if already 8-byte aligned */ ··· 49 49 bdnz 4b 50 50 5: srwi. r0,r5,3 51 51 clrlwi r5,r5,29 52 - PPC_MTOCRF(1,r0) 52 + PPC_MTOCRF(1,R0) 53 53 beq 8f 54 54 bf 29,6f 55 55 std r4,0(r6) ··· 65 65 std r4,0(r6) 66 66 addi r6,r6,8 67 67 8: cmpwi r5,0 68 - PPC_MTOCRF(1,r5) 68 + PPC_MTOCRF(1,R5) 69 69 beqlr+ 70 70 bf 29,9f 71 71 stw r4,0(r6)
+3 -3
arch/powerpc/lib/memcpy_64.S
··· 16 16 FTR_SECTION_ELSE 17 17 b memcpy_power7 18 18 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY) 19 - PPC_MTOCRF(0x01,r5) 19 + PPC_MTOCRF(0x01,R5) 20 20 cmpldi cr1,r5,16 21 21 neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry 22 22 andi. r6,r6,7 ··· 158 158 blr 159 159 160 160 .Ldst_unaligned: 161 - PPC_MTOCRF(0x01,r6) # put #bytes to 8B bdry into cr7 161 + PPC_MTOCRF(0x01,R6) # put #bytes to 8B bdry into cr7 162 162 subf r5,r6,r5 163 163 li r7,0 164 164 cmpldi cr1,r5,16 ··· 173 173 2: bf cr7*4+1,3f 174 174 lwzx r0,r7,r4 175 175 stwx r0,r7,r3 176 - 3: PPC_MTOCRF(0x01,r5) 176 + 3: PPC_MTOCRF(0x01,R5) 177 177 add r4,r6,r4 178 178 add r3,r6,r3 179 179 b .Ldst_aligned
+30 -30
arch/powerpc/lib/memcpy_power7.S
··· 69 69 70 70 mflr r0 71 71 stdu r1,-STACKFRAMESIZE(r1) 72 - std r14,STK_REG(r14)(r1) 73 - std r15,STK_REG(r15)(r1) 74 - std r16,STK_REG(r16)(r1) 75 - std r17,STK_REG(r17)(r1) 76 - std r18,STK_REG(r18)(r1) 77 - std r19,STK_REG(r19)(r1) 78 - std r20,STK_REG(r20)(r1) 79 - std r21,STK_REG(r21)(r1) 80 - std r22,STK_REG(r22)(r1) 72 + std r14,STK_REG(R14)(r1) 73 + std r15,STK_REG(R15)(r1) 74 + std r16,STK_REG(R16)(r1) 75 + std r17,STK_REG(R17)(r1) 76 + std r18,STK_REG(R18)(r1) 77 + std r19,STK_REG(R19)(r1) 78 + std r20,STK_REG(R20)(r1) 79 + std r21,STK_REG(R21)(r1) 80 + std r22,STK_REG(R22)(r1) 81 81 std r0,STACKFRAMESIZE+16(r1) 82 82 83 83 srdi r6,r5,7 ··· 124 124 125 125 clrldi r5,r5,(64-7) 126 126 127 - ld r14,STK_REG(r14)(r1) 128 - ld r15,STK_REG(r15)(r1) 129 - ld r16,STK_REG(r16)(r1) 130 - ld r17,STK_REG(r17)(r1) 131 - ld r18,STK_REG(r18)(r1) 132 - ld r19,STK_REG(r19)(r1) 133 - ld r20,STK_REG(r20)(r1) 134 - ld r21,STK_REG(r21)(r1) 135 - ld r22,STK_REG(r22)(r1) 127 + ld r14,STK_REG(R14)(r1) 128 + ld r15,STK_REG(R15)(r1) 129 + ld r16,STK_REG(R16)(r1) 130 + ld r17,STK_REG(R17)(r1) 131 + ld r18,STK_REG(R18)(r1) 132 + ld r19,STK_REG(R19)(r1) 133 + ld r20,STK_REG(R20)(r1) 134 + ld r21,STK_REG(R21)(r1) 135 + ld r22,STK_REG(R22)(r1) 136 136 addi r1,r1,STACKFRAMESIZE 137 137 138 138 /* Up to 127B to go */ ··· 343 343 7: sub r5,r5,r6 344 344 srdi r6,r5,7 345 345 346 - std r14,STK_REG(r14)(r1) 347 - std r15,STK_REG(r15)(r1) 348 - std r16,STK_REG(r16)(r1) 346 + std r14,STK_REG(R14)(r1) 347 + std r15,STK_REG(R15)(r1) 348 + std r16,STK_REG(R16)(r1) 349 349 350 350 li r12,64 351 351 li r14,80 ··· 380 380 addi r3,r3,128 381 381 bdnz 8b 382 382 383 - ld r14,STK_REG(r14)(r1) 384 - ld r15,STK_REG(r15)(r1) 385 - ld r16,STK_REG(r16)(r1) 383 + ld r14,STK_REG(R14)(r1) 384 + ld r15,STK_REG(R15)(r1) 385 + ld r16,STK_REG(R16)(r1) 386 386 387 387 /* Up to 127B to go */ 388 388 clrldi r5,r5,(64-7) ··· 529 529 7: sub r5,r5,r6 530 530 srdi r6,r5,7 531 531 532 - std r14,STK_REG(r14)(r1) 533 - std r15,STK_REG(r15)(r1) 534 - std r16,STK_REG(r16)(r1) 532 + std r14,STK_REG(R14)(r1) 533 + std r15,STK_REG(R15)(r1) 534 + std r16,STK_REG(R16)(r1) 535 535 536 536 li r12,64 537 537 li r14,80 ··· 574 574 addi r3,r3,128 575 575 bdnz 8b 576 576 577 - ld r14,STK_REG(r14)(r1) 578 - ld r15,STK_REG(r15)(r1) 579 - ld r16,STK_REG(r16)(r1) 577 + ld r14,STK_REG(R14)(r1) 578 + ld r15,STK_REG(R15)(r1) 579 + ld r16,STK_REG(R16)(r1) 580 580 581 581 /* Up to 127B to go */ 582 582 clrldi r5,r5,(64-7)
+74 -74
arch/powerpc/mm/hash_low_64.S
··· 64 64 std r0,16(r1) 65 65 stdu r1,-STACKFRAMESIZE(r1) 66 66 /* Save all params that we need after a function call */ 67 - std r6,STK_PARM(r6)(r1) 68 - std r8,STK_PARM(r8)(r1) 69 - std r9,STK_PARM(r9)(r1) 67 + std r6,STK_PARM(R6)(r1) 68 + std r8,STK_PARM(R8)(r1) 69 + std r9,STK_PARM(R9)(r1) 70 70 71 71 /* Save non-volatile registers. 72 72 * r31 will hold "old PTE" ··· 75 75 * r28 is a hash value 76 76 * r27 is hashtab mask (maybe dynamic patched instead ?) 77 77 */ 78 - std r27,STK_REG(r27)(r1) 79 - std r28,STK_REG(r28)(r1) 80 - std r29,STK_REG(r29)(r1) 81 - std r30,STK_REG(r30)(r1) 82 - std r31,STK_REG(r31)(r1) 78 + std r27,STK_REG(R27)(r1) 79 + std r28,STK_REG(R28)(r1) 80 + std r29,STK_REG(R29)(r1) 81 + std r30,STK_REG(R30)(r1) 82 + std r31,STK_REG(R31)(r1) 83 83 84 84 /* Step 1: 85 85 * ··· 162 162 /* At this point, r3 contains new PP bits, save them in 163 163 * place of "access" in the param area (sic) 164 164 */ 165 - std r3,STK_PARM(r4)(r1) 165 + std r3,STK_PARM(R4)(r1) 166 166 167 167 /* Get htab_hash_mask */ 168 168 ld r4,htab_hash_mask@got(2) ··· 192 192 rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */ 193 193 194 194 /* Call ppc_md.hpte_insert */ 195 - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ 195 + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ 196 196 mr r4,r29 /* Retrieve va */ 197 197 li r7,0 /* !bolted, !secondary */ 198 198 li r8,MMU_PAGE_4K /* page size */ 199 - ld r9,STK_PARM(r9)(r1) /* segment size */ 199 + ld r9,STK_PARM(R9)(r1) /* segment size */ 200 200 _GLOBAL(htab_call_hpte_insert1) 201 201 bl . /* Patched by htab_finish_init() */ 202 202 cmpdi 0,r3,0 ··· 215 215 rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ 216 216 217 217 /* Call ppc_md.hpte_insert */ 218 - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ 218 + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ 219 219 mr r4,r29 /* Retrieve va */ 220 220 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 221 221 li r8,MMU_PAGE_4K /* page size */ 222 - ld r9,STK_PARM(r9)(r1) /* segment size */ 222 + ld r9,STK_PARM(R9)(r1) /* segment size */ 223 223 _GLOBAL(htab_call_hpte_insert2) 224 224 bl . /* Patched by htab_finish_init() */ 225 225 cmpdi 0,r3,0 ··· 255 255 * (maybe add eieio may be good still ?) 256 256 */ 257 257 htab_write_out_pte: 258 - ld r6,STK_PARM(r6)(r1) 258 + ld r6,STK_PARM(R6)(r1) 259 259 std r30,0(r6) 260 260 li r3, 0 261 261 htab_bail: 262 - ld r27,STK_REG(r27)(r1) 263 - ld r28,STK_REG(r28)(r1) 264 - ld r29,STK_REG(r29)(r1) 265 - ld r30,STK_REG(r30)(r1) 266 - ld r31,STK_REG(r31)(r1) 262 + ld r27,STK_REG(R27)(r1) 263 + ld r28,STK_REG(R28)(r1) 264 + ld r29,STK_REG(R29)(r1) 265 + ld r30,STK_REG(R30)(r1) 266 + ld r31,STK_REG(R31)(r1) 267 267 addi r1,r1,STACKFRAMESIZE 268 268 ld r0,16(r1) 269 269 mtlr r0 ··· 288 288 /* Call ppc_md.hpte_updatepp */ 289 289 mr r5,r29 /* va */ 290 290 li r6,MMU_PAGE_4K /* page size */ 291 - ld r7,STK_PARM(r9)(r1) /* segment size */ 292 - ld r8,STK_PARM(r8)(r1) /* get "local" param */ 291 + ld r7,STK_PARM(R9)(r1) /* segment size */ 292 + ld r8,STK_PARM(R8)(r1) /* get "local" param */ 293 293 _GLOBAL(htab_call_hpte_updatepp) 294 294 bl . 
/* Patched by htab_finish_init() */ 295 295 ··· 312 312 313 313 htab_pte_insert_failure: 314 314 /* Bail out restoring old PTE */ 315 - ld r6,STK_PARM(r6)(r1) 315 + ld r6,STK_PARM(R6)(r1) 316 316 std r31,0(r6) 317 317 li r3,-1 318 318 b htab_bail ··· 340 340 std r0,16(r1) 341 341 stdu r1,-STACKFRAMESIZE(r1) 342 342 /* Save all params that we need after a function call */ 343 - std r6,STK_PARM(r6)(r1) 344 - std r8,STK_PARM(r8)(r1) 345 - std r9,STK_PARM(r9)(r1) 343 + std r6,STK_PARM(R6)(r1) 344 + std r8,STK_PARM(R8)(r1) 345 + std r9,STK_PARM(R9)(r1) 346 346 347 347 /* Save non-volatile registers. 348 348 * r31 will hold "old PTE" ··· 353 353 * r26 is the hidx mask 354 354 * r25 is the index in combo page 355 355 */ 356 - std r25,STK_REG(r25)(r1) 357 - std r26,STK_REG(r26)(r1) 358 - std r27,STK_REG(r27)(r1) 359 - std r28,STK_REG(r28)(r1) 360 - std r29,STK_REG(r29)(r1) 361 - std r30,STK_REG(r30)(r1) 362 - std r31,STK_REG(r31)(r1) 356 + std r25,STK_REG(R25)(r1) 357 + std r26,STK_REG(R26)(r1) 358 + std r27,STK_REG(R27)(r1) 359 + std r28,STK_REG(R28)(r1) 360 + std r29,STK_REG(R29)(r1) 361 + std r30,STK_REG(R30)(r1) 362 + std r31,STK_REG(R31)(r1) 363 363 364 364 /* Step 1: 365 365 * ··· 452 452 /* At this point, r3 contains new PP bits, save them in 453 453 * place of "access" in the param area (sic) 454 454 */ 455 - std r3,STK_PARM(r4)(r1) 455 + std r3,STK_PARM(R4)(r1) 456 456 457 457 /* Get htab_hash_mask */ 458 458 ld r4,htab_hash_mask@got(2) ··· 473 473 andis. r0,r31,_PAGE_COMBO@h 474 474 beq htab_inval_old_hpte 475 475 476 - ld r6,STK_PARM(r6)(r1) 476 + ld r6,STK_PARM(R6)(r1) 477 477 ori r26,r6,0x8000 /* Load the hidx mask */ 478 478 ld r26,0(r26) 479 479 addi r5,r25,36 /* Check actual HPTE_SUB bit, this */ ··· 495 495 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 496 496 497 497 /* Call ppc_md.hpte_insert */ 498 - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ 498 + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ 499 499 mr r4,r29 /* Retrieve va */ 500 500 li r7,0 /* !bolted, !secondary */ 501 501 li r8,MMU_PAGE_4K /* page size */ 502 - ld r9,STK_PARM(r9)(r1) /* segment size */ 502 + ld r9,STK_PARM(R9)(r1) /* segment size */ 503 503 _GLOBAL(htab_call_hpte_insert1) 504 504 bl . /* patched by htab_finish_init() */ 505 505 cmpdi 0,r3,0 ··· 522 522 rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ 523 523 524 524 /* Call ppc_md.hpte_insert */ 525 - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ 525 + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ 526 526 mr r4,r29 /* Retrieve va */ 527 527 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 528 528 li r8,MMU_PAGE_4K /* page size */ 529 - ld r9,STK_PARM(r9)(r1) /* segment size */ 529 + ld r9,STK_PARM(R9)(r1) /* segment size */ 530 530 _GLOBAL(htab_call_hpte_insert2) 531 531 bl . 
/* patched by htab_finish_init() */ 532 532 cmpdi 0,r3,0 ··· 559 559 mr r4,r31 /* PTE.pte */ 560 560 li r5,0 /* PTE.hidx */ 561 561 li r6,MMU_PAGE_64K /* psize */ 562 - ld r7,STK_PARM(r9)(r1) /* ssize */ 563 - ld r8,STK_PARM(r8)(r1) /* local */ 562 + ld r7,STK_PARM(R9)(r1) /* ssize */ 563 + ld r8,STK_PARM(R8)(r1) /* local */ 564 564 bl .flush_hash_page 565 565 /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ 566 566 lis r0,_PAGE_HPTE_SUB@h ··· 576 576 /* Insert slot number & secondary bit in PTE second half, 577 577 * clear _PAGE_BUSY and set approriate HPTE slot bit 578 578 */ 579 - ld r6,STK_PARM(r6)(r1) 579 + ld r6,STK_PARM(R6)(r1) 580 580 li r0,_PAGE_BUSY 581 581 andc r30,r30,r0 582 582 /* HPTE SUB bit */ ··· 597 597 std r30,0(r6) 598 598 li r3, 0 599 599 htab_bail: 600 - ld r25,STK_REG(r25)(r1) 601 - ld r26,STK_REG(r26)(r1) 602 - ld r27,STK_REG(r27)(r1) 603 - ld r28,STK_REG(r28)(r1) 604 - ld r29,STK_REG(r29)(r1) 605 - ld r30,STK_REG(r30)(r1) 606 - ld r31,STK_REG(r31)(r1) 600 + ld r25,STK_REG(R25)(r1) 601 + ld r26,STK_REG(R26)(r1) 602 + ld r27,STK_REG(R27)(r1) 603 + ld r28,STK_REG(R28)(r1) 604 + ld r29,STK_REG(R29)(r1) 605 + ld r30,STK_REG(R30)(r1) 606 + ld r31,STK_REG(R31)(r1) 607 607 addi r1,r1,STACKFRAMESIZE 608 608 ld r0,16(r1) 609 609 mtlr r0 ··· 630 630 /* Call ppc_md.hpte_updatepp */ 631 631 mr r5,r29 /* va */ 632 632 li r6,MMU_PAGE_4K /* page size */ 633 - ld r7,STK_PARM(r9)(r1) /* segment size */ 634 - ld r8,STK_PARM(r8)(r1) /* get "local" param */ 633 + ld r7,STK_PARM(R9)(r1) /* segment size */ 634 + ld r8,STK_PARM(R8)(r1) /* get "local" param */ 635 635 _GLOBAL(htab_call_hpte_updatepp) 636 636 bl . /* patched by htab_finish_init() */ 637 637 ··· 644 644 /* Clear the BUSY bit and Write out the PTE */ 645 645 li r0,_PAGE_BUSY 646 646 andc r30,r30,r0 647 - ld r6,STK_PARM(r6)(r1) 647 + ld r6,STK_PARM(R6)(r1) 648 648 std r30,0(r6) 649 649 li r3,0 650 650 b htab_bail ··· 657 657 658 658 htab_pte_insert_failure: 659 659 /* Bail out restoring old PTE */ 660 - ld r6,STK_PARM(r6)(r1) 660 + ld r6,STK_PARM(R6)(r1) 661 661 std r31,0(r6) 662 662 li r3,-1 663 663 b htab_bail ··· 677 677 std r0,16(r1) 678 678 stdu r1,-STACKFRAMESIZE(r1) 679 679 /* Save all params that we need after a function call */ 680 - std r6,STK_PARM(r6)(r1) 681 - std r8,STK_PARM(r8)(r1) 682 - std r9,STK_PARM(r9)(r1) 680 + std r6,STK_PARM(R6)(r1) 681 + std r8,STK_PARM(R8)(r1) 682 + std r9,STK_PARM(R9)(r1) 683 683 684 684 /* Save non-volatile registers. 685 685 * r31 will hold "old PTE" ··· 688 688 * r28 is a hash value 689 689 * r27 is hashtab mask (maybe dynamic patched instead ?) 
690 690 */ 691 - std r27,STK_REG(r27)(r1) 692 - std r28,STK_REG(r28)(r1) 693 - std r29,STK_REG(r29)(r1) 694 - std r30,STK_REG(r30)(r1) 695 - std r31,STK_REG(r31)(r1) 691 + std r27,STK_REG(R27)(r1) 692 + std r28,STK_REG(R28)(r1) 693 + std r29,STK_REG(R29)(r1) 694 + std r30,STK_REG(R30)(r1) 695 + std r31,STK_REG(R31)(r1) 696 696 697 697 /* Step 1: 698 698 * ··· 780 780 /* At this point, r3 contains new PP bits, save them in 781 781 * place of "access" in the param area (sic) 782 782 */ 783 - std r3,STK_PARM(r4)(r1) 783 + std r3,STK_PARM(R4)(r1) 784 784 785 785 /* Get htab_hash_mask */ 786 786 ld r4,htab_hash_mask@got(2) ··· 813 813 rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ 814 814 815 815 /* Call ppc_md.hpte_insert */ 816 - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ 816 + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ 817 817 mr r4,r29 /* Retrieve va */ 818 818 li r7,0 /* !bolted, !secondary */ 819 819 li r8,MMU_PAGE_64K 820 - ld r9,STK_PARM(r9)(r1) /* segment size */ 820 + ld r9,STK_PARM(R9)(r1) /* segment size */ 821 821 _GLOBAL(ht64_call_hpte_insert1) 822 822 bl . /* patched by htab_finish_init() */ 823 823 cmpdi 0,r3,0 ··· 836 836 rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ 837 837 838 838 /* Call ppc_md.hpte_insert */ 839 - ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ 839 + ld r6,STK_PARM(R4)(r1) /* Retrieve new pp bits */ 840 840 mr r4,r29 /* Retrieve va */ 841 841 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 842 842 li r8,MMU_PAGE_64K 843 - ld r9,STK_PARM(r9)(r1) /* segment size */ 843 + ld r9,STK_PARM(R9)(r1) /* segment size */ 844 844 _GLOBAL(ht64_call_hpte_insert2) 845 845 bl . /* patched by htab_finish_init() */ 846 846 cmpdi 0,r3,0 ··· 876 876 * (maybe add eieio may be good still ?) 877 877 */ 878 878 ht64_write_out_pte: 879 - ld r6,STK_PARM(r6)(r1) 879 + ld r6,STK_PARM(R6)(r1) 880 880 std r30,0(r6) 881 881 li r3, 0 882 882 ht64_bail: 883 - ld r27,STK_REG(r27)(r1) 884 - ld r28,STK_REG(r28)(r1) 885 - ld r29,STK_REG(r29)(r1) 886 - ld r30,STK_REG(r30)(r1) 887 - ld r31,STK_REG(r31)(r1) 883 + ld r27,STK_REG(R27)(r1) 884 + ld r28,STK_REG(R28)(r1) 885 + ld r29,STK_REG(R29)(r1) 886 + ld r30,STK_REG(R30)(r1) 887 + ld r31,STK_REG(R31)(r1) 888 888 addi r1,r1,STACKFRAMESIZE 889 889 ld r0,16(r1) 890 890 mtlr r0 ··· 909 909 /* Call ppc_md.hpte_updatepp */ 910 910 mr r5,r29 /* va */ 911 911 li r6,MMU_PAGE_64K 912 - ld r7,STK_PARM(r9)(r1) /* segment size */ 913 - ld r8,STK_PARM(r8)(r1) /* get "local" param */ 912 + ld r7,STK_PARM(R9)(r1) /* segment size */ 913 + ld r8,STK_PARM(R8)(r1) /* get "local" param */ 914 914 _GLOBAL(ht64_call_hpte_updatepp) 915 915 bl . /* patched by htab_finish_init() */ 916 916 ··· 933 933 934 934 ht64_pte_insert_failure: 935 935 /* Bail out restoring old PTE */ 936 - ld r6,STK_PARM(r6)(r1) 936 + ld r6,STK_PARM(R6)(r1) 937 937 std r31,0(r6) 938 938 li r3,-1 939 939 b ht64_bail
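Note: the STK_REG()/STK_PARM() conversions in the hash_low_64.S hunks above only affect offset arithmetic. A minimal worked sketch, assuming macro bodies and offsets of roughly this shape (illustrative values, not quoted from the kernel headers; R14/R25 stand for the plain-number register constants this series introduces):

    /* Assumed, illustrative definitions */
    #define R14             14
    #define R25             25
    #define STK_REG(i)      (112 + ((i) - 14) * 8)   /* frame slot for a saved non-volatile */
    #define STK_PARM(i)     (48 + ((i) - 3) * 8)     /* frame slot for a saved parameter    */

    /*
     * STK_REG(R25) folds to 112 + (25 - 14) * 8 = 200, so
     *      std     r25,STK_REG(R25)(r1)
     * is a store with a constant displacement.  If the argument instead
     * expands to the assembler symbol %r25, (%r25 - 14) * 8 is no longer a
     * constant the assembler can fold, so the lowercase form cannot be used
     * inside these macros.
     */

The same arithmetic covers the STK_PARM(R4)/STK_PARM(R6)/STK_PARM(R8)/STK_PARM(R9) loads and stores in these hunks.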
+5 -5
arch/powerpc/mm/tlb_low_64e.S
··· 126 126 /* Set the TLB reservation and search for existing entry. Then load 127 127 * the entry. 128 128 */ 129 - PPC_TLBSRX_DOT(0,r16) 129 + PPC_TLBSRX_DOT(R0,R16) 130 130 ldx r14,r14,r15 /* grab pgd entry */ 131 131 beq normal_tlb_miss_done /* tlb exists already, bail */ 132 132 MMU_FTR_SECTION_ELSE ··· 395 395 /* Set the TLB reservation and search for existing entry. Then load 396 396 * the entry. 397 397 */ 398 - PPC_TLBSRX_DOT(0,r16) 398 + PPC_TLBSRX_DOT(R0,R16) 399 399 ld r14,0(r10) 400 400 beq normal_tlb_miss_done 401 401 MMU_FTR_SECTION_ELSE ··· 528 528 /* Search if we already have a TLB entry for that virtual address, and 529 529 * if we do, bail out. 530 530 */ 531 - PPC_TLBSRX_DOT(0,r16) 531 + PPC_TLBSRX_DOT(R0,R16) 532 532 beq virt_page_table_tlb_miss_done 533 533 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV) 534 534 ··· 779 779 * 780 780 * MAS1:IND should be already set based on MAS4 781 781 */ 782 - PPC_TLBSRX_DOT(0,r16) 782 + PPC_TLBSRX_DOT(R0,R16) 783 783 beq htw_tlb_miss_done 784 784 785 785 /* Now, we need to walk the page tables. First check if we are in ··· 919 919 mtspr SPRN_MAS1,r15 920 920 921 921 /* Already somebody there ? */ 922 - PPC_TLBSRX_DOT(0,r16) 922 + PPC_TLBSRX_DOT(R0,R16) 923 923 beq tlb_load_linear_done 924 924 925 925 /* Now we build the remaining MAS. MAS0 and 2 should be fine
+3 -3
arch/powerpc/mm/tlb_nohash_low.S
··· 313 313 mtspr SPRN_MAS1,r4 314 314 tlbwe 315 315 MMU_FTR_SECTION_ELSE 316 - PPC_TLBILX_VA(0,r3) 316 + PPC_TLBILX_VA(R0,R3) 317 317 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX) 318 318 msync 319 319 isync ··· 364 364 beq 1f 365 365 rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND 366 366 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ 367 - PPC_TLBILX_VA(0,r3) 367 + PPC_TLBILX_VA(R0,R3) 368 368 msync 369 369 isync 370 370 wrtee r10 ··· 379 379 beq 1f 380 380 rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND 381 381 1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */ 382 - PPC_TLBIVAX(0,r3) 382 + PPC_TLBIVAX(R0,R3) 383 383 eieio 384 384 tlbsync 385 385 sync
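Note: PPC_TLBSRX_DOT(), PPC_TLBILX_VA() and PPC_TLBIVAX() in the two TLB files above come from ppc-opcode.h and encode their operands into the instruction word. A hedged sketch of that shape, as seen by a .S file (the field helpers and the macro body are assumptions about the general pattern, not quotes of the header; PPC_INST_TLBSRX_DOT is the opcode constant, taken as given):

    /* Assumed field helpers: mask the register number and shift it into the
     * RA/RB operand fields of the 32-bit instruction word. */
    #define ___PPC_RA(a)    (((a) & 0x1f) << 16)
    #define ___PPC_RB(b)    (((b) & 0x1f) << 11)

    /* Assumed shape of a constructed instruction: */
    #define PPC_TLBSRX_DOT(a, b) \
            .long (PPC_INST_TLBSRX_DOT | ___PPC_RA(a) | ___PPC_RB(b))

    /* PPC_TLBSRX_DOT(R0,R16) ORs 0 and 16 into the operand fields and emits
     * one word; an assembler register symbol cannot go through the
     * mask-and-shift arithmetic, hence the R-form at every call site. */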
+2 -2
arch/powerpc/net/bpf_jit_comp.c
··· 39 39 /* Make stackframe */ 40 40 if (ctx->seen & SEEN_DATAREF) { 41 41 /* If we call any helpers (for loads), save LR */ 42 - EMIT(PPC_INST_MFLR | __PPC_RT(0)); 42 + EMIT(PPC_INST_MFLR | __PPC_RT(R0)); 43 43 PPC_STD(0, 1, 16); 44 44 45 45 /* Back up non-volatile regs. */ ··· 56 56 PPC_STD(i, 1, -(8*(32-i))); 57 57 } 58 58 } 59 - EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) | 59 + EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) | 60 60 (-BPF_PPC_STACKFRAME & 0xfffc)); 61 61 } 62 62
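Note: bpf_jit_comp.c composes instruction words from C, e.g. EMIT(PPC_INST_MFLR | __PPC_RT(R0)) and EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) | ...), so the R-names must also be ordinary integer constants on the C side. Assumed, simplified shape of those constants (the __REG_* spelling is a guess at the convention, shown only to make the point):

    /* Assumed, simplified: each R-name is just the register number, usable
     * both in C constant expressions (the JIT above) and in assembler
     * offset arithmetic (STK_REG/STK_PARM). */
    #define __REG_R0        0
    #define __REG_R1        1

    #define R0              __REG_R0
    #define R1              __REG_R1

With that, PPC_INST_MFLR | __PPC_RT(R0) stays an integer constant expression, exactly as the old bare 0 was.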
+13 -13
arch/powerpc/platforms/cell/beat_hvCall.S
··· 74 74 mr r6,r7 75 75 mr r7,r8 76 76 mr r8,r9 77 - ld r10,STK_PARM(r10)(r1) 77 + ld r10,STK_PARM(R10)(r1) 78 78 79 79 HVSC /* invoke the hypervisor */ 80 80 ··· 94 94 95 95 HCALL_INST_PRECALL 96 96 97 - std r4,STK_PARM(r4)(r1) /* save ret buffer */ 97 + std r4,STK_PARM(R4)(r1) /* save ret buffer */ 98 98 99 99 mr r11,r3 100 100 mr r3,r5 ··· 108 108 109 109 HCALL_INST_POSTCALL 110 110 111 - ld r12,STK_PARM(r4)(r1) 111 + ld r12,STK_PARM(R4)(r1) 112 112 std r4, 0(r12) 113 113 114 114 lwz r0,8(r1) ··· 125 125 126 126 HCALL_INST_PRECALL 127 127 128 - std r4,STK_PARM(r4)(r1) /* save ret buffer */ 128 + std r4,STK_PARM(R4)(r1) /* save ret buffer */ 129 129 130 130 mr r11,r3 131 131 mr r3,r5 ··· 139 139 140 140 HCALL_INST_POSTCALL 141 141 142 - ld r12,STK_PARM(r4)(r1) 142 + ld r12,STK_PARM(R4)(r1) 143 143 std r4, 0(r12) 144 144 std r5, 8(r12) 145 145 ··· 157 157 158 158 HCALL_INST_PRECALL 159 159 160 - std r4,STK_PARM(r4)(r1) /* save ret buffer */ 160 + std r4,STK_PARM(R4)(r1) /* save ret buffer */ 161 161 162 162 mr r11,r3 163 163 mr r3,r5 ··· 171 171 172 172 HCALL_INST_POSTCALL 173 173 174 - ld r12,STK_PARM(r4)(r1) 174 + ld r12,STK_PARM(R4)(r1) 175 175 std r4, 0(r12) 176 176 std r5, 8(r12) 177 177 std r6, 16(r12) ··· 190 190 191 191 HCALL_INST_PRECALL 192 192 193 - std r4,STK_PARM(r4)(r1) /* save ret buffer */ 193 + std r4,STK_PARM(R4)(r1) /* save ret buffer */ 194 194 195 195 mr r11,r3 196 196 mr r3,r5 ··· 204 204 205 205 HCALL_INST_POSTCALL 206 206 207 - ld r12,STK_PARM(r4)(r1) 207 + ld r12,STK_PARM(R4)(r1) 208 208 std r4, 0(r12) 209 209 std r5, 8(r12) 210 210 std r6, 16(r12) ··· 224 224 225 225 HCALL_INST_PRECALL 226 226 227 - std r4,STK_PARM(r4)(r1) /* save ret buffer */ 227 + std r4,STK_PARM(R4)(r1) /* save ret buffer */ 228 228 229 229 mr r11,r3 230 230 mr r3,r5 ··· 238 238 239 239 HCALL_INST_POSTCALL 240 240 241 - ld r12,STK_PARM(r4)(r1) 241 + ld r12,STK_PARM(R4)(r1) 242 242 std r4, 0(r12) 243 243 std r5, 8(r12) 244 244 std r6, 16(r12) ··· 259 259 260 260 HCALL_INST_PRECALL 261 261 262 - std r4,STK_PARM(r4)(r1) /* save ret buffer */ 262 + std r4,STK_PARM(R4)(r1) /* save ret buffer */ 263 263 264 264 mr r11,r3 265 265 mr r3,r5 ··· 273 273 274 274 HCALL_INST_POSTCALL 275 275 276 - ld r12,STK_PARM(r4)(r1) 276 + ld r12,STK_PARM(R4)(r1) 277 277 std r4, 0(r12) 278 278 std r5, 8(r12) 279 279 std r6, 16(r12)
+4 -4
arch/powerpc/platforms/powernv/opal-takeover.S
··· 23 23 _GLOBAL(opal_query_takeover) 24 24 mfcr r0 25 25 stw r0,8(r1) 26 - std r3,STK_PARAM(r3)(r1) 27 - std r4,STK_PARAM(r4)(r1) 26 + std r3,STK_PARAM(R3)(r1) 27 + std r4,STK_PARAM(R4)(r1) 28 28 li r3,H_HAL_TAKEOVER 29 29 li r4,H_HAL_TAKEOVER_QUERY_MAGIC 30 30 HVSC 31 - ld r10,STK_PARAM(r3)(r1) 31 + ld r10,STK_PARAM(R3)(r1) 32 32 std r4,0(r10) 33 - ld r10,STK_PARAM(r4)(r1) 33 + ld r10,STK_PARAM(R4)(r1) 34 34 std r5,0(r10) 35 35 lwz r0,8(r1) 36 36 mtcrf 0xff,r0
+1 -1
arch/powerpc/platforms/powernv/opal-wrappers.S
··· 32 32 std r12,PACASAVEDMSR(r13); \ 33 33 andc r12,r12,r0; \ 34 34 mtmsrd r12,1; \ 35 - LOAD_REG_ADDR(r0,.opal_return); \ 35 + LOAD_REG_ADDR(R0,.opal_return); \ 36 36 mtlr r0; \ 37 37 li r0,MSR_DR|MSR_IR; \ 38 38 andc r12,r12,r0; \
+36 -36
arch/powerpc/platforms/pseries/hvCall.S
··· 40 40 cmpdi r12,0; \ 41 41 beq+ 1f; \ 42 42 mflr r0; \ 43 - std r3,STK_PARM(r3)(r1); \ 44 - std r4,STK_PARM(r4)(r1); \ 45 - std r5,STK_PARM(r5)(r1); \ 46 - std r6,STK_PARM(r6)(r1); \ 47 - std r7,STK_PARM(r7)(r1); \ 48 - std r8,STK_PARM(r8)(r1); \ 49 - std r9,STK_PARM(r9)(r1); \ 50 - std r10,STK_PARM(r10)(r1); \ 43 + std r3,STK_PARM(R3)(r1); \ 44 + std r4,STK_PARM(R4)(r1); \ 45 + std r5,STK_PARM(R5)(r1); \ 46 + std r6,STK_PARM(R6)(r1); \ 47 + std r7,STK_PARM(R7)(r1); \ 48 + std r8,STK_PARM(R8)(r1); \ 49 + std r9,STK_PARM(R9)(r1); \ 50 + std r10,STK_PARM(R10)(r1); \ 51 51 std r0,16(r1); \ 52 52 addi r4,r1,STK_PARM(FIRST_REG); \ 53 53 stdu r1,-STACK_FRAME_OVERHEAD(r1); \ 54 54 bl .__trace_hcall_entry; \ 55 55 addi r1,r1,STACK_FRAME_OVERHEAD; \ 56 56 ld r0,16(r1); \ 57 - ld r3,STK_PARM(r3)(r1); \ 58 - ld r4,STK_PARM(r4)(r1); \ 59 - ld r5,STK_PARM(r5)(r1); \ 60 - ld r6,STK_PARM(r6)(r1); \ 61 - ld r7,STK_PARM(r7)(r1); \ 62 - ld r8,STK_PARM(r8)(r1); \ 63 - ld r9,STK_PARM(r9)(r1); \ 64 - ld r10,STK_PARM(r10)(r1); \ 57 + ld r3,STK_PARM(R3)(r1); \ 58 + ld r4,STK_PARM(R4)(r1); \ 59 + ld r5,STK_PARM(R5)(r1); \ 60 + ld r6,STK_PARM(R6)(r1); \ 61 + ld r7,STK_PARM(R7)(r1); \ 62 + ld r8,STK_PARM(R8)(r1); \ 63 + ld r9,STK_PARM(R9)(r1); \ 64 + ld r10,STK_PARM(R10)(r1); \ 65 65 mtlr r0; \ 66 66 1: 67 67 ··· 79 79 cmpdi r12,0; \ 80 80 beq+ 1f; \ 81 81 mflr r0; \ 82 - ld r6,STK_PARM(r3)(r1); \ 83 - std r3,STK_PARM(r3)(r1); \ 82 + ld r6,STK_PARM(R3)(r1); \ 83 + std r3,STK_PARM(R3)(r1); \ 84 84 mr r4,r3; \ 85 85 mr r3,r6; \ 86 86 std r0,16(r1); \ ··· 88 88 bl .__trace_hcall_exit; \ 89 89 addi r1,r1,STACK_FRAME_OVERHEAD; \ 90 90 ld r0,16(r1); \ 91 - ld r3,STK_PARM(r3)(r1); \ 91 + ld r3,STK_PARM(R3)(r1); \ 92 92 mtlr r0; \ 93 93 1: 94 94 ··· 114 114 mfcr r0 115 115 stw r0,8(r1) 116 116 117 - HCALL_INST_PRECALL(r4) 117 + HCALL_INST_PRECALL(R4) 118 118 119 119 HVSC /* invoke the hypervisor */ 120 120 ··· 130 130 mfcr r0 131 131 stw r0,8(r1) 132 132 133 - HCALL_INST_PRECALL(r5) 133 + HCALL_INST_PRECALL(R5) 134 134 135 - std r4,STK_PARM(r4)(r1) /* Save ret buffer */ 135 + std r4,STK_PARM(R4)(r1) /* Save ret buffer */ 136 136 137 137 mr r4,r5 138 138 mr r5,r6 ··· 143 143 144 144 HVSC /* invoke the hypervisor */ 145 145 146 - ld r12,STK_PARM(r4)(r1) 146 + ld r12,STK_PARM(R4)(r1) 147 147 std r4, 0(r12) 148 148 std r5, 8(r12) 149 149 std r6, 16(r12) ··· 168 168 mfcr r0 169 169 stw r0,8(r1) 170 170 171 - std r4,STK_PARM(r4)(r1) /* Save ret buffer */ 171 + std r4,STK_PARM(R4)(r1) /* Save ret buffer */ 172 172 173 173 mr r4,r5 174 174 mr r5,r6 ··· 179 179 180 180 HVSC /* invoke the hypervisor */ 181 181 182 - ld r12,STK_PARM(r4)(r1) 182 + ld r12,STK_PARM(R4)(r1) 183 183 std r4, 0(r12) 184 184 std r5, 8(r12) 185 185 std r6, 16(r12) ··· 196 196 mfcr r0 197 197 stw r0,8(r1) 198 198 199 - HCALL_INST_PRECALL(r5) 199 + HCALL_INST_PRECALL(R5) 200 200 201 - std r4,STK_PARM(r4)(r1) /* Save ret buffer */ 201 + std r4,STK_PARM(R4)(r1) /* Save ret buffer */ 202 202 203 203 mr r4,r5 204 204 mr r5,r6 ··· 206 206 mr r7,r8 207 207 mr r8,r9 208 208 mr r9,r10 209 - ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */ 210 - ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */ 211 - ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */ 209 + ld r10,STK_PARM(R11)(r1) /* put arg7 in R10 */ 210 + ld r11,STK_PARM(R12)(r1) /* put arg8 in R11 */ 211 + ld r12,STK_PARM(R13)(r1) /* put arg9 in R12 */ 212 212 213 213 HVSC /* invoke the hypervisor */ 214 214 215 215 mr r0,r12 216 - ld r12,STK_PARM(r4)(r1) 216 + ld r12,STK_PARM(R4)(r1) 217 217 std r4, 0(r12) 218 218 std 
r5, 8(r12) 219 219 std r6, 16(r12) ··· 238 238 mfcr r0 239 239 stw r0,8(r1) 240 240 241 - std r4,STK_PARM(r4)(r1) /* Save ret buffer */ 241 + std r4,STK_PARM(R4)(r1) /* Save ret buffer */ 242 242 243 243 mr r4,r5 244 244 mr r5,r6 ··· 246 246 mr r7,r8 247 247 mr r8,r9 248 248 mr r9,r10 249 - ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */ 250 - ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */ 251 - ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */ 249 + ld r10,STK_PARM(R11)(r1) /* put arg7 in R10 */ 250 + ld r11,STK_PARM(R12)(r1) /* put arg8 in R11 */ 251 + ld r12,STK_PARM(R13)(r1) /* put arg9 in R12 */ 252 252 253 253 HVSC /* invoke the hypervisor */ 254 254 255 255 mr r0,r12 256 - ld r12,STK_PARM(r4)(r1) 256 + ld r12,STK_PARM(R4)(r1) 257 257 std r4, 0(r12) 258 258 std r5, 8(r12) 259 259 std r6, 16(r12)
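Note: in the hvCall.S hunks the tracing macro's argument changes as well, HCALL_INST_PRECALL(r4) -> HCALL_INST_PRECALL(R4) and (r5) -> (R5), because the one use of that argument visible in the hunk is

    addi    r4,r1,STK_PARM(FIRST_REG)

With R4 passed in, and reusing the assumed STK_PARM() body from the earlier sketch, STK_PARM(R4) = 48 + (4 - 3) * 8 = 56, so the line reduces to addi r4,r1,56 (illustrative expansion only). An assembler symbol such as %r4 could not take part in that arithmetic, which is why these call sites need the numeric form too.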