powerpc: Fix usage of register macros getting ready for %r0 change
[deliverable/linux.git] arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a1044f43becd380cdc7216e082fbf267b97a3fae..bc99015030c386340d9a99f12cb769be5591c0e6 100644
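
The hunks below mechanically switch the register-taking macros used in this file (VCPU_GPR, PPC_SLBFEE_DOT, PPC_POPCNTW, STXVD2X, LXVD2X) from lowercase rN arguments to uppercase RN ones. A minimal sketch of why that is needed, using assumed offsets and macro bodies rather than the kernel's exact definitions:

	/*
	 * Illustrative sketch only -- the 0x100 base offset and the macro
	 * bodies are assumptions for demonstration, not the real kernel code.
	 *
	 * VCPU_GPR(n) computes a byte offset into the vcpu structure, so its
	 * argument must expand to a plain number:
	 */
	#define VCPU_GPRS   0x100                   /* hypothetical base of the GPR array */
	#define VCPU_GPR(n) (VCPU_GPRS + 8 * (n))   /* 8 bytes per 64-bit GPR */

	/*
	 * While the assembly headers define r14 as the bare number 14,
	 * VCPU_GPR(r14) expands to (0x100 + 8 * 14) and assembles fine.
	 * Once r14 is redefined as %r14 (the planned %r0 change), that
	 * arithmetic no longer works, so plain-number uppercase aliases
	 * are used instead and every call site switches to them:
	 */
	#define R14 14                              /* stays a number even after r14 -> %r14 */
	/* call sites: VCPU_GPR(r14) becomes VCPU_GPR(R14) */
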
@@ -206,24 +206,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
        /* Load up FP, VMX and VSX registers */
        bl      kvmppc_load_fp
 
-       ld      r14, VCPU_GPR(r14)(r4)
-       ld      r15, VCPU_GPR(r15)(r4)
-       ld      r16, VCPU_GPR(r16)(r4)
-       ld      r17, VCPU_GPR(r17)(r4)
-       ld      r18, VCPU_GPR(r18)(r4)
-       ld      r19, VCPU_GPR(r19)(r4)
-       ld      r20, VCPU_GPR(r20)(r4)
-       ld      r21, VCPU_GPR(r21)(r4)
-       ld      r22, VCPU_GPR(r22)(r4)
-       ld      r23, VCPU_GPR(r23)(r4)
-       ld      r24, VCPU_GPR(r24)(r4)
-       ld      r25, VCPU_GPR(r25)(r4)
-       ld      r26, VCPU_GPR(r26)(r4)
-       ld      r27, VCPU_GPR(r27)(r4)
-       ld      r28, VCPU_GPR(r28)(r4)
-       ld      r29, VCPU_GPR(r29)(r4)
-       ld      r30, VCPU_GPR(r30)(r4)
-       ld      r31, VCPU_GPR(r31)(r4)
+       ld      r14, VCPU_GPR(R14)(r4)
+       ld      r15, VCPU_GPR(R15)(r4)
+       ld      r16, VCPU_GPR(R16)(r4)
+       ld      r17, VCPU_GPR(R17)(r4)
+       ld      r18, VCPU_GPR(R18)(r4)
+       ld      r19, VCPU_GPR(R19)(r4)
+       ld      r20, VCPU_GPR(R20)(r4)
+       ld      r21, VCPU_GPR(R21)(r4)
+       ld      r22, VCPU_GPR(R22)(r4)
+       ld      r23, VCPU_GPR(R23)(r4)
+       ld      r24, VCPU_GPR(R24)(r4)
+       ld      r25, VCPU_GPR(R25)(r4)
+       ld      r26, VCPU_GPR(R26)(r4)
+       ld      r27, VCPU_GPR(R27)(r4)
+       ld      r28, VCPU_GPR(R28)(r4)
+       ld      r29, VCPU_GPR(R29)(r4)
+       ld      r30, VCPU_GPR(R30)(r4)
+       ld      r31, VCPU_GPR(R31)(r4)
 
 BEGIN_FTR_SECTION
        /* Switch DSCR to guest value */
@@ -547,21 +547,21 @@ fast_guest_return:
        mtlr    r5
        mtcr    r6
 
-       ld      r0, VCPU_GPR(r0)(r4)
-       ld      r1, VCPU_GPR(r1)(r4)
-       ld      r2, VCPU_GPR(r2)(r4)
-       ld      r3, VCPU_GPR(r3)(r4)
-       ld      r5, VCPU_GPR(r5)(r4)
-       ld      r6, VCPU_GPR(r6)(r4)
-       ld      r7, VCPU_GPR(r7)(r4)
-       ld      r8, VCPU_GPR(r8)(r4)
-       ld      r9, VCPU_GPR(r9)(r4)
-       ld      r10, VCPU_GPR(r10)(r4)
-       ld      r11, VCPU_GPR(r11)(r4)
-       ld      r12, VCPU_GPR(r12)(r4)
-       ld      r13, VCPU_GPR(r13)(r4)
-
-       ld      r4, VCPU_GPR(r4)(r4)
+       ld      r0, VCPU_GPR(R0)(r4)
+       ld      r1, VCPU_GPR(R1)(r4)
+       ld      r2, VCPU_GPR(R2)(r4)
+       ld      r3, VCPU_GPR(R3)(r4)
+       ld      r5, VCPU_GPR(R5)(r4)
+       ld      r6, VCPU_GPR(R6)(r4)
+       ld      r7, VCPU_GPR(R7)(r4)
+       ld      r8, VCPU_GPR(R8)(r4)
+       ld      r9, VCPU_GPR(R9)(r4)
+       ld      r10, VCPU_GPR(R10)(r4)
+       ld      r11, VCPU_GPR(R11)(r4)
+       ld      r12, VCPU_GPR(R12)(r4)
+       ld      r13, VCPU_GPR(R13)(r4)
+
+       ld      r4, VCPU_GPR(R4)(r4)
 
        hrfid
        b       .
@@ -590,22 +590,22 @@ kvmppc_interrupt:
 
        /* Save registers */
 
-       std     r0, VCPU_GPR(r0)(r9)
-       std     r1, VCPU_GPR(r1)(r9)
-       std     r2, VCPU_GPR(r2)(r9)
-       std     r3, VCPU_GPR(r3)(r9)
-       std     r4, VCPU_GPR(r4)(r9)
-       std     r5, VCPU_GPR(r5)(r9)
-       std     r6, VCPU_GPR(r6)(r9)
-       std     r7, VCPU_GPR(r7)(r9)
-       std     r8, VCPU_GPR(r8)(r9)
+       std     r0, VCPU_GPR(R0)(r9)
+       std     r1, VCPU_GPR(R1)(r9)
+       std     r2, VCPU_GPR(R2)(r9)
+       std     r3, VCPU_GPR(R3)(r9)
+       std     r4, VCPU_GPR(R4)(r9)
+       std     r5, VCPU_GPR(R5)(r9)
+       std     r6, VCPU_GPR(R6)(r9)
+       std     r7, VCPU_GPR(R7)(r9)
+       std     r8, VCPU_GPR(R8)(r9)
        ld      r0, HSTATE_HOST_R2(r13)
-       std     r0, VCPU_GPR(r9)(r9)
-       std     r10, VCPU_GPR(r10)(r9)
-       std     r11, VCPU_GPR(r11)(r9)
+       std     r0, VCPU_GPR(R9)(r9)
+       std     r10, VCPU_GPR(R10)(r9)
+       std     r11, VCPU_GPR(R11)(r9)
        ld      r3, HSTATE_SCRATCH0(r13)
        lwz     r4, HSTATE_SCRATCH1(r13)
-       std     r3, VCPU_GPR(r12)(r9)
+       std     r3, VCPU_GPR(R12)(r9)
        stw     r4, VCPU_CR(r9)
 
        /* Restore R1/R2 so we can handle faults */
@@ -626,7 +626,7 @@ kvmppc_interrupt:
 
        GET_SCRATCH0(r3)
        mflr    r4
-       std     r3, VCPU_GPR(r13)(r9)
+       std     r3, VCPU_GPR(R13)(r9)
        std     r4, VCPU_LR(r9)
 
        /* Unset guest mode */
@@ -968,24 +968,24 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
        /* Save non-volatile GPRs */
-       std     r14, VCPU_GPR(r14)(r9)
-       std     r15, VCPU_GPR(r15)(r9)
-       std     r16, VCPU_GPR(r16)(r9)
-       std     r17, VCPU_GPR(r17)(r9)
-       std     r18, VCPU_GPR(r18)(r9)
-       std     r19, VCPU_GPR(r19)(r9)
-       std     r20, VCPU_GPR(r20)(r9)
-       std     r21, VCPU_GPR(r21)(r9)
-       std     r22, VCPU_GPR(r22)(r9)
-       std     r23, VCPU_GPR(r23)(r9)
-       std     r24, VCPU_GPR(r24)(r9)
-       std     r25, VCPU_GPR(r25)(r9)
-       std     r26, VCPU_GPR(r26)(r9)
-       std     r27, VCPU_GPR(r27)(r9)
-       std     r28, VCPU_GPR(r28)(r9)
-       std     r29, VCPU_GPR(r29)(r9)
-       std     r30, VCPU_GPR(r30)(r9)
-       std     r31, VCPU_GPR(r31)(r9)
+       std     r14, VCPU_GPR(R14)(r9)
+       std     r15, VCPU_GPR(R15)(r9)
+       std     r16, VCPU_GPR(R16)(r9)
+       std     r17, VCPU_GPR(R17)(r9)
+       std     r18, VCPU_GPR(R18)(r9)
+       std     r19, VCPU_GPR(R19)(r9)
+       std     r20, VCPU_GPR(R20)(r9)
+       std     r21, VCPU_GPR(R21)(r9)
+       std     r22, VCPU_GPR(R22)(r9)
+       std     r23, VCPU_GPR(R23)(r9)
+       std     r24, VCPU_GPR(R24)(r9)
+       std     r25, VCPU_GPR(R25)(r9)
+       std     r26, VCPU_GPR(R26)(r9)
+       std     r27, VCPU_GPR(R27)(r9)
+       std     r28, VCPU_GPR(R28)(r9)
+       std     r29, VCPU_GPR(R29)(r9)
+       std     r30, VCPU_GPR(R30)(r9)
+       std     r31, VCPU_GPR(R31)(r9)
 
        /* Save SPRGs */
        mfspr   r3, SPRN_SPRG0
@@ -1160,7 +1160,7 @@ kvmppc_hdsi:
        andi.   r0, r11, MSR_DR         /* data relocation enabled? */
        beq     3f
        clrrdi  r0, r4, 28
-       PPC_SLBFEE_DOT(r5, r0)          /* if so, look up SLB */
+       PPC_SLBFEE_DOT(R5, R0)          /* if so, look up SLB */
        bne     1f                      /* if no SLB entry found */
 4:     std     r4, VCPU_FAULT_DAR(r9)
        stw     r6, VCPU_FAULT_DSISR(r9)
@@ -1234,7 +1234,7 @@ kvmppc_hisi:
        andi.   r0, r11, MSR_IR         /* instruction relocation enabled? */
        beq     3f
        clrrdi  r0, r10, 28
-       PPC_SLBFEE_DOT(r5, r0)          /* if so, look up SLB */
+       PPC_SLBFEE_DOT(R5, R0)          /* if so, look up SLB */
        bne     1f                      /* if no SLB entry found */
 4:
        /* Search the hash table. */
@@ -1278,7 +1278,7 @@ kvmppc_hisi:
  */
        .globl  hcall_try_real_mode
 hcall_try_real_mode:
-       ld      r3,VCPU_GPR(r3)(r9)
+       ld      r3,VCPU_GPR(R3)(r9)
        andi.   r0,r11,MSR_PR
        bne     hcall_real_cont
        clrrdi  r3,r3,2
@@ -1291,12 +1291,12 @@ hcall_try_real_mode:
        add     r3,r3,r4
        mtctr   r3
        mr      r3,r9           /* get vcpu pointer */
-       ld      r4,VCPU_GPR(r4)(r9)
+       ld      r4,VCPU_GPR(R4)(r9)
        bctrl
        cmpdi   r3,H_TOO_HARD
        beq     hcall_real_fallback
        ld      r4,HSTATE_KVM_VCPU(r13)
-       std     r3,VCPU_GPR(r3)(r4)
+       std     r3,VCPU_GPR(R3)(r4)
        ld      r10,VCPU_PC(r4)
        ld      r11,VCPU_MSR(r4)
        b       fast_guest_return
@@ -1424,7 +1424,7 @@ _GLOBAL(kvmppc_h_cede)
        li      r0,0            /* set trap to 0 to say hcall is handled */
        stw     r0,VCPU_TRAP(r3)
        li      r0,H_SUCCESS
-       std     r0,VCPU_GPR(r3)(r3)
+       std     r0,VCPU_GPR(R3)(r3)
 BEGIN_FTR_SECTION
        b       2f              /* just send it up to host on 970 */
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
@@ -1443,7 +1443,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
        addi    r6,r5,VCORE_NAPPING_THREADS
 31:    lwarx   r4,0,r6
        or      r4,r4,r0
-       PPC_POPCNTW(r7,r4)
+       PPC_POPCNTW(R7,R4)
        cmpw    r7,r8
        bge     2f
        stwcx.  r4,0,r6
@@ -1464,24 +1464,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
  * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
  */
        /* Save non-volatile GPRs */
-       std     r14, VCPU_GPR(r14)(r3)
-       std     r15, VCPU_GPR(r15)(r3)
-       std     r16, VCPU_GPR(r16)(r3)
-       std     r17, VCPU_GPR(r17)(r3)
-       std     r18, VCPU_GPR(r18)(r3)
-       std     r19, VCPU_GPR(r19)(r3)
-       std     r20, VCPU_GPR(r20)(r3)
-       std     r21, VCPU_GPR(r21)(r3)
-       std     r22, VCPU_GPR(r22)(r3)
-       std     r23, VCPU_GPR(r23)(r3)
-       std     r24, VCPU_GPR(r24)(r3)
-       std     r25, VCPU_GPR(r25)(r3)
-       std     r26, VCPU_GPR(r26)(r3)
-       std     r27, VCPU_GPR(r27)(r3)
-       std     r28, VCPU_GPR(r28)(r3)
-       std     r29, VCPU_GPR(r29)(r3)
-       std     r30, VCPU_GPR(r30)(r3)
-       std     r31, VCPU_GPR(r31)(r3)
+       std     r14, VCPU_GPR(R14)(r3)
+       std     r15, VCPU_GPR(R15)(r3)
+       std     r16, VCPU_GPR(R16)(r3)
+       std     r17, VCPU_GPR(R17)(r3)
+       std     r18, VCPU_GPR(R18)(r3)
+       std     r19, VCPU_GPR(R19)(r3)
+       std     r20, VCPU_GPR(R20)(r3)
+       std     r21, VCPU_GPR(R21)(r3)
+       std     r22, VCPU_GPR(R22)(r3)
+       std     r23, VCPU_GPR(R23)(r3)
+       std     r24, VCPU_GPR(R24)(r3)
+       std     r25, VCPU_GPR(R25)(r3)
+       std     r26, VCPU_GPR(R26)(r3)
+       std     r27, VCPU_GPR(R27)(r3)
+       std     r28, VCPU_GPR(R28)(r3)
+       std     r29, VCPU_GPR(R29)(r3)
+       std     r30, VCPU_GPR(R30)(r3)
+       std     r31, VCPU_GPR(R31)(r3)
 
        /* save FP state */
        bl      .kvmppc_save_fp
@@ -1513,24 +1513,24 @@ kvm_end_cede:
        bl      kvmppc_load_fp
 
        /* Load NV GPRS */
-       ld      r14, VCPU_GPR(r14)(r4)
-       ld      r15, VCPU_GPR(r15)(r4)
-       ld      r16, VCPU_GPR(r16)(r4)
-       ld      r17, VCPU_GPR(r17)(r4)
-       ld      r18, VCPU_GPR(r18)(r4)
-       ld      r19, VCPU_GPR(r19)(r4)
-       ld      r20, VCPU_GPR(r20)(r4)
-       ld      r21, VCPU_GPR(r21)(r4)
-       ld      r22, VCPU_GPR(r22)(r4)
-       ld      r23, VCPU_GPR(r23)(r4)
-       ld      r24, VCPU_GPR(r24)(r4)
-       ld      r25, VCPU_GPR(r25)(r4)
-       ld      r26, VCPU_GPR(r26)(r4)
-       ld      r27, VCPU_GPR(r27)(r4)
-       ld      r28, VCPU_GPR(r28)(r4)
-       ld      r29, VCPU_GPR(r29)(r4)
-       ld      r30, VCPU_GPR(r30)(r4)
-       ld      r31, VCPU_GPR(r31)(r4)
+       ld      r14, VCPU_GPR(R14)(r4)
+       ld      r15, VCPU_GPR(R15)(r4)
+       ld      r16, VCPU_GPR(R16)(r4)
+       ld      r17, VCPU_GPR(R17)(r4)
+       ld      r18, VCPU_GPR(R18)(r4)
+       ld      r19, VCPU_GPR(R19)(r4)
+       ld      r20, VCPU_GPR(R20)(r4)
+       ld      r21, VCPU_GPR(R21)(r4)
+       ld      r22, VCPU_GPR(R22)(r4)
+       ld      r23, VCPU_GPR(R23)(r4)
+       ld      r24, VCPU_GPR(R24)(r4)
+       ld      r25, VCPU_GPR(R25)(r4)
+       ld      r26, VCPU_GPR(R26)(r4)
+       ld      r27, VCPU_GPR(R27)(r4)
+       ld      r28, VCPU_GPR(R28)(r4)
+       ld      r29, VCPU_GPR(R29)(r4)
+       ld      r30, VCPU_GPR(R30)(r4)
+       ld      r31, VCPU_GPR(R31)(r4)
 
        /* clear our bit in vcore->napping_threads */
 33:    ld      r5,HSTATE_KVM_VCORE(r13)
@@ -1649,7 +1649,7 @@ BEGIN_FTR_SECTION
        reg = 0
        .rept   32
        li      r6,reg*16+VCPU_VSRS
-       STXVD2X(reg,r6,r3)
+       STXVD2X(reg,R6,R3)
        reg = reg + 1
        .endr
 FTR_SECTION_ELSE
@@ -1711,7 +1711,7 @@ BEGIN_FTR_SECTION
        reg = 0
        .rept   32
        li      r7,reg*16+VCPU_VSRS
-       LXVD2X(reg,r7,r4)
+       LXVD2X(reg,R7,R4)
        reg = reg + 1
        .endr
 FTR_SECTION_ELSE