/*
 * This file contains the generic code to perform a call to the
 * pSeries LPAR hypervisor.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

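/*
 * STK_PARM(i) is the offset, within the caller's stack frame, of the
 * parameter save area slot for argument register i: the save area
 * starts at offset 48 and r3 is the first argument register.
 */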
#define STK_PARM(i)     (48 + ((i)-3)*8)

#ifdef CONFIG_TRACEPOINTS

	.section	".toc","aw"

	.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
	.llong	0

	.section	".text"

/*
 * The precall must preserve all registers.  Use unused STK_PARM()
 * areas to save snapshots and the opcode.  We branch around this
 * in early init (e.g. when populating the MMU hash table) by using an
 * unconditional cpu feature.
 */
#define HCALL_INST_PRECALL(FIRST_REG)				\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	ld	r12,hcall_tracepoint_refcount@toc(r2);		\
	std	r12,32(r1);					\
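	/* nothing to trace unless a tracepoint user is active */	\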
	cmpdi	r12,0;						\
	beq+	1f;						\
	mflr	r0;						\
	std	r3,STK_PARM(R3)(r1);				\
	std	r4,STK_PARM(R4)(r1);				\
	std	r5,STK_PARM(R5)(r1);				\
	std	r6,STK_PARM(R6)(r1);				\
	std	r7,STK_PARM(R7)(r1);				\
	std	r8,STK_PARM(R8)(r1);				\
	std	r9,STK_PARM(R9)(r1);				\
	std	r10,STK_PARM(R10)(r1);				\
	std	r0,16(r1);					\
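	/* __trace_hcall_entry(opcode, args): r3 already holds the opcode */ \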
	addi	r4,r1,STK_PARM(FIRST_REG);			\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	.__trace_hcall_entry;				\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r0,16(r1);					\
	ld	r3,STK_PARM(R3)(r1);				\
	ld	r4,STK_PARM(R4)(r1);				\
	ld	r5,STK_PARM(R5)(r1);				\
	ld	r6,STK_PARM(R6)(r1);				\
	ld	r7,STK_PARM(R7)(r1);				\
	ld	r8,STK_PARM(R8)(r1);				\
	ld	r9,STK_PARM(R9)(r1);				\
	ld	r10,STK_PARM(R10)(r1);				\
	mtlr	r0;						\
1:

/*
 * The postcall is performed immediately before the function returns,
 * which allows liberal use of volatile registers.  We branch around
 * this in early init (e.g. when populating the MMU hash table) by
 * using an unconditional cpu feature.
 */
#define __HCALL_INST_POSTCALL					\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	ld	r12,32(r1);					\
	cmpdi	r12,0;						\
	beq+	1f;						\
	mflr	r0;						\
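	/* build arguments for __trace_hcall_exit(opcode, retval, retbuf) */ \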
	ld	r6,STK_PARM(R3)(r1);				\
	std	r3,STK_PARM(R3)(r1);				\
	mr	r4,r3;						\
	mr	r3,r6;						\
	std	r0,16(r1);					\
	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
	bl	.__trace_hcall_exit;				\
	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
	ld	r0,16(r1);					\
	ld	r3,STK_PARM(R3)(r1);				\
	mtlr	r0;						\
1:

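/* Postcall variant for hcalls without a return buffer: pass retbuf = 0 in r5. */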
#define HCALL_INST_POSTCALL_NORETS				\
	li	r5,0;						\
	__HCALL_INST_POSTCALL

#define HCALL_INST_POSTCALL(BUFREG)				\
	mr	r5,BUFREG;					\
	__HCALL_INST_POSTCALL

#else
#define HCALL_INST_PRECALL(FIRST_REG)
#define HCALL_INST_POSTCALL_NORETS
#define HCALL_INST_POSTCALL(BUFREG)
#endif

	.text

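/* long plpar_hcall_norets(unsigned long opcode, ...) */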
_GLOBAL(plpar_hcall_norets)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL(R4)

	HVSC				/* invoke the hypervisor */

	HCALL_INST_POSTCALL_NORETS

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr				/* return r3 = status */

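/* long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...) */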
_GLOBAL(plpar_hcall)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL(R5)

	std	r4,STK_PARM(R4)(r1)	/* Save ret buffer */

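	/* shift the C arguments down one: the hypervisor takes them in r4-r9 */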
	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

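	/* copy the four return words from r4-r7 into the caller's buffer */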
	ld	r12,STK_PARM(R4)(r1)
	std	r4, 0(r12)
	std	r5, 8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	HCALL_INST_POSTCALL(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

/*
 * plpar_hcall_raw can be called in real mode.  kexec/kdump need some
 * hypervisor calls to be executed in real mode, so plpar_hcall_raw
 * does not access the per-CPU hypervisor-call statistics variables,
 * since those may not be present in the RMO region.
 */
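/* long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...) */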
_GLOBAL(plpar_hcall_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std	r4,STK_PARM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARM(R4)(r1)
	std	r4, 0(r12)
	std	r5, 8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

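/* long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...) */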
_GLOBAL(plpar_hcall9)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL(R5)

	std	r4,STK_PARM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARM(R11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARM(R12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARM(R13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

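	/* park the ninth return value in r0 so r12 can address the ret buffer */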
	mr	r0,r12
	ld	r12,STK_PARM(R4)(r1)
	std	r4, 0(r12)
	std	r5, 8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	HCALL_INST_POSTCALL(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

/* See plpar_hcall_raw for why this is needed */
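/* long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...) */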
_GLOBAL(plpar_hcall9_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std	r4,STK_PARM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARM(R11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARM(R12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARM(R13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12
	ld	r12,STK_PARM(R4)(r1)
	std	r4, 0(r12)
	std	r5, 8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */