arch/powerpc/platforms/pseries/hvCall.S
/*
 * This file contains the generic code to perform a call to the
 * pSeries LPAR hypervisor.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#ifdef CONFIG_TRACEPOINTS

	.section	".toc","aw"

	.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
	.llong	0

	.section	".text"

/*
 * precall must preserve all registers. Use unused STK_PARAM()
 * areas to save snapshots and the opcode. We branch around this
 * in early init (e.g. when populating the MMU hashtable) by using an
 * unconditional cpu feature.
 */
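/*
 * How the branch-around works (roughly): END_FTR_SECTION(0, 1) describes
 * a CPU feature test that can never be satisfied, so once feature fixups
 * have run the "b 1f" below is patched to a nop and the tracing path
 * becomes reachable.  Before the fixups run (early init), the branch is
 * taken and the hcall is issued without tracing.  When the tracing path
 * does run, it calls __trace_hcall_entry() with the opcode in r3 and r4
 * pointing at the hcall arguments saved in the caller's frame.
 */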
#define HCALL_INST_PRECALL(FIRST_REG) \
BEGIN_FTR_SECTION; \
	b	1f; \
END_FTR_SECTION(0, 1); \
	ld	r12,hcall_tracepoint_refcount@toc(r2); \
	std	r12,32(r1); \
	cmpdi	r12,0; \
	beq+	1f; \
	mflr	r0; \
	std	r3,STK_PARAM(R3)(r1); \
	std	r4,STK_PARAM(R4)(r1); \
	std	r5,STK_PARAM(R5)(r1); \
	std	r6,STK_PARAM(R6)(r1); \
	std	r7,STK_PARAM(R7)(r1); \
	std	r8,STK_PARAM(R8)(r1); \
	std	r9,STK_PARAM(R9)(r1); \
	std	r10,STK_PARAM(R10)(r1); \
	std	r0,16(r1); \
	addi	r4,r1,STK_PARAM(FIRST_REG); \
	stdu	r1,-STACK_FRAME_OVERHEAD(r1); \
	bl	.__trace_hcall_entry; \
	addi	r1,r1,STACK_FRAME_OVERHEAD; \
	ld	r0,16(r1); \
	ld	r3,STK_PARAM(R3)(r1); \
	ld	r4,STK_PARAM(R4)(r1); \
	ld	r5,STK_PARAM(R5)(r1); \
	ld	r6,STK_PARAM(R6)(r1); \
	ld	r7,STK_PARAM(R7)(r1); \
	ld	r8,STK_PARAM(R8)(r1); \
	ld	r9,STK_PARAM(R9)(r1); \
	ld	r10,STK_PARAM(R10)(r1); \
	mtlr	r0; \
1:

/*
 * postcall is performed immediately before the function returns, which
 * allows liberal use of volatile registers. We branch around this
 * in early init (e.g. when populating the MMU hashtable) by using an
 * unconditional cpu feature.
 */
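/*
 * On the traced path the original opcode is reloaded from STK_PARAM(R3),
 * the hcall status in r3 is moved to r4, and r5 carries either a pointer
 * to the return buffer or 0, so the call below is effectively
 * __trace_hcall_exit(opcode, retval, retbuf).  The status is saved
 * across the call and restored into r3 before returning to the caller.
 */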
#define __HCALL_INST_POSTCALL \
BEGIN_FTR_SECTION; \
	b	1f; \
END_FTR_SECTION(0, 1); \
	ld	r12,32(r1); \
	cmpdi	r12,0; \
	beq+	1f; \
	mflr	r0; \
	ld	r6,STK_PARAM(R3)(r1); \
	std	r3,STK_PARAM(R3)(r1); \
	mr	r4,r3; \
	mr	r3,r6; \
	std	r0,16(r1); \
	stdu	r1,-STACK_FRAME_OVERHEAD(r1); \
	bl	.__trace_hcall_exit; \
	addi	r1,r1,STACK_FRAME_OVERHEAD; \
	ld	r0,16(r1); \
	ld	r3,STK_PARAM(R3)(r1); \
	mtlr	r0; \
1:

#define HCALL_INST_POSTCALL_NORETS \
	li	r5,0; \
	__HCALL_INST_POSTCALL

#define HCALL_INST_POSTCALL(BUFREG) \
	mr	r5,BUFREG; \
	__HCALL_INST_POSTCALL

#else
#define HCALL_INST_PRECALL(FIRST_ARG)
#define HCALL_INST_POSTCALL_NORETS
#define HCALL_INST_POSTCALL(BUFREG)
#endif

	.text

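/*
 * plpar_hcall_norets: make an hcall that returns nothing beyond the
 * status.  The C declaration in asm/hvcall.h is along the lines of
 *
 *	long plpar_hcall_norets(unsigned long opcode, ...);
 *
 * The opcode arrives in r3 and any further arguments in r4-r10 are
 * passed to the hypervisor unchanged; the hcall status comes back in
 * r3.  The condition register is saved to the CR save slot at 8(r1)
 * before the hcall and restored afterwards.
 */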
_GLOBAL(plpar_hcall_norets)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL(R4)

	HVSC				/* invoke the hypervisor */

	HCALL_INST_POSTCALL_NORETS

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr				/* return r3 = status */

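/*
 * plpar_hcall: make an hcall that returns up to four values.  The C
 * declaration in asm/hvcall.h is along the lines of
 *
 *	long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * where retbuf points at PLPAR_HCALL_BUFSIZE (4) unsigned longs.  The
 * opcode stays in r3, the retbuf pointer is stashed on the stack, up to
 * six hcall arguments are shifted down from r5-r10 into r4-r9, and on
 * return the hypervisor's r4-r7 are stored into retbuf while the status
 * is returned in r3.
 */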
_GLOBAL(plpar_hcall)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL(R5)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	HCALL_INST_POSTCALL(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

/*
 * plpar_hcall_raw can be called in real mode.  kexec/kdump need some
 * hypervisor calls to be executed in real mode, so plpar_hcall_raw
 * does not access the per-CPU hypervisor call statistics variables,
 * since those variables may not be present in the RMO region.
 */
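/*
 * For the same reason it also omits the HCALL_INST_PRECALL/POSTCALL
 * tracepoint hooks used by the other entry points, so calls made
 * through the raw variants are not traced.
 */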
_GLOBAL(plpar_hcall_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

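/*
 * plpar_hcall9: make an hcall with up to nine arguments and nine return
 * values.  The C declaration in asm/hvcall.h is along the lines of
 *
 *	long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
 *
 * where retbuf points at PLPAR_HCALL9_BUFSIZE (9) unsigned longs.  The
 * first six hcall arguments are shifted down from r5-r10 into r4-r9;
 * arguments seven to nine are fetched from the caller's parameter save
 * area (STK_PARAM(R11)-STK_PARAM(R13)) into r10-r12.  On return the
 * hypervisor's r4-r12 are stored into retbuf and the status is returned
 * in r3.
 */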
_GLOBAL(plpar_hcall9)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_INST_PRECALL(R5)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARAM(R11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARAM(R12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARAM(R13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12
	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	HCALL_INST_POSTCALL(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

/* See plpar_hcall_raw for why this is needed */
_GLOBAL(plpar_hcall9_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARAM(R11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARAM(R12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARAM(R13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12
	ld	r12,STK_PARAM(R4)(r1)
	std	r4,  0(r12)
	std	r5,  8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */