powernv/cpuidle: Redesign idle states management
arch/powerpc/kernel/idle_power7.S
/*
 * This file contains the power_save function for Power7 CPUs.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/opal.h>
#include <asm/cpuidle.h>

#undef DEBUG

/* Idle state entry routines */

#define IDLE_STATE_ENTER_SEQ(IDLE_INST)                         \
        /* Magic NAP/SLEEP/WINKLE mode enter sequence */        \
        std     r0,0(r1);                                       \
        ptesync;                                                \
        ld      r0,0(r1);                                       \
1:      cmp     cr0,r0,r0;                                      \
        bne     1b;                                             \
        IDLE_INST;                                              \
        b       .
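
/*
 * Note on the sequence above: the load of r0 depends on the preceding
 * store, and the compare/branch depend on the load, so the never-taken
 * bne cannot be resolved until the store has completed. The idle
 * instruction is thus reached with no stores left in flight.
 */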

        .text

/*
 * Pass the requested state in r3:
 *      r3 - PNV_THREAD_NAP/SLEEP/WINKLE
 *
 * Pass whether to check IRQ_HAPPENED in r4:
 *      0 - don't check
 *      1 - check
 */
_GLOBAL(power7_powersave_common)
        /* Use r3 to pass state nap/sleep/winkle */
        /* NAP may lose state, so we create a regs frame on the
         * stack, fill it up with the state we care about and
         * stick a pointer to it in PACAR1. We really only
         * need to save PC, some CR bits and the NV GPRs,
         * but for now an interrupt frame will do.
         */
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-INT_FRAME_SIZE(r1)
        std     r0,_LINK(r1)
        std     r0,_NIP(r1)
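
        /*
         * The LR is stored as both _LINK and _NIP: the state-loss wakeup
         * path (power7_wakeup_loss below) loads _NIP into SRR0 and rfids,
         * so a lossy wakeup "returns" straight to our caller.
         */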

#ifndef CONFIG_SMP
        /* Make sure FPU, VSX etc... are flushed as we may lose
         * state when going to nap mode
         */
        bl      discard_lazy_cpu_state
#endif /* CONFIG_SMP */

        /* Hard disable interrupts */
        mfmsr   r9
        rldicl  r9,r9,48,1
        rotldi  r9,r9,16
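        /*
         * The rldicl/rotldi pair rotates MSR_EE (bit 48) into the MSB,
         * masks it off, and rotates the value back into place: r9 now
         * holds the MSR with EE clear, without needing a scratch register.
         */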
        mtmsrd  r9,1                    /* hard-disable interrupts */

        /* Check if something happened while soft-disabled */
        lbz     r0,PACAIRQHAPPENED(r13)
        andi.   r0,r0,~PACA_IRQ_HARD_DIS@l
        beq     1f
        cmpwi   cr0,r4,0
        beq     1f
        addi    r1,r1,INT_FRAME_SIZE
        ld      r0,16(r1)
        mtlr    r0
        blr

1:      /* We mark irqs hard disabled as this is the state we'll
         * be in when returning and we need to tell arch_local_irq_restore()
         * about it
         */
        li      r0,PACA_IRQ_HARD_DIS
        stb     r0,PACAIRQHAPPENED(r13)

        /* We haven't lost state ... yet */
        li      r0,0
        stb     r0,PACA_NAPSTATELOST(r13)
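
        /*
         * PACA_NAPSTATELOST is cleared here and set by the wakeup
         * exception code when hardware state was actually lost;
         * power7_wakeup_noloss below checks it and falls back to the
         * full-restore path if it is set.
         */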

        /* Continue saving state */
        SAVE_GPR(2, r1)
        SAVE_NVGPRS(r1)
        mfcr    r4
        std     r4,_CCR(r1)
        std     r9,_MSR(r1)
        std     r1,PACAR1(r13)
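
        /*
         * The stack pointer is stashed in the PACA: the wakeup paths
         * enter with no usable r1 and recover the frame from PACAR1.
         */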

        /*
         * Go to real mode to do the nap, as required by the architecture.
         * Also, we need to be in real mode before setting hwthread_state,
         * because as soon as we do that, another thread can switch
         * the MMU context to the guest.
         */
        LOAD_REG_IMMEDIATE(r5, MSR_IDLE)
        li      r6, MSR_RI
        andc    r6, r9, r6
        LOAD_REG_ADDR(r7, power7_enter_nap_mode)
        mtmsrd  r6, 1                   /* clear RI before setting SRR0/1 */
        mtspr   SPRN_SRR0, r7
        mtspr   SPRN_SRR1, r5
        rfid
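
        /*
         * MSR_IDLE has IR/DR clear, so the rfid above resumes at
         * power7_enter_nap_mode with translation off, as required by
         * the comment above.
         */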

        .globl power7_enter_nap_mode
power7_enter_nap_mode:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        /* Tell KVM we're napping */
        li      r4,KVM_HWTHREAD_IN_NAP
        stb     r4,HSTATE_HWTHREAD_STATE(r13)
#endif
        stb     r3,PACA_THREAD_IDLE_STATE(r13)
        cmpwi   cr1,r3,PNV_THREAD_SLEEP
        bge     cr1,2f
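        /*
         * Idle states are ordered NAP < SLEEP < WINKLE, so sleep and
         * winkle took the branch to 2: above and nap falls through.
         */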
        IDLE_STATE_ENTER_SEQ(PPC_NAP)
        /* No return */
2:
        /* Sleep or winkle */
        lbz     r7,PACA_THREAD_MASK(r13)
        ld      r14,PACA_CORE_IDLE_STATE_PTR(r13)
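        /*
         * The per-core idle state word holds a bitmask of the threads
         * that are still awake (PNV_CORE_IDLE_THREAD_BITS) plus a lock
         * bit (PNV_CORE_IDLE_LOCK_BIT) serializing the fastsleep
         * workaround and the wakeup-side timebase resync. A thread
         * clears its bit on entering sleep/winkle and sets it on wakeup;
         * the word is updated with lwarx/stwcx. below.
         */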
lwarx_loop1:
        lwarx   r15,0,r14
        andc    r15,r15,r7              /* Clear thread bit */

        andi.   r15,r15,PNV_CORE_IDLE_THREAD_BITS

        /*
         * If cr0 = 0, then the current thread is the last thread of the
         * core entering sleep. The last thread needs to execute the
         * hardware bug workaround code if required by the platform.
         * The workaround call is made unconditionally here; the branch
         * below is patched out during idle state discovery if the
         * platform does not require it.
         */
        .global pnv_fastsleep_workaround_at_entry
pnv_fastsleep_workaround_at_entry:
        beq     fastsleep_workaround_at_entry

        stwcx.  r15,0,r14
        bne-    lwarx_loop1
        isync

common_enter: /* common code for all the threads entering sleep */
        IDLE_STATE_ENTER_SEQ(PPC_SLEEP)

fastsleep_workaround_at_entry:
        ori     r15,r15,PNV_CORE_IDLE_LOCK_BIT
        stwcx.  r15,0,r14
        bne-    lwarx_loop1
        isync

        /* Fast sleep workaround */
        li      r3,1
        li      r4,1
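        /*
         * Arguments to the OPAL_CONFIG_CPU_IDLE_STATE call: r4 = 1 here
         * and r4 = 0 in the matching exit-path call below, i.e. apply
         * vs. undo the workaround; r3 presumably identifies the idle
         * state (fastsleep) being configured.
         */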
        li      r0,OPAL_CONFIG_CPU_IDLE_STATE
        bl      opal_call_realmode

        /* Clear Lock bit */
        li      r0,0
        lwsync
        stw     r0,0(r14)
        b       common_enter


_GLOBAL(power7_idle)
        /* Now check if user or arch enabled NAP mode */
        LOAD_REG_ADDRBASE(r3,powersave_nap)
        lwz     r4,ADDROFF(powersave_nap)(r3)
        cmpwi   0,r4,0
        beqlr
        li      r3, 1
        /* fall through */

_GLOBAL(power7_nap)
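        /*
         * On entry r3 is the caller's check-IRQ flag (see the header
         * comment above): it moves to r4 and r3 takes the requested
         * idle state.
         */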
        mr      r4,r3
        li      r3,PNV_THREAD_NAP
        b       power7_powersave_common
        /* No return */

_GLOBAL(power7_sleep)
        li      r3,PNV_THREAD_SLEEP
        li      r4,1
        b       power7_powersave_common
        /* No return */

#define CHECK_HMI_INTERRUPT                                     \
        mfspr   r0,SPRN_SRR1;                                   \
BEGIN_FTR_SECTION_NESTED(66);                                   \
        rlwinm  r0,r0,45-31,0xf;  /* extract wake reason field (P8) */ \
FTR_SECTION_ELSE_NESTED(66);                                    \
        rlwinm  r0,r0,45-31,0xe;  /* P7 wake reason field is 3 bits */ \
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);        \
        cmpwi   r0,0xa;                 /* Hypervisor maintenance? */ \
        bne     20f;                                            \
        /* Invoke OPAL call to handle the HMI */                \
        ld      r2,PACATOC(r13);                                \
        ld      r1,PACAR1(r13);                                 \
        std     r3,ORIG_GPR3(r1);       /* Save original r3 */  \
        li      r0,OPAL_HANDLE_HMI;     /* Pass OPAL token argument */ \
        bl      opal_call_realmode;                             \
        ld      r3,ORIG_GPR3(r1);       /* Restore original r3 */ \
20:     nop;
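
/*
 * CHECK_HMI_INTERRUPT decodes the wake reason from SRR1 (a 4-bit field
 * on POWER8, 3 bits on POWER7); 0xa means a Hypervisor Maintenance
 * Interrupt, which is handed to firmware via OPAL_HANDLE_HMI while
 * still in real mode. r3 is preserved around the call because it may
 * carry the caller's return value.
 */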

_GLOBAL(power7_wakeup_tb_loss)
        ld      r2,PACATOC(r13);
        ld      r1,PACAR1(r13)
        /*
         * Before entering any idle state, the NVGPRs are saved on the
         * stack and they are restored before switching to the process
         * context. Hence until they are restored, they are free to be
         * used.
         *
         * Save SRR1 in a NVGPR as it might be clobbered in
         * opal_call_realmode (called in CHECK_HMI_INTERRUPT). SRR1 is
         * required to determine the wakeup reason if we branch to
         * kvm_start_guest.
         */

        mfspr   r16,SPRN_SRR1
BEGIN_FTR_SECTION
        CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)

        lbz     r7,PACA_THREAD_MASK(r13)
        ld      r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop2:
        lwarx   r15,0,r14
        andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
        /*
         * The lock bit is set in one of two cases:
         * a. In the sleep/winkle enter path, the last thread is executing
         *    the fastsleep workaround code.
         * b. In the wake up path, another thread is executing the
         *    fastsleep workaround undo code, resyncing the timebase or
         *    restoring context.
         * In either case loop until the lock bit is cleared.
         */
        bne     core_idle_lock_held

        cmpwi   cr2,r15,0
        or      r15,r15,r7              /* Set thread bit */

        beq     cr2,first_thread

        /* Not first thread in core to wake up */
        stwcx.  r15,0,r14
        bne-    lwarx_loop2
        isync
        b       common_exit

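        /*
         * Someone else holds the lock: drop SMT priority and spin on a
         * plain (non-reserving) load until the lock bit clears, then
         * retry the lwarx/stwcx. update.
         */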
core_idle_lock_held:
        HMT_LOW
core_idle_lock_loop:
        lwz     r15,0(r14)
        andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
        bne     core_idle_lock_loop
        HMT_MEDIUM
        b       lwarx_loop2

first_thread:
        /* First thread in core to wakeup */
        ori     r15,r15,PNV_CORE_IDLE_LOCK_BIT
        stwcx.  r15,0,r14
        bne-    lwarx_loop2
        isync

        /*
         * First thread in the core waking up from fastsleep. It needs to
         * call the fastsleep workaround code if the platform requires it.
         * Call it unconditionally here. The branch instruction below will
         * be patched out during idle state discovery if the platform
         * does not require the workaround.
         */
        .global pnv_fastsleep_workaround_at_exit
pnv_fastsleep_workaround_at_exit:
        b       fastsleep_workaround_at_exit

timebase_resync:
        /* Do timebase resync if we are waking up from sleep. Use the cr3
         * value set in exceptions-64s.S */
        ble     cr3,clear_lock
        /* Time base re-sync */
        li      r0,OPAL_RESYNC_TIMEBASE
        bl      opal_call_realmode;
        /* TODO: Check r3 for failure */
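
        /*
         * Release the lock: keep only the thread bitmask, and use lwsync
         * so the timebase resync and workaround undo are visible before
         * other threads can see the lock bit clear.
         */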
clear_lock:
        andi.   r15,r15,PNV_CORE_IDLE_THREAD_BITS
        lwsync
        stw     r15,0(r14)

common_exit:
        li      r5,PNV_THREAD_RUNNING
        stb     r5,PACA_THREAD_IDLE_STATE(r13)

        mtspr   SPRN_SRR1,r16
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        li      r0,KVM_HWTHREAD_IN_KERNEL
        stb     r0,HSTATE_HWTHREAD_STATE(r13)
        /* Order setting hwthread_state vs. testing hwthread_req */
        sync
        lbz     r0,HSTATE_HWTHREAD_REQ(r13)
        cmpwi   r0,0
        beq     6f
        b       kvm_start_guest
6:
#endif

        REST_NVGPRS(r1)
        REST_GPR(2, r1)
        ld      r3,_CCR(r1)
        ld      r4,_MSR(r1)
        ld      r5,_NIP(r1)
        addi    r1,r1,INT_FRAME_SIZE
        mtcr    r3
        mfspr   r3,SPRN_SRR1            /* Return SRR1 */
        mtspr   SPRN_SRR1,r4
        mtspr   SPRN_SRR0,r5
        rfid

fastsleep_workaround_at_exit:
        li      r3,1
        li      r4,0
        li      r0,OPAL_CONFIG_CPU_IDLE_STATE
        bl      opal_call_realmode
        b       timebase_resync

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(power7_wakeup_loss)
        ld      r1,PACAR1(r13)
BEGIN_FTR_SECTION
        CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
        REST_NVGPRS(r1)
        REST_GPR(2, r1)
        ld      r6,_CCR(r1)
        ld      r4,_MSR(r1)
        ld      r5,_NIP(r1)
        addi    r1,r1,INT_FRAME_SIZE
        mtcr    r6
        mtspr   SPRN_SRR1,r4
        mtspr   SPRN_SRR0,r5
        rfid

/*
 * R3 here contains the value that will be returned to the caller
 * of power7_nap.
 */
_GLOBAL(power7_wakeup_noloss)
        lbz     r0,PACA_NAPSTATELOST(r13)
        cmpwi   r0,0
        bne     power7_wakeup_loss
BEGIN_FTR_SECTION
        CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
        ld      r1,PACAR1(r13)
        ld      r4,_MSR(r1)
        ld      r5,_NIP(r1)
        addi    r1,r1,INT_FRAME_SIZE
        mtspr   SPRN_SRR1,r4
        mtspr   SPRN_SRR0,r5
        rfid