Commit | Line | Data |
---|---|---|
948cf67c | 1 | /* |
7230c564 | 2 | * This file contains the power_save function for Power7 CPUs. |
948cf67c BH |
3 | * |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation; either version | |
7 | * 2 of the License, or (at your option) any later version. | |
8 | */ | |
9 | ||
10 | #include <linux/threads.h> | |
11 | #include <asm/processor.h> | |
12 | #include <asm/page.h> | |
13 | #include <asm/cputable.h> | |
14 | #include <asm/thread_info.h> | |
15 | #include <asm/ppc_asm.h> | |
16 | #include <asm/asm-offsets.h> | |
17 | #include <asm/ppc-opcode.h> | |
7230c564 | 18 | #include <asm/hw_irq.h> |
f0888f70 | 19 | #include <asm/kvm_book3s_asm.h> |
97eb001f | 20 | #include <asm/opal.h> |
948cf67c BH |
21 | |
22 | #undef DEBUG | |
23 | ||
/* Idle state entry routines */

/*
 * IDLE_STATE_ENTER_SEQ(IDLE_INST) -- enter a hardware idle state.
 *
 * "Magic" NAP/SLEEP/WINKLE entry sequence: store r0 to the stack,
 * ptesync, then load it back and spin on a compare of r0 with itself
 * (always equal, so the loop never repeats) before issuing the idle
 * instruction.  NOTE(review): this store/ptesync/load/compare shape
 * appears to be a hardware-required ordering idiom to make sure prior
 * stores are globally performed before the thread idles -- confirm
 * against the POWER7 user manual before altering.
 *
 * IDLE_INST is the idle opcode macro (e.g. PPC_NAP or PPC_SLEEP).
 * Control never falls past it; the trailing "b ." catches any
 * unexpected return.
 */
#define IDLE_STATE_ENTER_SEQ(IDLE_INST)	\
	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
	std	r0,0(r1);		\
	ptesync;			\
	ld	r0,0(r1);		\
1:	cmp	cr0,r0,r0;		\
	bne	1b;			\
	IDLE_INST;			\
	b	.
948cf67c | 35 | |
aca79d2b VS |
36 | .text |
37 | ||
/*
 * power7_powersave_common -- common entry for nap/sleep.
 *
 * Pass requested state in r3:
 *	0 - nap
 *	1 - sleep
 *
 * Whether to check IRQ_HAPPENED in r4:
 *	0 - don't check
 *	1 - check (if an interrupt became pending while soft-disabled,
 *	    pop the frame and return to the caller instead of idling)
 *
 * Falls through into power7_enter_nap_mode after saving state.
 */
_GLOBAL(power7_powersave_common)
	/* Use r3 to pass state nap/sleep/winkle */
	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
	 * need to save PC, some CR bits and the NV GPRs,
	 * but for now an interrupt frame will do.
	 */
	mflr	r0
	std	r0,16(r1)		/* save LR in caller's frame */
	stdu	r1,-INT_FRAME_SIZE(r1)	/* allocate an interrupt frame */
	std	r0,_LINK(r1)
	std	r0,_NIP(r1)		/* resume point = our caller's LR */

#ifndef CONFIG_SMP
	/* Make sure FPU, VSX etc... are flushed as we may lose
	 * state when going to nap mode
	 */
	bl	discard_lazy_cpu_state
#endif /* CONFIG_SMP */

	/* Hard disable interrupts: rotate MSR so the EE bit is the MSB,
	 * clear it with the rldicl mask, rotate back, then write MSR.
	 */
	mfmsr	r9
	rldicl	r9,r9,48,1
	rotldi	r9,r9,16
	mtmsrd	r9,1			/* hard-disable interrupts */

	/* Check if something happened while soft-disabled */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	beq	1f			/* nothing pending: go idle */
	cmpwi	cr0,r4,0
	beq	1f			/* caller said don't check */
	addi	r1,r1,INT_FRAME_SIZE	/* pop our frame ... */
	ld	r0,16(r1)
	mtlr	r0
	blr				/* ... and return without idling */

1:	/* We mark irqs hard disabled as this is the state we'll
	 * be in when returning and we need to tell arch_local_irq_restore()
	 * about it
	 */
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* We haven't lost state ... yet */
	li	r0,0
	stb	r0,PACA_NAPSTATELOST(r13)

	/* Continue saving state: r2 (TOC), non-volatile GPRs, CR and the
	 * hard-disabled MSR; then publish the frame pointer in the PACA
	 * so the wakeup paths can find it.
	 */
	SAVE_GPR(2, r1)
	SAVE_NVGPRS(r1)
	mfcr	r4
	std	r4,_CCR(r1)
	std	r9,_MSR(r1)
	std	r1,PACAR1(r13)

/*
 * power7_enter_nap_mode -- issue the actual idle instruction.
 *
 * In: r3 = requested state (0 = nap, anything else handled as nap
 *          except 1 = sleep).
 * Does not return; the thread resumes via the system reset wakeup
 * vector into one of the power7_wakeup_* routines.
 */
_GLOBAL(power7_enter_nap_mode)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* Tell KVM we're napping */
	li	r4,KVM_HWTHREAD_IN_NAP
	stb	r4,HSTATE_HWTHREAD_STATE(r13)
#endif
	cmpwi	cr0,r3,1
	beq	2f			/* state 1: sleep */
	IDLE_STATE_ENTER_SEQ(PPC_NAP)
	/* No return */
2:	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
	/* No return */
f0888f70 | 116 | |
aca79d2b VS |
/*
 * power7_idle -- default idle entry point.
 *
 * Naps only if the powersave_nap knob is non-zero; otherwise returns
 * immediately.  Falls through into power7_nap with r3 = 1 so that the
 * pending-interrupt check in power7_powersave_common is enabled.
 */
_GLOBAL(power7_idle)
	/* Now check if user or arch enabled NAP mode */
	LOAD_REG_ADDRBASE(r3,powersave_nap)
	lwz	r4,ADDROFF(powersave_nap)(r3)
	cmpwi	0,r4,0
	beqlr				/* nap disabled: just return */
	li	r3, 1			/* do check IRQ_HAPPENED */
	/* fall through */
125 | ||
/*
 * power7_nap -- enter nap.
 *
 * In: r3 = whether to check IRQ_HAPPENED while soft-disabled
 *          (0 = don't check, 1 = check); forwarded in r4.
 */
_GLOBAL(power7_nap)
	mr	r4,r3			/* r4 = IRQ_HAPPENED check flag */
	li	r3,0			/* requested state 0 = nap */
	b	power7_powersave_common
	/* No return */
131 | ||
/*
 * power7_sleep -- enter sleep unconditionally.
 *
 * No IRQ_HAPPENED check is requested (r4 = 0).
 */
_GLOBAL(power7_sleep)
	li	r3,1			/* requested state 1 = sleep */
	li	r4,0			/* don't check IRQ_HAPPENED */
	b	power7_powersave_common
	/* No return */
948cf67c | 137 | |
97eb001f VS |
/*
 * power7_wakeup_tb_loss -- wakeup path when the timebase was lost
 * (deep idle state).
 *
 * Asks OPAL firmware to resynchronize the timebase, then restores the
 * state saved by power7_powersave_common from the frame recorded in
 * PACAR1 and returns to the saved NIP via rfid.
 * Out: r3 = SRR1 (wakeup reason) for the code we rfid back to.
 */
_GLOBAL(power7_wakeup_tb_loss)
	ld	r2,PACATOC(r13);	/* reload kernel TOC */
	ld	r1,PACAR1(r13)		/* recover saved stack frame */

	/* Time base re-sync */
	li	r0,OPAL_RESYNC_TIMEBASE	/* r0 = OPAL call token */
	LOAD_REG_ADDR(r11,opal);
	ld	r12,8(r11);		/* offset 8: presumably opal entry
					 * point -- confirm vs struct opal */
	ld	r2,0(r11);		/* offset 0: opal base, passed in r2
					 * per the OPAL calling convention */
	mtctr	r12
	bctrl				/* call into firmware */

	/* TODO: Check r3 for failure */

	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r3,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE	/* pop the saved frame */
	mtcr	r3
	mfspr	r3,SPRN_SRR1		/* Return SRR1 */
	mtspr	SPRN_SRR1,r4		/* saved MSR ... */
	mtspr	SPRN_SRR0,r5		/* ... and resume address */
	rfid
163 | ||
/*
 * power7_wakeup_loss -- wakeup path when register state was lost.
 *
 * Restores r2, the non-volatile GPRs, CR, MSR and NIP from the frame
 * saved by power7_powersave_common (found via PACAR1), then rfids back
 * to the saved resume address.  SRR1 (in force at wakeup) is left
 * untouched until the mtspr below replaces it with the saved MSR.
 */
_GLOBAL(power7_wakeup_loss)
	ld	r1,PACAR1(r13)		/* recover saved stack frame */
	REST_NVGPRS(r1)
	REST_GPR(2, r1)
	ld	r3,_CCR(r1)
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE	/* pop the saved frame */
	mtcr	r3
	mtspr	SPRN_SRR1,r4		/* saved MSR ... */
	mtspr	SPRN_SRR0,r5		/* ... and resume address */
	rfid
176 | ||
/*
 * power7_wakeup_noloss -- fast wakeup path when no state was lost.
 *
 * If PACA_NAPSTATELOST says state was in fact lost, divert to the
 * full-restore path (power7_wakeup_loss).  Otherwise only MSR and the
 * resume address need restoring; GPRs/CR survived the nap.
 */
_GLOBAL(power7_wakeup_noloss)
	lbz	r0,PACA_NAPSTATELOST(r13)
	cmpwi	r0,0
	bne	power7_wakeup_loss	/* state lost: full restore */
	ld	r1,PACAR1(r13)		/* recover saved stack frame */
	ld	r4,_MSR(r1)
	ld	r5,_NIP(r1)
	addi	r1,r1,INT_FRAME_SIZE	/* pop the saved frame */
	mtspr	SPRN_SRR1,r4		/* saved MSR ... */
	mtspr	SPRN_SRR0,r5		/* ... and resume address */
	rfid