pseries/cpuidle: Remove dependency of pseries.h file
[deliverable/linux.git] / arch / powerpc / platforms / pseries / processor_idle.c
CommitLineData
707827f3
DD
1/*
2 * processor_idle - idle state cpuidle driver.
3 * Adapted from drivers/idle/intel_idle.c and
4 * drivers/acpi/processor_idle.c
5 *
6 */
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/moduleparam.h>
12#include <linux/cpuidle.h>
13#include <linux/cpu.h>
16aaaff6 14#include <linux/notifier.h>
707827f3
DD
15
16#include <asm/paca.h>
17#include <asm/reg.h>
707827f3
DD
18#include <asm/machdep.h>
19#include <asm/firmware.h>
ae3a197e 20#include <asm/runlatch.h>
707827f3
DD
21
22#include "plpar_wrappers.h"
707827f3
DD
23
/*
 * The cpuidle driver object registered with the generic cpuidle core.
 * Not static: it is referenced from outside this file (platform code),
 * so the symbol must remain global.
 */
struct cpuidle_driver pseries_idle_driver = {
	.name = "pseries_idle",
	.owner = THIS_MODULE,
};
28
29#define MAX_IDLE_STATE_COUNT 2
30
31static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
32static struct cpuidle_device __percpu *pseries_cpuidle_devices;
33static struct cpuidle_state *cpuidle_state_table;
34
/*
 * idle_loop_prolog - bookkeeping done on every idle-state entry.
 * @in_purr: out-param; receives the PURR (Processor Utilization of
 *           Resources Register) value at idle entry, later consumed by
 *           idle_loop_epilog() to account the cycles spent idle.
 */
static inline void idle_loop_prolog(unsigned long *in_purr)
{
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}
44
/*
 * idle_loop_epilog - bookkeeping done on every idle-state exit.
 * @in_purr: PURR value captured by idle_loop_prolog() at idle entry.
 *
 * Adds the PURR cycles spent idle to the lppaca wait_state_cycles
 * counter and clears the idle indication to the hypervisor.
 */
static inline void idle_loop_epilog(unsigned long in_purr)
{
	u64 wait_cycles;

	/*
	 * wait_state_cycles lives in the lppaca in big-endian format;
	 * convert, accumulate, convert back.
	 */
	wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
	wait_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
	get_lppaca()->idle = 0;
}
54
/*
 * snooze_loop - shallowest idle state: spin at low thread priority.
 * @dev:   per-cpu cpuidle device
 * @drv:   the cpuidle driver (unused here)
 * @index: index of this state in the driver's state table
 *
 * Busy-waits with the SMT thread priority dropped (HMT_low/HMT_very_low)
 * until work arrives or the cpu is taken offline, so the sibling thread
 * can use the core's resources.  Returns @index, as the cpuidle core
 * expects.
 */
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	int cpu = dev->cpu;

	idle_loop_prolog(&in_purr);
	/*
	 * NOTE(review): interrupts are enabled before TIF_POLLING_NRFLAG
	 * is set; presumably a wakeup arriving in that window is caught
	 * by the need_resched() test below — confirm against the
	 * scheduler's polling-idle protocol.
	 */
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* Spin until work is pending or this cpu goes offline. */
	while ((!need_resched()) && cpu_online(cpu)) {
		ppc64_runlatch_off();
		HMT_low();
		HMT_very_low();
	}

	/* Restore normal SMT priority before leaving the idle state. */
	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	idle_loop_epilog(in_purr);

	return index;
}
80
7230c564
BH
/*
 * check_and_cede_processor - cede the cpu to the hypervisor, but only
 * if the lazy-irq-disable state allows it.
 *
 * prep_irq_for_idle() reconciles the soft-disabled irq state and tells
 * us whether it is safe to enter the hypervisor; if an interrupt is
 * already pending we skip the cede entirely and let it be serviced.
 */
static void check_and_cede_processor(void)
{
	/*
	 * Ensure our interrupt state is properly tracked,
	 * also checks if no interrupt has occurred while we
	 * were soft-disabled
	 */
	if (prep_irq_for_idle()) {
		cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
		/* Ensure that H_CEDE returns with IRQs on */
		if (WARN_ON(!(mfmsr() & MSR_EE)))
			__hard_irq_enable();
#endif
	}
}
97
707827f3
DD
/*
 * dedicated_cede_loop - CEDE state for dedicated-processor partitions.
 * @dev:   per-cpu cpuidle device (unused here)
 * @drv:   the cpuidle driver (unused here)
 * @index: index of this state in the driver's state table
 *
 * Marks the cpu as donating its dedicated cycles to the hypervisor for
 * the duration of the cede, then yields via check_and_cede_processor().
 * Returns @index.
 */
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	/* Tell the HV it may give our dedicated cycles to other partitions. */
	get_lppaca()->donate_dedicated_cpu = 1;

	ppc64_runlatch_off();
	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;

	idle_loop_epilog(in_purr);

	return index;
}
117
/*
 * shared_cede_loop - CEDE state for shared-processor partitions.
 * @dev:   per-cpu cpuidle device (unused here)
 * @drv:   the cpuidle driver (unused here)
 * @index: index of this state in the driver's state table
 *
 * Returns @index.
 */
static int shared_cede_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);

	/*
	 * Yield the processor to the hypervisor. We return if
	 * an external interrupt occurs (which are driven prior
	 * to returning here) or if a prod occurs from another
	 * processor. When returning here, external interrupts
	 * are enabled.
	 */
	check_and_cede_processor();

	idle_loop_epilog(in_purr);

	return index;
}
139
/*
 * States for dedicated partition case.
 *
 * Slot 0 is the polling snooze loop, slot 1 the H_CEDE-based state.
 * exit_latency/target_residency are in microseconds, per the cpuidle
 * framework convention; the CEDE residency may be overridden at runtime
 * by update_smt_snooze_delay().
 */
static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
	{ /* CEDE */
		.name = "CEDE",
		.desc = "CEDE",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 10,
		.target_residency = 100,
		.enter = &dedicated_cede_loop },
};
159
/*
 * States for shared partition case.
 *
 * Only a single cede state: on shared processors the hypervisor is
 * always free to redistribute the ceded cycles, so no snooze/donate
 * distinction is needed.
 */
static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Shared Cede */
		.name = "Shared Cede",
		.desc = "Shared Cede",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &shared_cede_loop },
};
172
8ea959a1
DD
173void update_smt_snooze_delay(int cpu, int residency)
174{
175 struct cpuidle_driver *drv = cpuidle_get_driver();
176 struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
177
178 if (cpuidle_state_table != dedicated_states)
179 return;
180
181 if (residency < 0) {
182 /* Disable the Nap state on that cpu */
183 if (dev)
184 dev->states_usage[1].disable = 1;
185 } else
186 if (drv)
83dac594 187 drv->states[1].target_residency = residency;
8ea959a1
DD
188}
189
16aaaff6
DD
190static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
191 unsigned long action, void *hcpu)
707827f3 192{
16aaaff6 193 int hotcpu = (unsigned long)hcpu;
707827f3 194 struct cpuidle_device *dev =
16aaaff6
DD
195 per_cpu_ptr(pseries_cpuidle_devices, hotcpu);
196
852d8cb1
DD
197 if (dev && cpuidle_get_driver()) {
198 switch (action) {
199 case CPU_ONLINE:
200 case CPU_ONLINE_FROZEN:
201 cpuidle_pause_and_lock();
16aaaff6 202 cpuidle_enable_device(dev);
852d8cb1
DD
203 cpuidle_resume_and_unlock();
204 break;
205
206 case CPU_DEAD:
207 case CPU_DEAD_FROZEN:
208 cpuidle_pause_and_lock();
209 cpuidle_disable_device(dev);
210 cpuidle_resume_and_unlock();
211 break;
212
213 default:
214 return NOTIFY_DONE;
16aaaff6 215 }
707827f3 216 }
16aaaff6 217 return NOTIFY_OK;
707827f3
DD
218}
219
16aaaff6
DD
/* Hotplug notifier registered in module init, unregistered on exit. */
static struct notifier_block setup_hotplug_notifier = {
	.notifier_call = pseries_cpuidle_add_cpu_notifier,
};
223
707827f3
DD
224/*
225 * pseries_cpuidle_driver_init()
226 */
227static int pseries_cpuidle_driver_init(void)
228{
229 int idle_state;
230 struct cpuidle_driver *drv = &pseries_idle_driver;
231
232 drv->state_count = 0;
233
234 for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {
235
236 if (idle_state > max_idle_state)
237 break;
238
239 /* is the state not enabled? */
240 if (cpuidle_state_table[idle_state].enter == NULL)
241 continue;
242
243 drv->states[drv->state_count] = /* structure copy */
244 cpuidle_state_table[idle_state];
245
707827f3
DD
246 drv->state_count += 1;
247 }
248
249 return 0;
250}
251
252/* pseries_idle_devices_uninit(void)
253 * unregister cpuidle devices and de-allocate memory
254 */
255static void pseries_idle_devices_uninit(void)
256{
257 int i;
258 struct cpuidle_device *dev;
259
260 for_each_possible_cpu(i) {
261 dev = per_cpu_ptr(pseries_cpuidle_devices, i);
262 cpuidle_unregister_device(dev);
263 }
264
265 free_percpu(pseries_cpuidle_devices);
266 return;
267}
268
269/* pseries_idle_devices_init()
270 * allocate, initialize and register cpuidle device
271 */
272static int pseries_idle_devices_init(void)
273{
274 int i;
275 struct cpuidle_driver *drv = &pseries_idle_driver;
276 struct cpuidle_device *dev;
277
278 pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
279 if (pseries_cpuidle_devices == NULL)
280 return -ENOMEM;
281
282 for_each_possible_cpu(i) {
283 dev = per_cpu_ptr(pseries_cpuidle_devices, i);
284 dev->state_count = drv->state_count;
285 dev->cpu = i;
286 if (cpuidle_register_device(dev)) {
287 printk(KERN_DEBUG \
288 "cpuidle_register_device %d failed!\n", i);
289 return -EIO;
290 }
291 }
292
293 return 0;
294}
295
296/*
297 * pseries_idle_probe()
298 * Choose state table for shared versus dedicated partition
299 */
300static int pseries_idle_probe(void)
301{
302
303 if (!firmware_has_feature(FW_FEATURE_SPLPAR))
304 return -ENODEV;
305
e8bb3e00
DD
306 if (cpuidle_disable != IDLE_NO_OVERRIDE)
307 return -ENODEV;
308
707827f3
DD
309 if (max_idle_state == 0) {
310 printk(KERN_DEBUG "pseries processor idle disabled.\n");
311 return -EPERM;
312 }
313
f13c13a0 314 if (lppaca_shared_proc(get_lppaca()))
707827f3
DD
315 cpuidle_state_table = shared_states;
316 else
317 cpuidle_state_table = dedicated_states;
318
319 return 0;
320}
321
322static int __init pseries_processor_idle_init(void)
323{
324 int retval;
325
326 retval = pseries_idle_probe();
327 if (retval)
328 return retval;
329
330 pseries_cpuidle_driver_init();
331 retval = cpuidle_register_driver(&pseries_idle_driver);
332 if (retval) {
333 printk(KERN_DEBUG "Registration of pseries driver failed.\n");
334 return retval;
335 }
336
337 retval = pseries_idle_devices_init();
338 if (retval) {
339 pseries_idle_devices_uninit();
340 cpuidle_unregister_driver(&pseries_idle_driver);
341 return retval;
342 }
343
16aaaff6 344 register_cpu_notifier(&setup_hotplug_notifier);
707827f3
DD
345 printk(KERN_DEBUG "pseries_idle_driver registered\n");
346
347 return 0;
348}
349
350static void __exit pseries_processor_idle_exit(void)
351{
352
852d8cb1 353 unregister_cpu_notifier(&setup_hotplug_notifier);
707827f3
DD
354 pseries_idle_devices_uninit();
355 cpuidle_unregister_driver(&pseries_idle_driver);
356
357 return;
358}
359
/* Module entry/exit hooks and metadata. */
module_init(pseries_processor_idle_init);
module_exit(pseries_processor_idle_exit);

MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("Cpuidle driver for POWER");
MODULE_LICENSE("GPL");
This page took 0.12908 seconds and 5 git commands to generate.