/*
 * processor_idle - idle state cpuidle driver.
 * Adapted from drivers/idle/intel_idle.c and
 * drivers/acpi/processor_idle.c
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/paca.h>
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>

#include "plpar_wrappers.h"
#include "pseries.h"

struct cpuidle_driver pseries_idle_driver = {
	.name = "pseries_idle",
	.owner = THIS_MODULE,
};

#define MAX_IDLE_STATE_COUNT	2

static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_device __percpu *pseries_cpuidle_devices;
static struct cpuidle_state *cpuidle_state_table;

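/*
 * Bracket an idle episode: snapshot the entry time and the PURR
 * (processor utilization of resources register) so the exit path can
 * account the cycles spent idle, and flag this vCPU as idle to the
 * hypervisor.
 */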
static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
{
	*kt_before = ktime_get();
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}

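/*
 * Mirror of idle_loop_prolog(): credit the PURR cycles spent idle to
 * the lppaca wait-state accounting and return the wall-clock residency
 * in microseconds.
 */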
static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
{
	get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->idle = 0;

	return ktime_to_us(ktime_sub(ktime_get(), kt_before));
}

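/*
 * Polling idle state: spin at reduced SMT thread priority until work
 * arrives, so sibling hardware threads get a larger share of the core.
 */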
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	ktime_t kt_before;
	int cpu = dev->cpu;

	idle_loop_prolog(&in_purr, &kt_before);
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	while ((!need_resched()) && cpu_online(cpu)) {
		ppc64_runlatch_off();
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}

static void check_and_cede_processor(void)
{
	/*
	 * Ensure our interrupt state is properly tracked, and check
	 * whether an interrupt occurred while we were soft-disabled;
	 * if one did, skip the cede and go handle it instead.
	 */
	if (prep_irq_for_idle()) {
		cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
		/* Ensure that H_CEDE returns with IRQs on */
		if (WARN_ON(!(mfmsr() & MSR_EE)))
			__hard_irq_enable();
#endif
	}
}

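/*
 * On a dedicated-processor partition, cede the CPU to the hypervisor
 * with the donate flag set, so our idle cycles may be dispatched to
 * other partitions until we are woken.
 */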
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;
	ktime_t kt_before;

	idle_loop_prolog(&in_purr, &kt_before);
	get_lppaca()->donate_dedicated_cpu = 1;

	ppc64_runlatch_off();
	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}

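/*
 * On a shared-processor partition the hypervisor time-slices the
 * physical CPU anyway, so simply cede whenever we go idle.
 */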
static int shared_cede_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	ktime_t kt_before;

	idle_loop_prolog(&in_purr, &kt_before);

	/*
	 * Yield the processor to the hypervisor. We return if an
	 * external interrupt occurs (external interrupts are delivered
	 * before we return here) or if a prod occurs from another
	 * processor. When returning here, external interrupts are
	 * enabled.
	 */
	check_and_cede_processor();

	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}

/*
 * States for dedicated partition case.
 */
static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
	{ /* CEDE */
		.name = "CEDE",
		.desc = "CEDE",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 10,
		.target_residency = 100,
		.enter = &dedicated_cede_loop },
};

/*
 * States for shared partition case.
 */
static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Shared Cede */
		.name = "Shared Cede",
		.desc = "Shared Cede",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &shared_cede_loop },
};

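/*
 * Called when a cpu's smt_snooze_delay is updated: a negative delay
 * disables the CEDE state on that cpu, otherwise the value becomes the
 * CEDE state's target residency.
 */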
void update_smt_snooze_delay(int cpu, int residency)
{
	struct cpuidle_driver *drv = cpuidle_get_driver();
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (cpuidle_state_table != dedicated_states)
		return;

	if (residency < 0) {
		/* Disable the CEDE state on that cpu */
		if (dev)
			dev->states_usage[1].disable = 1;
	} else
		if (drv)
			drv->states[1].target_residency = residency;
}

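/*
 * CPU hotplug notifier: enable the per-cpu cpuidle device when a cpu
 * comes online and disable it when the cpu goes away.
 */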
static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
			unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev =
			per_cpu_ptr(pseries_cpuidle_devices, hotcpu);

	if (dev && cpuidle_get_driver()) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_enable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_disable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		default:
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block setup_hotplug_notifier = {
	.notifier_call = pseries_cpuidle_add_cpu_notifier,
};

/*
 * pseries_cpuidle_driver_init()
 * Copy the states of the chosen table into the driver, up to
 * max_idle_state, skipping any state without an enter method.
 */
static int pseries_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &pseries_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {
		if (idle_state > max_idle_state)
			break;

		/* is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	return 0;
}

/* pseries_idle_devices_uninit(void)
 * unregister cpuidle devices and de-allocate memory
 */
static void pseries_idle_devices_uninit(void)
{
	int i;
	struct cpuidle_device *dev;

	for_each_possible_cpu(i) {
		dev = per_cpu_ptr(pseries_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	free_percpu(pseries_cpuidle_devices);
}

/* pseries_idle_devices_init()
 * allocate, initialize and register cpuidle device
 */
static int pseries_idle_devices_init(void)
{
	int i;
	struct cpuidle_driver *drv = &pseries_idle_driver;
	struct cpuidle_device *dev;

	pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (pseries_cpuidle_devices == NULL)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		dev = per_cpu_ptr(pseries_cpuidle_devices, i);
		dev->state_count = drv->state_count;
		dev->cpu = i;
		if (cpuidle_register_device(dev)) {
			printk(KERN_DEBUG
				"cpuidle_register_device %d failed!\n", i);
			return -EIO;
		}
	}

	return 0;
}

/*
 * pseries_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int pseries_idle_probe(void)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (max_idle_state == 0) {
		printk(KERN_DEBUG "pseries processor idle disabled.\n");
		return -EPERM;
	}

	if (get_lppaca()->shared_proc)
		cpuidle_state_table = shared_states;
	else
		cpuidle_state_table = dedicated_states;

	return 0;
}

static int __init pseries_processor_idle_init(void)
{
	int retval;

	retval = pseries_idle_probe();
	if (retval)
		return retval;

	pseries_cpuidle_driver_init();
	retval = cpuidle_register_driver(&pseries_idle_driver);
	if (retval) {
		printk(KERN_DEBUG "Registration of pseries driver failed.\n");
		return retval;
	}

	retval = pseries_idle_devices_init();
	if (retval) {
		pseries_idle_devices_uninit();
		cpuidle_unregister_driver(&pseries_idle_driver);
		return retval;
	}

	register_cpu_notifier(&setup_hotplug_notifier);
	printk(KERN_DEBUG "pseries_idle_driver registered\n");

	return 0;
}

static void __exit pseries_processor_idle_exit(void)
{
	unregister_cpu_notifier(&setup_hotplug_notifier);
	pseries_idle_devices_uninit();
	cpuidle_unregister_driver(&pseries_idle_driver);
}

module_init(pseries_processor_idle_init);
module_exit(pseries_processor_idle_exit);

MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("Cpuidle driver for POWER");
MODULE_LICENSE("GPL");