powerpc/pseries/cpuidle: Move processor_idle.c to drivers/cpuidle.
drivers/cpuidle/cpuidle-pseries.c
/*
 *  cpuidle-pseries - idle state cpuidle driver.
 *  Adapted from drivers/idle/intel_idle.c and
 *  drivers/acpi/processor_idle.c
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/paca.h>
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>

struct cpuidle_driver pseries_idle_driver = {
	.name  = "pseries_idle",
	.owner = THIS_MODULE,
};

#define MAX_IDLE_STATE_COUNT	2

static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_device __percpu *pseries_cpuidle_devices;
static struct cpuidle_state *cpuidle_state_table;

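/*
 * On idle entry, snapshot the PURR (Processor Utilization of Resources
 * Register) and mark this CPU idle in the lppaca so the hypervisor can
 * dispatch other work; on idle exit, the elapsed PURR ticks are
 * accounted as wait-state cycles.
 */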
static inline void idle_loop_prolog(unsigned long *in_purr)
{
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}

static inline void idle_loop_epilog(unsigned long in_purr)
{
	u64 wait_cycles;

	wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
	wait_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
	get_lppaca()->idle = 0;
}

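/*
 * snooze_loop - busy-poll at reduced SMT thread priority.
 * HMT_low()/HMT_very_low() drop this thread's priority so sibling
 * hardware threads get more of the core's resources; we spin until
 * work arrives (need_resched()) or the CPU goes offline.
 */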
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	int cpu = dev->cpu;

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	while ((!need_resched()) && cpu_online(cpu)) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	idle_loop_epilog(in_purr);

	return index;
}

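/* Cede the processor to the hypervisor, but only if it is safe to do so. */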
static void check_and_cede_processor(void)
{
	/*
	 * Ensure our interrupt state is properly tracked.  This also
	 * checks whether an interrupt came in while we were
	 * soft-disabled; if so, prep_irq_for_idle() fails and we skip
	 * the cede so the interrupt can be serviced.
	 */
	if (prep_irq_for_idle()) {
		cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
		/* Ensure that H_CEDE returns with IRQs on */
		if (WARN_ON(!(mfmsr() & MSR_EE)))
			__hard_irq_enable();
#endif
	}
}

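/*
 * dedicated_cede_loop - idle a CPU in a dedicated-processor partition.
 * Setting donate_dedicated_cpu lets the hypervisor give our unused
 * dispatch cycles to other partitions while we are ceded.
 */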
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	get_lppaca()->donate_dedicated_cpu = 1;

	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;

	idle_loop_epilog(in_purr);

	return index;
}

static int shared_cede_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);

	/*
	 * Yield the processor to the hypervisor.  We return when an
	 * external interrupt occurs (external interrupts are delivered
	 * before we get back here) or when another processor prods us.
	 * External interrupts are enabled on return.
	 */
	check_and_cede_processor();

	idle_loop_epilog(in_purr);

	return index;
}

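/*
 * exit_latency and target_residency in the tables below are in
 * microseconds, the units the cpuidle core expects.
 */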
/*
 * States for dedicated partition case.
 */
static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
	{ /* CEDE */
		.name = "CEDE",
		.desc = "CEDE",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 10,
		.target_residency = 100,
		.enter = &dedicated_cede_loop },
};

/*
 * States for shared partition case.
 */
static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Shared Cede */
		.name = "Shared Cede",
		.desc = "Shared Cede",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &shared_cede_loop },
};

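/*
 * update_smt_snooze_delay - called when a CPU's smt_snooze_delay sysfs
 * attribute changes.  Only dedicated partitions are affected: a
 * negative residency disables the CEDE state on that CPU; otherwise
 * the value becomes CEDE's new target residency.
 */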
void update_smt_snooze_delay(int cpu, int residency)
{
	struct cpuidle_driver *drv = cpuidle_get_driver();
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (cpuidle_state_table != dedicated_states)
		return;

	if (residency < 0) {
		/* Disable the CEDE state on that cpu */
		if (dev)
			dev->states_usage[1].disable = 1;
	} else
		if (drv)
			drv->states[1].target_residency = residency;
}

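/*
 * CPU hotplug notifier: enable a CPU's cpuidle device as it comes
 * online and disable it once the CPU is dead, holding the cpuidle
 * pause/lock across the transition.
 */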
static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
			unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev =
			per_cpu_ptr(pseries_cpuidle_devices, hotcpu);

	if (dev && cpuidle_get_driver()) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_enable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_disable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		default:
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block setup_hotplug_notifier = {
	.notifier_call = pseries_cpuidle_add_cpu_notifier,
};

/*
 * pseries_cpuidle_driver_init()
 * Copy the enabled states from cpuidle_state_table into the driver,
 * capped at max_idle_state.
 */
static int pseries_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &pseries_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {

		if (idle_state > max_idle_state)
			break;

		/* Skip states that have no enter handler */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	return 0;
}

/*
 * pseries_idle_devices_uninit(void)
 * Unregister the cpuidle devices and free the per-cpu memory.
 */
static void pseries_idle_devices_uninit(void)
{
	int i;
	struct cpuidle_device *dev;

	for_each_possible_cpu(i) {
		dev = per_cpu_ptr(pseries_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	free_percpu(pseries_cpuidle_devices);
}

/*
 * pseries_idle_devices_init()
 * Allocate, initialize and register a cpuidle device for each
 * possible CPU.
 */
static int pseries_idle_devices_init(void)
{
	int i;
	struct cpuidle_driver *drv = &pseries_idle_driver;
	struct cpuidle_device *dev;

	pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (pseries_cpuidle_devices == NULL)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		dev = per_cpu_ptr(pseries_cpuidle_devices, i);
		dev->state_count = drv->state_count;
		dev->cpu = i;
		if (cpuidle_register_device(dev)) {
			printk(KERN_DEBUG
				"cpuidle_register_device %d failed!\n", i);
			return -EIO;
		}
	}

	return 0;
}

/*
 * pseries_idle_probe()
 * Choose the state table for a shared versus dedicated partition.
 */
static int pseries_idle_probe(void)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (max_idle_state == 0) {
		printk(KERN_DEBUG "pseries processor idle disabled.\n");
		return -EPERM;
	}

	if (lppaca_shared_proc(get_lppaca()))
		cpuidle_state_table = shared_states;
	else
		cpuidle_state_table = dedicated_states;

	return 0;
}

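/*
 * Module init: pick the state table, populate and register the driver,
 * register a cpuidle device per possible CPU, then hook CPU hotplug.
 */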
static int __init pseries_processor_idle_init(void)
{
	int retval;

	retval = pseries_idle_probe();
	if (retval)
		return retval;

	pseries_cpuidle_driver_init();
	retval = cpuidle_register_driver(&pseries_idle_driver);
	if (retval) {
		printk(KERN_DEBUG "Registration of pseries driver failed.\n");
		return retval;
	}

	retval = pseries_idle_devices_init();
	if (retval) {
		pseries_idle_devices_uninit();
		cpuidle_unregister_driver(&pseries_idle_driver);
		return retval;
	}

	register_cpu_notifier(&setup_hotplug_notifier);
	printk(KERN_DEBUG "pseries_idle_driver registered\n");

	return 0;
}

static void __exit pseries_processor_idle_exit(void)
{
	unregister_cpu_notifier(&setup_hotplug_notifier);
	pseries_idle_devices_uninit();
	cpuidle_unregister_driver(&pseries_idle_driver);
}

module_init(pseries_processor_idle_init);
module_exit(pseries_processor_idle_exit);

MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("Cpuidle driver for POWER");
MODULE_LICENSE("GPL");