/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

static int __cpuidle_register_device(struct cpuidle_device *dev);

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if no cpuidle driver is available.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}
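
/*
 * Note: on success cpuidle_play_dead() does not return; the selected
 * state's enter_dead() callback takes the CPU down.  It is intended to be
 * called from the architecture's CPU off-lining path (e.g. x86's
 * play_dead()).
 */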

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	ktime_t time_start, time_end;
	s64 diff;

	time_start = ktime_get();

	entered_state = target_state->enter(dev, drv, index);

	time_end = ktime_get();

	local_irq_enable();

	diff = ktime_to_us(ktime_sub(time_end, time_start));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	if (entered_state >= 0) {
		/*
		 * Update cpuidle counters.  This could be moved into the
		 * driver's enter routine, but that would result in multiple
		 * copies of the same code.
		 */
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}
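
/*
 * For reference, a cpuidle driver's ->enter() callback takes the same
 * (dev, drv, index) triple used above and returns the index of the state
 * actually entered.  A minimal sketch (hypothetical driver code, not part
 * of this file):
 *
 *	static int my_enter(struct cpuidle_device *dev,
 *			    struct cpuidle_driver *drv, int index)
 *	{
 *		my_low_power_wait();	(hypothetical hardware idle op)
 *		return index;		(state actually entered)
 *	}
 */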

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 *
 * Returns non-zero on failure.
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv;
	int next_state, entered_state;

	if (off)
		return -ENODEV;

	if (!initialized)
		return -ENODEV;

	/* check if the device is ready */
	if (!dev || !dev->enabled)
		return -EBUSY;

	drv = cpuidle_get_cpu_driver(dev);

	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		dev->last_residency = 0;
		/* give the governor an opportunity to reflect on the outcome */
		if (cpuidle_curr_governor->reflect)
			cpuidle_curr_governor->reflect(dev, next_state);
		local_irq_enable();
		return 0;
	}

	trace_cpu_idle_rcuidle(next_state, dev->cpu);

	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
				   &dev->cpu);

	if (cpuidle_state_is_coupled(dev, drv, next_state))
		entered_state = cpuidle_enter_state_coupled(dev, drv,
							    next_state);
	else
		entered_state = cpuidle_enter_state(dev, drv, next_state);

	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
				   &dev->cpu);

	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev, entered_state);

	return 0;
}
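
/*
 * cpuidle_idle_call() is the entry point used by the architecture's idle
 * loop and is expected to run with interrupts disabled.  On success,
 * interrupts have been re-enabled (here or in cpuidle_enter_state()) by
 * the time it returns; on failure the caller falls back to its default
 * idle routine.
 */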

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
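		/*
		 * Make sure every CPU observes initialized == 0 and leaves
		 * the old idle handler: kick_all_cpus_sync() sends an IPI
		 * to each online CPU and waits for the handlers to finish.
		 */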
		kick_all_cpus_sync();
	}
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
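
/*
 * Typical external usage (a sketch; cpuidle_enable_device() and
 * cpuidle_disable_device() below document that they must be called with
 * cpuidle paused):
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... reconfigure states ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */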

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
		     struct cpuidle_driver *drv, int index)
{
	ktime_t t1, t2;
	s64 diff;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	dev->last_residency = (int) diff;

	return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[0];

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->flags = 0;
	state->enter = poll_idle;
	state->disabled = false;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
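
/*
 * The poll state installed above becomes state 0 of the driver: a plain
 * busy-wait with zero exit latency and zero target residency, so the
 * governor always has a state to fall back to when no hardware idle state
 * is suitable.
 */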

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv || !cpuidle_curr_governor)
		return -EIO;

	if (!dev->state_count)
		dev->state_count = drv->state_count;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	poll_idle_init(drv);

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(drv, dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states_usage[i].usage = 0;
		dev->states_usage[i].time = 0;
	}
	dev->last_residency = 0;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal registration function, called by the
 * register and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto err_sysfs;

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		goto err_coupled;

	dev->registered = 1;
	return 0;

err_coupled:
	cpuidle_remove_sysfs(dev);
err_sysfs:
	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);
	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
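
/*
 * A typical driver-side registration sequence (a sketch; my_driver and
 * my_cpuidle_dev are hypothetical names, and error handling is omitted):
 *
 *	ret = cpuidle_register_driver(&my_driver);
 *	for_each_possible_cpu(cpu) {
 *		dev = &per_cpu(my_cpuidle_dev, cpu);
 *		dev->cpu = cpu;
 *		ret = cpuidle_register_device(dev);
 *	}
 */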

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);
	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();

	module_put(drv->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state.  Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

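/*
 * Note: this code is built in, so the "off" parameter below is set from
 * the kernel command line as "cpuidle.off=1" (module parameters of
 * built-in code get the "cpuidle." prefix).
 */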
module_param(off, int, 0444);
core_initcall(cpuidle_init);