/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
        return off;
}

void disable_cpuidle(void)
{
        off = 1;
}

static int __cpuidle_register_device(struct cpuidle_device *dev);

static inline int cpuidle_enter(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index)
{
        struct cpuidle_state *target_state = &drv->states[index];
        return target_state->enter(dev, drv, index);
}

static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index)
{
        return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index);

static cpuidle_enter_t cpuidle_enter_ops;

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns -ENODEV when there is no driver or no state provides an
 * enter_dead() callback; otherwise returns the value of the selected
 * state's enter_dead() handler.
 */
int cpuidle_play_dead(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_driver();
        int i, dead_state = -1;
        int power_usage = -1;

        if (!drv)
                return -ENODEV;

        /* Find lowest-power state that supports long-term idle */
        for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];

                if (s->power_usage < power_usage && s->enter_dead) {
                        power_usage = s->power_usage;
                        dead_state = i;
                }
        }

        if (dead_state != -1)
                return drv->states[dead_state].enter_dead(dev, dead_state);

        return -ENODEV;
}

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @next_state: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                        int next_state)
{
        int entered_state;

        entered_state = cpuidle_enter_ops(dev, drv, next_state);

        if (entered_state >= 0) {
                /* Update cpuidle counters */
                /* This could be moved into the driver's enter routine,
                 * but that would result in multiple copies of the same code.
                 */
                dev->states_usage[entered_state].time +=
                                (unsigned long long)dev->last_residency;
                dev->states_usage[entered_state].usage++;
        } else {
                dev->last_residency = 0;
        }

        return entered_state;
}

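/*
 * Note: the per-state usage and time counters updated in
 * cpuidle_enter_state() are the values exported through sysfs by
 * cpuidle_add_state_sysfs(), typically visible under
 * /sys/devices/system/cpu/cpuN/cpuidle/stateM/.
 */
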
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * Returns non-zero on failure.
 */
int cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_driver();
        int next_state, entered_state;

        if (off)
                return -ENODEV;

        if (!initialized)
                return -ENODEV;

        /* check if the device is ready */
        if (!dev || !dev->enabled)
                return -EBUSY;

        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(drv, dev);
        if (need_resched()) {
                local_irq_enable();
                return 0;
        }

        trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
        trace_cpu_idle_rcuidle(next_state, dev->cpu);

        if (cpuidle_state_is_coupled(dev, drv, next_state))
                entered_state = cpuidle_enter_state_coupled(dev, drv,
                                                            next_state);
        else
                entered_state = cpuidle_enter_state(dev, drv, next_state);

        trace_power_end_rcuidle(dev->cpu);
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev, entered_state);

        return 0;
}

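/*
 * Illustrative caller (a sketch, not the actual architecture code): the
 * arch idle loop is expected to fall back to its own idle routine when
 * cpuidle_idle_call() returns non-zero, for example:
 *
 *	if (cpuidle_idle_call())
 *		arch_default_idle();
 *
 * where arch_default_idle() stands in for whatever native idle method the
 * architecture provides.
 */
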
/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices) {
                /* Make sure all changes finished before we switch to new idle */
                smp_wmb();
                initialized = 1;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices) {
                initialized = 0;
                kick_all_cpus_sync();
        }
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

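/*
 * Sketch of the typical external calling pattern (cpuidle_enable_device()
 * and cpuidle_disable_device() document that they must be bracketed by the
 * two helpers above when called from outside this file):
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... reconfigure the device or driver ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */
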
/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
 * @dev: pointer to a valid cpuidle_device object
 * @drv: pointer to a valid cpuidle_driver object
 * @index: index of the target cpuidle state.
 */
int cpuidle_wrap_enter(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index,
                                int (*enter)(struct cpuidle_device *dev,
                                        struct cpuidle_driver *drv, int index))
{
        ktime_t time_start, time_end;
        s64 diff;

        time_start = ktime_get();

        index = enter(dev, drv, index);

        time_end = ktime_get();

        /* The enter callback may return with interrupts still disabled. */
        local_irq_enable();

        diff = ktime_to_us(ktime_sub(time_end, time_start));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        return index;
}

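/*
 * A driver opts into this wrapper by setting en_core_tk_irqen in its
 * cpuidle_driver; cpuidle_enable_device() then routes state entry through
 * cpuidle_enter_tk().  Minimal driver-side sketch (foo_idle_driver is a
 * hypothetical example, not a real driver):
 *
 *	static struct cpuidle_driver foo_idle_driver = {
 *		.name			= "foo_idle",
 *		.owner			= THIS_MODULE,
 *		.en_core_tk_irqen	= 1,
 *	};
 */
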
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index)
{
        ktime_t t1, t2;
        s64 diff;

        t1 = ktime_get();
        local_irq_enable();
        while (!need_resched())
                cpu_relax();

        t2 = ktime_get();
        diff = ktime_to_us(ktime_sub(t2, t1));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
        struct cpuidle_state *state = &drv->states[0];

        snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
        state->flags = 0;
        state->enter = poll_idle;
        state->disabled = false;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

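/*
 * Note: on architectures with CONFIG_ARCH_HAS_CPU_RELAX, poll_idle_init()
 * above overwrites drv->states[0], so state 0 is always the busy-wait
 * polling state and real driver states start at CPUIDLE_DRIVER_STATE_START.
 */
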
/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret, i;
        struct cpuidle_driver *drv = cpuidle_get_driver();

        if (!dev)
                return -EINVAL;

        if (dev->enabled)
                return 0;
        if (!drv || !cpuidle_curr_governor)
                return -EIO;
        if (!dev->state_count)
                dev->state_count = drv->state_count;

        if (dev->registered == 0) {
                ret = __cpuidle_register_device(dev);
                if (ret)
                        return ret;
        }

        cpuidle_enter_ops = drv->en_core_tk_irqen ?
                cpuidle_enter_tk : cpuidle_enter;

        poll_idle_init(drv);

        if ((ret = cpuidle_add_state_sysfs(dev)))
                return ret;

        if (cpuidle_curr_governor->enable &&
            (ret = cpuidle_curr_governor->enable(drv, dev)))
                goto fail_sysfs;

        for (i = 0; i < dev->state_count; i++) {
                dev->states_usage[i].usage = 0;
                dev->states_usage[i].time = 0;
        }
        dev->last_residency = 0;

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_state_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        if (!dev || !dev->enabled)
                return;
        if (!cpuidle_get_driver() || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);

        cpuidle_remove_state_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

        if (!try_module_get(cpuidle_driver->owner))
                return -EINVAL;

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
        ret = cpuidle_add_sysfs(dev);
        if (ret)
                goto err_sysfs;

        ret = cpuidle_coupled_register_device(dev);
        if (ret)
                goto err_coupled;

        dev->registered = 1;
        return 0;

err_coupled:
        cpuidle_remove_sysfs(dev);
err_sysfs:
        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
        module_put(cpuidle_driver->owner);
        return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;

        if (!dev)
                return -EINVAL;

        mutex_lock(&cpuidle_lock);

        if ((ret = __cpuidle_register_device(dev))) {
                mutex_unlock(&cpuidle_lock);
                return ret;
        }

        cpuidle_enable_device(dev);
        cpuidle_install_idle_handler();

        mutex_unlock(&cpuidle_lock);

        return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

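/*
 * Sketch of per-CPU registration as seen from a cpuidle driver (foo_idle_dev
 * is a hypothetical per-cpu variable, not part of this file):
 *
 *	struct cpuidle_device *dev = &per_cpu(foo_idle_dev, cpu);
 *
 *	dev->cpu = cpu;
 *	if (cpuidle_register_device(dev))
 *		pr_err("cpuidle: failed to register device for cpu %d\n", cpu);
 */
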
/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

        if (dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(dev);
        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;

        cpuidle_coupled_unregister_device(dev);

        cpuidle_resume_and_unlock();

        module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
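
/*
 * What triggers the notifier above (sketch): some other part of the kernel
 * registers a CPU DMA latency requirement through pm_qos, e.g.
 *
 *	struct pm_qos_request req;
 *
 *	pm_qos_add_request(&req, PM_QOS_CPU_DMA_LATENCY, 20);
 *
 * pm_qos then runs its notifier chain, which ends up in
 * cpuidle_latency_notify() and kicks every CPU out of its current C-state.
 */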

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        if (cpuidle_disabled())
                return -ENODEV;

        ret = cpuidle_add_interface(cpu_subsys.dev_root);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);