/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <trace/events/power.h>

#include "cpuidle.h"
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
static void (*pm_idle_old)(void);

static int enabled_devices;
static int off __read_mostly;
int cpuidle_disabled(void)
{
	return off;
}
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
	cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif
static int __cpuidle_register_device(struct cpuidle_device *dev);
/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_state *target_state;
	int next_state;
	/* check if the device is ready */
	if (!dev || !dev->enabled) {
		if (pm_idle_old)
			pm_idle_old();
		else
#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
			default_idle();
#else
			local_irq_enable();
#endif
		return;
	}
#if 0
	/* shows regressions, re-enable for 2.6.29 */

	/*
	 * run any timers that can be run now, at this point
	 * before calculating the idle duration etc.
	 */
	hrtimer_peek_ahead_timers();
#endif
	/*
	 * Call the device's prepare function before calling the
	 * governor's select function.  ->prepare gives the device's
	 * cpuidle driver a chance to update any dynamic information
	 * of its cpuidle states for the current idle period, e.g.
	 * state availability, latencies, residencies, etc.
	 */
	if (dev->prepare)
		dev->prepare(dev);
	/* ask the governor for the next state */
	next_state = cpuidle_curr_governor->select(dev);
	if (need_resched()) {
		local_irq_enable();
		return;
	}
	target_state = &dev->states[next_state];

	/* enter the state and update stats */
	dev->last_state = target_state;
	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
	trace_cpu_idle(next_state, dev->cpu);

	dev->last_residency = target_state->enter(dev, target_state);

	trace_power_end(dev->cpu);
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
	/* the driver may have demoted to a different state; trust its report */
	if (dev->last_state)
		target_state = dev->last_state;

	target_state->time += (unsigned long long)dev->last_residency;
	target_state->usage++;
	/* give the governor an opportunity to reflect on the outcome */
	if (cpuidle_curr_governor->reflect)
		cpuidle_curr_governor->reflect(dev);
}
/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		pm_idle = cpuidle_idle_call;
	}
}
/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
		pm_idle = pm_idle_old;
		cpuidle_kick_cpus();
	}
}
/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);
/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
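/*
 * Illustrative usage note (not part of the original file): callers outside
 * this file are expected to bracket cpuidle_enable_device()/
 * cpuidle_disable_device() with the pause/resume pair above, so the idle
 * handler is never running while a device's state table is being changed.
 * A minimal sketch, assuming a driver already holds a valid per-CPU
 * "struct cpuidle_device *dev":
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... update dev->states[] ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */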
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
{
	ktime_t t1, t2;
	s64 diff;
	int ret;

	t1 = ktime_get();
	local_irq_enable();
	while (!need_resched())
		cpu_relax();

	t2 = ktime_get();
	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	ret = (int) diff;
	return ret;
}
static void poll_idle_init(struct cpuidle_device *dev)
{
	struct cpuidle_state *state = &dev->states[0];

	cpuidle_set_statedata(state, NULL);

	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
	state->exit_latency = 0;
	state->target_residency = 0;
	state->power_usage = -1;
	state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_device *dev) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret, i;

	if (dev->enabled)
		return 0;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return -EIO;
	if (!dev->state_count)
		return -EINVAL;

	if (dev->registered == 0) {
		ret = __cpuidle_register_device(dev);
		if (ret)
			return ret;
	}

	poll_idle_init(dev);

	if ((ret = cpuidle_add_state_sysfs(dev)))
		return ret;

	if (cpuidle_curr_governor->enable &&
	    (ret = cpuidle_curr_governor->enable(dev)))
		goto fail_sysfs;

	for (i = 0; i < dev->state_count; i++) {
		dev->states[i].usage = 0;
		dev->states[i].time = 0;
	}
	dev->last_residency = 0;
	dev->last_state = NULL;

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_state_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	if (!dev->enabled)
		return;
	if (!cpuidle_get_driver() || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(dev);

	cpuidle_remove_state_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);
/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (!sys_dev)
		return -EINVAL;
	if (!try_module_get(cpuidle_driver->owner))
		return -EINVAL;

	init_completion(&dev->kobj_unregister);
	/*
	 * cpuidle driver should set the dev->power_specified bit
	 * before registering the device if the driver provides
	 * power_usage numbers.
	 *
	 * For those devices whose ->power_specified is not set,
	 * we fill in power_usage with decreasing values as the
	 * cpuidle code has an implicit assumption that state Cn
	 * uses less power than C(n-1).
	 *
	 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
	 * a power value of -1.  So we use -2, -3, etc, for the
	 * other C-states.
	 */
	if (!dev->power_specified) {
		int i;
		for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
			dev->states[i].power_usage = -1 - i;
	}
	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);
	if ((ret = cpuidle_add_sysfs(sys_dev))) {
		module_put(cpuidle_driver->owner);
		return ret;
	}

	dev->registered = 1;
	return 0;
}
/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;

	mutex_lock(&cpuidle_lock);

	if ((ret = __cpuidle_register_device(dev))) {
		mutex_unlock(&cpuidle_lock);
		return ret;
	}

	cpuidle_enable_device(dev);
	cpuidle_install_idle_handler();

	mutex_unlock(&cpuidle_lock);

	return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
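/*
 * Illustrative example (not part of the original file): a rough sketch of how
 * a platform idle driver might hand one CPU to this core.  The per-CPU
 * variable "my_idle_dev" and the "my_enter" callback are hypothetical; real
 * drivers also call cpuidle_register_driver() first and fill states starting
 * at CPUIDLE_DRIVER_STATE_START, since index 0 may be claimed by the POLL
 * state that poll_idle_init() sets up.
 *
 *	struct cpuidle_device *dev = &per_cpu(my_idle_dev, cpu);
 *	struct cpuidle_state *state = &dev->states[CPUIDLE_DRIVER_STATE_START];
 *
 *	dev->cpu = cpu;
 *	snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
 *	state->exit_latency = 1;
 *	state->target_residency = 10;
 *	state->enter = my_enter;
 *	dev->state_count = CPUIDLE_DRIVER_STATE_START + 1;
 *
 *	if (cpuidle_register_device(dev))
 *		printk(KERN_ERR "cpuidle: failed to register CPU%d\n", cpu);
 */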
/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
	struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();

	if (dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(sys_dev);
	list_del(&dev->device_list);
	wait_for_completion(&dev->kobj_unregister);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;

	cpuidle_resume_and_unlock();

	module_put(cpuidle_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}
/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state.  Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}
static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};
static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}
#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
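/*
 * Illustrative example (not part of the original file), assuming the pre-3.2
 * pm_qos_params API that this file includes: a driver that cannot tolerate
 * long wakeup latencies registers a CPU_DMA_LATENCY request, e.g.
 *
 *	static struct pm_qos_request_list my_qos_req;
 *
 *	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 50);
 *
 * Adding or updating such a request runs the PM_QOS_CPU_DMA_LATENCY notifier
 * chain, which lands in cpuidle_latency_notify() above and IPIs every CPU out
 * of its current C-state, so the governors re-evaluate against the new bound.
 */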
/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	pm_idle_old = pm_idle;

	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}
module_param(off, int, 0444);
core_initcall(cpuidle_init);