drivers/cpuidle/cpuidle.c
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>

#include "cpuidle.h"

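/*
 * Module-level state: one cpuidle device pointer per CPU, a global lock
 * and list of registered devices, the idle handler that was active before
 * cpuidle took over (pm_idle_old), and a count of currently enabled devices.
 */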
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
static void (*pm_idle_old)(void);

static int enabled_devices;

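/*
 * cpuidle_kick_cpus - force all CPUs out of the current idle routine
 *
 * Used when the idle handler is switched back to pm_idle_old, so that no
 * CPU keeps running cpuidle_idle_call() after the uninstall.
 */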
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
        cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
        struct cpuidle_state *target_state;
        int next_state;

        /* check if the device is ready */
        if (!dev || !dev->enabled) {
                if (pm_idle_old)
                        pm_idle_old();
                else
                        local_irq_enable();
                return;
        }

        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(dev);
        if (need_resched())
                return;
        target_state = &dev->states[next_state];

        /* enter the state and update stats */
        dev->last_residency = target_state->enter(dev, target_state);
        dev->last_state = target_state;
        target_state->time += dev->last_residency;
        target_state->usage++;

        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
                /* Make sure all changes finished before we switch to new idle */
                smp_wmb();
                pm_idle = cpuidle_idle_call;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices && (pm_idle != pm_idle_old)) {
                pm_idle = pm_idle_old;
                cpuidle_kick_cpus();
        }
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret, i;

        if (dev->enabled)
                return 0;
        if (!cpuidle_curr_driver || !cpuidle_curr_governor)
                return -EIO;
        if (!dev->state_count)
                return -EINVAL;

        if ((ret = cpuidle_add_state_sysfs(dev)))
                return ret;

        if (cpuidle_curr_governor->enable &&
            (ret = cpuidle_curr_governor->enable(dev)))
                goto fail_sysfs;

        for (i = 0; i < dev->state_count; i++) {
                dev->states[i].usage = 0;
                dev->states[i].time = 0;
        }
        dev->last_residency = 0;
        dev->last_state = NULL;

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_state_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
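
/*
 * Usage sketch (illustrative only): as the kernel-doc above and below notes,
 * an external caller that reconfigures and re-enables a device is expected
 * to bracket the calls with the pause/resume helpers, e.g.:
 *
 *      cpuidle_pause_and_lock();
 *      cpuidle_disable_device(dev);
 *      ... update dev->states ...
 *      cpuidle_enable_device(dev);
 *      cpuidle_resume_and_unlock();
 */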

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        if (!dev->enabled)
                return;
        if (!cpuidle_curr_driver || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(dev);

        cpuidle_remove_state_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
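/**
 * poll_idle - busy-poll "idle" state
 * @dev: the target CPU
 * @st: the state being entered
 *
 * Spins with interrupts enabled until a reschedule is needed and returns
 * the time spent polling, in microseconds (clamped to INT_MAX).
 */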
static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
{
        ktime_t t1, t2;
        s64 diff;
        int ret;

        t1 = ktime_get();
        local_irq_enable();
        while (!need_resched())
                cpu_relax();

        t2 = ktime_get();
        diff = ktime_to_us(ktime_sub(t2, t1));
        if (diff > INT_MAX)
                diff = INT_MAX;

        ret = (int) diff;
        return ret;
}

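/*
 * Set up state 0 as the polling state: zero exit latency and target
 * residency, statistics marked time-valid, entered via poll_idle().
 */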
static void poll_idle_init(struct cpuidle_device *dev)
{
        struct cpuidle_state *state = &dev->states[0];

        cpuidle_set_statedata(state, NULL);

        snprintf(state->name, CPUIDLE_NAME_LEN, "C0 (poll idle)");
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
        state->flags = CPUIDLE_FLAG_POLL | CPUIDLE_FLAG_TIME_VALID;
        state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_device *dev) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

        if (!sys_dev)
                return -EINVAL;
        if (!try_module_get(cpuidle_curr_driver->owner))
                return -EINVAL;

        init_completion(&dev->kobj_unregister);

        mutex_lock(&cpuidle_lock);

        poll_idle_init(dev);

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
        if ((ret = cpuidle_add_sysfs(sys_dev))) {
                mutex_unlock(&cpuidle_lock);
                module_put(cpuidle_curr_driver->owner);
                return ret;
        }

        cpuidle_enable_device(dev);
        cpuidle_install_idle_handler();

        mutex_unlock(&cpuidle_lock);

        return 0;

}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
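
/*
 * Usage sketch (illustrative only; names prefixed "my_" are hypothetical):
 * a platform idle driver typically registers its driver structure first and
 * then fills in and registers one cpuidle device per CPU, along these lines:
 *
 *      dev->cpu = cpu;
 *      snprintf(dev->states[1].name, CPUIDLE_NAME_LEN, "C1");
 *      dev->states[1].exit_latency = 1;
 *      dev->states[1].target_residency = 10;
 *      dev->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
 *      dev->states[1].enter = my_enter_c1;
 *      dev->state_count = 2;
 *      if (cpuidle_register_device(dev))
 *              ...;
 *
 * State 0 is filled in by poll_idle_init() above on architectures that
 * define CONFIG_ARCH_HAS_CPU_RELAX; otherwise the driver provides it.
 */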

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(sys_dev);
        list_del(&dev->device_list);
        wait_for_completion(&dev->kobj_unregister);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;

        cpuidle_resume_and_unlock();

        module_put(cpuidle_curr_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 0, 1);
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        pm_idle_old = pm_idle;

        ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

core_initcall(cpuidle_init);