cpufreq: Make acpi-cpufreq link first
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
7d5e350f 42static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
1da177e4
LT
48static DEFINE_SPINLOCK(cpufreq_driver_lock);
49
6954ca9c
VK
50/* Used when we unregister cpufreq driver */
51static struct cpumask cpufreq_online_mask;
52
5a01f2e8
VP
53/*
54 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
55 * all cpufreq/hotplug/workqueue/etc related lock issues.
56 *
57 * The rules for this semaphore:
58 * - Any routine that wants to read from the policy structure will
59 * do a down_read on this semaphore.
60 * - Any routine that will write to the policy structure and/or may take away
61 * the policy altogether (eg. CPU hotplug), will hold this lock in write
62 * mode before doing so.
63 *
64 * Additional rules:
65 * - All holders of the lock should check to make sure that the CPU they
66 * are concerned with are online after they get the lock.
67 * - Governor routines that can be called in cpufreq hotplug path should not
68 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
69 * - Lock should not be held across
70 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 71 */
f1625066 72static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
73static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
74
/*
 * lock_policy_rwsem_read()/lock_policy_rwsem_write() generator.
 *
 * Acquires the rwsem of the policy that manages @cpu in the given mode.
 * Returns 0 with the semaphore held, or -1 (semaphore already released
 * again) if @cpu went offline while the lock was being taken.
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode					\
(int cpu)								\
{									\
	/* resolve the CPU that owns this (possibly shared) policy */	\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		/* CPU vanished under us: back out and tell the caller */ \
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);

lock_policy_rwsem(write, cpu);
/* Release the policy rwsem taken by lock_policy_rwsem_read(). */
static void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
5a01f2e8 100
/* Release the policy rwsem taken by lock_policy_rwsem_write(). */
static void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
5a01f2e8
VP
107
108
1da177e4 109/* internal prototypes */
29464f28
DJ
110static int __cpufreq_governor(struct cpufreq_policy *policy,
111 unsigned int event);
5a01f2e8 112static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 113static void handle_update(struct work_struct *work);
1da177e4
LT
114
115/**
32ee8c3e
DJ
116 * Two notifier lists: the "policy" list is involved in the
117 * validation process for a new CPU frequency policy; the
1da177e4
LT
118 * "transition" list for kernel code that needs to handle
119 * changes to devices when the CPU clock speed changes.
120 * The mutex locks both lists.
121 */
e041c683 122static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 123static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 124
/* Set once the SRCU notifier head below has been initialized; checked by
 * cpufreq_register_notifier() callers elsewhere in this file. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
/* pure_initcall: must run before any registration can happen */
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 133
/* Non-zero once disable_cpufreq() has been called; read on hot paths. */
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
/* Globally disable the cpufreq core; there is no way to re-enable it. */
void disable_cpufreq(void)
{
	off = 1;
}
1da177e4 143static LIST_HEAD(cpufreq_governor_list);
29464f28 144static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 145
/*
 * __cpufreq_cpu_get - pin the policy of @cpu and return it.
 *
 * Takes a reference on the driver module and, unless @sysfs is true,
 * also on the policy's kobject.  The driver lock is held across the
 * lookup so the driver cannot be unregistered while we are pinning.
 * Returns NULL (with no references held) on any failure.
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	/* sysfs callers already hold a kobject ref via the open file */
	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
a9144436
SB
183
184struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
185{
d5aaffa9
DB
186 if (cpufreq_disabled())
187 return NULL;
188
a9144436
SB
189 return __cpufreq_cpu_get(cpu, false);
190}
1da177e4
LT
191EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
192
a9144436
SB
/* Variant of cpufreq_cpu_get() for sysfs handlers: no extra kobject ref. */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}
197
/*
 * __cpufreq_cpu_put - release the references taken by __cpufreq_cpu_get():
 * the module ref always, the kobject ref only for non-sysfs callers.
 */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
7d5e350f 204
1da177e4
LT
205void cpufreq_cpu_put(struct cpufreq_policy *data)
206{
d5aaffa9
DB
207 if (cpufreq_disabled())
208 return;
209
a9144436 210 __cpufreq_cpu_put(data, false);
1da177e4
LT
211}
212EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
213
a9144436
SB
/* Counterpart of cpufreq_cpu_get_sysfs(): drops only the module ref. */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 218
1da177e4
LT
219/*********************************************************************
220 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
221 *********************************************************************/
222
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
/* Reference lpj/frequency pair captured on the first transition seen. */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* delay loop is frequency independent on this hardware */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* first call: remember the current lpj and the frequency it
	 * corresponds to, so later scaling has a fixed reference */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU lpj is handled by the architecture; nothing to do here. */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
260
261
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects: once with CPUFREQ_PRECHANGE and once with
 * CPUFREQ_POSTCHANGE.  Must be called from process context
 * (BUG_ON(irqs_disabled()) below).
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				/* trust the core's view over the driver's */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* record the new frequency as the policy's current one */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
319
320
321
322/*********************************************************************
323 * SYSFS INTERFACE *
324 *********************************************************************/
325
3bcb09a3
JF
326static struct cpufreq_governor *__find_governor(const char *str_governor)
327{
328 struct cpufreq_governor *t;
329
330 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 331 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
332 return t;
333
334 return NULL;
335}
336
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers only "performance"/"powersave" are valid and
 * *policy is filled in; for target drivers the governor list is searched
 * (loading the "cpufreq_<name>" module on a miss) and *governor is set.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* drop the mutex: request_module() may sleep and the
			 * loaded module registers itself under this mutex */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
385
386
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

/* Generates a sysfs show handler that prints the named policy field. */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
408
e08f5f5b
GS
409static int __cpufreq_set_policy(struct cpufreq_policy *data,
410 struct cpufreq_policy *policy);
7970e08b 411
1da177e4
LT
412/**
413 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
414 */
415#define store_one(file_name, object) \
416static ssize_t store_##file_name \
905d77cd 417(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 418{ \
f55c9c26 419 unsigned int ret; \
1da177e4
LT
420 struct cpufreq_policy new_policy; \
421 \
422 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
423 if (ret) \
424 return -EINVAL; \
425 \
29464f28 426 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
427 if (ret != 1) \
428 return -EINVAL; \
429 \
7970e08b
TR
430 ret = __cpufreq_set_policy(policy, &new_policy); \
431 policy->user_policy.object = policy->object; \
1da177e4
LT
432 \
433 return ret ? ret : count; \
434}
435
29464f28
DJ
436store_one(scaling_min_freq, min);
437store_one(scaling_max_freq, max);
1da177e4
LT
438
439/**
440 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
441 */
905d77cd
DJ
442static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
443 char *buf)
1da177e4 444{
5a01f2e8 445 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
446 if (!cur_freq)
447 return sprintf(buf, "<unknown>");
448 return sprintf(buf, "%u\n", cur_freq);
449}
450
451
452/**
453 * show_scaling_governor - show the current policy for the specified CPU
454 */
905d77cd 455static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 456{
29464f28 457 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
458 return sprintf(buf, "powersave\n");
459 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
460 return sprintf(buf, "performance\n");
461 else if (policy->governor)
4b972f0b 462 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 463 policy->governor->name);
1da177e4
LT
464 return -EINVAL;
465}
466
467
468/**
469 * store_scaling_governor - store policy for the specified CPU
470 */
905d77cd
DJ
471static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
472 const char *buf, size_t count)
1da177e4 473{
f55c9c26 474 unsigned int ret;
1da177e4
LT
475 char str_governor[16];
476 struct cpufreq_policy new_policy;
477
478 ret = cpufreq_get_policy(&new_policy, policy->cpu);
479 if (ret)
480 return ret;
481
29464f28 482 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
483 if (ret != 1)
484 return -EINVAL;
485
e08f5f5b
GS
486 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
487 &new_policy.governor))
1da177e4
LT
488 return -EINVAL;
489
7970e08b
TR
490 /* Do not use cpufreq_set_policy here or the user_policy.max
491 will be wrongly overridden */
7970e08b
TR
492 ret = __cpufreq_set_policy(policy, &new_policy);
493
494 policy->user_policy.policy = policy->policy;
495 policy->user_policy.governor = policy->governor;
7970e08b 496
e08f5f5b
GS
497 if (ret)
498 return ret;
499 else
500 return count;
1da177e4
LT
501}
502
503/**
504 * show_scaling_driver - show the cpufreq driver currently loaded
505 */
905d77cd 506static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 507{
4b972f0b 508 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
509}
510
511/**
512 * show_scaling_available_governors - show the available CPUfreq governors
513 */
905d77cd
DJ
514static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
515 char *buf)
1da177e4
LT
516{
517 ssize_t i = 0;
518 struct cpufreq_governor *t;
519
520 if (!cpufreq_driver->target) {
521 i += sprintf(buf, "performance powersave");
522 goto out;
523 }
524
525 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
526 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
527 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 528 goto out;
4b972f0b 529 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 530 }
7d5e350f 531out:
1da177e4
LT
532 i += sprintf(&buf[i], "\n");
533 return i;
534}
e8628dd0 535
835481d9 536static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
537{
538 ssize_t i = 0;
539 unsigned int cpu;
540
835481d9 541 for_each_cpu(cpu, mask) {
1da177e4
LT
542 if (i)
543 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
544 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
545 if (i >= (PAGE_SIZE - 5))
29464f28 546 break;
1da177e4
LT
547 }
548 i += sprintf(&buf[i], "\n");
549 return i;
550}
551
e8628dd0
DW
552/**
553 * show_related_cpus - show the CPUs affected by each transition even if
554 * hw coordination is in use
555 */
556static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
557{
835481d9 558 if (cpumask_empty(policy->related_cpus))
e8628dd0
DW
559 return show_cpus(policy->cpus, buf);
560 return show_cpus(policy->related_cpus, buf);
561}
562
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
570
9e76988e 571static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 572 const char *buf, size_t count)
9e76988e
VP
573{
574 unsigned int freq = 0;
575 unsigned int ret;
576
879000f9 577 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
578 return -EINVAL;
579
580 ret = sscanf(buf, "%u", &freq);
581 if (ret != 1)
582 return -EINVAL;
583
584 policy->governor->store_setspeed(policy, freq);
585
586 return count;
587}
588
589static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
590{
879000f9 591 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
592 return sprintf(buf, "<unsupported>\n");
593
594 return policy->governor->show_setspeed(policy, buf);
595}
1da177e4 596
e2f74f35 597/**
8bf1ac72 598 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
599 */
600static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
601{
602 unsigned int limit;
603 int ret;
604 if (cpufreq_driver->bios_limit) {
605 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
606 if (!ret)
607 return sprintf(buf, "%u\n", limit);
608 }
609 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
610}
611
6dad2a29
BP
612cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
613cpufreq_freq_attr_ro(cpuinfo_min_freq);
614cpufreq_freq_attr_ro(cpuinfo_max_freq);
615cpufreq_freq_attr_ro(cpuinfo_transition_latency);
616cpufreq_freq_attr_ro(scaling_available_governors);
617cpufreq_freq_attr_ro(scaling_driver);
618cpufreq_freq_attr_ro(scaling_cur_freq);
619cpufreq_freq_attr_ro(bios_limit);
620cpufreq_freq_attr_ro(related_cpus);
621cpufreq_freq_attr_ro(affected_cpus);
622cpufreq_freq_attr_rw(scaling_min_freq);
623cpufreq_freq_attr_rw(scaling_max_freq);
624cpufreq_freq_attr_rw(scaling_governor);
625cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 626
905d77cd 627static struct attribute *default_attrs[] = {
1da177e4
LT
628 &cpuinfo_min_freq.attr,
629 &cpuinfo_max_freq.attr,
ed129784 630 &cpuinfo_transition_latency.attr,
1da177e4
LT
631 &scaling_min_freq.attr,
632 &scaling_max_freq.attr,
633 &affected_cpus.attr,
e8628dd0 634 &related_cpus.attr,
1da177e4
LT
635 &scaling_governor.attr,
636 &scaling_driver.attr,
637 &scaling_available_governors.attr,
9e76988e 638 &scaling_setspeed.attr,
1da177e4
LT
639 NULL
640};
641
8aa84ad8
TR
642struct kobject *cpufreq_global_kobject;
643EXPORT_SYMBOL(cpufreq_global_kobject);
644
29464f28
DJ
645#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
646#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 647
/*
 * show - generic sysfs read dispatcher for per-policy attributes.
 * Pins the policy (sysfs variant: module ref only), takes the policy
 * rwsem for reading, then calls the attribute's ->show.  Unwinds the
 * ref/lock in reverse order via the labels on failure.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	/* < 0 means the CPU went offline: bail without the lock held */
	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
671
905d77cd
DJ
/*
 * store - generic sysfs write dispatcher for per-policy attributes.
 * Same structure as show() above, but takes the policy rwsem for
 * writing before calling the attribute's ->store.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	/* < 0 means the CPU went offline: bail without the lock held */
	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
696
/*
 * cpufreq_sysfs_release - kobject release callback; wakes whoever is
 * blocked in wait_for_completion(&policy->kobj_unregister).
 */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
703
52cf25d0 704static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
705 .show = show,
706 .store = store,
707};
708
709static struct kobj_type ktype_cpufreq = {
710 .sysfs_ops = &sysfs_ops,
711 .default_attrs = default_attrs,
712 .release = cpufreq_sysfs_release,
713};
714
4bfa042c
TR
/*
 * Returns:
 *   Negative: Failure
 *   0:        Success
 *   Positive: When we have a managed CPU and the sysfs got symlinked
 *
 * Called with the policy rwsem of @cpu held for writing; on the managed
 * path it temporarily drops and re-acquires that lock so it can be
 * re-targeted at the managing CPU's semaphore.
 */
static int cpufreq_add_dev_policy(unsigned int cpu,
				  struct cpufreq_policy *policy,
				  struct device *dev)
{
	int ret = 0;
#ifdef CONFIG_SMP
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;

	/* restore the governor this CPU used before it was hot-removed */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;

		if (cpu == j)
			continue;

		/* Check for existing affected CPUs.
		 * They may not be aware of it due to CPU Hotplug.
		 * cpufreq_cpu_put is called when the device is removed
		 * in __cpufreq_remove_dev()
		 */
		managed_policy = cpufreq_cpu_get(j);
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu */
			unlock_policy_rwsem_write(cpu);
			per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0) {
				/* Should not go through policy unlock path */
				if (cpufreq_driver->exit)
					cpufreq_driver->exit(policy);
				cpufreq_cpu_put(managed_policy);
				return -EBUSY;
			}

			/* stop the governor while the cpus mask changes */
			__cpufreq_governor(managed_policy, CPUFREQ_GOV_STOP);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpumask_copy(managed_policy->cpus, policy->cpus);
			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			__cpufreq_governor(managed_policy, CPUFREQ_GOV_START);
			__cpufreq_governor(managed_policy, CPUFREQ_GOV_LIMITS);

			pr_debug("CPU already managed, adding link\n");
			ret = sysfs_create_link(&dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret)
				cpufreq_cpu_put(managed_policy);
			/*
			 * Success. We only needed to be added to the mask.
			 * Call driver->exit() because only the cpu parent of
			 * the kobj needed to call init().
			 */
			if (cpufreq_driver->exit)
				cpufreq_driver->exit(policy);

			if (!ret)
				return 1;
			else
				return ret;
		}
	}
#endif
	return ret;
}
799
800
/* symlink affected CPUs */
/*
 * Creates a "cpufreq" sysfs symlink under every other online CPU in
 * policy->cpus, pointing at @cpu's policy kobject.  A policy reference
 * is taken per created link (dropped on the error path here; on success
 * presumably dropped by the remove path — see __cpufreq_remove_dev).
 */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		/* hold a ref for the lifetime of the symlink */
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
829
cf3289d0
AC
/*
 * cpufreq_add_dev_interface - create the sysfs interface for @cpu's
 * policy (kobject, driver attributes, symlinks) and apply the initial
 * policy.  On sysfs failure the kobject is put and we wait for its
 * release before returning.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* optional core attributes, keyed on the driver's capabilities */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	/* publish the policy for every online CPU it covers */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		if (!cpu_online(j))
			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	/* release callback completes kobj_unregister; wait for it */
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
904
1da177e4
LT
905
906/**
907 * cpufreq_add_dev - add a CPU device
908 *
32ee8c3e 909 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
910 *
911 * The Oracle says: try running cpufreq registration/unregistration concurrently
912 * with with cpu hotplugging and all hell will break loose. Tried to clean this
913 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 914 */
8a25a2fd 915static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 916{
8a25a2fd 917 unsigned int cpu = dev->id;
90e41bac 918 int ret = 0, found = 0;
1da177e4 919 struct cpufreq_policy *policy;
1da177e4
LT
920 unsigned long flags;
921 unsigned int j;
90e41bac
PB
922#ifdef CONFIG_HOTPLUG_CPU
923 int sibling;
924#endif
1da177e4 925
c32b6b8e
AR
926 if (cpu_is_offline(cpu))
927 return 0;
928
2d06d8c4 929 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
930
931#ifdef CONFIG_SMP
932 /* check whether a different CPU already registered this
933 * CPU because it is in the same boat. */
934 policy = cpufreq_cpu_get(cpu);
935 if (unlikely(policy)) {
8ff69732 936 cpufreq_cpu_put(policy);
1da177e4
LT
937 return 0;
938 }
939#endif
940
941 if (!try_module_get(cpufreq_driver->owner)) {
942 ret = -EINVAL;
943 goto module_out;
944 }
945
059019a3 946 ret = -ENOMEM;
e98df50c 947 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 948 if (!policy)
1da177e4 949 goto nomem_out;
059019a3
DJ
950
951 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 952 goto err_free_policy;
059019a3
DJ
953
954 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 955 goto err_free_cpumask;
1da177e4
LT
956
957 policy->cpu = cpu;
835481d9 958 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 959
5a01f2e8 960 /* Initially set CPU itself as the policy_cpu */
f1625066 961 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
3f4a782b
MD
962 ret = (lock_policy_rwsem_write(cpu) < 0);
963 WARN_ON(ret);
5a01f2e8 964
1da177e4 965 init_completion(&policy->kobj_unregister);
65f27f38 966 INIT_WORK(&policy->update, handle_update);
1da177e4 967
8122c6ce 968 /* Set governor before ->init, so that driver could check it */
90e41bac
PB
969#ifdef CONFIG_HOTPLUG_CPU
970 for_each_online_cpu(sibling) {
971 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
972 if (cp && cp->governor &&
973 (cpumask_test_cpu(cpu, cp->related_cpus))) {
974 policy->governor = cp->governor;
975 found = 1;
976 break;
977 }
978 }
979#endif
980 if (!found)
981 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1da177e4
LT
982 /* call driver. From then on the cpufreq must be able
983 * to accept all calls to ->verify and ->setpolicy for this CPU
984 */
985 ret = cpufreq_driver->init(policy);
986 if (ret) {
2d06d8c4 987 pr_debug("initialization failed\n");
3f4a782b 988 goto err_unlock_policy;
1da177e4 989 }
643ae6e8
VK
990
991 /*
992 * affected cpus must always be the one, which are online. We aren't
993 * managing offline cpus here.
994 */
995 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
6954ca9c 996 cpumask_and(policy->cpus, policy->cpus, &cpufreq_online_mask);
643ae6e8 997
187d9f4e
MC
998 policy->user_policy.min = policy->min;
999 policy->user_policy.max = policy->max;
1da177e4 1000
a1531acd
TR
1001 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1002 CPUFREQ_START, policy);
1003
8a25a2fd 1004 ret = cpufreq_add_dev_policy(cpu, policy, dev);
4bfa042c
TR
1005 if (ret) {
1006 if (ret > 0)
1007 /* This is a managed cpu, symlink created,
1008 exit with 0 */
1009 ret = 0;
ecf7e461 1010 goto err_unlock_policy;
4bfa042c 1011 }
1da177e4 1012
8a25a2fd 1013 ret = cpufreq_add_dev_interface(cpu, policy, dev);
19d6f7ec
DJ
1014 if (ret)
1015 goto err_out_unregister;
8ff69732 1016
dca02613
LW
1017 unlock_policy_rwsem_write(cpu);
1018
038c5b3e 1019 kobject_uevent(&policy->kobj, KOBJ_ADD);
1da177e4 1020 module_put(cpufreq_driver->owner);
2d06d8c4 1021 pr_debug("initialization complete\n");
87c32271 1022
1da177e4
LT
1023 return 0;
1024
1025
1026err_out_unregister:
1027 spin_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 1028 for_each_cpu(j, policy->cpus)
7a6aedfa 1029 per_cpu(cpufreq_cpu_data, j) = NULL;
1da177e4
LT
1030 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1031
c10997f6 1032 kobject_put(&policy->kobj);
1da177e4
LT
1033 wait_for_completion(&policy->kobj_unregister);
1034
3f4a782b 1035err_unlock_policy:
45709118 1036 unlock_policy_rwsem_write(cpu);
cad70a6a 1037 free_cpumask_var(policy->related_cpus);
3f4a782b
MD
1038err_free_cpumask:
1039 free_cpumask_var(policy->cpus);
1040err_free_policy:
1da177e4 1041 kfree(policy);
1da177e4
LT
1042nomem_out:
1043 module_put(cpufreq_driver->owner);
c32b6b8e 1044module_out:
1da177e4
LT
1045 return ret;
1046}
1047
b8eed8af
VK
1048static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1049{
1050 int j;
1051
1052 policy->last_cpu = policy->cpu;
1053 policy->cpu = cpu;
1054
1055 for_each_cpu(j, policy->cpus) {
1056 if (!cpu_online(j))
1057 continue;
1058 per_cpu(cpufreq_policy_cpu, j) = cpu;
1059 }
1060
1061#ifdef CONFIG_CPU_FREQ_TABLE
1062 cpufreq_frequency_table_update_policy_cpu(policy);
1063#endif
1064 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1065 CPUFREQ_UPDATE_POLICY_CPU, policy);
1066}
1da177e4
LT
1067
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 *
 * Returns 0 on success, -EINVAL if no policy exists for @dev or the
 * sysfs directory could not be handed over to a sibling CPU.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	/* cpufreq_driver_lock protects the cpufreq_cpu_data array. */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}

	/* Stop the governor before taking the CPU out of the policy. */
	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so it can be restored if the CPU returns. */
	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
			CPUFREQ_NAME_LEN);
#endif

	/* cpus = number of CPUs sharing this policy before removal. */
	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	cpus = cpumask_weight(data->cpus);
	cpumask_clear_cpu(cpu, data->cpus);

	/*
	 * If the departing CPU owned the policy kobject and siblings remain,
	 * migrate the sysfs directory to the first remaining sibling.
	 */
	if (unlikely((cpu == data->cpu) && (cpus > 1))) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);
			/* Undo the removal and restore the sibling's link. */
			cpumask_set_cpu(cpu, data->cpus);
			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
			unlock_policy_rwsem_write(cpu);
			return -EINVAL;
		}

		update_policy_cpu(data, cpu_dev->id);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
	cpufreq_cpu_put(data);
	unlock_policy_rwsem_write(cpu);
	sysfs_remove_link(&dev->kobj, "cpufreq");

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		/* Snapshot kobj/cmp under the lock, then drop the last ref. */
		lock_policy_rwsem_write(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_write(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		lock_policy_rwsem_write(cpu);
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);
		unlock_policy_rwsem_write(cpu);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else if (cpufreq_driver->target) {
		/* Policy still in use by siblings: restart its governor. */
		__cpufreq_governor(data, CPUFREQ_GOV_START);
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

	return 0;
}
1166
1167
8a25a2fd 1168static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1169{
8a25a2fd 1170 unsigned int cpu = dev->id;
5a01f2e8 1171 int retval;
ec28297a
VP
1172
1173 if (cpu_is_offline(cpu))
1174 return 0;
1175
5a01f2e8
VP
1176 if (unlikely(lock_policy_rwsem_write(cpu)))
1177 BUG();
1178
6954ca9c 1179 cpumask_clear_cpu(cpu, &cpufreq_online_mask);
8a25a2fd 1180 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1181 return retval;
1182}
1183
1184
65f27f38 1185static void handle_update(struct work_struct *work)
1da177e4 1186{
65f27f38
DH
1187 struct cpufreq_policy *policy =
1188 container_of(work, struct cpufreq_policy, update);
1189 unsigned int cpu = policy->cpu;
2d06d8c4 1190 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1191 cpufreq_update_policy(cpu);
1192}
1193
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	/* Issue a synthetic PRE/POST transition so governors and
	 * notifier users (e.g. loops_per_jiffy scaling) catch up. */
	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
1217
1218
32ee8c3e 1219/**
4ab70df4 1220 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1221 * @cpu: CPU number
1222 *
1223 * This is the last known freq, without actually getting it from the driver.
1224 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1225 */
1226unsigned int cpufreq_quick_get(unsigned int cpu)
1227{
1228 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
e08f5f5b 1229 unsigned int ret_freq = 0;
95235ca2
VP
1230
1231 if (policy) {
e08f5f5b 1232 ret_freq = policy->cur;
95235ca2
VP
1233 cpufreq_cpu_put(policy);
1234 }
1235
4d34a67d 1236 return ret_freq;
95235ca2
VP
1237}
1238EXPORT_SYMBOL(cpufreq_quick_get);
1239
3d737108
JB
1240/**
1241 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1242 * @cpu: CPU number
1243 *
1244 * Just return the max possible frequency for a given CPU.
1245 */
1246unsigned int cpufreq_quick_get_max(unsigned int cpu)
1247{
1248 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1249 unsigned int ret_freq = 0;
1250
1251 if (policy) {
1252 ret_freq = policy->max;
1253 cpufreq_cpu_put(policy);
1254 }
1255
1256 return ret_freq;
1257}
1258EXPORT_SYMBOL(cpufreq_quick_get_max);
1259
95235ca2 1260
5a01f2e8 1261static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1262{
7a6aedfa 1263 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1264 unsigned int ret_freq = 0;
1da177e4 1265
1da177e4 1266 if (!cpufreq_driver->get)
4d34a67d 1267 return ret_freq;
1da177e4 1268
e08f5f5b 1269 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1270
e08f5f5b
GS
1271 if (ret_freq && policy->cur &&
1272 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1273 /* verify no discrepancy between actual and
1274 saved value exists */
1275 if (unlikely(ret_freq != policy->cur)) {
1276 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1277 schedule_work(&policy->update);
1278 }
1279 }
1280
4d34a67d 1281 return ret_freq;
5a01f2e8 1282}
1da177e4 1283
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency.
 * Returns 0 if there is no policy for @cpu or the rwsem cannot be taken.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (!policy)
		return 0;

	if (likely(!lock_policy_rwsem_read(cpu))) {
		freq = __cpufreq_get(cpu);
		unlock_policy_rwsem_read(cpu);
	}

	cpufreq_cpu_put(policy);
	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1311
8a25a2fd
KS
/* Hook into the CPU subsystem: cpufreq_add_dev()/cpufreq_remove_dev()
 * are invoked for every CPU device registered with the cpu bus. */
static struct subsys_interface cpufreq_interface = {
	.name = "cpufreq",
	.subsys = &cpu_subsys,
	.add_dev = cpufreq_add_dev,
	.remove_dev = cpufreq_remove_dev,
};
1318
1da177e4 1319
42d4dc3f 1320/**
e00e56df
RW
1321 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1322 *
1323 * This function is only executed for the boot processor. The other CPUs
1324 * have been put offline by means of CPU hotplug.
42d4dc3f 1325 */
e00e56df 1326static int cpufreq_bp_suspend(void)
42d4dc3f 1327{
e08f5f5b 1328 int ret = 0;
4bc5d341 1329
e00e56df 1330 int cpu = smp_processor_id();
42d4dc3f
BH
1331 struct cpufreq_policy *cpu_policy;
1332
2d06d8c4 1333 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1334
e00e56df 1335 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1336 cpu_policy = cpufreq_cpu_get(cpu);
1337 if (!cpu_policy)
e00e56df 1338 return 0;
42d4dc3f
BH
1339
1340 if (cpufreq_driver->suspend) {
7ca64e2d 1341 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1342 if (ret)
42d4dc3f
BH
1343 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1344 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1345 }
1346
42d4dc3f 1347 cpufreq_cpu_put(cpu_policy);
c9060494 1348 return ret;
42d4dc3f
BH
1349}
1350
1da177e4 1351/**
e00e56df 1352 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1353 *
1354 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1355 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1356 * restored. It will verify that the current freq is in sync with
1357 * what we believe it to be. This is a bit later than when it
1358 * should be, but nonethteless it's better than calling
1359 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1360 *
1361 * This function is only executed for the boot CPU. The other CPUs have not
1362 * been turned on yet.
1da177e4 1363 */
e00e56df 1364static void cpufreq_bp_resume(void)
1da177e4 1365{
e08f5f5b 1366 int ret = 0;
4bc5d341 1367
e00e56df 1368 int cpu = smp_processor_id();
1da177e4
LT
1369 struct cpufreq_policy *cpu_policy;
1370
2d06d8c4 1371 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1372
e00e56df 1373 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1374 cpu_policy = cpufreq_cpu_get(cpu);
1375 if (!cpu_policy)
e00e56df 1376 return;
1da177e4
LT
1377
1378 if (cpufreq_driver->resume) {
1379 ret = cpufreq_driver->resume(cpu_policy);
1380 if (ret) {
1381 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1382 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1383 goto fail;
1da177e4
LT
1384 }
1385 }
1386
1da177e4 1387 schedule_work(&cpu_policy->update);
ce6c3997 1388
c9060494 1389fail:
1da177e4 1390 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1391}
1392
e00e56df
RW
/* syscore ops run on the boot CPU with interrupts disabled, after all
 * other CPUs have been offlined (suspend) / before they return (resume). */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1397
1398
1399/*********************************************************************
1400 * NOTIFIER LISTS INTERFACE *
1401 *********************************************************************/
1402
1403/**
1404 * cpufreq_register_notifier - register a driver with cpufreq
1405 * @nb: notifier function to register
1406 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1407 *
32ee8c3e 1408 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1409 * are notified about clock rate changes (once before and once after
1410 * the transition), or a list of drivers that are notified about
1411 * changes in cpufreq policy.
1412 *
1413 * This function may sleep, and has the same return conditions as
e041c683 1414 * blocking_notifier_chain_register.
1da177e4
LT
1415 */
1416int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1417{
1418 int ret;
1419
d5aaffa9
DB
1420 if (cpufreq_disabled())
1421 return -EINVAL;
1422
74212ca4
CEB
1423 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1424
1da177e4
LT
1425 switch (list) {
1426 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1427 ret = srcu_notifier_chain_register(
e041c683 1428 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1429 break;
1430 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1431 ret = blocking_notifier_chain_register(
1432 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1433 break;
1434 default:
1435 ret = -EINVAL;
1436 }
1da177e4
LT
1437
1438 return ret;
1439}
1440EXPORT_SYMBOL(cpufreq_register_notifier);
1441
1442
1443/**
1444 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1445 * @nb: notifier block to be unregistered
1446 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1447 *
1448 * Remove a driver from the CPU frequency notifier list.
1449 *
1450 * This function may sleep, and has the same return conditions as
e041c683 1451 * blocking_notifier_chain_unregister.
1da177e4
LT
1452 */
1453int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1454{
1455 int ret;
1456
d5aaffa9
DB
1457 if (cpufreq_disabled())
1458 return -EINVAL;
1459
1da177e4
LT
1460 switch (list) {
1461 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1462 ret = srcu_notifier_chain_unregister(
e041c683 1463 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1464 break;
1465 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1466 ret = blocking_notifier_chain_unregister(
1467 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1468 break;
1469 default:
1470 ret = -EINVAL;
1471 }
1da177e4
LT
1472
1473 return ret;
1474}
1475EXPORT_SYMBOL(cpufreq_unregister_notifier);
1476
1477
1478/*********************************************************************
1479 * GOVERNORS *
1480 *********************************************************************/
1481
1482
1483int __cpufreq_driver_target(struct cpufreq_policy *policy,
1484 unsigned int target_freq,
1485 unsigned int relation)
1486{
1487 int retval = -EINVAL;
7249924e 1488 unsigned int old_target_freq = target_freq;
c32b6b8e 1489
a7b422cd
KRW
1490 if (cpufreq_disabled())
1491 return -ENODEV;
1492
7249924e
VK
1493 /* Make sure that target_freq is within supported range */
1494 if (target_freq > policy->max)
1495 target_freq = policy->max;
1496 if (target_freq < policy->min)
1497 target_freq = policy->min;
1498
1499 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1500 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1501
1502 if (target_freq == policy->cur)
1503 return 0;
1504
1da177e4
LT
1505 if (cpu_online(policy->cpu) && cpufreq_driver->target)
1506 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1507
1da177e4
LT
1508 return retval;
1509}
1510EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1511
1da177e4
LT
1512int cpufreq_driver_target(struct cpufreq_policy *policy,
1513 unsigned int target_freq,
1514 unsigned int relation)
1515{
f1829e4a 1516 int ret = -EINVAL;
1da177e4
LT
1517
1518 policy = cpufreq_cpu_get(policy->cpu);
1519 if (!policy)
f1829e4a 1520 goto no_policy;
1da177e4 1521
5a01f2e8 1522 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1523 goto fail;
1da177e4
LT
1524
1525 ret = __cpufreq_driver_target(policy, target_freq, relation);
1526
5a01f2e8 1527 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1528
f1829e4a 1529fail:
1da177e4 1530 cpufreq_cpu_put(policy);
f1829e4a 1531no_policy:
1da177e4
LT
1532 return ret;
1533}
1534EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1535
bf0b90e3 1536int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1537{
1538 int ret = 0;
1539
d5aaffa9
DB
1540 if (cpufreq_disabled())
1541 return ret;
1542
0676f7f2
VK
1543 if (!(cpu_online(cpu) && cpufreq_driver->getavg))
1544 return 0;
1545
dfde5d62
VP
1546 policy = cpufreq_cpu_get(policy->cpu);
1547 if (!policy)
1548 return -EINVAL;
1549
0676f7f2 1550 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1551
dfde5d62
VP
1552 cpufreq_cpu_put(policy);
1553 return ret;
1554}
5a01f2e8 1555EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1556
/*
 * when "event" is CPUFREQ_GOV_LIMITS
 *
 * Dispatch a governor event (START/STOP/LIMITS) to the policy's governor,
 * falling back to the performance governor when the hardware transition
 * latency exceeds what the chosen governor tolerates.  Also maintains one
 * governor-module reference per started policy.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module for the duration of the callback. */
	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	/* Drop the transient reference unless a successful START keeps it;
	 * a successful STOP releases the reference taken at START time. */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
1607
1608
1da177e4
LT
1609int cpufreq_register_governor(struct cpufreq_governor *governor)
1610{
3bcb09a3 1611 int err;
1da177e4
LT
1612
1613 if (!governor)
1614 return -EINVAL;
1615
a7b422cd
KRW
1616 if (cpufreq_disabled())
1617 return -ENODEV;
1618
3fc54d37 1619 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1620
3bcb09a3
JF
1621 err = -EBUSY;
1622 if (__find_governor(governor->name) == NULL) {
1623 err = 0;
1624 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1625 }
1da177e4 1626
32ee8c3e 1627 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1628 return err;
1da177e4
LT
1629}
1630EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1631
1632
1633void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1634{
90e41bac
PB
1635#ifdef CONFIG_HOTPLUG_CPU
1636 int cpu;
1637#endif
1638
1da177e4
LT
1639 if (!governor)
1640 return;
1641
a7b422cd
KRW
1642 if (cpufreq_disabled())
1643 return;
1644
90e41bac
PB
1645#ifdef CONFIG_HOTPLUG_CPU
1646 for_each_present_cpu(cpu) {
1647 if (cpu_online(cpu))
1648 continue;
1649 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1650 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1651 }
1652#endif
1653
3fc54d37 1654 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1655 list_del(&governor->governor_list);
3fc54d37 1656 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1657 return;
1658}
1659EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1660
1661
1662
1663/*********************************************************************
1664 * POLICY INTERFACE *
1665 *********************************************************************/
1666
1667/**
1668 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1669 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1670 * is written
1da177e4
LT
1671 *
1672 * Reads the current cpufreq policy.
1673 */
1674int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1675{
1676 struct cpufreq_policy *cpu_policy;
1677 if (!policy)
1678 return -EINVAL;
1679
1680 cpu_policy = cpufreq_cpu_get(cpu);
1681 if (!cpu_policy)
1682 return -EINVAL;
1683
1da177e4 1684 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1685
1686 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1687 return 0;
1688}
1689EXPORT_SYMBOL(cpufreq_get_policy);
1690
1691
/*
 * data : current policy.
 * policy : policy to be set.
 *
 * Validate the requested limits against the driver, let notifiers adjust
 * them, then apply the result: either via the driver's ->setpolicy or by
 * (re)starting the selected governor.  Caller must hold the policy rwsem
 * for writing.  Returns 0 on success or a negative error code.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* Reject ranges that do not overlap the current policy at all. */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		/* Driver manages frequency itself; just hand over the mode. */
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1779
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 *
 * Returns 0 on success, -ENODEV if no policy exists for @cpu, or
 * -EINVAL if the policy rwsem could not be taken.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Rebuild the policy from the user-requested limits, not the
	 * possibly-adjusted active values. */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1834
dd184a01 1835static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1836 unsigned long action, void *hcpu)
1837{
1838 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1839 struct device *dev;
c32b6b8e 1840
8a25a2fd
KS
1841 dev = get_cpu_device(cpu);
1842 if (dev) {
c32b6b8e
AR
1843 switch (action) {
1844 case CPU_ONLINE:
8bb78442 1845 case CPU_ONLINE_FROZEN:
8a25a2fd 1846 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1847 break;
1848 case CPU_DOWN_PREPARE:
8bb78442 1849 case CPU_DOWN_PREPARE_FROZEN:
5a01f2e8
VP
1850 if (unlikely(lock_policy_rwsem_write(cpu)))
1851 BUG();
1852
8a25a2fd 1853 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1854 break;
5a01f2e8 1855 case CPU_DOWN_FAILED:
8bb78442 1856 case CPU_DOWN_FAILED_FROZEN:
8a25a2fd 1857 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1858 break;
1859 }
1860 }
1861 return NOTIFY_OK;
1862}
1863
/* Registered in cpufreq_register_driver(); __refdata because the callback
 * is __cpuinit. */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1867
1868/*********************************************************************
1869 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1870 *********************************************************************/
1871
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values#
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A driver must be able to verify limits and init policies, and
	 * provide at least one of ->setpolicy / ->target. */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* ->setpolicy drivers keep the frequency constant from the
	 * kernel's point of view. */
	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Only one driver may be registered at a time. */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpumask_setall(&cpufreq_online_mask);

	/* Triggers cpufreq_add_dev() for every registered CPU device. */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1945
1946
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered; must match the registered one
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Tears down the per-CPU cpufreq interfaces and stops reacting
	 * to CPU hotplug before dropping the driver pointer. */
	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1974
/*
 * Core initialization: set up per-cpu locking state, create the global
 * /sys/devices/system/cpu/cpufreq kobject, and hook suspend/resume.
 * Runs at core_initcall time, before any driver can register.
 */
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		/* -1 == no policy owner assigned yet. */
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 0.697762 seconds and 5 git commands to generate.