cpufreq: MAINTAINERS: Add co-maintainer
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
5800043b 42static struct cpufreq_driver __rcu *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
0d1857a1 48static DEFINE_RWLOCK(cpufreq_driver_lock);
1da177e4 49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
5a01f2e8
VP
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
f1625066 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
70#define lock_policy_rwsem(mode, cpu) \
fa1d8af4 71static int lock_policy_rwsem_##mode(int cpu) \
5a01f2e8 72{ \
f1625066 73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
74 BUG_ON(policy_cpu == -1); \
75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8
VP
76 \
77 return 0; \
78}
79
80lock_policy_rwsem(read, cpu);
5a01f2e8 81lock_policy_rwsem(write, cpu);
5a01f2e8 82
fa1d8af4
VK
83#define unlock_policy_rwsem(mode, cpu) \
84static void unlock_policy_rwsem_##mode(int cpu) \
85{ \
86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
87 BUG_ON(policy_cpu == -1); \
88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8 89}
5a01f2e8 90
fa1d8af4
VK
91unlock_policy_rwsem(read, cpu);
92unlock_policy_rwsem(write, cpu);
5a01f2e8 93
1da177e4 94/* internal prototypes */
29464f28
DJ
95static int __cpufreq_governor(struct cpufreq_policy *policy,
96 unsigned int event);
5a01f2e8 97static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 98static void handle_update(struct work_struct *work);
1da177e4
LT
99
100/**
32ee8c3e
DJ
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
1da177e4
LT
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
106 */
e041c683 107static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 108static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 109
74212ca4 110static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
111static int __init init_cpufreq_transition_notifier_list(void)
112{
113 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 114 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
115 return 0;
116}
b3438f82 117pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 118
a7b422cd 119static int off __read_mostly;
da584455 120static int cpufreq_disabled(void)
a7b422cd
KRW
121{
122 return off;
123}
124void disable_cpufreq(void)
125{
126 off = 1;
127}
1da177e4 128static LIST_HEAD(cpufreq_governor_list);
29464f28 129static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 130
4d5dcc42
VK
131bool have_governor_per_policy(void)
132{
5800043b
NZ
133 bool have_governor_per_policy;
134 rcu_read_lock();
135 have_governor_per_policy =
136 rcu_dereference(cpufreq_driver)->have_governor_per_policy;
137 rcu_read_unlock();
138 return have_governor_per_policy;
4d5dcc42
VK
139}
140
a9144436 141static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
1da177e4
LT
142{
143 struct cpufreq_policy *data;
5800043b 144 struct cpufreq_driver *driver;
1da177e4
LT
145 unsigned long flags;
146
7a6aedfa 147 if (cpu >= nr_cpu_ids)
1da177e4
LT
148 goto err_out;
149
150 /* get the cpufreq driver */
5800043b
NZ
151 rcu_read_lock();
152 driver = rcu_dereference(cpufreq_driver);
1da177e4 153
5800043b 154 if (!driver)
1da177e4
LT
155 goto err_out_unlock;
156
5800043b 157 if (!try_module_get(driver->owner))
1da177e4
LT
158 goto err_out_unlock;
159
5800043b 160 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4
LT
161
162 /* get the CPU */
7a6aedfa 163 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
164
165 if (!data)
166 goto err_out_put_module;
167
a9144436 168 if (!sysfs && !kobject_get(&data->kobj))
1da177e4
LT
169 goto err_out_put_module;
170
0d1857a1 171 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
5800043b 172 rcu_read_unlock();
1da177e4
LT
173 return data;
174
7d5e350f 175err_out_put_module:
5800043b 176 module_put(driver->owner);
0d1857a1 177 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
5800043b
NZ
178err_out_unlock:
179 rcu_read_unlock();
7d5e350f 180err_out:
1da177e4
LT
181 return NULL;
182}
a9144436
SB
183
184struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
185{
d5aaffa9
DB
186 if (cpufreq_disabled())
187 return NULL;
188
a9144436
SB
189 return __cpufreq_cpu_get(cpu, false);
190}
1da177e4
LT
191EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
192
a9144436
SB
193static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
194{
195 return __cpufreq_cpu_get(cpu, true);
196}
197
198static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
199{
200 if (!sysfs)
201 kobject_put(&data->kobj);
5800043b
NZ
202 rcu_read_lock();
203 module_put(rcu_dereference(cpufreq_driver)->owner);
204 rcu_read_unlock();
a9144436 205}
7d5e350f 206
1da177e4
LT
207void cpufreq_cpu_put(struct cpufreq_policy *data)
208{
d5aaffa9
DB
209 if (cpufreq_disabled())
210 return;
211
a9144436 212 __cpufreq_cpu_put(data, false);
1da177e4
LT
213}
214EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
215
a9144436
SB
216static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
217{
218 __cpufreq_cpu_put(data, true);
219}
1da177e4 220
1da177e4
LT
221/*********************************************************************
222 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
223 *********************************************************************/
224
225/**
226 * adjust_jiffies - adjust the system "loops_per_jiffy"
227 *
228 * This function alters the system "loops_per_jiffy" for the clock
229 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 230 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
231 * per-CPU loops_per_jiffy value wherever possible.
232 */
233#ifndef CONFIG_SMP
234static unsigned long l_p_j_ref;
235static unsigned int l_p_j_ref_freq;
236
858119e1 237static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
238{
239 if (ci->flags & CPUFREQ_CONST_LOOPS)
240 return;
241
242 if (!l_p_j_ref_freq) {
243 l_p_j_ref = loops_per_jiffy;
244 l_p_j_ref_freq = ci->old;
2d06d8c4 245 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 246 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 247 }
d08de0c1 248 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 249 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
250 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
251 ci->new);
2d06d8c4 252 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 253 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
254 }
255}
256#else
e08f5f5b
GS
257static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
258{
259 return;
260}
1da177e4
LT
261#endif
262
263
b43a7ffb
VK
264void __cpufreq_notify_transition(struct cpufreq_policy *policy,
265 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
266{
267 BUG_ON(irqs_disabled());
268
d5aaffa9
DB
269 if (cpufreq_disabled())
270 return;
271
5800043b
NZ
272 rcu_read_lock();
273 freqs->flags = rcu_dereference(cpufreq_driver)->flags;
274 rcu_read_unlock();
2d06d8c4 275 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 276 state, freqs->new);
1da177e4 277
1da177e4 278 switch (state) {
e4472cb3 279
1da177e4 280 case CPUFREQ_PRECHANGE:
32ee8c3e 281 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
282 * which is not equal to what the cpufreq core thinks is
283 * "old frequency".
1da177e4 284 */
5800043b 285 if (!(freqs->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
286 if ((policy) && (policy->cpu == freqs->cpu) &&
287 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 288 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
289 " %u, cpufreq assumed %u kHz.\n",
290 freqs->old, policy->cur);
291 freqs->old = policy->cur;
1da177e4
LT
292 }
293 }
b4dfdbb3 294 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 295 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
296 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
297 break;
e4472cb3 298
1da177e4
LT
299 case CPUFREQ_POSTCHANGE:
300 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 301 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 302 (unsigned long)freqs->cpu);
25e41933 303 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 304 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 305 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
306 if (likely(policy) && likely(policy->cpu == freqs->cpu))
307 policy->cur = freqs->new;
1da177e4
LT
308 break;
309 }
1da177e4 310}
b43a7ffb
VK
311/**
312 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
313 * on frequency transition.
314 *
315 * This function calls the transition notifiers and the "adjust_jiffies"
316 * function. It is called twice on all CPU frequency changes that have
317 * external effects.
318 */
319void cpufreq_notify_transition(struct cpufreq_policy *policy,
320 struct cpufreq_freqs *freqs, unsigned int state)
321{
322 for_each_cpu(freqs->cpu, policy->cpus)
323 __cpufreq_notify_transition(policy, freqs, state);
324}
1da177e4
LT
325EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
326
327
328
329/*********************************************************************
330 * SYSFS INTERFACE *
331 *********************************************************************/
332
3bcb09a3
JF
333static struct cpufreq_governor *__find_governor(const char *str_governor)
334{
335 struct cpufreq_governor *t;
336
337 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 338 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
339 return t;
340
341 return NULL;
342}
343
1da177e4
LT
344/**
345 * cpufreq_parse_governor - parse a governor string
346 */
905d77cd 347static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
348 struct cpufreq_governor **governor)
349{
3bcb09a3 350 int err = -EINVAL;
5800043b
NZ
351 struct cpufreq_driver *driver;
352 bool has_setpolicy;
353 bool has_target;
354
355 rcu_read_lock();
356 driver = rcu_dereference(cpufreq_driver);
357 if (!driver) {
358 rcu_read_unlock();
3bcb09a3 359 goto out;
5800043b
NZ
360 }
361 has_setpolicy = driver->setpolicy ? true : false;
362 has_target = driver->target ? true : false;
363 rcu_read_unlock();
3bcb09a3 364
5800043b 365 if (has_setpolicy) {
1da177e4
LT
366 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
367 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 368 err = 0;
e08f5f5b
GS
369 } else if (!strnicmp(str_governor, "powersave",
370 CPUFREQ_NAME_LEN)) {
1da177e4 371 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 372 err = 0;
1da177e4 373 }
5800043b 374 } else if (has_target) {
1da177e4 375 struct cpufreq_governor *t;
3bcb09a3 376
3fc54d37 377 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
378
379 t = __find_governor(str_governor);
380
ea714970 381 if (t == NULL) {
1a8e1463 382 int ret;
ea714970 383
1a8e1463
KC
384 mutex_unlock(&cpufreq_governor_mutex);
385 ret = request_module("cpufreq_%s", str_governor);
386 mutex_lock(&cpufreq_governor_mutex);
ea714970 387
1a8e1463
KC
388 if (ret == 0)
389 t = __find_governor(str_governor);
ea714970
JF
390 }
391
3bcb09a3
JF
392 if (t != NULL) {
393 *governor = t;
394 err = 0;
1da177e4 395 }
3bcb09a3 396
3fc54d37 397 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 398 }
29464f28 399out:
3bcb09a3 400 return err;
1da177e4 401}
1da177e4
LT
402
403
1da177e4 404/**
e08f5f5b
GS
405 * cpufreq_per_cpu_attr_read() / show_##file_name() -
406 * print out cpufreq information
1da177e4
LT
407 *
408 * Write out information from cpufreq_driver->policy[cpu]; object must be
409 * "unsigned int".
410 */
411
32ee8c3e
DJ
412#define show_one(file_name, object) \
413static ssize_t show_##file_name \
905d77cd 414(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 415{ \
29464f28 416 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
417}
418
419show_one(cpuinfo_min_freq, cpuinfo.min_freq);
420show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 421show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
422show_one(scaling_min_freq, min);
423show_one(scaling_max_freq, max);
424show_one(scaling_cur_freq, cur);
425
e08f5f5b
GS
426static int __cpufreq_set_policy(struct cpufreq_policy *data,
427 struct cpufreq_policy *policy);
7970e08b 428
1da177e4
LT
429/**
430 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
431 */
432#define store_one(file_name, object) \
433static ssize_t store_##file_name \
905d77cd 434(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 435{ \
f55c9c26 436 unsigned int ret; \
1da177e4
LT
437 struct cpufreq_policy new_policy; \
438 \
439 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
440 if (ret) \
441 return -EINVAL; \
442 \
29464f28 443 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
444 if (ret != 1) \
445 return -EINVAL; \
446 \
7970e08b
TR
447 ret = __cpufreq_set_policy(policy, &new_policy); \
448 policy->user_policy.object = policy->object; \
1da177e4
LT
449 \
450 return ret ? ret : count; \
451}
452
29464f28
DJ
453store_one(scaling_min_freq, min);
454store_one(scaling_max_freq, max);
1da177e4
LT
455
456/**
457 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
458 */
905d77cd
DJ
459static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
460 char *buf)
1da177e4 461{
5a01f2e8 462 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
463 if (!cur_freq)
464 return sprintf(buf, "<unknown>");
465 return sprintf(buf, "%u\n", cur_freq);
466}
467
468
469/**
470 * show_scaling_governor - show the current policy for the specified CPU
471 */
905d77cd 472static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 473{
29464f28 474 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
475 return sprintf(buf, "powersave\n");
476 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
477 return sprintf(buf, "performance\n");
478 else if (policy->governor)
4b972f0b 479 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 480 policy->governor->name);
1da177e4
LT
481 return -EINVAL;
482}
483
484
485/**
486 * store_scaling_governor - store policy for the specified CPU
487 */
905d77cd
DJ
488static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
489 const char *buf, size_t count)
1da177e4 490{
f55c9c26 491 unsigned int ret;
1da177e4
LT
492 char str_governor[16];
493 struct cpufreq_policy new_policy;
494
495 ret = cpufreq_get_policy(&new_policy, policy->cpu);
496 if (ret)
497 return ret;
498
29464f28 499 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
500 if (ret != 1)
501 return -EINVAL;
502
e08f5f5b
GS
503 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
504 &new_policy.governor))
1da177e4
LT
505 return -EINVAL;
506
7970e08b
TR
507 /* Do not use cpufreq_set_policy here or the user_policy.max
508 will be wrongly overridden */
7970e08b
TR
509 ret = __cpufreq_set_policy(policy, &new_policy);
510
511 policy->user_policy.policy = policy->policy;
512 policy->user_policy.governor = policy->governor;
7970e08b 513
e08f5f5b
GS
514 if (ret)
515 return ret;
516 else
517 return count;
1da177e4
LT
518}
519
520/**
521 * show_scaling_driver - show the cpufreq driver currently loaded
522 */
905d77cd 523static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 524{
5800043b
NZ
525 ssize_t size;
526 rcu_read_lock();
527 size = scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
528 rcu_dereference(cpufreq_driver)->name);
529 rcu_read_unlock();
530 return size;
1da177e4
LT
531}
532
533/**
534 * show_scaling_available_governors - show the available CPUfreq governors
535 */
905d77cd
DJ
536static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
537 char *buf)
1da177e4
LT
538{
539 ssize_t i = 0;
540 struct cpufreq_governor *t;
541
5800043b
NZ
542 rcu_read_lock();
543 if (!rcu_dereference(cpufreq_driver)->target) {
544 rcu_read_unlock();
1da177e4
LT
545 i += sprintf(buf, "performance powersave");
546 goto out;
547 }
5800043b 548 rcu_read_unlock();
1da177e4
LT
549
550 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
551 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
552 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 553 goto out;
4b972f0b 554 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 555 }
7d5e350f 556out:
1da177e4
LT
557 i += sprintf(&buf[i], "\n");
558 return i;
559}
e8628dd0 560
835481d9 561static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
562{
563 ssize_t i = 0;
564 unsigned int cpu;
565
835481d9 566 for_each_cpu(cpu, mask) {
1da177e4
LT
567 if (i)
568 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
569 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
570 if (i >= (PAGE_SIZE - 5))
29464f28 571 break;
1da177e4
LT
572 }
573 i += sprintf(&buf[i], "\n");
574 return i;
575}
576
e8628dd0
DW
577/**
578 * show_related_cpus - show the CPUs affected by each transition even if
579 * hw coordination is in use
580 */
581static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
582{
e8628dd0
DW
583 return show_cpus(policy->related_cpus, buf);
584}
585
586/**
587 * show_affected_cpus - show the CPUs affected by each transition
588 */
589static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
590{
591 return show_cpus(policy->cpus, buf);
592}
593
9e76988e 594static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 595 const char *buf, size_t count)
9e76988e
VP
596{
597 unsigned int freq = 0;
598 unsigned int ret;
599
879000f9 600 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
601 return -EINVAL;
602
603 ret = sscanf(buf, "%u", &freq);
604 if (ret != 1)
605 return -EINVAL;
606
607 policy->governor->store_setspeed(policy, freq);
608
609 return count;
610}
611
612static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
613{
879000f9 614 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
615 return sprintf(buf, "<unsupported>\n");
616
617 return policy->governor->show_setspeed(policy, buf);
618}
1da177e4 619
e2f74f35 620/**
8bf1ac72 621 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
622 */
623static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
624{
625 unsigned int limit;
5800043b 626 int (*bios_limit)(int cpu, unsigned int *limit);
e2f74f35 627 int ret;
5800043b
NZ
628
629 rcu_read_lock();
630 bios_limit = rcu_dereference(cpufreq_driver)->bios_limit;
631 rcu_read_unlock();
632
633 if (bios_limit) {
634 ret = bios_limit(policy->cpu, &limit);
e2f74f35
TR
635 if (!ret)
636 return sprintf(buf, "%u\n", limit);
637 }
638 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
639}
640
6dad2a29
BP
641cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
642cpufreq_freq_attr_ro(cpuinfo_min_freq);
643cpufreq_freq_attr_ro(cpuinfo_max_freq);
644cpufreq_freq_attr_ro(cpuinfo_transition_latency);
645cpufreq_freq_attr_ro(scaling_available_governors);
646cpufreq_freq_attr_ro(scaling_driver);
647cpufreq_freq_attr_ro(scaling_cur_freq);
648cpufreq_freq_attr_ro(bios_limit);
649cpufreq_freq_attr_ro(related_cpus);
650cpufreq_freq_attr_ro(affected_cpus);
651cpufreq_freq_attr_rw(scaling_min_freq);
652cpufreq_freq_attr_rw(scaling_max_freq);
653cpufreq_freq_attr_rw(scaling_governor);
654cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 655
905d77cd 656static struct attribute *default_attrs[] = {
1da177e4
LT
657 &cpuinfo_min_freq.attr,
658 &cpuinfo_max_freq.attr,
ed129784 659 &cpuinfo_transition_latency.attr,
1da177e4
LT
660 &scaling_min_freq.attr,
661 &scaling_max_freq.attr,
662 &affected_cpus.attr,
e8628dd0 663 &related_cpus.attr,
1da177e4
LT
664 &scaling_governor.attr,
665 &scaling_driver.attr,
666 &scaling_available_governors.attr,
9e76988e 667 &scaling_setspeed.attr,
1da177e4
LT
668 NULL
669};
670
8aa84ad8
TR
671struct kobject *cpufreq_global_kobject;
672EXPORT_SYMBOL(cpufreq_global_kobject);
673
29464f28
DJ
674#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
675#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 676
29464f28 677static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 678{
905d77cd
DJ
679 struct cpufreq_policy *policy = to_policy(kobj);
680 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 681 ssize_t ret = -EINVAL;
a9144436 682 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 683 if (!policy)
0db4a8a9 684 goto no_policy;
5a01f2e8
VP
685
686 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 687 goto fail;
5a01f2e8 688
e08f5f5b
GS
689 if (fattr->show)
690 ret = fattr->show(policy, buf);
691 else
692 ret = -EIO;
693
5a01f2e8 694 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 695fail:
a9144436 696 cpufreq_cpu_put_sysfs(policy);
0db4a8a9 697no_policy:
1da177e4
LT
698 return ret;
699}
700
905d77cd
DJ
701static ssize_t store(struct kobject *kobj, struct attribute *attr,
702 const char *buf, size_t count)
1da177e4 703{
905d77cd
DJ
704 struct cpufreq_policy *policy = to_policy(kobj);
705 struct freq_attr *fattr = to_attr(attr);
a07530b4 706 ssize_t ret = -EINVAL;
a9144436 707 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 708 if (!policy)
a07530b4 709 goto no_policy;
5a01f2e8
VP
710
711 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 712 goto fail;
5a01f2e8 713
e08f5f5b
GS
714 if (fattr->store)
715 ret = fattr->store(policy, buf, count);
716 else
717 ret = -EIO;
718
5a01f2e8 719 unlock_policy_rwsem_write(policy->cpu);
a07530b4 720fail:
a9144436 721 cpufreq_cpu_put_sysfs(policy);
a07530b4 722no_policy:
1da177e4
LT
723 return ret;
724}
725
905d77cd 726static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 727{
905d77cd 728 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 729 pr_debug("last reference is dropped\n");
1da177e4
LT
730 complete(&policy->kobj_unregister);
731}
732
52cf25d0 733static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
734 .show = show,
735 .store = store,
736};
737
738static struct kobj_type ktype_cpufreq = {
739 .sysfs_ops = &sysfs_ops,
740 .default_attrs = default_attrs,
741 .release = cpufreq_sysfs_release,
742};
743
19d6f7ec 744/* symlink affected CPUs */
cf3289d0
AC
745static int cpufreq_add_dev_symlink(unsigned int cpu,
746 struct cpufreq_policy *policy)
19d6f7ec
DJ
747{
748 unsigned int j;
749 int ret = 0;
750
751 for_each_cpu(j, policy->cpus) {
752 struct cpufreq_policy *managed_policy;
8a25a2fd 753 struct device *cpu_dev;
19d6f7ec
DJ
754
755 if (j == cpu)
756 continue;
19d6f7ec 757
2d06d8c4 758 pr_debug("CPU %u already managed, adding link\n", j);
19d6f7ec 759 managed_policy = cpufreq_cpu_get(cpu);
8a25a2fd
KS
760 cpu_dev = get_cpu_device(j);
761 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec
DJ
762 "cpufreq");
763 if (ret) {
764 cpufreq_cpu_put(managed_policy);
765 return ret;
766 }
767 }
768 return ret;
769}
770
cf3289d0
AC
771static int cpufreq_add_dev_interface(unsigned int cpu,
772 struct cpufreq_policy *policy,
8a25a2fd 773 struct device *dev)
909a694e 774{
ecf7e461 775 struct cpufreq_policy new_policy;
909a694e 776 struct freq_attr **drv_attr;
5800043b 777 struct cpufreq_driver *driver;
909a694e
DJ
778 unsigned long flags;
779 int ret = 0;
780 unsigned int j;
781
782 /* prepare interface data */
783 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 784 &dev->kobj, "cpufreq");
909a694e
DJ
785 if (ret)
786 return ret;
787
788 /* set up files for this cpu device */
5800043b
NZ
789 rcu_read_lock();
790 driver = rcu_dereference(cpufreq_driver);
791 drv_attr = driver->attr;
909a694e
DJ
792 while ((drv_attr) && (*drv_attr)) {
793 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
794 if (ret)
5800043b 795 goto err_out_unlock;
909a694e
DJ
796 drv_attr++;
797 }
5800043b 798 if (driver->get) {
909a694e
DJ
799 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
800 if (ret)
5800043b 801 goto err_out_unlock;
909a694e 802 }
5800043b 803 if (driver->target) {
909a694e
DJ
804 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
805 if (ret)
5800043b 806 goto err_out_unlock;
909a694e 807 }
5800043b 808 if (driver->bios_limit) {
e2f74f35
TR
809 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
810 if (ret)
5800043b 811 goto err_out_unlock;
e2f74f35 812 }
5800043b 813 rcu_read_unlock();
909a694e 814
0d1857a1 815 write_lock_irqsave(&cpufreq_driver_lock, flags);
909a694e 816 for_each_cpu(j, policy->cpus) {
909a694e 817 per_cpu(cpufreq_cpu_data, j) = policy;
f1625066 818 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
909a694e 819 }
0d1857a1 820 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
909a694e
DJ
821
822 ret = cpufreq_add_dev_symlink(cpu, policy);
ecf7e461
DJ
823 if (ret)
824 goto err_out_kobj_put;
825
826 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
827 /* assure that the starting sequence is run in __cpufreq_set_policy */
828 policy->governor = NULL;
829
830 /* set default policy */
831 ret = __cpufreq_set_policy(policy, &new_policy);
832 policy->user_policy.policy = policy->policy;
833 policy->user_policy.governor = policy->governor;
834
835 if (ret) {
5800043b
NZ
836 int (*exit)(struct cpufreq_policy *policy);
837
2d06d8c4 838 pr_debug("setting policy failed\n");
5800043b
NZ
839 rcu_read_lock();
840 exit = rcu_dereference(cpufreq_driver)->exit;
841 rcu_read_unlock();
842 if (exit)
843 exit(policy);
844
ecf7e461 845 }
909a694e
DJ
846 return ret;
847
5800043b
NZ
848err_out_unlock:
849 rcu_read_unlock();
909a694e
DJ
850err_out_kobj_put:
851 kobject_put(&policy->kobj);
852 wait_for_completion(&policy->kobj_unregister);
853 return ret;
854}
855
fcf80582
VK
856#ifdef CONFIG_HOTPLUG_CPU
857static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
858 struct device *dev)
859{
860 struct cpufreq_policy *policy;
820c6ca2 861 int ret = 0, has_target = 0;
fcf80582
VK
862 unsigned long flags;
863
864 policy = cpufreq_cpu_get(sibling);
865 WARN_ON(!policy);
866
820c6ca2
VK
867 rcu_read_lock();
868 has_target = !!rcu_dereference(cpufreq_driver)->target;
869 rcu_read_unlock();
870
871 if (has_target)
872 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
fcf80582 873
2eaa3e2d
VK
874 lock_policy_rwsem_write(sibling);
875
0d1857a1 876 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 877
fcf80582 878 cpumask_set_cpu(cpu, policy->cpus);
2eaa3e2d 879 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
fcf80582 880 per_cpu(cpufreq_cpu_data, cpu) = policy;
0d1857a1 881 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 882
2eaa3e2d
VK
883 unlock_policy_rwsem_write(sibling);
884
820c6ca2
VK
885 if (has_target) {
886 __cpufreq_governor(policy, CPUFREQ_GOV_START);
887 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
888 }
fcf80582 889
fcf80582
VK
890 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
891 if (ret) {
892 cpufreq_cpu_put(policy);
893 return ret;
894 }
895
896 return 0;
897}
898#endif
1da177e4
LT
899
900/**
901 * cpufreq_add_dev - add a CPU device
902 *
32ee8c3e 903 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
904 *
905 * The Oracle says: try running cpufreq registration/unregistration concurrently
906 * with with cpu hotplugging and all hell will break loose. Tried to clean this
907 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 908 */
/**
 * cpufreq_add_dev - add a CPU device
 * @dev: the CPU device being added
 * @sif: subsystem interface (unused here; required by the callback type)
 *
 * Creates and registers the cpufreq policy for @dev's CPU.  If a sibling
 * CPU already manages a policy covering this CPU, the CPU is linked to
 * that existing policy via cpufreq_add_policy_cpu() instead.  Holds a
 * module reference on the driver for the duration of the setup.
 *
 * Returns 0 on success (or if there is nothing to do), a negative errno
 * otherwise.
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	struct cpufreq_driver *driver;
	int (*init)(struct cpufreq_policy *policy);
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			return cpufreq_add_policy_cpu(cpu, sibling, dev);
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

	/* Pin the driver module; the ->init callback is sampled under RCU
	 * so the driver structure cannot be freed while we dereference it. */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (!try_module_get(driver->owner)) {
		rcu_read_unlock();
		ret = -EINVAL;
		goto module_out;
	}
	init = driver->init;
	rcu_read_unlock();

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* Re-install the governor this CPU used before it was unplugged. */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	/* Setup complete: drop the module reference taken above. */
	rcu_read_lock();
	module_put(rcu_dereference(cpufreq_driver)->owner);
	rcu_read_unlock();
	pr_debug("initialization complete\n");

	return 0;

	/* Error unwinding below runs in strict reverse order of setup. */
err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	rcu_read_lock();
	module_put(rcu_dereference(cpufreq_driver)->owner);
	rcu_read_unlock();
module_out:
	return ret;
}
1048
/*
 * update_policy_cpu - transfer policy ownership to a new managing CPU
 * @policy: policy whose owning CPU changes
 * @cpu: CPU that becomes the new policy->cpu
 *
 * Records the previous owner in policy->last_cpu, repoints the per-cpu
 * policy_cpu bookkeeping for every CPU in the policy, and notifies
 * listeners with CPUFREQ_UPDATE_POLICY_CPU.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	/* All CPUs in this policy now resolve to the new owner. */
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1da177e4
LT
1065
1066/**
5a01f2e8 1067 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1068 *
1069 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1070 * Caller should already have policy_rwsem in write mode for this CPU.
1071 * This routine frees the rwsem before returning.
1da177e4 1072 */
8a25a2fd 1073static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 1074{
b8eed8af 1075 unsigned int cpu = dev->id, ret, cpus;
1da177e4
LT
1076 unsigned long flags;
1077 struct cpufreq_policy *data;
5800043b 1078 struct cpufreq_driver *driver;
499bca9b
AW
1079 struct kobject *kobj;
1080 struct completion *cmp;
8a25a2fd 1081 struct device *cpu_dev;
5800043b
NZ
1082 bool has_target;
1083 int (*exit)(struct cpufreq_policy *policy);
1da177e4 1084
b8eed8af 1085 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1086
0d1857a1 1087 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1088
7a6aedfa 1089 data = per_cpu(cpufreq_cpu_data, cpu);
2eaa3e2d
VK
1090 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1091
0d1857a1 1092 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1093
1094 if (!data) {
b8eed8af 1095 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1096 return -EINVAL;
1097 }
1da177e4 1098
5800043b
NZ
1099 rcu_read_lock();
1100 driver = rcu_dereference(cpufreq_driver);
1101 has_target = driver->target ? true : false;
1102 exit = driver->exit;
1103 if (has_target)
f6a7409c 1104 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1da177e4 1105
084f3493 1106#ifdef CONFIG_HOTPLUG_CPU
5800043b 1107 if (!driver->setpolicy)
fa69e33f
DB
1108 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1109 data->governor->name, CPUFREQ_NAME_LEN);
1da177e4 1110#endif
5800043b 1111 rcu_read_unlock();
1da177e4 1112
2eaa3e2d 1113 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1114 cpus = cpumask_weight(data->cpus);
e4969eba
VK
1115
1116 if (cpus > 1)
1117 cpumask_clear_cpu(cpu, data->cpus);
2eaa3e2d 1118 unlock_policy_rwsem_write(cpu);
084f3493 1119
73bf0fc2
VK
1120 if (cpu != data->cpu) {
1121 sysfs_remove_link(&dev->kobj, "cpufreq");
1122 } else if (cpus > 1) {
b8eed8af
VK
1123 /* first sibling now owns the new sysfs dir */
1124 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1125 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1126 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1127 if (ret) {
1128 pr_err("%s: Failed to move kobj: %d", __func__, ret);
084f3493 1129
2eaa3e2d 1130 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1131 cpumask_set_cpu(cpu, data->cpus);
1da177e4 1132
0d1857a1 1133 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1134 per_cpu(cpufreq_cpu_data, cpu) = data;
0d1857a1 1135 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1136
499bca9b 1137 unlock_policy_rwsem_write(cpu);
1da177e4 1138
2eaa3e2d
VK
1139 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1140 "cpufreq");
b8eed8af 1141 return -EINVAL;
1da177e4 1142 }
5a01f2e8 1143
2eaa3e2d 1144 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1145 update_policy_cpu(data, cpu_dev->id);
2eaa3e2d 1146 unlock_policy_rwsem_write(cpu);
b8eed8af
VK
1147 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1148 __func__, cpu_dev->id, cpu);
1da177e4 1149 }
1da177e4 1150
b8eed8af
VK
1151 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1152 cpufreq_cpu_put(data);
1da177e4 1153
b8eed8af
VK
1154 /* If cpu is last user of policy, free policy */
1155 if (cpus == 1) {
820c6ca2
VK
1156 if (has_target)
1157 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
7bd353a9 1158
2eaa3e2d 1159 lock_policy_rwsem_read(cpu);
b8eed8af
VK
1160 kobj = &data->kobj;
1161 cmp = &data->kobj_unregister;
2eaa3e2d 1162 unlock_policy_rwsem_read(cpu);
b8eed8af 1163 kobject_put(kobj);
7d26e2d5 1164
b8eed8af
VK
1165 /* we need to make sure that the underlying kobj is actually
1166 * not referenced anymore by anybody before we proceed with
1167 * unloading.
1168 */
1169 pr_debug("waiting for dropping of refcount\n");
1170 wait_for_completion(cmp);
1171 pr_debug("wait complete\n");
7d26e2d5 1172
5800043b
NZ
1173 if (exit)
1174 exit(data);
27ecddc2 1175
b8eed8af
VK
1176 free_cpumask_var(data->related_cpus);
1177 free_cpumask_var(data->cpus);
1178 kfree(data);
5800043b 1179 } else if (has_target) {
b8eed8af
VK
1180 __cpufreq_governor(data, CPUFREQ_GOV_START);
1181 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
27ecddc2 1182 }
1da177e4 1183
2eaa3e2d 1184 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1da177e4
LT
1185 return 0;
1186}
1187
1188
8a25a2fd 1189static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1190{
8a25a2fd 1191 unsigned int cpu = dev->id;
5a01f2e8 1192 int retval;
ec28297a
VP
1193
1194 if (cpu_is_offline(cpu))
1195 return 0;
1196
8a25a2fd 1197 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1198 return retval;
1199}
1200
1201
65f27f38 1202static void handle_update(struct work_struct *work)
1da177e4 1203{
65f27f38
DH
1204 struct cpufreq_policy *policy =
1205 container_of(work, struct cpufreq_policy, update);
1206 unsigned int cpu = policy->cpu;
2d06d8c4 1207 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1208 cpufreq_update_policy(cpu);
1209}
1210
1211/**
1212 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1213 * @cpu: cpu number
1214 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1215 * @new_freq: CPU frequency the CPU actually runs at
1216 *
29464f28
DJ
1217 * We adjust to current frequency first, and need to clean up later.
1218 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1219 */
e08f5f5b
GS
1220static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1221 unsigned int new_freq)
1da177e4 1222{
b43a7ffb 1223 struct cpufreq_policy *policy;
1da177e4 1224 struct cpufreq_freqs freqs;
b43a7ffb
VK
1225 unsigned long flags;
1226
1da177e4 1227
2d06d8c4 1228 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1229 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1230
1da177e4
LT
1231 freqs.old = old_freq;
1232 freqs.new = new_freq;
b43a7ffb
VK
1233
1234 read_lock_irqsave(&cpufreq_driver_lock, flags);
1235 policy = per_cpu(cpufreq_cpu_data, cpu);
1236 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1237
1238 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1239 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1240}
1241
1242
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 *
 * Exception: for setpolicy-style drivers that provide a ->get callback,
 * the frequency is read from the hardware instead of the cached value.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_driver *driver;
	unsigned int (*get)(unsigned int cpu);
	unsigned int ret_freq = 0;

	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (driver && driver->setpolicy && driver->get) {
		get = driver->get;
		rcu_read_unlock();
		return get(cpu);
	}
	rcu_read_unlock();

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
1275
3d737108
JB
1276/**
1277 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1278 * @cpu: CPU number
1279 *
1280 * Just return the max possible frequency for a given CPU.
1281 */
1282unsigned int cpufreq_quick_get_max(unsigned int cpu)
1283{
1284 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1285 unsigned int ret_freq = 0;
1286
1287 if (policy) {
1288 ret_freq = policy->max;
1289 cpufreq_cpu_put(policy);
1290 }
1291
1292 return ret_freq;
1293}
1294EXPORT_SYMBOL(cpufreq_quick_get_max);
1295
95235ca2 1296
/*
 * __cpufreq_get - read the current frequency for @cpu from the driver
 *
 * Returns 0 if the driver has no ->get callback.  If the value read
 * from hardware disagrees with the cached policy->cur (and the driver
 * does not set CPUFREQ_CONST_LOOPS), the discrepancy is reported via
 * cpufreq_out_of_sync() and a policy update is scheduled.
 * Caller must hold the policy rwsem (see cpufreq_get()).
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	struct cpufreq_driver *driver;
	unsigned int (*get)(unsigned int cpu);
	unsigned int ret_freq = 0;
	u8 flags;


	/* Sample the driver's ->get and flags under RCU, then call
	 * outside the read-side critical section. */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (!driver->get) {
		rcu_read_unlock();
		return ret_freq;
	}
	flags = driver->flags;
	get = driver->get;
	rcu_read_unlock();

	ret_freq = get(cpu);

	if (ret_freq && policy->cur &&
		!(flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1330
5a01f2e8
VP
1331/**
1332 * cpufreq_get - get the current CPU frequency (in kHz)
1333 * @cpu: CPU number
1334 *
1335 * Get the CPU current (static) CPU frequency
1336 */
1337unsigned int cpufreq_get(unsigned int cpu)
1338{
1339 unsigned int ret_freq = 0;
1340 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1341
1342 if (!policy)
1343 goto out;
1344
1345 if (unlikely(lock_policy_rwsem_read(cpu)))
1346 goto out_policy;
1347
1348 ret_freq = __cpufreq_get(cpu);
1349
1350 unlock_policy_rwsem_read(cpu);
1da177e4 1351
5a01f2e8
VP
1352out_policy:
1353 cpufreq_cpu_put(policy);
1354out:
4d34a67d 1355 return ret_freq;
1da177e4
LT
1356}
1357EXPORT_SYMBOL(cpufreq_get);
1358
/* Hooks cpufreq into the cpu subsystem for per-CPU add/remove callbacks. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1365
1da177e4 1366
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 *
 * Invokes the driver's ->suspend callback (if any) on the boot CPU's
 * policy.  Returns the callback's result, or 0 if there is no policy
 * or no callback.
 */
static int cpufreq_bp_suspend(void)
{
	int (*suspend)(struct cpufreq_policy *policy);
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	rcu_read_lock();
	suspend = rcu_dereference(cpufreq_driver)->suspend;
	rcu_read_unlock();
	if (suspend) {
		ret = suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	cpufreq_cpu_put(cpu_policy);
	return ret;
}
1401
1da177e4 1402/**
e00e56df 1403 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1404 *
1405 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1406 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1407 * restored. It will verify that the current freq is in sync with
1408 * what we believe it to be. This is a bit later than when it
1409 * should be, but nonethteless it's better than calling
1410 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1411 *
1412 * This function is only executed for the boot CPU. The other CPUs have not
1413 * been turned on yet.
1da177e4 1414 */
e00e56df 1415static void cpufreq_bp_resume(void)
1da177e4 1416{
e08f5f5b 1417 int ret = 0;
5800043b 1418 int (*resume)(struct cpufreq_policy *policy);
4bc5d341 1419
e00e56df 1420 int cpu = smp_processor_id();
1da177e4
LT
1421 struct cpufreq_policy *cpu_policy;
1422
2d06d8c4 1423 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1424
e00e56df 1425 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1426 cpu_policy = cpufreq_cpu_get(cpu);
1427 if (!cpu_policy)
e00e56df 1428 return;
1da177e4 1429
5800043b
NZ
1430 rcu_read_lock();
1431 resume = rcu_dereference(cpufreq_driver)->resume;
1432 rcu_read_unlock();
1433
1434 if (resume) {
1435 ret = resume(cpu_policy);
1da177e4
LT
1436 if (ret) {
1437 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1438 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1439 goto fail;
1da177e4
LT
1440 }
1441 }
1442
1da177e4 1443 schedule_work(&cpu_policy->update);
ce6c3997 1444
c9060494 1445fail:
1da177e4 1446 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1447}
1448
/* Boot-CPU suspend/resume hooks, run with interrupts disabled by syscore. */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1453
9d95046e
BP
1454/**
1455 * cpufreq_get_current_driver - return current driver's name
1456 *
1457 * Return the name string of the currently loaded cpufreq driver
1458 * or NULL, if none.
1459 */
1460const char *cpufreq_get_current_driver(void)
1461{
5800043b
NZ
1462 struct cpufreq_driver *driver;
1463 const char *name = NULL;
1464 rcu_read_lock();
1465 driver = rcu_dereference(cpufreq_driver);
1466 if (driver)
1467 name = driver->name;
1468 rcu_read_unlock();
1469 return name;
9d95046e
BP
1470}
1471EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1472
1473/*********************************************************************
1474 * NOTIFIER LISTS INTERFACE *
1475 *********************************************************************/
1476
/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		/* transition notifiers use an SRCU chain (may be called
		 * from contexts where blocking chains are unsuitable) */
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
1515
1516
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
1550
1551
1552/*********************************************************************
1553 * GOVERNORS *
1554 *********************************************************************/
1555
1556
/*
 * __cpufreq_driver_target - ask the driver to switch to a target frequency
 * @policy: policy to act on
 * @target_freq: desired frequency in kHz (clamped to policy min/max)
 * @relation: CPUFREQ_RELATION_* selection rule passed to the driver
 *
 * Caller is expected to hold the policy rwsem in write mode (see
 * cpufreq_driver_target()).  Returns 0 if already at target_freq,
 * the driver's result otherwise, or -EINVAL if no ->target callback.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;
	int (*target)(struct cpufreq_policy *policy,
		      unsigned int target_freq,
		      unsigned int relation);

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	if (target_freq == policy->cur)
		return 0;

	rcu_read_lock();
	target = rcu_dereference(cpufreq_driver)->target;
	rcu_read_unlock();
	if (target)
		retval = target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1591
1da177e4
LT
1592int cpufreq_driver_target(struct cpufreq_policy *policy,
1593 unsigned int target_freq,
1594 unsigned int relation)
1595{
f1829e4a 1596 int ret = -EINVAL;
1da177e4
LT
1597
1598 policy = cpufreq_cpu_get(policy->cpu);
1599 if (!policy)
f1829e4a 1600 goto no_policy;
1da177e4 1601
5a01f2e8 1602 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1603 goto fail;
1da177e4
LT
1604
1605 ret = __cpufreq_driver_target(policy, target_freq, relation);
1606
5a01f2e8 1607 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1608
f1829e4a 1609fail:
1da177e4 1610 cpufreq_cpu_put(policy);
f1829e4a 1611no_policy:
1da177e4
LT
1612 return ret;
1613}
1614EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1615
/*
 * __cpufreq_driver_getavg - query the driver's average-frequency estimate
 * @policy: policy to query (re-referenced via its cpu)
 * @cpu: CPU to query
 *
 * Returns 0 when cpufreq is disabled or the driver provides no ->getavg
 * callback, -EINVAL if the policy reference cannot be taken, otherwise
 * the driver's result.
 */
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;
	unsigned int (*getavg)(struct cpufreq_policy *policy,
			       unsigned int cpu);

	if (cpufreq_disabled())
		return ret;

	rcu_read_lock();
	getavg = rcu_dereference(cpufreq_driver)->getavg;
	rcu_read_unlock();

	if (!getavg)
		return 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	ret = getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1642
/*
 * __cpufreq_governor - deliver an event to the policy's governor
 * @policy: policy whose governor is invoked
 * @event: CPUFREQ_GOV_* event (e.g. START, STOP, LIMITS, POLICY_INIT/EXIT)
 *
 * Falls back to the performance governor when the configured governor's
 * max_transition_latency cannot be met by the hardware.  Manages the
 * governor module refcount so one reference stays held per started CPU.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* Track how many policies have this governor initialized. */
	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	}

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
1700
1701
1da177e4
LT
/**
 * cpufreq_register_governor - register a new cpufreq governor
 * @governor: governor to add to the global governor list
 *
 * Returns 0 on success, -EINVAL for a NULL governor, -ENODEV when
 * cpufreq is disabled, and -EBUSY when a governor with the same name
 * is already registered.
 */
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	/* Reject duplicates by name; the list is protected by the mutex. */
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1725
1726
/**
 * cpufreq_unregister_governor - remove a governor from the global list
 * @governor: governor to unregister
 *
 * Also clears any saved per-cpu "restore this governor on hotplug"
 * records that name this governor, so a re-plugged CPU does not try to
 * resurrect it.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	/* Wipe saved-governor records of offline CPUs pointing at us. */
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1754
1755
1756
1757/*********************************************************************
1758 * POLICY INTERFACE *
1759 *********************************************************************/
1760
1761/**
1762 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1763 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1764 * is written
1da177e4
LT
1765 *
1766 * Reads the current cpufreq policy.
1767 */
1768int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1769{
1770 struct cpufreq_policy *cpu_policy;
1771 if (!policy)
1772 return -EINVAL;
1773
1774 cpu_policy = cpufreq_cpu_get(cpu);
1775 if (!cpu_policy)
1776 return -EINVAL;
1777
1da177e4 1778 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1779
1780 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1781 return 0;
1782}
1783EXPORT_SYMBOL(cpufreq_get_policy);
1784
1785
/*
 * __cpufreq_set_policy - apply a new policy to an existing one
 * data   : current policy.
 * policy : policy to be set.
 *
 * Validates the new limits against the driver's ->verify callback (twice:
 * once before and once after notifier adjustment), broadcasts the policy
 * notifier sequence (ADJUST, INCOMPATIBLE, NOTIFY), then either calls the
 * driver's ->setpolicy or performs a governor switch with rollback to the
 * old governor on failure.  Caller must hold the policy rwsem for write.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;
	struct cpufreq_driver *driver;
	int (*verify)(struct cpufreq_policy *policy);
	int (*setpolicy)(struct cpufreq_policy *policy);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	verify = driver->verify;
	setpolicy = driver->setpolicy;
	rcu_read_unlock();

	ret = verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (setpolicy) {
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
					failed = 0;
				else
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1895
1da177e4
LT
1896/**
1897 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1898 * @cpu: CPU which shall be re-evaluated
1899 *
25985edc 1900 * Useful for policy notifiers which have different necessities
1da177e4
LT
1901 * at different times.
1902 */
1903int cpufreq_update_policy(unsigned int cpu)
1904{
1905 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1906 struct cpufreq_policy policy;
5800043b
NZ
1907 struct cpufreq_driver *driver;
1908 unsigned int (*get)(unsigned int cpu);
1909 int (*target)(struct cpufreq_policy *policy,
1910 unsigned int target_freq,
1911 unsigned int relation);
f1829e4a 1912 int ret;
1da177e4 1913
f1829e4a
JL
1914 if (!data) {
1915 ret = -ENODEV;
1916 goto no_policy;
1917 }
1da177e4 1918
f1829e4a
JL
1919 if (unlikely(lock_policy_rwsem_write(cpu))) {
1920 ret = -EINVAL;
1921 goto fail;
1922 }
1da177e4 1923
2d06d8c4 1924 pr_debug("updating policy for CPU %u\n", cpu);
7d5e350f 1925 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
1926 policy.min = data->user_policy.min;
1927 policy.max = data->user_policy.max;
1928 policy.policy = data->user_policy.policy;
1929 policy.governor = data->user_policy.governor;
1930
0961dd0d
TR
1931 /* BIOS might change freq behind our back
1932 -> ask driver for current freq and notify governors about a change */
5800043b
NZ
1933 rcu_read_lock();
1934 driver = rcu_access_pointer(cpufreq_driver);
1935 get = driver->get;
1936 target = driver->target;
1937 rcu_read_unlock();
1938 if (get) {
1939 policy.cur = get(cpu);
a85f7bd3 1940 if (!data->cur) {
2d06d8c4 1941 pr_debug("Driver did not initialize current freq");
a85f7bd3
TR
1942 data->cur = policy.cur;
1943 } else {
5800043b 1944 if (data->cur != policy.cur && target)
e08f5f5b
GS
1945 cpufreq_out_of_sync(cpu, data->cur,
1946 policy.cur);
a85f7bd3 1947 }
0961dd0d
TR
1948 }
1949
1da177e4
LT
1950 ret = __cpufreq_set_policy(data, &policy);
1951
5a01f2e8
VP
1952 unlock_policy_rwsem_write(cpu);
1953
f1829e4a 1954fail:
1da177e4 1955 cpufreq_cpu_put(data);
f1829e4a 1956no_policy:
1da177e4
LT
1957 return ret;
1958}
1959EXPORT_SYMBOL(cpufreq_update_policy);
1960
dd184a01 1961static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1962 unsigned long action, void *hcpu)
1963{
1964 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1965 struct device *dev;
c32b6b8e 1966
8a25a2fd
KS
1967 dev = get_cpu_device(cpu);
1968 if (dev) {
c32b6b8e
AR
1969 switch (action) {
1970 case CPU_ONLINE:
8bb78442 1971 case CPU_ONLINE_FROZEN:
8a25a2fd 1972 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1973 break;
1974 case CPU_DOWN_PREPARE:
8bb78442 1975 case CPU_DOWN_PREPARE_FROZEN:
8a25a2fd 1976 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1977 break;
5a01f2e8 1978 case CPU_DOWN_FAILED:
8bb78442 1979 case CPU_DOWN_FAILED_FROZEN:
8a25a2fd 1980 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1981 break;
1982 }
1983 }
1984 return NOTIFY_OK;
1985}
1986
/* Hotplug notifier registered in cpufreq_register_driver(); __refdata
 * suppresses section-mismatch warnings for the __cpuinit callback. */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
    .notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1990
1991/*********************************************************************
1992 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1993 *********************************************************************/
1994
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A usable driver needs ->verify and ->init, plus at least one
	 * rate-setting interface (->setpolicy or ->target). */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* setpolicy drivers change freq behind the kernel's back, so
	 * loops_per_jiffy must be treated as constant. */
	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Publish the driver pointer under the write lock; only one
	 * driver may be registered at a time. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (rcu_access_pointer(cpufreq_driver)) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	rcu_assign_pointer(cpufreq_driver, driver_data);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	synchronize_rcu();

	/* This triggers ->init() for every present CPU device. */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(driver_data->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	/* Unpublish the driver and wait for in-flight RCU readers. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	rcu_assign_pointer(cpufreq_driver, NULL);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	synchronize_rcu();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2068
2069
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;
	struct cpufreq_driver *old_driver;

	/* Refuse to unregister a driver that isn't the registered one.
	 * NOTE(review): the check happens under rcu_read_lock() but the
	 * teardown below proceeds without it — presumably callers serialize
	 * register/unregister themselves; verify against the call sites. */
	rcu_read_lock();
	old_driver = rcu_access_pointer(cpufreq_driver);
	if (!old_driver || (driver != old_driver)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();

	pr_debug("unregistering driver %s\n", driver->name);

	/* Tear down all per-CPU policies and stop hotplug handling. */
	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* Unpublish the driver pointer and wait for RCU readers that may
	 * still hold the old ops before letting the module go away. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	rcu_assign_pointer(cpufreq_driver, NULL);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2104
2105static int __init cpufreq_core_init(void)
2106{
2107 int cpu;
2108
a7b422cd
KRW
2109 if (cpufreq_disabled())
2110 return -ENODEV;
2111
5a01f2e8 2112 for_each_possible_cpu(cpu) {
f1625066 2113 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
2114 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2115 }
8aa84ad8 2116
8a25a2fd 2117 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
8aa84ad8 2118 BUG_ON(!cpufreq_global_kobject);
e00e56df 2119 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2120
5a01f2e8
VP
2121 return 0;
2122}
5a01f2e8 2123core_initcall(cpufreq_core_init);
This page took 0.744592 seconds and 5 git commands to generate.