cpufreq: exynos5440: Protect OPP search calls with RCU lock
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
5800043b 42static struct cpufreq_driver __rcu *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
0d1857a1 48static DEFINE_RWLOCK(cpufreq_driver_lock);
1da177e4 49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
5a01f2e8
VP
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
f1625066 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
/*
 * Generate lock_policy_rwsem_{read,write}(cpu): acquire, in the given
 * mode, the per-policy rwsem of the policy that manages @cpu.  The
 * semaphore lives with the policy's owning CPU (cpufreq_policy_cpu);
 * -1 there means @cpu has no policy, which would be a bug here.
 */
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

/* Counterparts releasing the per-policy rwsem taken above. */
#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
5a01f2e8 93
1da177e4 94/* internal prototypes */
29464f28
DJ
95static int __cpufreq_governor(struct cpufreq_policy *policy,
96 unsigned int event);
5a01f2e8 97static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 98static void handle_update(struct work_struct *work);
1da177e4
LT
99
100/**
32ee8c3e
DJ
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
1da177e4
LT
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
106 */
e041c683 107static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 108static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 109
/* Records that the SRCU notifier head below has been initialized. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
/* pure_initcall: runs before any device/driver initcalls can register */
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 118
/* Set once via disable_cpufreq(); makes the core refuse further work. */
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
1da177e4 128static LIST_HEAD(cpufreq_governor_list);
29464f28 129static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 130
4d5dcc42
VK
131bool have_governor_per_policy(void)
132{
5800043b
NZ
133 bool have_governor_per_policy;
134 rcu_read_lock();
135 have_governor_per_policy =
136 rcu_dereference(cpufreq_driver)->have_governor_per_policy;
137 rcu_read_unlock();
138 return have_governor_per_policy;
4d5dcc42
VK
139}
140
/*
 * __cpufreq_cpu_get - look up the policy for @cpu and pin it.
 *
 * Takes, in order: the RCU read lock (to sample cpufreq_driver), a
 * module reference on the driver and - unless @sysfs - a kobject
 * reference on the policy.  Returns the policy with those references
 * held, or NULL with everything released again on failure.
 */
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	struct cpufreq_driver *driver;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);

	if (!driver)
		goto err_out_unlock;

	if (!try_module_get(driver->owner))
		goto err_out_unlock;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	/* sysfs callers already hold a kobject ref via the open attr file */
	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	rcu_read_unlock();
	return data;

err_out_put_module:
	module_put(driver->owner);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out_unlock:
	rcu_read_unlock();
err_out:
	return NULL;
}
a9144436
SB
183
184struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
185{
d5aaffa9
DB
186 if (cpufreq_disabled())
187 return NULL;
188
a9144436
SB
189 return __cpufreq_cpu_get(cpu, false);
190}
1da177e4
LT
191EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
192
/* As cpufreq_cpu_get() for the sysfs path: skips the kobject reference
 * and does not check cpufreq_disabled(). */
static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}
197
/*
 * __cpufreq_cpu_put - drop the references taken by __cpufreq_cpu_get().
 * @sysfs mirrors the get side: sysfs callers never took a kobject ref.
 */
static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	rcu_read_lock();
	module_put(rcu_dereference(cpufreq_driver)->owner);
	rcu_read_unlock();
}
7d5e350f 206
1da177e4
LT
207void cpufreq_cpu_put(struct cpufreq_policy *data)
208{
d5aaffa9
DB
209 if (cpufreq_disabled())
210 return;
211
a9144436 212 __cpufreq_cpu_put(data, false);
1da177e4
LT
213}
214EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
215
/* Release a policy obtained via cpufreq_cpu_get_sysfs(). */
static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
1da177e4 220
1da177e4
LT
221/*********************************************************************
222 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
223 *********************************************************************/
224
225/**
226 * adjust_jiffies - adjust the system "loops_per_jiffy"
227 *
228 * This function alters the system "loops_per_jiffy" for the clock
229 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 230 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
231 * per-CPU loops_per_jiffy value wherever possible.
232 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;		/* loops_per_jiffy at reference freq */
static unsigned int l_p_j_ref_freq;	/* frequency the reference was taken at */

/*
 * Rescale the global loops_per_jiffy for the new frequency.  UP only:
 * on SMP each CPU may be scaled differently, so the arch's per-CPU
 * value is used instead.  No-op for CPUFREQ_CONST_LOOPS drivers.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* first call: remember the baseline to scale against */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
262
263
/*
 * Notify everyone interested about a frequency change on one CPU and
 * fix up bookkeeping around it.  Called once with CPUFREQ_PRECHANGE
 * and once with CPUFREQ_POSTCHANGE per transition.
 */
void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	rcu_read_lock();
	freqs->flags = rcu_dereference(cpufreq_driver)->flags;
	rcu_read_unlock();
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(freqs->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* the core's notion of the current frequency follows suit */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/* fan out per affected CPU; freqs->cpu is rewritten each pass */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
326
327
328
329/*********************************************************************
330 * SYSFS INTERFACE *
331 *********************************************************************/
332
3bcb09a3
JF
333static struct cpufreq_governor *__find_governor(const char *str_governor)
334{
335 struct cpufreq_governor *t;
336
337 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 338 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
339 return t;
340
341 return NULL;
342}
343
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers the string selects a static policy
 * (performance/powersave, returned via @policy); for target drivers it
 * names a governor (returned via @governor), loaded on demand through
 * request_module().  Returns 0 on success, -EINVAL otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;
	struct cpufreq_driver *driver;
	bool has_setpolicy;
	bool has_target;

	/* sample the driver's capabilities under RCU, then drop the lock */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (!driver) {
		rcu_read_unlock();
		goto out;
	}
	has_setpolicy = driver->setpolicy ? true : false;
	has_target = driver->target ? true : false;
	rcu_read_unlock();

	if (has_setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (has_target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* drop the mutex around request_module(): it may
			 * sleep, and the freshly loaded governor registers
			 * itself under this same mutex */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4
LT
402
403
1da177e4 404/**
e08f5f5b
GS
405 * cpufreq_per_cpu_attr_read() / show_##file_name() -
406 * print out cpufreq information
1da177e4
LT
407 *
408 * Write out information from cpufreq_driver->policy[cpu]; object must be
409 * "unsigned int".
410 */
411
/*
 * show_one - generate a sysfs show() helper that prints one
 * unsigned int member of the policy.
 */
#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
425
e08f5f5b
GS
426static int __cpufreq_set_policy(struct cpufreq_policy *data,
427 struct cpufreq_policy *policy);
7970e08b 428
1da177e4
LT
429/**
430 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
431 */
432#define store_one(file_name, object) \
433static ssize_t store_##file_name \
905d77cd 434(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 435{ \
f55c9c26 436 unsigned int ret; \
1da177e4
LT
437 struct cpufreq_policy new_policy; \
438 \
439 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
440 if (ret) \
441 return -EINVAL; \
442 \
29464f28 443 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
444 if (ret != 1) \
445 return -EINVAL; \
446 \
7970e08b
TR
447 ret = __cpufreq_set_policy(policy, &new_policy); \
448 policy->user_policy.object = policy->object; \
1da177e4
LT
449 \
450 return ret ? ret : count; \
451}
452
29464f28
DJ
453store_one(scaling_min_freq, min);
454store_one(scaling_max_freq, max);
1da177e4
LT
455
456/**
457 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
458 */
905d77cd
DJ
459static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
460 char *buf)
1da177e4 461{
5a01f2e8 462 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
463 if (!cur_freq)
464 return sprintf(buf, "<unknown>");
465 return sprintf(buf, "%u\n", cur_freq);
466}
467
468
469/**
470 * show_scaling_governor - show the current policy for the specified CPU
471 */
905d77cd 472static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 473{
29464f28 474 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
475 return sprintf(buf, "powersave\n");
476 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
477 return sprintf(buf, "performance\n");
478 else if (policy->governor)
4b972f0b 479 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 480 policy->governor->name);
1da177e4
LT
481 return -EINVAL;
482}
483
484
485/**
486 * store_scaling_governor - store policy for the specified CPU
487 */
905d77cd
DJ
488static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
489 const char *buf, size_t count)
1da177e4 490{
f55c9c26 491 unsigned int ret;
1da177e4
LT
492 char str_governor[16];
493 struct cpufreq_policy new_policy;
494
495 ret = cpufreq_get_policy(&new_policy, policy->cpu);
496 if (ret)
497 return ret;
498
29464f28 499 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
500 if (ret != 1)
501 return -EINVAL;
502
e08f5f5b
GS
503 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
504 &new_policy.governor))
1da177e4
LT
505 return -EINVAL;
506
7970e08b
TR
507 /* Do not use cpufreq_set_policy here or the user_policy.max
508 will be wrongly overridden */
7970e08b
TR
509 ret = __cpufreq_set_policy(policy, &new_policy);
510
511 policy->user_policy.policy = policy->policy;
512 policy->user_policy.governor = policy->governor;
7970e08b 513
e08f5f5b
GS
514 if (ret)
515 return ret;
516 else
517 return count;
1da177e4
LT
518}
519
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	ssize_t size;

	/* the driver name is read under RCU to co-exist with unregistration */
	rcu_read_lock();
	size = scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
			rcu_dereference(cpufreq_driver)->name);
	rcu_read_unlock();
	return size;
}
532
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	/* setpolicy-only drivers expose just the two static policies */
	rcu_read_lock();
	if (!rcu_dereference(cpufreq_driver)->target) {
		rcu_read_unlock();
		i += sprintf(buf, "performance powersave");
		goto out;
	}
	rcu_read_unlock();

	/* NOTE(review): walks cpufreq_governor_list without holding
	 * cpufreq_governor_mutex - confirm this is safe against governor
	 * (un)registration. */
	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 560
/* Print the CPU numbers in @mask, space-separated and newline
 * terminated; output is truncated to stay within one page. */
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
576
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}
585
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
593
9e76988e 594static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 595 const char *buf, size_t count)
9e76988e
VP
596{
597 unsigned int freq = 0;
598 unsigned int ret;
599
879000f9 600 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
601 return -EINVAL;
602
603 ret = sscanf(buf, "%u", &freq);
604 if (ret != 1)
605 return -EINVAL;
606
607 policy->governor->store_setspeed(policy, freq);
608
609 return count;
610}
611
/* Show the governor-specific "setspeed" value, if the governor has one. */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4 619
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int (*bios_limit)(int cpu, unsigned int *limit);
	int ret;

	rcu_read_lock();
	bios_limit = rcu_dereference(cpufreq_driver)->bios_limit;
	rcu_read_unlock();

	if (bios_limit) {
		ret = bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	/* no driver hook, or it failed: fall back to the cpuinfo maximum */
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
640
/* sysfs attribute objects; cpuinfo_cur_freq is root-readable only
 * (0400), presumably because showing it goes through __cpufreq_get() -
 * see show_cpuinfo_cur_freq(). */
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 655
/* Attributes created for every policy kobject.  cpuinfo_cur_freq,
 * scaling_cur_freq (for non-target drivers) and bios_limit are added
 * conditionally in cpufreq_add_dev_interface(). */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
670
/* Global cpufreq kobject, exported for use outside this file. */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Map sysfs kobject/attribute back to the embedding cpufreq objects. */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 676
/* sysfs ->show dispatcher: pin the policy, take its rwsem for reading,
 * then call the attribute's show hook. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
700
/* sysfs ->store dispatcher: like show(), but takes the policy rwsem in
 * write mode around the attribute's store hook. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
725
/* kobject release: the last reference is gone, wake whoever waits on
 * kobj_unregister (see cpufreq_add_dev_interface's error path). */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
732
static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

/* kobject type backing each policy's sysfs directory */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};
743
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		/* @cpu owns the real directory; every sibling gets a link */
		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		/* NOTE(review): the reference taken here is only dropped on
		 * the failure path and the return value is never NULL
		 * checked - confirm this refcounting is intended. */
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
770
/*
 * cpufreq_add_dev_interface - hook a new policy up to sysfs.
 *
 * Creates the policy kobject under @dev, adds driver-specific and
 * conditional attributes, publishes the policy in the per-CPU tables,
 * creates sibling symlinks and finally applies the default policy.
 */
static int cpufreq_add_dev_interface(unsigned int cpu,
				struct cpufreq_policy *policy,
				struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	struct cpufreq_driver *driver;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				&dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device.
	 * NOTE(review): the sysfs_create_file() calls below run inside
	 * rcu_read_lock(); sysfs file creation may sleep - confirm this
	 * RCU usage is safe. */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	drv_attr = driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_unlock;
		drv_attr++;
	}
	if (driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_unlock;
	}
	if (driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_unlock;
	}
	if (driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_unlock;
	}
	rcu_read_unlock();

	/* publish the policy for every CPU it covers */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		int (*exit)(struct cpufreq_policy *policy);

		pr_debug("setting policy failed\n");
		rcu_read_lock();
		exit = rcu_dereference(cpufreq_driver)->exit;
		rcu_read_unlock();
		if (exit)
			exit(policy);

	}
	return ret;

err_out_unlock:
	rcu_read_unlock();
err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
855
#ifdef CONFIG_HOTPLUG_CPU
/*
 * cpufreq_add_policy_cpu - attach @cpu to @sibling's existing policy
 * instead of creating a new one.  The governor is stopped around the
 * cpus-mask update and restarted afterwards.
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0;
	unsigned long flags;

	/* the reference taken here is kept while @cpu uses the policy */
	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	__cpufreq_governor(policy, CPUFREQ_GOV_START);
	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
892
893/**
894 * cpufreq_add_dev - add a CPU device
895 *
32ee8c3e 896 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
897 *
898 * The Oracle says: try running cpufreq registration/unregistration concurrently
899 * with with cpu hotplugging and all hell will break loose. Tried to clean this
900 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 901 */
8a25a2fd 902static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 903{
fcf80582 904 unsigned int j, cpu = dev->id;
65922465 905 int ret = -ENOMEM;
1da177e4 906 struct cpufreq_policy *policy;
5800043b
NZ
907 struct cpufreq_driver *driver;
908 int (*init)(struct cpufreq_policy *policy);
1da177e4 909 unsigned long flags;
90e41bac 910#ifdef CONFIG_HOTPLUG_CPU
fcf80582 911 struct cpufreq_governor *gov;
90e41bac
PB
912 int sibling;
913#endif
1da177e4 914
c32b6b8e
AR
915 if (cpu_is_offline(cpu))
916 return 0;
917
2d06d8c4 918 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
919
920#ifdef CONFIG_SMP
921 /* check whether a different CPU already registered this
922 * CPU because it is in the same boat. */
923 policy = cpufreq_cpu_get(cpu);
924 if (unlikely(policy)) {
8ff69732 925 cpufreq_cpu_put(policy);
1da177e4
LT
926 return 0;
927 }
fcf80582
VK
928
929#ifdef CONFIG_HOTPLUG_CPU
930 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 931 read_lock_irqsave(&cpufreq_driver_lock, flags);
fcf80582
VK
932 for_each_online_cpu(sibling) {
933 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
2eaa3e2d 934 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
0d1857a1 935 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 936 return cpufreq_add_policy_cpu(cpu, sibling, dev);
2eaa3e2d 937 }
fcf80582 938 }
0d1857a1 939 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 940#endif
1da177e4
LT
941#endif
942
5800043b
NZ
943 rcu_read_lock();
944 driver = rcu_dereference(cpufreq_driver);
945 if (!try_module_get(driver->owner)) {
946 rcu_read_unlock();
1da177e4
LT
947 ret = -EINVAL;
948 goto module_out;
949 }
5800043b
NZ
950 init = driver->init;
951 rcu_read_unlock();
1da177e4 952
e98df50c 953 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 954 if (!policy)
1da177e4 955 goto nomem_out;
059019a3
DJ
956
957 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 958 goto err_free_policy;
059019a3
DJ
959
960 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 961 goto err_free_cpumask;
1da177e4
LT
962
963 policy->cpu = cpu;
65922465 964 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 965 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 966
5a01f2e8 967 /* Initially set CPU itself as the policy_cpu */
f1625066 968 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 969
1da177e4 970 init_completion(&policy->kobj_unregister);
65f27f38 971 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
972
973 /* call driver. From then on the cpufreq must be able
974 * to accept all calls to ->verify and ->setpolicy for this CPU
975 */
5800043b 976 ret = init(policy);
1da177e4 977 if (ret) {
2d06d8c4 978 pr_debug("initialization failed\n");
2eaa3e2d 979 goto err_set_policy_cpu;
1da177e4 980 }
643ae6e8 981
fcf80582
VK
982 /* related cpus should atleast have policy->cpus */
983 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
984
643ae6e8
VK
985 /*
986 * affected cpus must always be the one, which are online. We aren't
987 * managing offline cpus here.
988 */
989 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
990
187d9f4e
MC
991 policy->user_policy.min = policy->min;
992 policy->user_policy.max = policy->max;
1da177e4 993
a1531acd
TR
994 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
995 CPUFREQ_START, policy);
996
fcf80582
VK
997#ifdef CONFIG_HOTPLUG_CPU
998 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
999 if (gov) {
1000 policy->governor = gov;
1001 pr_debug("Restoring governor %s for cpu %d\n",
1002 policy->governor->name, cpu);
4bfa042c 1003 }
fcf80582 1004#endif
1da177e4 1005
8a25a2fd 1006 ret = cpufreq_add_dev_interface(cpu, policy, dev);
19d6f7ec
DJ
1007 if (ret)
1008 goto err_out_unregister;
8ff69732 1009
038c5b3e 1010 kobject_uevent(&policy->kobj, KOBJ_ADD);
5800043b
NZ
1011 rcu_read_lock();
1012 module_put(rcu_dereference(cpufreq_driver)->owner);
1013 rcu_read_unlock();
2d06d8c4 1014 pr_debug("initialization complete\n");
87c32271 1015
1da177e4
LT
1016 return 0;
1017
1da177e4 1018err_out_unregister:
0d1857a1 1019 write_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 1020 for_each_cpu(j, policy->cpus)
7a6aedfa 1021 per_cpu(cpufreq_cpu_data, j) = NULL;
0d1857a1 1022 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1023
c10997f6 1024 kobject_put(&policy->kobj);
1da177e4
LT
1025 wait_for_completion(&policy->kobj_unregister);
1026
2eaa3e2d
VK
1027err_set_policy_cpu:
1028 per_cpu(cpufreq_policy_cpu, cpu) = -1;
cad70a6a 1029 free_cpumask_var(policy->related_cpus);
3f4a782b
MD
1030err_free_cpumask:
1031 free_cpumask_var(policy->cpus);
1032err_free_policy:
1da177e4 1033 kfree(policy);
1da177e4 1034nomem_out:
5800043b
NZ
1035 rcu_read_lock();
1036 module_put(rcu_dereference(cpufreq_driver)->owner);
1037 rcu_read_unlock();
c32b6b8e 1038module_out:
1da177e4
LT
1039 return ret;
1040}
1041
/*
 * update_policy_cpu - make @cpu the new owner CPU of @policy
 *
 * Records the previous owner in policy->last_cpu, points every sibling's
 * per-cpu policy_cpu slot at the new owner, and notifies interested parties
 * (frequency table code and policy notifiers) about the ownership change.
 * Callers are expected to hold the policy rwsem in write mode — TODO confirm
 * against call sites (__cpufreq_remove_dev takes it around this call).
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	/* Remember the old owner so consumers can migrate their state. */
	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	/* All CPUs sharing this policy now resolve to the new owner. */
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1da177e4
LT
1058
1059/**
5a01f2e8 1060 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1061 *
1062 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1063 * Caller should already have policy_rwsem in write mode for this CPU.
1064 * This routine frees the rwsem before returning.
1da177e4 1065 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct cpufreq_driver *driver;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;
	bool has_target;
	int (*exit)(struct cpufreq_policy *policy);

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	/* Detach this CPU's policy pointer under the driver lock so no new
	 * lookups through cpufreq_cpu_data can find it. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	/* Snapshot the driver's callbacks under RCU; the ->exit hook is
	 * saved for later use after the rcu read section ends. */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	has_target = driver->target ? true : false;
	exit = driver->exit;
	if (has_target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so it can be restored if the CPU returns. */
	if (!driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif
	rcu_read_unlock();

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);

	/* Keep the last CPU in the mask so the policy stays addressable. */
	if (cpus > 1)
		cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		/* Not the sysfs owner: only its symlink needs removing. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			/* Roll back: re-add the CPU to the mask and restore
			 * its per-cpu policy pointer before bailing out. */
			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
	cpufreq_cpu_put(data);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

		/* Grab kobj/completion pointers under the read lock; the
		 * kobject_put below may trigger release asynchronously. */
		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (exit)
			exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else if (has_target) {
		/* Policy survives on siblings: restart its governor. */
		__cpufreq_governor(data, CPUFREQ_GOV_START);
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}
1179
1180
8a25a2fd 1181static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1182{
8a25a2fd 1183 unsigned int cpu = dev->id;
5a01f2e8 1184 int retval;
ec28297a
VP
1185
1186 if (cpu_is_offline(cpu))
1187 return 0;
1188
8a25a2fd 1189 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1190 return retval;
1191}
1192
1193
65f27f38 1194static void handle_update(struct work_struct *work)
1da177e4 1195{
65f27f38
DH
1196 struct cpufreq_policy *policy =
1197 container_of(work, struct cpufreq_policy, update);
1198 unsigned int cpu = policy->cpu;
2d06d8c4 1199 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1200 cpufreq_update_policy(cpu);
1201}
1202
1203/**
1204 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1205 * @cpu: cpu number
1206 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1207 * @new_freq: CPU frequency the CPU actually runs at
1208 *
29464f28
DJ
1209 * We adjust to current frequency first, and need to clean up later.
1210 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1211 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;


	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	/* cpufreq_cpu_data is protected by cpufreq_driver_lock. */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* Emit both halves of a synthetic transition so loops-per-jiffy and
	 * other transition listeners adjust to the real frequency. */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
1233
1234
32ee8c3e 1235/**
4ab70df4 1236 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1237 * @cpu: CPU number
1238 *
1239 * This is the last known freq, without actually getting it from the driver.
1240 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1241 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_driver *driver;
	unsigned int (*get)(unsigned int cpu);
	unsigned int ret_freq = 0;

	/* setpolicy-style drivers have no cached policy->cur worth trusting,
	 * so ask the hardware directly via ->get.  The callback pointer is
	 * copied out and the RCU section ended before calling it, since
	 * ->get may sleep — TODO confirm that assumption holds for all
	 * drivers. */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (driver && driver->setpolicy && driver->get) {
		get = driver->get;
		rcu_read_unlock();
		return get(cpu);
	}
	rcu_read_unlock();

	/* Otherwise report the last value cpufreq itself recorded. */
	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
1267
3d737108
JB
1268/**
1269 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1270 * @cpu: CPU number
1271 *
1272 * Just return the max possible frequency for a given CPU.
1273 */
1274unsigned int cpufreq_quick_get_max(unsigned int cpu)
1275{
1276 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1277 unsigned int ret_freq = 0;
1278
1279 if (policy) {
1280 ret_freq = policy->max;
1281 cpufreq_cpu_put(policy);
1282 }
1283
1284 return ret_freq;
1285}
1286EXPORT_SYMBOL(cpufreq_quick_get_max);
1287
95235ca2 1288
/*
 * __cpufreq_get - read the current frequency from the driver for @cpu.
 *
 * Caller must hold the policy rwsem for @cpu (see cpufreq_get()).  If the
 * driver-reported frequency disagrees with the cached policy->cur and the
 * driver does not set CPUFREQ_CONST_LOOPS, notify listeners synchronously
 * and schedule a full policy update.
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	struct cpufreq_driver *driver;
	unsigned int (*get)(unsigned int cpu);
	unsigned int ret_freq = 0;
	u8 flags;


	/* Snapshot ->get and ->flags under RCU; the callback is invoked
	 * outside the read section. */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (!driver->get) {
		rcu_read_unlock();
		return ret_freq;
	}
	flags = driver->flags;
	get = driver->get;
	rcu_read_unlock();

	ret_freq = get(cpu);

	if (ret_freq && policy->cur &&
		!(flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1322
5a01f2e8
VP
1323/**
1324 * cpufreq_get - get the current CPU frequency (in kHz)
1325 * @cpu: CPU number
1326 *
1327 * Get the CPU current (static) CPU frequency
1328 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	/* No policy: nothing to read, return 0. */
	if (!policy)
		goto out;

	/* __cpufreq_get() requires the policy rwsem; bail out (returning 0)
	 * if it cannot be taken. */
	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);	/* balance cpufreq_cpu_get() above */
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
1350
/* Hook cpufreq into the CPU subsystem: add/remove are called for every
 * CPU device as it is registered or torn down. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1357
1da177e4 1358
42d4dc3f 1359/**
e00e56df
RW
1360 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1361 *
1362 * This function is only executed for the boot processor. The other CPUs
1363 * have been put offline by means of CPU hotplug.
42d4dc3f 1364 */
static int cpufreq_bp_suspend(void)
{
	int (*suspend)(struct cpufreq_policy *policy);
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	/* Snapshot the optional ->suspend hook under RCU, then call it
	 * outside the read section. */
	rcu_read_lock();
	suspend = rcu_dereference(cpufreq_driver)->suspend;
	rcu_read_unlock();
	if (suspend) {
		ret = suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	cpufreq_cpu_put(cpu_policy);
	return ret;
}
1393
1da177e4 1394/**
e00e56df 1395 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1396 *
1397 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1398 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1399 * restored. It will verify that the current freq is in sync with
1400 * what we believe it to be. This is a bit later than when it
1401 * should be, but nonethteless it's better than calling
1402 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1403 *
1404 * This function is only executed for the boot CPU. The other CPUs have not
1405 * been turned on yet.
1da177e4 1406 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;
	int (*resume)(struct cpufreq_policy *policy);

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return;

	/* Snapshot the optional ->resume hook under RCU and call it
	 * afterwards. */
	rcu_read_lock();
	resume = rcu_dereference(cpufreq_driver)->resume;
	rcu_read_unlock();

	if (resume) {
		ret = resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	/* Defer the freq re-check to process context (see function doc). */
	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
}
1440
/* Syscore hooks run on the boot CPU only, with interrupts disabled,
 * after all other CPUs have been offlined. */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1445
9d95046e
BP
1446/**
1447 * cpufreq_get_current_driver - return current driver's name
1448 *
1449 * Return the name string of the currently loaded cpufreq driver
1450 * or NULL, if none.
1451 */
1452const char *cpufreq_get_current_driver(void)
1453{
5800043b
NZ
1454 struct cpufreq_driver *driver;
1455 const char *name = NULL;
1456 rcu_read_lock();
1457 driver = rcu_dereference(cpufreq_driver);
1458 if (driver)
1459 name = driver->name;
1460 rcu_read_unlock();
1461 return name;
9d95046e
BP
1462}
1463EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1464
1465/*********************************************************************
1466 * NOTIFIER LISTS INTERFACE *
1467 *********************************************************************/
1468
1469/**
1470 * cpufreq_register_notifier - register a driver with cpufreq
1471 * @nb: notifier function to register
1472 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1473 *
32ee8c3e 1474 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1475 * are notified about clock rate changes (once before and once after
1476 * the transition), or a list of drivers that are notified about
1477 * changes in cpufreq policy.
1478 *
1479 * This function may sleep, and has the same return conditions as
e041c683 1480 * blocking_notifier_chain_register.
1da177e4
LT
1481 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	/* Registration before the transition list is initialized would be
	 * a caller ordering bug. */
	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		/* Transition notifiers use SRCU: they may be called from
		 * contexts where blocking chains are unsuitable. */
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
1507
1508
1509/**
1510 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1511 * @nb: notifier block to be unregistered
1512 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1513 *
1514 * Remove a driver from the CPU frequency notifier list.
1515 *
1516 * This function may sleep, and has the same return conditions as
e041c683 1517 * blocking_notifier_chain_unregister.
1da177e4
LT
1518 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	/* Mirror of cpufreq_register_notifier(): pick the matching chain. */
	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
1542
1543
1544/*********************************************************************
1545 * GOVERNORS *
1546 *********************************************************************/
1547
1548
/*
 * __cpufreq_driver_target - ask the driver to switch @policy to @target_freq.
 *
 * Caller must hold the policy rwsem in write mode (see cpufreq_driver_target).
 * The requested frequency is clamped into [policy->min, policy->max] first;
 * a request matching the current frequency is a successful no-op.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;
	int (*target)(struct cpufreq_policy *policy,
		      unsigned int target_freq,
		      unsigned int relation);

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	if (target_freq == policy->cur)
		return 0;

	/* Copy the ->target callback out under RCU, invoke it afterwards;
	 * drivers' ->target may sleep — TODO confirm for all drivers. */
	rcu_read_lock();
	target = rcu_dereference(cpufreq_driver)->target;
	rcu_read_unlock();
	if (target)
		retval = target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1583
1da177e4
LT
/*
 * cpufreq_driver_target - locked wrapper around __cpufreq_driver_target().
 *
 * Takes a reference on the policy and the policy rwsem (write) before
 * delegating; error paths fall through the goto ladder so the reference
 * is always dropped.
 */
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	/* Re-resolve the policy to take our own reference. */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1607
/*
 * __cpufreq_driver_getavg - query the driver's average-frequency hook.
 *
 * Returns 0 when cpufreq is disabled or the driver has no ->getavg,
 * -EINVAL when the policy cannot be referenced, otherwise whatever the
 * driver's ->getavg reports for @cpu.
 */
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;
	unsigned int (*getavg)(struct cpufreq_policy *policy,
			       unsigned int cpu);

	if (cpufreq_disabled())
		return ret;

	/* Snapshot the optional callback under RCU. */
	rcu_read_lock();
	getavg = rcu_dereference(cpufreq_driver)->getavg;
	rcu_read_unlock();

	if (!getavg)
		return 0;

	/* Take our own reference for the duration of the call. */
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	ret = getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1634
153d7f3f 1635/*
153d7f3f
AV
1636 * when "event" is CPUFREQ_GOV_LIMITS
1637 */
1da177e4 1638
e08f5f5b
GS
/*
 * __cpufreq_governor - dispatch a governor @event for @policy.
 *
 * Falls back to the performance governor when the chosen governor's
 * max_transition_latency cannot be met by the hardware.  A module
 * reference on the governor is held per started CPU: taken here and
 * released on every event except a successful GOV_START, plus an extra
 * put on a successful GOV_STOP.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* Track how many policies have this governor initialized. */
	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	}

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
1692
1693
1da177e4
LT
1694int cpufreq_register_governor(struct cpufreq_governor *governor)
1695{
3bcb09a3 1696 int err;
1da177e4
LT
1697
1698 if (!governor)
1699 return -EINVAL;
1700
a7b422cd
KRW
1701 if (cpufreq_disabled())
1702 return -ENODEV;
1703
3fc54d37 1704 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1705
b394058f 1706 governor->initialized = 0;
3bcb09a3
JF
1707 err = -EBUSY;
1708 if (__find_governor(governor->name) == NULL) {
1709 err = 0;
1710 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1711 }
1da177e4 1712
32ee8c3e 1713 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1714 return err;
1da177e4
LT
1715}
1716EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1717
1718
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	/* Clear any saved "restore this governor on re-plug" entries that
	 * reference the governor being removed, so an offline CPU coming
	 * back does not resolve a stale name. */
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1746
1747
1748
1749/*********************************************************************
1750 * POLICY INTERFACE *
1751 *********************************************************************/
1752
1753/**
1754 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1755 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1756 * is written
1da177e4
LT
1757 *
1758 * Reads the current cpufreq policy.
1759 */
1760int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1761{
1762 struct cpufreq_policy *cpu_policy;
1763 if (!policy)
1764 return -EINVAL;
1765
1766 cpu_policy = cpufreq_cpu_get(cpu);
1767 if (!cpu_policy)
1768 return -EINVAL;
1769
1da177e4 1770 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1771
1772 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1773 return 0;
1774}
1775EXPORT_SYMBOL(cpufreq_get_policy);
1776
1777
153d7f3f 1778/*
e08f5f5b
GS
1779 * data : current policy.
1780 * policy : policy to be set.
153d7f3f 1781 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;
	struct cpufreq_driver *driver;
	int (*verify)(struct cpufreq_policy *policy);
	int (*setpolicy)(struct cpufreq_policy *policy);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* The requested range must overlap the currently active one. */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	verify = driver->verify;
	setpolicy = driver->setpolicy;
	rcu_read_unlock();

	ret = verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (setpolicy) {
		/* setpolicy drivers: hand the whole policy to hardware. */
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
					failed = 0;
				else
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1887
1da177e4
LT
1888/**
1889 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1890 * @cpu: CPU which shall be re-evaluated
1891 *
25985edc 1892 * Useful for policy notifiers which have different necessities
1da177e4
LT
1893 * at different times.
1894 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	struct cpufreq_driver *driver;
	unsigned int (*get)(unsigned int cpu);
	int (*target)(struct cpufreq_policy *policy,
		      unsigned int target_freq,
		      unsigned int relation);
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Build a candidate policy from the user-requested values. */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	rcu_read_lock();
	driver = rcu_access_pointer(cpufreq_driver);
	get = driver->get;
	target = driver->target;
	rcu_read_unlock();
	if (get) {
		policy.cur = get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			/* Only targetable drivers can be brought back in
			 * sync via a synthetic transition. */
			if (data->cur != policy.cur && target)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1952
dd184a01 1953static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1954 unsigned long action, void *hcpu)
1955{
1956 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1957 struct device *dev;
c32b6b8e 1958
8a25a2fd
KS
1959 dev = get_cpu_device(cpu);
1960 if (dev) {
c32b6b8e
AR
1961 switch (action) {
1962 case CPU_ONLINE:
8bb78442 1963 case CPU_ONLINE_FROZEN:
8a25a2fd 1964 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1965 break;
1966 case CPU_DOWN_PREPARE:
8bb78442 1967 case CPU_DOWN_PREPARE_FROZEN:
8a25a2fd 1968 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1969 break;
5a01f2e8 1970 case CPU_DOWN_FAILED:
8bb78442 1971 case CPU_DOWN_FAILED_FROZEN:
8a25a2fd 1972 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1973 break;
1974 }
1975 }
1976 return NOTIFY_OK;
1977}
1978
/* Hotplug notifier that keeps per-CPU cpufreq state in sync (see above). */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1982
1983/*********************************************************************
1984 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1985 *********************************************************************/
1986
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A usable driver must verify policies and either set them wholesale
	 * (->setpolicy) or switch to explicit target frequencies (->target). */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Publish the driver pointer under the writer lock; only one driver
	 * may be registered at a time. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (rcu_access_pointer(cpufreq_driver)) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	rcu_assign_pointer(cpufreq_driver, driver_data);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	/* Make sure all RCU readers observe the new driver before we start
	 * creating per-CPU devices that will call into it. */
	synchronize_rcu();

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(driver_data->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	/* Undo the publication above, again waiting out any RCU readers. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	rcu_assign_pointer(cpufreq_driver, NULL);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	synchronize_rcu();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2060
2061
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;
	struct cpufreq_driver *old_driver;

	/* Only the currently registered driver may be unregistered.
	 * rcu_access_pointer() is fine here: the pointer is only compared,
	 * never dereferenced.
	 * NOTE(review): there is a window between rcu_read_unlock() and the
	 * write-locked clearing below where another registration could race
	 * in — presumably callers serialize this externally; verify. */
	rcu_read_lock();
	old_driver = rcu_access_pointer(cpufreq_driver);
	if (!old_driver || (driver != old_driver)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();

	pr_debug("unregistering driver %s\n", driver->name);

	/* Tear down per-CPU devices and hotplug handling first so nothing
	 * new can call into the driver. */
	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* Clear the driver pointer and wait for in-flight RCU readers to
	 * finish before the caller may free the driver. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	rcu_assign_pointer(cpufreq_driver, NULL);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2096
2097static int __init cpufreq_core_init(void)
2098{
2099 int cpu;
2100
a7b422cd
KRW
2101 if (cpufreq_disabled())
2102 return -ENODEV;
2103
5a01f2e8 2104 for_each_possible_cpu(cpu) {
f1625066 2105 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
2106 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2107 }
8aa84ad8 2108
8a25a2fd 2109 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
8aa84ad8 2110 BUG_ON(!cpufreq_global_kobject);
e00e56df 2111 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2112
5a01f2e8
VP
2113 return 0;
2114}
5a01f2e8 2115core_initcall(cpufreq_core_init);
This page took 0.73154 seconds and 5 git commands to generate.