cpufreq: Don't pass CPU to cpufreq_add_dev_{symlink|interface}()
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
72a4ce34 20#include <asm/cputime.h>
1da177e4 21#include <linux/kernel.h>
72a4ce34 22#include <linux/kernel_stat.h>
1da177e4
LT
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/notifier.h>
26#include <linux/cpufreq.h>
27#include <linux/delay.h>
28#include <linux/interrupt.h>
29#include <linux/spinlock.h>
72a4ce34 30#include <linux/tick.h>
1da177e4
LT
31#include <linux/device.h>
32#include <linux/slab.h>
33#include <linux/cpu.h>
34#include <linux/completion.h>
3fc54d37 35#include <linux/mutex.h>
e00e56df 36#include <linux/syscore_ops.h>
1da177e4 37
6f4f2723
TR
38#include <trace/events/power.h>
39
1da177e4 40/**
cd878479 41 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
42 * level driver of CPUFreq support, and its spinlock. This lock
43 * also protects the cpufreq_cpu_data array.
44 */
1c3d85dd 45static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 46static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
8414809c 47static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
bb176f7d
VK
48static DEFINE_RWLOCK(cpufreq_driver_lock);
49static DEFINE_MUTEX(cpufreq_governor_lock);
50
084f3493
TR
51#ifdef CONFIG_HOTPLUG_CPU
52/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 53static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 54#endif
1da177e4 55
5a01f2e8
VP
56/*
57 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
58 * all cpufreq/hotplug/workqueue/etc related lock issues.
59 *
60 * The rules for this semaphore:
61 * - Any routine that wants to read from the policy structure will
62 * do a down_read on this semaphore.
63 * - Any routine that will write to the policy structure and/or may take away
64 * the policy altogether (eg. CPU hotplug), will hold this lock in write
65 * mode before doing so.
66 *
67 * Additional rules:
5a01f2e8
VP
68 * - Governor routines that can be called in cpufreq hotplug path should not
69 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
70 * - Lock should not be held across
71 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 72 */
f1625066 73static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
74static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
75
76#define lock_policy_rwsem(mode, cpu) \
fa1d8af4 77static int lock_policy_rwsem_##mode(int cpu) \
5a01f2e8 78{ \
f1625066 79 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
80 BUG_ON(policy_cpu == -1); \
81 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8
VP
82 \
83 return 0; \
84}
85
86lock_policy_rwsem(read, cpu);
5a01f2e8 87lock_policy_rwsem(write, cpu);
5a01f2e8 88
fa1d8af4
VK
89#define unlock_policy_rwsem(mode, cpu) \
90static void unlock_policy_rwsem_##mode(int cpu) \
91{ \
92 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
93 BUG_ON(policy_cpu == -1); \
94 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8 95}
5a01f2e8 96
fa1d8af4
VK
97unlock_policy_rwsem(read, cpu);
98unlock_policy_rwsem(write, cpu);
5a01f2e8 99
1da177e4 100/* internal prototypes */
29464f28
DJ
101static int __cpufreq_governor(struct cpufreq_policy *policy,
102 unsigned int event);
5a01f2e8 103static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 104static void handle_update(struct work_struct *work);
1da177e4
LT
105
106/**
32ee8c3e
DJ
107 * Two notifier lists: the "policy" list is involved in the
108 * validation process for a new CPU frequency policy; the
1da177e4
LT
109 * "transition" list for kernel code that needs to handle
110 * changes to devices when the CPU clock speed changes.
111 * The mutex locks both lists.
112 */
e041c683 113static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 114static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 115
74212ca4 116static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
117static int __init init_cpufreq_transition_notifier_list(void)
118{
119 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 120 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
121 return 0;
122}
b3438f82 123pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 124
a7b422cd 125static int off __read_mostly;
da584455 126static int cpufreq_disabled(void)
a7b422cd
KRW
127{
128 return off;
129}
130void disable_cpufreq(void)
131{
132 off = 1;
133}
1da177e4 134static LIST_HEAD(cpufreq_governor_list);
29464f28 135static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 136
4d5dcc42
VK
137bool have_governor_per_policy(void)
138{
1c3d85dd 139 return cpufreq_driver->have_governor_per_policy;
4d5dcc42 140}
3f869d6d 141EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 142
944e9a03
VK
143struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
144{
145 if (have_governor_per_policy())
146 return &policy->kobj;
147 else
148 return cpufreq_global_kobject;
149}
150EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
151
72a4ce34
VK
152static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
153{
154 u64 idle_time;
155 u64 cur_wall_time;
156 u64 busy_time;
157
158 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
159
160 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
161 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
162 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
163 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
164 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
165 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
166
167 idle_time = cur_wall_time - busy_time;
168 if (wall)
169 *wall = cputime_to_usecs(cur_wall_time);
170
171 return cputime_to_usecs(idle_time);
172}
173
174u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
175{
176 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
177
178 if (idle_time == -1ULL)
179 return get_cpu_idle_time_jiffy(cpu, wall);
180 else if (!io_busy)
181 idle_time += get_cpu_iowait_time_us(cpu, wall);
182
183 return idle_time;
184}
185EXPORT_SYMBOL_GPL(get_cpu_idle_time);
186
a9144436 187static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
1da177e4
LT
188{
189 struct cpufreq_policy *data;
190 unsigned long flags;
191
7a6aedfa 192 if (cpu >= nr_cpu_ids)
1da177e4
LT
193 goto err_out;
194
195 /* get the cpufreq driver */
1c3d85dd 196 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 197
1c3d85dd 198 if (!cpufreq_driver)
1da177e4
LT
199 goto err_out_unlock;
200
1c3d85dd 201 if (!try_module_get(cpufreq_driver->owner))
1da177e4
LT
202 goto err_out_unlock;
203
1da177e4 204 /* get the CPU */
7a6aedfa 205 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
206
207 if (!data)
208 goto err_out_put_module;
209
a9144436 210 if (!sysfs && !kobject_get(&data->kobj))
1da177e4
LT
211 goto err_out_put_module;
212
0d1857a1 213 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
214 return data;
215
7d5e350f 216err_out_put_module:
1c3d85dd 217 module_put(cpufreq_driver->owner);
5800043b 218err_out_unlock:
1c3d85dd 219 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
7d5e350f 220err_out:
1da177e4
LT
221 return NULL;
222}
a9144436
SB
223
224struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
225{
d5aaffa9
DB
226 if (cpufreq_disabled())
227 return NULL;
228
a9144436
SB
229 return __cpufreq_cpu_get(cpu, false);
230}
1da177e4
LT
231EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
232
a9144436
SB
233static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
234{
235 return __cpufreq_cpu_get(cpu, true);
236}
237
238static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
239{
240 if (!sysfs)
241 kobject_put(&data->kobj);
1c3d85dd 242 module_put(cpufreq_driver->owner);
a9144436 243}
7d5e350f 244
1da177e4
LT
245void cpufreq_cpu_put(struct cpufreq_policy *data)
246{
d5aaffa9
DB
247 if (cpufreq_disabled())
248 return;
249
a9144436 250 __cpufreq_cpu_put(data, false);
1da177e4
LT
251}
252EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
253
a9144436
SB
254static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
255{
256 __cpufreq_cpu_put(data, true);
257}
1da177e4 258
1da177e4
LT
259/*********************************************************************
260 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
261 *********************************************************************/
262
263/**
264 * adjust_jiffies - adjust the system "loops_per_jiffy"
265 *
266 * This function alters the system "loops_per_jiffy" for the clock
267 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 268 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
269 * per-CPU loops_per_jiffy value wherever possible.
270 */
271#ifndef CONFIG_SMP
272static unsigned long l_p_j_ref;
bb176f7d 273static unsigned int l_p_j_ref_freq;
1da177e4 274
858119e1 275static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
276{
277 if (ci->flags & CPUFREQ_CONST_LOOPS)
278 return;
279
280 if (!l_p_j_ref_freq) {
281 l_p_j_ref = loops_per_jiffy;
282 l_p_j_ref_freq = ci->old;
2d06d8c4 283 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 284 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 285 }
bb176f7d 286 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 287 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
288 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
289 ci->new);
2d06d8c4 290 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 291 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
292 }
293}
294#else
e08f5f5b
GS
295static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
296{
297 return;
298}
1da177e4
LT
299#endif
300
0956df9c 301static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
b43a7ffb 302 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
303{
304 BUG_ON(irqs_disabled());
305
d5aaffa9
DB
306 if (cpufreq_disabled())
307 return;
308
1c3d85dd 309 freqs->flags = cpufreq_driver->flags;
2d06d8c4 310 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 311 state, freqs->new);
1da177e4 312
1da177e4 313 switch (state) {
e4472cb3 314
1da177e4 315 case CPUFREQ_PRECHANGE:
266c13d7
VK
316 if (WARN(policy->transition_ongoing ==
317 cpumask_weight(policy->cpus),
7c30ed53
VK
318 "In middle of another frequency transition\n"))
319 return;
320
266c13d7 321 policy->transition_ongoing++;
7c30ed53 322
32ee8c3e 323 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
324 * which is not equal to what the cpufreq core thinks is
325 * "old frequency".
1da177e4 326 */
1c3d85dd 327 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
328 if ((policy) && (policy->cpu == freqs->cpu) &&
329 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 330 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
331 " %u, cpufreq assumed %u kHz.\n",
332 freqs->old, policy->cur);
333 freqs->old = policy->cur;
1da177e4
LT
334 }
335 }
b4dfdbb3 336 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 337 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
338 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
339 break;
e4472cb3 340
1da177e4 341 case CPUFREQ_POSTCHANGE:
7c30ed53
VK
342 if (WARN(!policy->transition_ongoing,
343 "No frequency transition in progress\n"))
344 return;
345
266c13d7 346 policy->transition_ongoing--;
7c30ed53 347
1da177e4 348 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 349 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 350 (unsigned long)freqs->cpu);
25e41933 351 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 352 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 353 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
354 if (likely(policy) && likely(policy->cpu == freqs->cpu))
355 policy->cur = freqs->new;
1da177e4
LT
356 break;
357 }
1da177e4 358}
bb176f7d 359
b43a7ffb
VK
360/**
361 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
362 * on frequency transition.
363 *
364 * This function calls the transition notifiers and the "adjust_jiffies"
365 * function. It is called twice on all CPU frequency changes that have
366 * external effects.
367 */
368void cpufreq_notify_transition(struct cpufreq_policy *policy,
369 struct cpufreq_freqs *freqs, unsigned int state)
370{
371 for_each_cpu(freqs->cpu, policy->cpus)
372 __cpufreq_notify_transition(policy, freqs, state);
373}
1da177e4
LT
374EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
375
376
1da177e4
LT
377/*********************************************************************
378 * SYSFS INTERFACE *
379 *********************************************************************/
380
3bcb09a3
JF
381static struct cpufreq_governor *__find_governor(const char *str_governor)
382{
383 struct cpufreq_governor *t;
384
385 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 386 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
387 return t;
388
389 return NULL;
390}
391
1da177e4
LT
392/**
393 * cpufreq_parse_governor - parse a governor string
394 */
905d77cd 395static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
396 struct cpufreq_governor **governor)
397{
3bcb09a3 398 int err = -EINVAL;
1c3d85dd
RW
399
400 if (!cpufreq_driver)
3bcb09a3
JF
401 goto out;
402
1c3d85dd 403 if (cpufreq_driver->setpolicy) {
1da177e4
LT
404 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
405 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 406 err = 0;
e08f5f5b
GS
407 } else if (!strnicmp(str_governor, "powersave",
408 CPUFREQ_NAME_LEN)) {
1da177e4 409 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 410 err = 0;
1da177e4 411 }
1c3d85dd 412 } else if (cpufreq_driver->target) {
1da177e4 413 struct cpufreq_governor *t;
3bcb09a3 414
3fc54d37 415 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
416
417 t = __find_governor(str_governor);
418
ea714970 419 if (t == NULL) {
1a8e1463 420 int ret;
ea714970 421
1a8e1463
KC
422 mutex_unlock(&cpufreq_governor_mutex);
423 ret = request_module("cpufreq_%s", str_governor);
424 mutex_lock(&cpufreq_governor_mutex);
ea714970 425
1a8e1463
KC
426 if (ret == 0)
427 t = __find_governor(str_governor);
ea714970
JF
428 }
429
3bcb09a3
JF
430 if (t != NULL) {
431 *governor = t;
432 err = 0;
1da177e4 433 }
3bcb09a3 434
3fc54d37 435 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 436 }
29464f28 437out:
3bcb09a3 438 return err;
1da177e4 439}
1da177e4 440
1da177e4 441/**
e08f5f5b
GS
442 * cpufreq_per_cpu_attr_read() / show_##file_name() -
443 * print out cpufreq information
1da177e4
LT
444 *
445 * Write out information from cpufreq_driver->policy[cpu]; object must be
446 * "unsigned int".
447 */
448
32ee8c3e
DJ
449#define show_one(file_name, object) \
450static ssize_t show_##file_name \
905d77cd 451(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 452{ \
29464f28 453 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
454}
455
456show_one(cpuinfo_min_freq, cpuinfo.min_freq);
457show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 458show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
459show_one(scaling_min_freq, min);
460show_one(scaling_max_freq, max);
461show_one(scaling_cur_freq, cur);
462
e08f5f5b
GS
463static int __cpufreq_set_policy(struct cpufreq_policy *data,
464 struct cpufreq_policy *policy);
7970e08b 465
1da177e4
LT
466/**
467 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
468 */
469#define store_one(file_name, object) \
470static ssize_t store_##file_name \
905d77cd 471(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 472{ \
f55c9c26 473 unsigned int ret; \
1da177e4
LT
474 struct cpufreq_policy new_policy; \
475 \
476 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
477 if (ret) \
478 return -EINVAL; \
479 \
29464f28 480 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
481 if (ret != 1) \
482 return -EINVAL; \
483 \
7970e08b
TR
484 ret = __cpufreq_set_policy(policy, &new_policy); \
485 policy->user_policy.object = policy->object; \
1da177e4
LT
486 \
487 return ret ? ret : count; \
488}
489
29464f28
DJ
490store_one(scaling_min_freq, min);
491store_one(scaling_max_freq, max);
1da177e4
LT
492
493/**
494 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
495 */
905d77cd
DJ
496static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
497 char *buf)
1da177e4 498{
5a01f2e8 499 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
500 if (!cur_freq)
501 return sprintf(buf, "<unknown>");
502 return sprintf(buf, "%u\n", cur_freq);
503}
504
1da177e4
LT
505/**
506 * show_scaling_governor - show the current policy for the specified CPU
507 */
905d77cd 508static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 509{
29464f28 510 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
511 return sprintf(buf, "powersave\n");
512 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
513 return sprintf(buf, "performance\n");
514 else if (policy->governor)
4b972f0b 515 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 516 policy->governor->name);
1da177e4
LT
517 return -EINVAL;
518}
519
1da177e4
LT
520/**
521 * store_scaling_governor - store policy for the specified CPU
522 */
905d77cd
DJ
523static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
524 const char *buf, size_t count)
1da177e4 525{
f55c9c26 526 unsigned int ret;
1da177e4
LT
527 char str_governor[16];
528 struct cpufreq_policy new_policy;
529
530 ret = cpufreq_get_policy(&new_policy, policy->cpu);
531 if (ret)
532 return ret;
533
29464f28 534 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
535 if (ret != 1)
536 return -EINVAL;
537
e08f5f5b
GS
538 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
539 &new_policy.governor))
1da177e4
LT
540 return -EINVAL;
541
bb176f7d
VK
542 /*
543 * Do not use cpufreq_set_policy here or the user_policy.max
544 * will be wrongly overridden
545 */
7970e08b
TR
546 ret = __cpufreq_set_policy(policy, &new_policy);
547
548 policy->user_policy.policy = policy->policy;
549 policy->user_policy.governor = policy->governor;
7970e08b 550
e08f5f5b
GS
551 if (ret)
552 return ret;
553 else
554 return count;
1da177e4
LT
555}
556
557/**
558 * show_scaling_driver - show the cpufreq driver currently loaded
559 */
905d77cd 560static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 561{
1c3d85dd 562 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
563}
564
565/**
566 * show_scaling_available_governors - show the available CPUfreq governors
567 */
905d77cd
DJ
568static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
569 char *buf)
1da177e4
LT
570{
571 ssize_t i = 0;
572 struct cpufreq_governor *t;
573
1c3d85dd 574 if (!cpufreq_driver->target) {
1da177e4
LT
575 i += sprintf(buf, "performance powersave");
576 goto out;
577 }
578
579 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
580 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
581 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 582 goto out;
4b972f0b 583 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 584 }
7d5e350f 585out:
1da177e4
LT
586 i += sprintf(&buf[i], "\n");
587 return i;
588}
e8628dd0 589
f4fd3797 590ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
591{
592 ssize_t i = 0;
593 unsigned int cpu;
594
835481d9 595 for_each_cpu(cpu, mask) {
1da177e4
LT
596 if (i)
597 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
598 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
599 if (i >= (PAGE_SIZE - 5))
29464f28 600 break;
1da177e4
LT
601 }
602 i += sprintf(&buf[i], "\n");
603 return i;
604}
f4fd3797 605EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 606
e8628dd0
DW
607/**
608 * show_related_cpus - show the CPUs affected by each transition even if
609 * hw coordination is in use
610 */
611static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
612{
f4fd3797 613 return cpufreq_show_cpus(policy->related_cpus, buf);
e8628dd0
DW
614}
615
616/**
617 * show_affected_cpus - show the CPUs affected by each transition
618 */
619static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
620{
f4fd3797 621 return cpufreq_show_cpus(policy->cpus, buf);
e8628dd0
DW
622}
623
9e76988e 624static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 625 const char *buf, size_t count)
9e76988e
VP
626{
627 unsigned int freq = 0;
628 unsigned int ret;
629
879000f9 630 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
631 return -EINVAL;
632
633 ret = sscanf(buf, "%u", &freq);
634 if (ret != 1)
635 return -EINVAL;
636
637 policy->governor->store_setspeed(policy, freq);
638
639 return count;
640}
641
642static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
643{
879000f9 644 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
645 return sprintf(buf, "<unsupported>\n");
646
647 return policy->governor->show_setspeed(policy, buf);
648}
1da177e4 649
e2f74f35 650/**
8bf1ac72 651 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
652 */
653static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
654{
655 unsigned int limit;
656 int ret;
1c3d85dd
RW
657 if (cpufreq_driver->bios_limit) {
658 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
659 if (!ret)
660 return sprintf(buf, "%u\n", limit);
661 }
662 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
663}
664
6dad2a29
BP
665cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
666cpufreq_freq_attr_ro(cpuinfo_min_freq);
667cpufreq_freq_attr_ro(cpuinfo_max_freq);
668cpufreq_freq_attr_ro(cpuinfo_transition_latency);
669cpufreq_freq_attr_ro(scaling_available_governors);
670cpufreq_freq_attr_ro(scaling_driver);
671cpufreq_freq_attr_ro(scaling_cur_freq);
672cpufreq_freq_attr_ro(bios_limit);
673cpufreq_freq_attr_ro(related_cpus);
674cpufreq_freq_attr_ro(affected_cpus);
675cpufreq_freq_attr_rw(scaling_min_freq);
676cpufreq_freq_attr_rw(scaling_max_freq);
677cpufreq_freq_attr_rw(scaling_governor);
678cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 679
905d77cd 680static struct attribute *default_attrs[] = {
1da177e4
LT
681 &cpuinfo_min_freq.attr,
682 &cpuinfo_max_freq.attr,
ed129784 683 &cpuinfo_transition_latency.attr,
1da177e4
LT
684 &scaling_min_freq.attr,
685 &scaling_max_freq.attr,
686 &affected_cpus.attr,
e8628dd0 687 &related_cpus.attr,
1da177e4
LT
688 &scaling_governor.attr,
689 &scaling_driver.attr,
690 &scaling_available_governors.attr,
9e76988e 691 &scaling_setspeed.attr,
1da177e4
LT
692 NULL
693};
694
29464f28
DJ
695#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
696#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 697
29464f28 698static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 699{
905d77cd
DJ
700 struct cpufreq_policy *policy = to_policy(kobj);
701 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 702 ssize_t ret = -EINVAL;
a9144436 703 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 704 if (!policy)
0db4a8a9 705 goto no_policy;
5a01f2e8
VP
706
707 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 708 goto fail;
5a01f2e8 709
e08f5f5b
GS
710 if (fattr->show)
711 ret = fattr->show(policy, buf);
712 else
713 ret = -EIO;
714
5a01f2e8 715 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 716fail:
a9144436 717 cpufreq_cpu_put_sysfs(policy);
0db4a8a9 718no_policy:
1da177e4
LT
719 return ret;
720}
721
905d77cd
DJ
722static ssize_t store(struct kobject *kobj, struct attribute *attr,
723 const char *buf, size_t count)
1da177e4 724{
905d77cd
DJ
725 struct cpufreq_policy *policy = to_policy(kobj);
726 struct freq_attr *fattr = to_attr(attr);
a07530b4 727 ssize_t ret = -EINVAL;
a9144436 728 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 729 if (!policy)
a07530b4 730 goto no_policy;
5a01f2e8
VP
731
732 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 733 goto fail;
5a01f2e8 734
e08f5f5b
GS
735 if (fattr->store)
736 ret = fattr->store(policy, buf, count);
737 else
738 ret = -EIO;
739
5a01f2e8 740 unlock_policy_rwsem_write(policy->cpu);
a07530b4 741fail:
a9144436 742 cpufreq_cpu_put_sysfs(policy);
a07530b4 743no_policy:
1da177e4
LT
744 return ret;
745}
746
905d77cd 747static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 748{
905d77cd 749 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 750 pr_debug("last reference is dropped\n");
1da177e4
LT
751 complete(&policy->kobj_unregister);
752}
753
52cf25d0 754static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
755 .show = show,
756 .store = store,
757};
758
759static struct kobj_type ktype_cpufreq = {
760 .sysfs_ops = &sysfs_ops,
761 .default_attrs = default_attrs,
762 .release = cpufreq_sysfs_release,
763};
764
2361be23
VK
765struct kobject *cpufreq_global_kobject;
766EXPORT_SYMBOL(cpufreq_global_kobject);
767
768static int cpufreq_global_kobject_usage;
769
770int cpufreq_get_global_kobject(void)
771{
772 if (!cpufreq_global_kobject_usage++)
773 return kobject_add(cpufreq_global_kobject,
774 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
775
776 return 0;
777}
778EXPORT_SYMBOL(cpufreq_get_global_kobject);
779
780void cpufreq_put_global_kobject(void)
781{
782 if (!--cpufreq_global_kobject_usage)
783 kobject_del(cpufreq_global_kobject);
784}
785EXPORT_SYMBOL(cpufreq_put_global_kobject);
786
787int cpufreq_sysfs_create_file(const struct attribute *attr)
788{
789 int ret = cpufreq_get_global_kobject();
790
791 if (!ret) {
792 ret = sysfs_create_file(cpufreq_global_kobject, attr);
793 if (ret)
794 cpufreq_put_global_kobject();
795 }
796
797 return ret;
798}
799EXPORT_SYMBOL(cpufreq_sysfs_create_file);
800
801void cpufreq_sysfs_remove_file(const struct attribute *attr)
802{
803 sysfs_remove_file(cpufreq_global_kobject, attr);
804 cpufreq_put_global_kobject();
805}
806EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
807
19d6f7ec 808/* symlink affected CPUs */
308b60e7 809static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
19d6f7ec
DJ
810{
811 unsigned int j;
812 int ret = 0;
813
814 for_each_cpu(j, policy->cpus) {
8a25a2fd 815 struct device *cpu_dev;
19d6f7ec 816
308b60e7 817 if (j == policy->cpu)
19d6f7ec 818 continue;
19d6f7ec 819
e8fdde10 820 pr_debug("Adding link for CPU: %u\n", j);
308b60e7 821 cpufreq_cpu_get(policy->cpu);
8a25a2fd
KS
822 cpu_dev = get_cpu_device(j);
823 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec
DJ
824 "cpufreq");
825 if (ret) {
e8fdde10 826 cpufreq_cpu_put(policy);
19d6f7ec
DJ
827 return ret;
828 }
829 }
830 return ret;
831}
832
308b60e7 833static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
8a25a2fd 834 struct device *dev)
909a694e
DJ
835{
836 struct freq_attr **drv_attr;
909a694e 837 int ret = 0;
909a694e
DJ
838
839 /* prepare interface data */
840 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 841 &dev->kobj, "cpufreq");
909a694e
DJ
842 if (ret)
843 return ret;
844
845 /* set up files for this cpu device */
1c3d85dd 846 drv_attr = cpufreq_driver->attr;
909a694e
DJ
847 while ((drv_attr) && (*drv_attr)) {
848 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
849 if (ret)
1c3d85dd 850 goto err_out_kobj_put;
909a694e
DJ
851 drv_attr++;
852 }
1c3d85dd 853 if (cpufreq_driver->get) {
909a694e
DJ
854 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
855 if (ret)
1c3d85dd 856 goto err_out_kobj_put;
909a694e 857 }
1c3d85dd 858 if (cpufreq_driver->target) {
909a694e
DJ
859 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
860 if (ret)
1c3d85dd 861 goto err_out_kobj_put;
909a694e 862 }
1c3d85dd 863 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
864 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
865 if (ret)
1c3d85dd 866 goto err_out_kobj_put;
e2f74f35 867 }
909a694e 868
308b60e7 869 ret = cpufreq_add_dev_symlink(policy);
ecf7e461
DJ
870 if (ret)
871 goto err_out_kobj_put;
872
e18f1682
SB
873 return ret;
874
875err_out_kobj_put:
876 kobject_put(&policy->kobj);
877 wait_for_completion(&policy->kobj_unregister);
878 return ret;
879}
880
881static void cpufreq_init_policy(struct cpufreq_policy *policy)
882{
883 struct cpufreq_policy new_policy;
884 int ret = 0;
885
ecf7e461
DJ
886 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
887 /* assure that the starting sequence is run in __cpufreq_set_policy */
888 policy->governor = NULL;
889
890 /* set default policy */
891 ret = __cpufreq_set_policy(policy, &new_policy);
892 policy->user_policy.policy = policy->policy;
893 policy->user_policy.governor = policy->governor;
894
895 if (ret) {
2d06d8c4 896 pr_debug("setting policy failed\n");
1c3d85dd
RW
897 if (cpufreq_driver->exit)
898 cpufreq_driver->exit(policy);
ecf7e461 899 }
909a694e
DJ
900}
901
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach @cpu to the policy already managing @sibling: stop the governor,
 * publish the new CPU in the policy and per-cpu tables under the write
 * lock, restart the governor, and (unless @frozen) create the sysfs link.
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev, bool frozen)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	/* Don't touch sysfs links during light-weight init */
	if (frozen) {
		/* Drop the extra refcount that we took above */
		cpufreq_cpu_put(policy);
		return 0;
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret)
		cpufreq_cpu_put(policy);

	return ret;
}
#endif
1da177e4 946
8414809c
SB
947static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
948{
949 struct cpufreq_policy *policy;
950 unsigned long flags;
951
952 write_lock_irqsave(&cpufreq_driver_lock, flags);
953
954 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
955
956 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
957
958 return policy;
959}
960
e9698cc5
SB
961static struct cpufreq_policy *cpufreq_policy_alloc(void)
962{
963 struct cpufreq_policy *policy;
964
965 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
966 if (!policy)
967 return NULL;
968
969 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
970 goto err_free_policy;
971
972 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
973 goto err_free_cpumask;
974
975 return policy;
976
977err_free_cpumask:
978 free_cpumask_var(policy->cpus);
979err_free_policy:
980 kfree(policy);
981
982 return NULL;
983}
984
985static void cpufreq_policy_free(struct cpufreq_policy *policy)
986{
987 free_cpumask_var(policy->related_cpus);
988 free_cpumask_var(policy->cpus);
989 kfree(policy);
990}
991
a82fab29
SB
992static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
993 bool frozen)
1da177e4 994{
fcf80582 995 unsigned int j, cpu = dev->id;
65922465 996 int ret = -ENOMEM;
1da177e4 997 struct cpufreq_policy *policy;
1da177e4 998 unsigned long flags;
90e41bac 999#ifdef CONFIG_HOTPLUG_CPU
fcf80582 1000 struct cpufreq_governor *gov;
90e41bac
PB
1001 int sibling;
1002#endif
1da177e4 1003
c32b6b8e
AR
1004 if (cpu_is_offline(cpu))
1005 return 0;
1006
2d06d8c4 1007 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
1008
1009#ifdef CONFIG_SMP
1010 /* check whether a different CPU already registered this
1011 * CPU because it is in the same boat. */
1012 policy = cpufreq_cpu_get(cpu);
1013 if (unlikely(policy)) {
8ff69732 1014 cpufreq_cpu_put(policy);
1da177e4
LT
1015 return 0;
1016 }
fcf80582
VK
1017
1018#ifdef CONFIG_HOTPLUG_CPU
1019 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 1020 read_lock_irqsave(&cpufreq_driver_lock, flags);
fcf80582
VK
1021 for_each_online_cpu(sibling) {
1022 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
2eaa3e2d 1023 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
0d1857a1 1024 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
a82fab29
SB
1025 return cpufreq_add_policy_cpu(cpu, sibling, dev,
1026 frozen);
2eaa3e2d 1027 }
fcf80582 1028 }
0d1857a1 1029 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 1030#endif
1da177e4
LT
1031#endif
1032
1c3d85dd 1033 if (!try_module_get(cpufreq_driver->owner)) {
1da177e4
LT
1034 ret = -EINVAL;
1035 goto module_out;
1036 }
1037
8414809c
SB
1038 if (frozen)
1039 /* Restore the saved policy when doing light-weight init */
1040 policy = cpufreq_policy_restore(cpu);
1041 else
1042 policy = cpufreq_policy_alloc();
1043
059019a3 1044 if (!policy)
1da177e4 1045 goto nomem_out;
059019a3 1046
1da177e4 1047 policy->cpu = cpu;
65922465 1048 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 1049 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1050
5a01f2e8 1051 /* Initially set CPU itself as the policy_cpu */
f1625066 1052 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 1053
1da177e4 1054 init_completion(&policy->kobj_unregister);
65f27f38 1055 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
1056
1057 /* call driver. From then on the cpufreq must be able
1058 * to accept all calls to ->verify and ->setpolicy for this CPU
1059 */
1c3d85dd 1060 ret = cpufreq_driver->init(policy);
1da177e4 1061 if (ret) {
2d06d8c4 1062 pr_debug("initialization failed\n");
2eaa3e2d 1063 goto err_set_policy_cpu;
1da177e4 1064 }
643ae6e8 1065
fcf80582
VK
1066 /* related cpus should atleast have policy->cpus */
1067 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1068
643ae6e8
VK
1069 /*
1070 * affected cpus must always be the one, which are online. We aren't
1071 * managing offline cpus here.
1072 */
1073 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1074
187d9f4e
MC
1075 policy->user_policy.min = policy->min;
1076 policy->user_policy.max = policy->max;
1da177e4 1077
a1531acd
TR
1078 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1079 CPUFREQ_START, policy);
1080
fcf80582
VK
1081#ifdef CONFIG_HOTPLUG_CPU
1082 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1083 if (gov) {
1084 policy->governor = gov;
1085 pr_debug("Restoring governor %s for cpu %d\n",
1086 policy->governor->name, cpu);
4bfa042c 1087 }
fcf80582 1088#endif
1da177e4 1089
e18f1682
SB
1090 write_lock_irqsave(&cpufreq_driver_lock, flags);
1091 for_each_cpu(j, policy->cpus) {
1092 per_cpu(cpufreq_cpu_data, j) = policy;
1093 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
1094 }
1095 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1096
a82fab29 1097 if (!frozen) {
308b60e7 1098 ret = cpufreq_add_dev_interface(policy, dev);
a82fab29
SB
1099 if (ret)
1100 goto err_out_unregister;
1101 }
8ff69732 1102
e18f1682
SB
1103 cpufreq_init_policy(policy);
1104
038c5b3e 1105 kobject_uevent(&policy->kobj, KOBJ_ADD);
1c3d85dd 1106 module_put(cpufreq_driver->owner);
2d06d8c4 1107 pr_debug("initialization complete\n");
87c32271 1108
1da177e4
LT
1109 return 0;
1110
1da177e4 1111err_out_unregister:
0d1857a1 1112 write_lock_irqsave(&cpufreq_driver_lock, flags);
e18f1682 1113 for_each_cpu(j, policy->cpus) {
7a6aedfa 1114 per_cpu(cpufreq_cpu_data, j) = NULL;
e18f1682
SB
1115 if (j != cpu)
1116 per_cpu(cpufreq_policy_cpu, j) = -1;
1117 }
0d1857a1 1118 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1119
c10997f6 1120 kobject_put(&policy->kobj);
1da177e4
LT
1121 wait_for_completion(&policy->kobj_unregister);
1122
2eaa3e2d
VK
1123err_set_policy_cpu:
1124 per_cpu(cpufreq_policy_cpu, cpu) = -1;
e9698cc5 1125 cpufreq_policy_free(policy);
1da177e4 1126nomem_out:
1c3d85dd 1127 module_put(cpufreq_driver->owner);
c32b6b8e 1128module_out:
1da177e4
LT
1129 return ret;
1130}
1131
a82fab29
SB
1132/**
1133 * cpufreq_add_dev - add a CPU device
1134 *
1135 * Adds the cpufreq interface for a CPU device.
1136 *
1137 * The Oracle says: try running cpufreq registration/unregistration concurrently
1138 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1139 * mess up, but more thorough testing is needed. - Mathieu
1140 */
1141static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1142{
1143 return __cpufreq_add_dev(dev, sif, false);
1144}
1145
b8eed8af
VK
1146static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1147{
1148 int j;
1149
1150 policy->last_cpu = policy->cpu;
1151 policy->cpu = cpu;
1152
3361b7b1 1153 for_each_cpu(j, policy->cpus)
b8eed8af 1154 per_cpu(cpufreq_policy_cpu, j) = cpu;
b8eed8af
VK
1155
1156#ifdef CONFIG_CPU_FREQ_TABLE
1157 cpufreq_frequency_table_update_policy_cpu(policy);
1158#endif
1159 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1160 CPUFREQ_UPDATE_POLICY_CPU, policy);
1161}
1da177e4 1162
f9ba680d 1163static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *data,
a82fab29 1164 unsigned int old_cpu, bool frozen)
f9ba680d
SB
1165{
1166 struct device *cpu_dev;
1167 unsigned long flags;
1168 int ret;
1169
1170 /* first sibling now owns the new sysfs dir */
1171 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
a82fab29
SB
1172
1173 /* Don't touch sysfs files during light-weight tear-down */
1174 if (frozen)
1175 return cpu_dev->id;
1176
f9ba680d
SB
1177 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1178 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1179 if (ret) {
1180 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1181
1182 WARN_ON(lock_policy_rwsem_write(old_cpu));
1183 cpumask_set_cpu(old_cpu, data->cpus);
1184
1185 write_lock_irqsave(&cpufreq_driver_lock, flags);
1186 per_cpu(cpufreq_cpu_data, old_cpu) = data;
1187 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1188
1189 unlock_policy_rwsem_write(old_cpu);
1190
1191 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1192 "cpufreq");
1193
1194 return -EINVAL;
1195 }
1196
1197 return cpu_dev->id;
1198}
1199
1da177e4 1200/**
5a01f2e8 1201 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1202 *
1203 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1204 * Caller should already have policy_rwsem in write mode for this CPU.
1205 * This routine frees the rwsem before returning.
1da177e4 1206 */
bb176f7d 1207static int __cpufreq_remove_dev(struct device *dev,
a82fab29 1208 struct subsys_interface *sif, bool frozen)
1da177e4 1209{
f9ba680d
SB
1210 unsigned int cpu = dev->id, cpus;
1211 int new_cpu;
1da177e4
LT
1212 unsigned long flags;
1213 struct cpufreq_policy *data;
499bca9b
AW
1214 struct kobject *kobj;
1215 struct completion *cmp;
1da177e4 1216
b8eed8af 1217 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1218
0d1857a1 1219 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1220
7a6aedfa 1221 data = per_cpu(cpufreq_cpu_data, cpu);
2eaa3e2d
VK
1222 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1223
8414809c
SB
1224 /* Save the policy somewhere when doing a light-weight tear-down */
1225 if (frozen)
1226 per_cpu(cpufreq_cpu_data_fallback, cpu) = data;
1227
0d1857a1 1228 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1229
1230 if (!data) {
b8eed8af 1231 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1232 return -EINVAL;
1233 }
1da177e4 1234
1c3d85dd 1235 if (cpufreq_driver->target)
f6a7409c 1236 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1da177e4 1237
084f3493 1238#ifdef CONFIG_HOTPLUG_CPU
1c3d85dd 1239 if (!cpufreq_driver->setpolicy)
fa69e33f
DB
1240 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1241 data->governor->name, CPUFREQ_NAME_LEN);
1da177e4
LT
1242#endif
1243
2eaa3e2d 1244 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1245 cpus = cpumask_weight(data->cpus);
e4969eba
VK
1246
1247 if (cpus > 1)
1248 cpumask_clear_cpu(cpu, data->cpus);
2eaa3e2d 1249 unlock_policy_rwsem_write(cpu);
084f3493 1250
a82fab29 1251 if (cpu != data->cpu && !frozen) {
73bf0fc2
VK
1252 sysfs_remove_link(&dev->kobj, "cpufreq");
1253 } else if (cpus > 1) {
084f3493 1254
a82fab29 1255 new_cpu = cpufreq_nominate_new_policy_cpu(data, cpu, frozen);
f9ba680d 1256 if (new_cpu >= 0) {
2eaa3e2d 1257 WARN_ON(lock_policy_rwsem_write(cpu));
f9ba680d 1258 update_policy_cpu(data, new_cpu);
499bca9b 1259 unlock_policy_rwsem_write(cpu);
a82fab29
SB
1260
1261 if (!frozen) {
1262 pr_debug("%s: policy Kobject moved to cpu: %d "
1263 "from: %d\n",__func__, new_cpu, cpu);
1264 }
1da177e4
LT
1265 }
1266 }
1da177e4 1267
b8eed8af
VK
1268 /* If cpu is last user of policy, free policy */
1269 if (cpus == 1) {
2a998599
RW
1270 if (cpufreq_driver->target)
1271 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1272
8414809c
SB
1273 if (!frozen) {
1274 lock_policy_rwsem_read(cpu);
1275 kobj = &data->kobj;
1276 cmp = &data->kobj_unregister;
1277 unlock_policy_rwsem_read(cpu);
1278 kobject_put(kobj);
1279
1280 /*
1281 * We need to make sure that the underlying kobj is
1282 * actually not referenced anymore by anybody before we
1283 * proceed with unloading.
1284 */
1285 pr_debug("waiting for dropping of refcount\n");
1286 wait_for_completion(cmp);
1287 pr_debug("wait complete\n");
1288 }
7d26e2d5 1289
8414809c
SB
1290 /*
1291 * Perform the ->exit() even during light-weight tear-down,
1292 * since this is a core component, and is essential for the
1293 * subsequent light-weight ->init() to succeed.
b8eed8af 1294 */
1c3d85dd
RW
1295 if (cpufreq_driver->exit)
1296 cpufreq_driver->exit(data);
27ecddc2 1297
8414809c
SB
1298 if (!frozen)
1299 cpufreq_policy_free(data);
2a998599 1300 } else {
8414809c
SB
1301
1302 if (!frozen) {
1303 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1304 cpufreq_cpu_put(data);
1305 }
1306
2a998599
RW
1307 if (cpufreq_driver->target) {
1308 __cpufreq_governor(data, CPUFREQ_GOV_START);
1309 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1310 }
27ecddc2 1311 }
1da177e4 1312
2eaa3e2d 1313 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1da177e4
LT
1314 return 0;
1315}
1316
8a25a2fd 1317static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1318{
8a25a2fd 1319 unsigned int cpu = dev->id;
5a01f2e8 1320 int retval;
ec28297a
VP
1321
1322 if (cpu_is_offline(cpu))
1323 return 0;
1324
a82fab29 1325 retval = __cpufreq_remove_dev(dev, sif, false);
5a01f2e8
VP
1326 return retval;
1327}
1328
65f27f38 1329static void handle_update(struct work_struct *work)
1da177e4 1330{
65f27f38
DH
1331 struct cpufreq_policy *policy =
1332 container_of(work, struct cpufreq_policy, update);
1333 unsigned int cpu = policy->cpu;
2d06d8c4 1334 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1335 cpufreq_update_policy(cpu);
1336}
1337
1338/**
bb176f7d
VK
1339 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1340 * in deep trouble.
1da177e4
LT
1341 * @cpu: cpu number
1342 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1343 * @new_freq: CPU frequency the CPU actually runs at
1344 *
29464f28
DJ
1345 * We adjust to current frequency first, and need to clean up later.
1346 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1347 */
e08f5f5b
GS
1348static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1349 unsigned int new_freq)
1da177e4 1350{
b43a7ffb 1351 struct cpufreq_policy *policy;
1da177e4 1352 struct cpufreq_freqs freqs;
b43a7ffb
VK
1353 unsigned long flags;
1354
2d06d8c4 1355 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1356 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1357
1da177e4
LT
1358 freqs.old = old_freq;
1359 freqs.new = new_freq;
b43a7ffb
VK
1360
1361 read_lock_irqsave(&cpufreq_driver_lock, flags);
1362 policy = per_cpu(cpufreq_cpu_data, cpu);
1363 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1364
1365 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1366 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1367}
1368
32ee8c3e 1369/**
4ab70df4 1370 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1371 * @cpu: CPU number
1372 *
1373 * This is the last known freq, without actually getting it from the driver.
1374 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1375 */
1376unsigned int cpufreq_quick_get(unsigned int cpu)
1377{
9e21ba8b 1378 struct cpufreq_policy *policy;
e08f5f5b 1379 unsigned int ret_freq = 0;
95235ca2 1380
1c3d85dd
RW
1381 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1382 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1383
1384 policy = cpufreq_cpu_get(cpu);
95235ca2 1385 if (policy) {
e08f5f5b 1386 ret_freq = policy->cur;
95235ca2
VP
1387 cpufreq_cpu_put(policy);
1388 }
1389
4d34a67d 1390 return ret_freq;
95235ca2
VP
1391}
1392EXPORT_SYMBOL(cpufreq_quick_get);
1393
3d737108
JB
1394/**
1395 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1396 * @cpu: CPU number
1397 *
1398 * Just return the max possible frequency for a given CPU.
1399 */
1400unsigned int cpufreq_quick_get_max(unsigned int cpu)
1401{
1402 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1403 unsigned int ret_freq = 0;
1404
1405 if (policy) {
1406 ret_freq = policy->max;
1407 cpufreq_cpu_put(policy);
1408 }
1409
1410 return ret_freq;
1411}
1412EXPORT_SYMBOL(cpufreq_quick_get_max);
1413
5a01f2e8 1414static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1415{
7a6aedfa 1416 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1417 unsigned int ret_freq = 0;
5800043b 1418
1c3d85dd 1419 if (!cpufreq_driver->get)
4d34a67d 1420 return ret_freq;
1da177e4 1421
1c3d85dd 1422 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1423
e08f5f5b 1424 if (ret_freq && policy->cur &&
1c3d85dd 1425 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1426 /* verify no discrepancy between actual and
1427 saved value exists */
1428 if (unlikely(ret_freq != policy->cur)) {
1429 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1430 schedule_work(&policy->update);
1431 }
1432 }
1433
4d34a67d 1434 return ret_freq;
5a01f2e8 1435}
1da177e4 1436
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int freq = 0;

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1464
8a25a2fd
KS
1465static struct subsys_interface cpufreq_interface = {
1466 .name = "cpufreq",
1467 .subsys = &cpu_subsys,
1468 .add_dev = cpufreq_add_dev,
1469 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1470};
1471
42d4dc3f 1472/**
e00e56df
RW
1473 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1474 *
1475 * This function is only executed for the boot processor. The other CPUs
1476 * have been put offline by means of CPU hotplug.
42d4dc3f 1477 */
e00e56df 1478static int cpufreq_bp_suspend(void)
42d4dc3f 1479{
e08f5f5b 1480 int ret = 0;
4bc5d341 1481
e00e56df 1482 int cpu = smp_processor_id();
42d4dc3f
BH
1483 struct cpufreq_policy *cpu_policy;
1484
2d06d8c4 1485 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1486
e00e56df 1487 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1488 cpu_policy = cpufreq_cpu_get(cpu);
1489 if (!cpu_policy)
e00e56df 1490 return 0;
42d4dc3f 1491
1c3d85dd
RW
1492 if (cpufreq_driver->suspend) {
1493 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1494 if (ret)
42d4dc3f
BH
1495 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1496 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1497 }
1498
42d4dc3f 1499 cpufreq_cpu_put(cpu_policy);
c9060494 1500 return ret;
42d4dc3f
BH
1501}
1502
1da177e4 1503/**
e00e56df 1504 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1505 *
1506 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1507 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1508 * restored. It will verify that the current freq is in sync with
1509 * what we believe it to be. This is a bit later than when it
1510 * should be, but nonethteless it's better than calling
1511 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1512 *
1513 * This function is only executed for the boot CPU. The other CPUs have not
1514 * been turned on yet.
1da177e4 1515 */
e00e56df 1516static void cpufreq_bp_resume(void)
1da177e4 1517{
e08f5f5b 1518 int ret = 0;
4bc5d341 1519
e00e56df 1520 int cpu = smp_processor_id();
1da177e4
LT
1521 struct cpufreq_policy *cpu_policy;
1522
2d06d8c4 1523 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1524
e00e56df 1525 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1526 cpu_policy = cpufreq_cpu_get(cpu);
1527 if (!cpu_policy)
e00e56df 1528 return;
1da177e4 1529
1c3d85dd
RW
1530 if (cpufreq_driver->resume) {
1531 ret = cpufreq_driver->resume(cpu_policy);
1da177e4
LT
1532 if (ret) {
1533 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1534 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1535 goto fail;
1da177e4
LT
1536 }
1537 }
1538
1da177e4 1539 schedule_work(&cpu_policy->update);
ce6c3997 1540
c9060494 1541fail:
1da177e4 1542 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1543}
1544
e00e56df
RW
1545static struct syscore_ops cpufreq_syscore_ops = {
1546 .suspend = cpufreq_bp_suspend,
1547 .resume = cpufreq_bp_resume,
1da177e4
LT
1548};
1549
9d95046e
BP
1550/**
1551 * cpufreq_get_current_driver - return current driver's name
1552 *
1553 * Return the name string of the currently loaded cpufreq driver
1554 * or NULL, if none.
1555 */
1556const char *cpufreq_get_current_driver(void)
1557{
1c3d85dd
RW
1558 if (cpufreq_driver)
1559 return cpufreq_driver->name;
1560
1561 return NULL;
9d95046e
BP
1562}
1563EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1564
1565/*********************************************************************
1566 * NOTIFIER LISTS INTERFACE *
1567 *********************************************************************/
1568
1569/**
1570 * cpufreq_register_notifier - register a driver with cpufreq
1571 * @nb: notifier function to register
1572 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1573 *
32ee8c3e 1574 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1575 * are notified about clock rate changes (once before and once after
1576 * the transition), or a list of drivers that are notified about
1577 * changes in cpufreq policy.
1578 *
1579 * This function may sleep, and has the same return conditions as
e041c683 1580 * blocking_notifier_chain_register.
1da177e4
LT
1581 */
1582int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1583{
1584 int ret;
1585
d5aaffa9
DB
1586 if (cpufreq_disabled())
1587 return -EINVAL;
1588
74212ca4
CEB
1589 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1590
1da177e4
LT
1591 switch (list) {
1592 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1593 ret = srcu_notifier_chain_register(
e041c683 1594 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1595 break;
1596 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1597 ret = blocking_notifier_chain_register(
1598 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1599 break;
1600 default:
1601 ret = -EINVAL;
1602 }
1da177e4
LT
1603
1604 return ret;
1605}
1606EXPORT_SYMBOL(cpufreq_register_notifier);
1607
1da177e4
LT
1608/**
1609 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1610 * @nb: notifier block to be unregistered
bb176f7d 1611 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1612 *
1613 * Remove a driver from the CPU frequency notifier list.
1614 *
1615 * This function may sleep, and has the same return conditions as
e041c683 1616 * blocking_notifier_chain_unregister.
1da177e4
LT
1617 */
1618int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1619{
1620 int ret;
1621
d5aaffa9
DB
1622 if (cpufreq_disabled())
1623 return -EINVAL;
1624
1da177e4
LT
1625 switch (list) {
1626 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1627 ret = srcu_notifier_chain_unregister(
e041c683 1628 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1629 break;
1630 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1631 ret = blocking_notifier_chain_unregister(
1632 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1633 break;
1634 default:
1635 ret = -EINVAL;
1636 }
1da177e4
LT
1637
1638 return ret;
1639}
1640EXPORT_SYMBOL(cpufreq_unregister_notifier);
1641
1642
1643/*********************************************************************
1644 * GOVERNORS *
1645 *********************************************************************/
1646
1da177e4
LT
1647int __cpufreq_driver_target(struct cpufreq_policy *policy,
1648 unsigned int target_freq,
1649 unsigned int relation)
1650{
1651 int retval = -EINVAL;
7249924e 1652 unsigned int old_target_freq = target_freq;
c32b6b8e 1653
a7b422cd
KRW
1654 if (cpufreq_disabled())
1655 return -ENODEV;
7c30ed53
VK
1656 if (policy->transition_ongoing)
1657 return -EBUSY;
a7b422cd 1658
7249924e
VK
1659 /* Make sure that target_freq is within supported range */
1660 if (target_freq > policy->max)
1661 target_freq = policy->max;
1662 if (target_freq < policy->min)
1663 target_freq = policy->min;
1664
1665 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1666 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1667
1668 if (target_freq == policy->cur)
1669 return 0;
1670
1c3d85dd
RW
1671 if (cpufreq_driver->target)
1672 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1673
1da177e4
LT
1674 return retval;
1675}
1676EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1677
1da177e4
LT
1678int cpufreq_driver_target(struct cpufreq_policy *policy,
1679 unsigned int target_freq,
1680 unsigned int relation)
1681{
f1829e4a 1682 int ret = -EINVAL;
1da177e4 1683
5a01f2e8 1684 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1685 goto fail;
1da177e4
LT
1686
1687 ret = __cpufreq_driver_target(policy, target_freq, relation);
1688
5a01f2e8 1689 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1690
f1829e4a 1691fail:
1da177e4
LT
1692 return ret;
1693}
1694EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1695
bf0b90e3 1696int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62 1697{
d5aaffa9 1698 if (cpufreq_disabled())
a262e94c 1699 return 0;
d5aaffa9 1700
1c3d85dd 1701 if (!cpufreq_driver->getavg)
0676f7f2
VK
1702 return 0;
1703
a262e94c 1704 return cpufreq_driver->getavg(policy, cpu);
dfde5d62 1705}
5a01f2e8 1706EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1707
153d7f3f 1708/*
153d7f3f
AV
1709 * when "event" is CPUFREQ_GOV_LIMITS
1710 */
1da177e4 1711
e08f5f5b
GS
1712static int __cpufreq_governor(struct cpufreq_policy *policy,
1713 unsigned int event)
1da177e4 1714{
cc993cab 1715 int ret;
6afde10c
TR
1716
1717 /* Only must be defined when default governor is known to have latency
1718 restrictions, like e.g. conservative or ondemand.
1719 That this is the case is already ensured in Kconfig
1720 */
1721#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1722 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1723#else
1724 struct cpufreq_governor *gov = NULL;
1725#endif
1c256245
TR
1726
1727 if (policy->governor->max_transition_latency &&
1728 policy->cpuinfo.transition_latency >
1729 policy->governor->max_transition_latency) {
6afde10c
TR
1730 if (!gov)
1731 return -EINVAL;
1732 else {
1733 printk(KERN_WARNING "%s governor failed, too long"
1734 " transition latency of HW, fallback"
1735 " to %s governor\n",
1736 policy->governor->name,
1737 gov->name);
1738 policy->governor = gov;
1739 }
1c256245 1740 }
1da177e4
LT
1741
1742 if (!try_module_get(policy->governor->owner))
1743 return -EINVAL;
1744
2d06d8c4 1745 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1746 policy->cpu, event);
95731ebb
XC
1747
1748 mutex_lock(&cpufreq_governor_lock);
1749 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1750 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1751 mutex_unlock(&cpufreq_governor_lock);
1752 return -EBUSY;
1753 }
1754
1755 if (event == CPUFREQ_GOV_STOP)
1756 policy->governor_enabled = false;
1757 else if (event == CPUFREQ_GOV_START)
1758 policy->governor_enabled = true;
1759
1760 mutex_unlock(&cpufreq_governor_lock);
1761
1da177e4
LT
1762 ret = policy->governor->governor(policy, event);
1763
4d5dcc42
VK
1764 if (!ret) {
1765 if (event == CPUFREQ_GOV_POLICY_INIT)
1766 policy->governor->initialized++;
1767 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1768 policy->governor->initialized--;
95731ebb
XC
1769 } else {
1770 /* Restore original values */
1771 mutex_lock(&cpufreq_governor_lock);
1772 if (event == CPUFREQ_GOV_STOP)
1773 policy->governor_enabled = true;
1774 else if (event == CPUFREQ_GOV_START)
1775 policy->governor_enabled = false;
1776 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 1777 }
b394058f 1778
e08f5f5b
GS
1779 /* we keep one module reference alive for
1780 each CPU governed by this CPU */
1da177e4
LT
1781 if ((event != CPUFREQ_GOV_START) || ret)
1782 module_put(policy->governor->owner);
1783 if ((event == CPUFREQ_GOV_STOP) && !ret)
1784 module_put(policy->governor->owner);
1785
1786 return ret;
1787}
1788
1da177e4
LT
1789int cpufreq_register_governor(struct cpufreq_governor *governor)
1790{
3bcb09a3 1791 int err;
1da177e4
LT
1792
1793 if (!governor)
1794 return -EINVAL;
1795
a7b422cd
KRW
1796 if (cpufreq_disabled())
1797 return -ENODEV;
1798
3fc54d37 1799 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1800
b394058f 1801 governor->initialized = 0;
3bcb09a3
JF
1802 err = -EBUSY;
1803 if (__find_governor(governor->name) == NULL) {
1804 err = 0;
1805 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1806 }
1da177e4 1807
32ee8c3e 1808 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1809 return err;
1da177e4
LT
1810}
1811EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1812
1da177e4
LT
1813void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1814{
90e41bac
PB
1815#ifdef CONFIG_HOTPLUG_CPU
1816 int cpu;
1817#endif
1818
1da177e4
LT
1819 if (!governor)
1820 return;
1821
a7b422cd
KRW
1822 if (cpufreq_disabled())
1823 return;
1824
90e41bac
PB
1825#ifdef CONFIG_HOTPLUG_CPU
1826 for_each_present_cpu(cpu) {
1827 if (cpu_online(cpu))
1828 continue;
1829 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1830 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1831 }
1832#endif
1833
3fc54d37 1834 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1835 list_del(&governor->governor_list);
3fc54d37 1836 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1837 return;
1838}
1839EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1840
1841
1da177e4
LT
1842/*********************************************************************
1843 * POLICY INTERFACE *
1844 *********************************************************************/
1845
1846/**
1847 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1848 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1849 * is written
1da177e4
LT
1850 *
1851 * Reads the current cpufreq policy.
1852 */
1853int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1854{
1855 struct cpufreq_policy *cpu_policy;
1856 if (!policy)
1857 return -EINVAL;
1858
1859 cpu_policy = cpufreq_cpu_get(cpu);
1860 if (!cpu_policy)
1861 return -EINVAL;
1862
1da177e4 1863 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1864
1865 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1866 return 0;
1867}
1868EXPORT_SYMBOL(cpufreq_get_policy);
1869
153d7f3f 1870/*
e08f5f5b
GS
1871 * data : current policy.
1872 * policy : policy to be set.
153d7f3f 1873 */
e08f5f5b
GS
1874static int __cpufreq_set_policy(struct cpufreq_policy *data,
1875 struct cpufreq_policy *policy)
1da177e4 1876{
7bd353a9 1877 int ret = 0, failed = 1;
1da177e4 1878
2d06d8c4 1879 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1da177e4
LT
1880 policy->min, policy->max);
1881
e08f5f5b
GS
1882 memcpy(&policy->cpuinfo, &data->cpuinfo,
1883 sizeof(struct cpufreq_cpuinfo));
1da177e4 1884
53391fa2 1885 if (policy->min > data->max || policy->max < data->min) {
9c9a43ed
MD
1886 ret = -EINVAL;
1887 goto error_out;
1888 }
1889
1da177e4 1890 /* verify the cpu speed can be set within this limit */
1c3d85dd 1891 ret = cpufreq_driver->verify(policy);
1da177e4
LT
1892 if (ret)
1893 goto error_out;
1894
1da177e4 1895 /* adjust if necessary - all reasons */
e041c683
AS
1896 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1897 CPUFREQ_ADJUST, policy);
1da177e4
LT
1898
1899 /* adjust if necessary - hardware incompatibility*/
e041c683
AS
1900 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1901 CPUFREQ_INCOMPATIBLE, policy);
1da177e4 1902
bb176f7d
VK
1903 /*
1904 * verify the cpu speed can be set within this limit, which might be
1905 * different to the first one
1906 */
1c3d85dd 1907 ret = cpufreq_driver->verify(policy);
e041c683 1908 if (ret)
1da177e4 1909 goto error_out;
1da177e4
LT
1910
1911 /* notification of the new policy */
e041c683
AS
1912 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1913 CPUFREQ_NOTIFY, policy);
1da177e4 1914
7d5e350f
DJ
1915 data->min = policy->min;
1916 data->max = policy->max;
1da177e4 1917
2d06d8c4 1918 pr_debug("new min and max freqs are %u - %u kHz\n",
e08f5f5b 1919 data->min, data->max);
1da177e4 1920
1c3d85dd 1921 if (cpufreq_driver->setpolicy) {
1da177e4 1922 data->policy = policy->policy;
2d06d8c4 1923 pr_debug("setting range\n");
1c3d85dd 1924 ret = cpufreq_driver->setpolicy(policy);
1da177e4
LT
1925 } else {
1926 if (policy->governor != data->governor) {
1927 /* save old, working values */
1928 struct cpufreq_governor *old_gov = data->governor;
1929
2d06d8c4 1930 pr_debug("governor switch\n");
1da177e4
LT
1931
1932 /* end old governor */
7bd353a9 1933 if (data->governor) {
1da177e4 1934 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
955ef483 1935 unlock_policy_rwsem_write(policy->cpu);
7bd353a9
VK
1936 __cpufreq_governor(data,
1937 CPUFREQ_GOV_POLICY_EXIT);
955ef483 1938 lock_policy_rwsem_write(policy->cpu);
7bd353a9 1939 }
1da177e4
LT
1940
1941 /* start new governor */
1942 data->governor = policy->governor;
7bd353a9 1943 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
955ef483 1944 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
7bd353a9 1945 failed = 0;
955ef483
VK
1946 } else {
1947 unlock_policy_rwsem_write(policy->cpu);
7bd353a9
VK
1948 __cpufreq_governor(data,
1949 CPUFREQ_GOV_POLICY_EXIT);
955ef483
VK
1950 lock_policy_rwsem_write(policy->cpu);
1951 }
7bd353a9
VK
1952 }
1953
1954 if (failed) {
1da177e4 1955 /* new governor failed, so re-start old one */
2d06d8c4 1956 pr_debug("starting governor %s failed\n",
e08f5f5b 1957 data->governor->name);
1da177e4
LT
1958 if (old_gov) {
1959 data->governor = old_gov;
7bd353a9
VK
1960 __cpufreq_governor(data,
1961 CPUFREQ_GOV_POLICY_INIT);
e08f5f5b
GS
1962 __cpufreq_governor(data,
1963 CPUFREQ_GOV_START);
1da177e4
LT
1964 }
1965 ret = -EINVAL;
1966 goto error_out;
1967 }
1968 /* might be a policy change, too, so fall through */
1969 }
2d06d8c4 1970 pr_debug("governor: change or update limits\n");
1da177e4
LT
1971 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1972 }
1973
7d5e350f 1974error_out:
1da177e4
LT
1975 return ret;
1976}
1977
1da177e4
LT
1978/**
1979 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1980 * @cpu: CPU which shall be re-evaluated
1981 *
25985edc 1982 * Useful for policy notifiers which have different necessities
1da177e4
LT
1983 * at different times.
1984 */
1985int cpufreq_update_policy(unsigned int cpu)
1986{
1987 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1988 struct cpufreq_policy policy;
f1829e4a 1989 int ret;
1da177e4 1990
f1829e4a
JL
1991 if (!data) {
1992 ret = -ENODEV;
1993 goto no_policy;
1994 }
1da177e4 1995
f1829e4a
JL
1996 if (unlikely(lock_policy_rwsem_write(cpu))) {
1997 ret = -EINVAL;
1998 goto fail;
1999 }
1da177e4 2000
2d06d8c4 2001 pr_debug("updating policy for CPU %u\n", cpu);
7d5e350f 2002 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
2003 policy.min = data->user_policy.min;
2004 policy.max = data->user_policy.max;
2005 policy.policy = data->user_policy.policy;
2006 policy.governor = data->user_policy.governor;
2007
bb176f7d
VK
2008 /*
2009 * BIOS might change freq behind our back
2010 * -> ask driver for current freq and notify governors about a change
2011 */
1c3d85dd
RW
2012 if (cpufreq_driver->get) {
2013 policy.cur = cpufreq_driver->get(cpu);
a85f7bd3 2014 if (!data->cur) {
2d06d8c4 2015 pr_debug("Driver did not initialize current freq");
a85f7bd3
TR
2016 data->cur = policy.cur;
2017 } else {
1c3d85dd 2018 if (data->cur != policy.cur && cpufreq_driver->target)
e08f5f5b
GS
2019 cpufreq_out_of_sync(cpu, data->cur,
2020 policy.cur);
a85f7bd3 2021 }
0961dd0d
TR
2022 }
2023
1da177e4
LT
2024 ret = __cpufreq_set_policy(data, &policy);
2025
5a01f2e8
VP
2026 unlock_policy_rwsem_write(cpu);
2027
f1829e4a 2028fail:
1da177e4 2029 cpufreq_cpu_put(data);
f1829e4a 2030no_policy:
1da177e4
LT
2031 return ret;
2032}
2033EXPORT_SYMBOL(cpufreq_update_policy);
2034
2760984f 2035static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2036 unsigned long action, void *hcpu)
2037{
2038 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2039 struct device *dev;
5302c3fb 2040 bool frozen = false;
c32b6b8e 2041
8a25a2fd
KS
2042 dev = get_cpu_device(cpu);
2043 if (dev) {
5302c3fb
SB
2044
2045 if (action & CPU_TASKS_FROZEN)
2046 frozen = true;
2047
2048 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2049 case CPU_ONLINE:
5302c3fb 2050 __cpufreq_add_dev(dev, NULL, frozen);
23d32899 2051 cpufreq_update_policy(cpu);
c32b6b8e 2052 break;
5302c3fb 2053
c32b6b8e 2054 case CPU_DOWN_PREPARE:
5302c3fb 2055 __cpufreq_remove_dev(dev, NULL, frozen);
c32b6b8e 2056 break;
5302c3fb 2057
5a01f2e8 2058 case CPU_DOWN_FAILED:
5302c3fb 2059 __cpufreq_add_dev(dev, NULL, frozen);
c32b6b8e
AR
2060 break;
2061 }
2062 }
2063 return NOTIFY_OK;
2064}
2065
9c36f746 2066static struct notifier_block __refdata cpufreq_cpu_notifier = {
bb176f7d 2067 .notifier_call = cpufreq_cpu_callback,
c32b6b8e 2068};
1da177e4
LT
2069
2070/*********************************************************************
2071 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2072 *********************************************************************/
2073
2074/**
2075 * cpufreq_register_driver - register a CPU Frequency driver
2076 * @driver_data: A struct cpufreq_driver containing the values#
2077 * submitted by the CPU Frequency driver.
2078 *
bb176f7d 2079 * Registers a CPU Frequency driver to this core code. This code
1da177e4 2080 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 2081 * (and isn't unregistered in the meantime).
1da177e4
LT
2082 *
2083 */
221dee28 2084int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
2085{
2086 unsigned long flags;
2087 int ret;
2088
a7b422cd
KRW
2089 if (cpufreq_disabled())
2090 return -ENODEV;
2091
1da177e4
LT
2092 if (!driver_data || !driver_data->verify || !driver_data->init ||
2093 ((!driver_data->setpolicy) && (!driver_data->target)))
2094 return -EINVAL;
2095
2d06d8c4 2096 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4
LT
2097
2098 if (driver_data->setpolicy)
2099 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2100
0d1857a1 2101 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2102 if (cpufreq_driver) {
0d1857a1 2103 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
2104 return -EBUSY;
2105 }
1c3d85dd 2106 cpufreq_driver = driver_data;
0d1857a1 2107 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 2108
8a25a2fd 2109 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab
JS
2110 if (ret)
2111 goto err_null_driver;
1da177e4 2112
1c3d85dd 2113 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1da177e4
LT
2114 int i;
2115 ret = -ENODEV;
2116
2117 /* check for at least one working CPU */
7a6aedfa
MT
2118 for (i = 0; i < nr_cpu_ids; i++)
2119 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 2120 ret = 0;
7a6aedfa
MT
2121 break;
2122 }
1da177e4
LT
2123
2124 /* if all ->init() calls failed, unregister */
2125 if (ret) {
2d06d8c4 2126 pr_debug("no CPU initialized for driver %s\n",
e08f5f5b 2127 driver_data->name);
8a25a2fd 2128 goto err_if_unreg;
1da177e4
LT
2129 }
2130 }
2131
8f5bc2ab 2132 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 2133 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 2134
8f5bc2ab 2135 return 0;
8a25a2fd
KS
2136err_if_unreg:
2137 subsys_interface_unregister(&cpufreq_interface);
8f5bc2ab 2138err_null_driver:
0d1857a1 2139 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2140 cpufreq_driver = NULL;
0d1857a1 2141 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 2142 return ret;
1da177e4
LT
2143}
2144EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2145
1da177e4
LT
2146/**
2147 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2148 *
bb176f7d 2149 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2150 * the right to do so, i.e. if you have succeeded in initialising before!
2151 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2152 * currently not initialised.
2153 */
221dee28 2154int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2155{
2156 unsigned long flags;
2157
1c3d85dd 2158 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2159 return -EINVAL;
1da177e4 2160
2d06d8c4 2161 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2162
8a25a2fd 2163 subsys_interface_unregister(&cpufreq_interface);
65edc68c 2164 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2165
0d1857a1 2166 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 2167 cpufreq_driver = NULL;
0d1857a1 2168 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
2169
2170 return 0;
2171}
2172EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2173
2174static int __init cpufreq_core_init(void)
2175{
2176 int cpu;
2177
a7b422cd
KRW
2178 if (cpufreq_disabled())
2179 return -ENODEV;
2180
5a01f2e8 2181 for_each_possible_cpu(cpu) {
f1625066 2182 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
2183 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2184 }
8aa84ad8 2185
2361be23 2186 cpufreq_global_kobject = kobject_create();
8aa84ad8 2187 BUG_ON(!cpufreq_global_kobject);
e00e56df 2188 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2189
5a01f2e8
VP
2190 return 0;
2191}
5a01f2e8 2192core_initcall(cpufreq_core_init);
This page took 0.875588 seconds and 5 git commands to generate.