cpufreq: Fix locking issues
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
7d5e350f 42static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
1da177e4
LT
48static DEFINE_SPINLOCK(cpufreq_driver_lock);
49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
5a01f2e8
VP
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
f1625066 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
70#define lock_policy_rwsem(mode, cpu) \
fa1d8af4 71static int lock_policy_rwsem_##mode(int cpu) \
5a01f2e8 72{ \
f1625066 73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
74 BUG_ON(policy_cpu == -1); \
75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8
VP
76 \
77 return 0; \
78}
79
80lock_policy_rwsem(read, cpu);
5a01f2e8 81lock_policy_rwsem(write, cpu);
5a01f2e8 82
fa1d8af4
VK
83#define unlock_policy_rwsem(mode, cpu) \
84static void unlock_policy_rwsem_##mode(int cpu) \
85{ \
86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
87 BUG_ON(policy_cpu == -1); \
88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8 89}
5a01f2e8 90
fa1d8af4
VK
91unlock_policy_rwsem(read, cpu);
92unlock_policy_rwsem(write, cpu);
5a01f2e8 93
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
1da177e4
LT
99
100/**
32ee8c3e
DJ
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
1da177e4
LT
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
106 */
e041c683 107static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 108static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 109
74212ca4 110static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
111static int __init init_cpufreq_transition_notifier_list(void)
112{
113 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 114 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
115 return 0;
116}
b3438f82 117pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 118
a7b422cd 119static int off __read_mostly;
da584455 120static int cpufreq_disabled(void)
a7b422cd
KRW
121{
122 return off;
123}
124void disable_cpufreq(void)
125{
126 off = 1;
127}
1da177e4 128static LIST_HEAD(cpufreq_governor_list);
29464f28 129static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 130
a9144436 131static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
1da177e4
LT
132{
133 struct cpufreq_policy *data;
134 unsigned long flags;
135
7a6aedfa 136 if (cpu >= nr_cpu_ids)
1da177e4
LT
137 goto err_out;
138
139 /* get the cpufreq driver */
140 spin_lock_irqsave(&cpufreq_driver_lock, flags);
141
142 if (!cpufreq_driver)
143 goto err_out_unlock;
144
145 if (!try_module_get(cpufreq_driver->owner))
146 goto err_out_unlock;
147
148
149 /* get the CPU */
7a6aedfa 150 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
151
152 if (!data)
153 goto err_out_put_module;
154
a9144436 155 if (!sysfs && !kobject_get(&data->kobj))
1da177e4
LT
156 goto err_out_put_module;
157
1da177e4 158 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
159 return data;
160
7d5e350f 161err_out_put_module:
1da177e4 162 module_put(cpufreq_driver->owner);
7d5e350f 163err_out_unlock:
1da177e4 164 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
7d5e350f 165err_out:
1da177e4
LT
166 return NULL;
167}
a9144436
SB
168
169struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
170{
d5aaffa9
DB
171 if (cpufreq_disabled())
172 return NULL;
173
a9144436
SB
174 return __cpufreq_cpu_get(cpu, false);
175}
1da177e4
LT
176EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
177
a9144436
SB
178static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
179{
180 return __cpufreq_cpu_get(cpu, true);
181}
182
183static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
184{
185 if (!sysfs)
186 kobject_put(&data->kobj);
187 module_put(cpufreq_driver->owner);
188}
7d5e350f 189
1da177e4
LT
190void cpufreq_cpu_put(struct cpufreq_policy *data)
191{
d5aaffa9
DB
192 if (cpufreq_disabled())
193 return;
194
a9144436 195 __cpufreq_cpu_put(data, false);
1da177e4
LT
196}
197EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
198
a9144436
SB
199static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
200{
201 __cpufreq_cpu_put(data, true);
202}
1da177e4 203
1da177e4
LT
204/*********************************************************************
205 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
206 *********************************************************************/
207
208/**
209 * adjust_jiffies - adjust the system "loops_per_jiffy"
210 *
211 * This function alters the system "loops_per_jiffy" for the clock
212 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 213 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
214 * per-CPU loops_per_jiffy value wherever possible.
215 */
216#ifndef CONFIG_SMP
217static unsigned long l_p_j_ref;
218static unsigned int l_p_j_ref_freq;
219
858119e1 220static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
221{
222 if (ci->flags & CPUFREQ_CONST_LOOPS)
223 return;
224
225 if (!l_p_j_ref_freq) {
226 l_p_j_ref = loops_per_jiffy;
227 l_p_j_ref_freq = ci->old;
2d06d8c4 228 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 229 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 230 }
d08de0c1 231 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 232 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
233 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
234 ci->new);
2d06d8c4 235 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 236 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
237 }
238}
239#else
e08f5f5b
GS
240static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
241{
242 return;
243}
1da177e4
LT
244#endif
245
246
247/**
e4472cb3
DJ
248 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
249 * on frequency transition.
1da177e4 250 *
e4472cb3
DJ
251 * This function calls the transition notifiers and the "adjust_jiffies"
252 * function. It is called twice on all CPU frequency changes that have
32ee8c3e 253 * external effects.
1da177e4
LT
254 */
255void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
256{
e4472cb3 257 struct cpufreq_policy *policy;
2eaa3e2d 258 unsigned long flags;
e4472cb3 259
1da177e4
LT
260 BUG_ON(irqs_disabled());
261
d5aaffa9
DB
262 if (cpufreq_disabled())
263 return;
264
1da177e4 265 freqs->flags = cpufreq_driver->flags;
2d06d8c4 266 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 267 state, freqs->new);
1da177e4 268
2eaa3e2d 269 spin_lock_irqsave(&cpufreq_driver_lock, flags);
7a6aedfa 270 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
2eaa3e2d
VK
271 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
272
1da177e4 273 switch (state) {
e4472cb3 274
1da177e4 275 case CPUFREQ_PRECHANGE:
32ee8c3e 276 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
277 * which is not equal to what the cpufreq core thinks is
278 * "old frequency".
1da177e4
LT
279 */
280 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
281 if ((policy) && (policy->cpu == freqs->cpu) &&
282 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 283 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
284 " %u, cpufreq assumed %u kHz.\n",
285 freqs->old, policy->cur);
286 freqs->old = policy->cur;
1da177e4
LT
287 }
288 }
b4dfdbb3 289 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 290 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
291 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
292 break;
e4472cb3 293
1da177e4
LT
294 case CPUFREQ_POSTCHANGE:
295 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 296 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723
TR
297 (unsigned long)freqs->cpu);
298 trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
25e41933 299 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 300 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 301 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
302 if (likely(policy) && likely(policy->cpu == freqs->cpu))
303 policy->cur = freqs->new;
1da177e4
LT
304 break;
305 }
1da177e4
LT
306}
307EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
308
309
310
311/*********************************************************************
312 * SYSFS INTERFACE *
313 *********************************************************************/
314
3bcb09a3
JF
315static struct cpufreq_governor *__find_governor(const char *str_governor)
316{
317 struct cpufreq_governor *t;
318
319 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 320 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
321 return t;
322
323 return NULL;
324}
325
1da177e4
LT
326/**
327 * cpufreq_parse_governor - parse a governor string
328 */
905d77cd 329static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
330 struct cpufreq_governor **governor)
331{
3bcb09a3
JF
332 int err = -EINVAL;
333
1da177e4 334 if (!cpufreq_driver)
3bcb09a3
JF
335 goto out;
336
1da177e4
LT
337 if (cpufreq_driver->setpolicy) {
338 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
339 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 340 err = 0;
e08f5f5b
GS
341 } else if (!strnicmp(str_governor, "powersave",
342 CPUFREQ_NAME_LEN)) {
1da177e4 343 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 344 err = 0;
1da177e4 345 }
3bcb09a3 346 } else if (cpufreq_driver->target) {
1da177e4 347 struct cpufreq_governor *t;
3bcb09a3 348
3fc54d37 349 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
350
351 t = __find_governor(str_governor);
352
ea714970 353 if (t == NULL) {
1a8e1463 354 int ret;
ea714970 355
1a8e1463
KC
356 mutex_unlock(&cpufreq_governor_mutex);
357 ret = request_module("cpufreq_%s", str_governor);
358 mutex_lock(&cpufreq_governor_mutex);
ea714970 359
1a8e1463
KC
360 if (ret == 0)
361 t = __find_governor(str_governor);
ea714970
JF
362 }
363
3bcb09a3
JF
364 if (t != NULL) {
365 *governor = t;
366 err = 0;
1da177e4 367 }
3bcb09a3 368
3fc54d37 369 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 370 }
29464f28 371out:
3bcb09a3 372 return err;
1da177e4 373}
1da177e4
LT
374
375
1da177e4 376/**
e08f5f5b
GS
377 * cpufreq_per_cpu_attr_read() / show_##file_name() -
378 * print out cpufreq information
1da177e4
LT
379 *
380 * Write out information from cpufreq_driver->policy[cpu]; object must be
381 * "unsigned int".
382 */
383
32ee8c3e
DJ
384#define show_one(file_name, object) \
385static ssize_t show_##file_name \
905d77cd 386(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 387{ \
29464f28 388 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
389}
390
391show_one(cpuinfo_min_freq, cpuinfo.min_freq);
392show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 393show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
394show_one(scaling_min_freq, min);
395show_one(scaling_max_freq, max);
396show_one(scaling_cur_freq, cur);
397
e08f5f5b
GS
398static int __cpufreq_set_policy(struct cpufreq_policy *data,
399 struct cpufreq_policy *policy);
7970e08b 400
1da177e4
LT
401/**
402 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
403 */
404#define store_one(file_name, object) \
405static ssize_t store_##file_name \
905d77cd 406(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 407{ \
f55c9c26 408 unsigned int ret; \
1da177e4
LT
409 struct cpufreq_policy new_policy; \
410 \
411 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
412 if (ret) \
413 return -EINVAL; \
414 \
29464f28 415 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
416 if (ret != 1) \
417 return -EINVAL; \
418 \
7970e08b
TR
419 ret = __cpufreq_set_policy(policy, &new_policy); \
420 policy->user_policy.object = policy->object; \
1da177e4
LT
421 \
422 return ret ? ret : count; \
423}
424
29464f28
DJ
425store_one(scaling_min_freq, min);
426store_one(scaling_max_freq, max);
1da177e4
LT
427
428/**
429 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
430 */
905d77cd
DJ
431static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
432 char *buf)
1da177e4 433{
5a01f2e8 434 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
435 if (!cur_freq)
436 return sprintf(buf, "<unknown>");
437 return sprintf(buf, "%u\n", cur_freq);
438}
439
440
441/**
442 * show_scaling_governor - show the current policy for the specified CPU
443 */
905d77cd 444static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 445{
29464f28 446 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
447 return sprintf(buf, "powersave\n");
448 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
449 return sprintf(buf, "performance\n");
450 else if (policy->governor)
4b972f0b 451 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 452 policy->governor->name);
1da177e4
LT
453 return -EINVAL;
454}
455
456
457/**
458 * store_scaling_governor - store policy for the specified CPU
459 */
905d77cd
DJ
460static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
461 const char *buf, size_t count)
1da177e4 462{
f55c9c26 463 unsigned int ret;
1da177e4
LT
464 char str_governor[16];
465 struct cpufreq_policy new_policy;
466
467 ret = cpufreq_get_policy(&new_policy, policy->cpu);
468 if (ret)
469 return ret;
470
29464f28 471 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
472 if (ret != 1)
473 return -EINVAL;
474
e08f5f5b
GS
475 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
476 &new_policy.governor))
1da177e4
LT
477 return -EINVAL;
478
7970e08b
TR
479 /* Do not use cpufreq_set_policy here or the user_policy.max
480 will be wrongly overridden */
7970e08b
TR
481 ret = __cpufreq_set_policy(policy, &new_policy);
482
483 policy->user_policy.policy = policy->policy;
484 policy->user_policy.governor = policy->governor;
7970e08b 485
e08f5f5b
GS
486 if (ret)
487 return ret;
488 else
489 return count;
1da177e4
LT
490}
491
492/**
493 * show_scaling_driver - show the cpufreq driver currently loaded
494 */
905d77cd 495static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 496{
4b972f0b 497 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
498}
499
500/**
501 * show_scaling_available_governors - show the available CPUfreq governors
502 */
905d77cd
DJ
503static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
504 char *buf)
1da177e4
LT
505{
506 ssize_t i = 0;
507 struct cpufreq_governor *t;
508
509 if (!cpufreq_driver->target) {
510 i += sprintf(buf, "performance powersave");
511 goto out;
512 }
513
514 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
515 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
516 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 517 goto out;
4b972f0b 518 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 519 }
7d5e350f 520out:
1da177e4
LT
521 i += sprintf(&buf[i], "\n");
522 return i;
523}
e8628dd0 524
835481d9 525static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
526{
527 ssize_t i = 0;
528 unsigned int cpu;
529
835481d9 530 for_each_cpu(cpu, mask) {
1da177e4
LT
531 if (i)
532 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
533 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
534 if (i >= (PAGE_SIZE - 5))
29464f28 535 break;
1da177e4
LT
536 }
537 i += sprintf(&buf[i], "\n");
538 return i;
539}
540
e8628dd0
DW
541/**
542 * show_related_cpus - show the CPUs affected by each transition even if
543 * hw coordination is in use
544 */
545static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
546{
e8628dd0
DW
547 return show_cpus(policy->related_cpus, buf);
548}
549
550/**
551 * show_affected_cpus - show the CPUs affected by each transition
552 */
553static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
554{
555 return show_cpus(policy->cpus, buf);
556}
557
9e76988e 558static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 559 const char *buf, size_t count)
9e76988e
VP
560{
561 unsigned int freq = 0;
562 unsigned int ret;
563
879000f9 564 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
565 return -EINVAL;
566
567 ret = sscanf(buf, "%u", &freq);
568 if (ret != 1)
569 return -EINVAL;
570
571 policy->governor->store_setspeed(policy, freq);
572
573 return count;
574}
575
576static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
577{
879000f9 578 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
579 return sprintf(buf, "<unsupported>\n");
580
581 return policy->governor->show_setspeed(policy, buf);
582}
1da177e4 583
e2f74f35 584/**
8bf1ac72 585 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
586 */
587static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
588{
589 unsigned int limit;
590 int ret;
591 if (cpufreq_driver->bios_limit) {
592 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
593 if (!ret)
594 return sprintf(buf, "%u\n", limit);
595 }
596 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
597}
598
6dad2a29
BP
599cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
600cpufreq_freq_attr_ro(cpuinfo_min_freq);
601cpufreq_freq_attr_ro(cpuinfo_max_freq);
602cpufreq_freq_attr_ro(cpuinfo_transition_latency);
603cpufreq_freq_attr_ro(scaling_available_governors);
604cpufreq_freq_attr_ro(scaling_driver);
605cpufreq_freq_attr_ro(scaling_cur_freq);
606cpufreq_freq_attr_ro(bios_limit);
607cpufreq_freq_attr_ro(related_cpus);
608cpufreq_freq_attr_ro(affected_cpus);
609cpufreq_freq_attr_rw(scaling_min_freq);
610cpufreq_freq_attr_rw(scaling_max_freq);
611cpufreq_freq_attr_rw(scaling_governor);
612cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 613
905d77cd 614static struct attribute *default_attrs[] = {
1da177e4
LT
615 &cpuinfo_min_freq.attr,
616 &cpuinfo_max_freq.attr,
ed129784 617 &cpuinfo_transition_latency.attr,
1da177e4
LT
618 &scaling_min_freq.attr,
619 &scaling_max_freq.attr,
620 &affected_cpus.attr,
e8628dd0 621 &related_cpus.attr,
1da177e4
LT
622 &scaling_governor.attr,
623 &scaling_driver.attr,
624 &scaling_available_governors.attr,
9e76988e 625 &scaling_setspeed.attr,
1da177e4
LT
626 NULL
627};
628
/* Global (not per-policy) cpufreq sysfs kobject */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 634
29464f28 635static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 636{
905d77cd
DJ
637 struct cpufreq_policy *policy = to_policy(kobj);
638 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 639 ssize_t ret = -EINVAL;
a9144436 640 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 641 if (!policy)
0db4a8a9 642 goto no_policy;
5a01f2e8
VP
643
644 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 645 goto fail;
5a01f2e8 646
e08f5f5b
GS
647 if (fattr->show)
648 ret = fattr->show(policy, buf);
649 else
650 ret = -EIO;
651
5a01f2e8 652 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 653fail:
a9144436 654 cpufreq_cpu_put_sysfs(policy);
0db4a8a9 655no_policy:
1da177e4
LT
656 return ret;
657}
658
905d77cd
DJ
659static ssize_t store(struct kobject *kobj, struct attribute *attr,
660 const char *buf, size_t count)
1da177e4 661{
905d77cd
DJ
662 struct cpufreq_policy *policy = to_policy(kobj);
663 struct freq_attr *fattr = to_attr(attr);
a07530b4 664 ssize_t ret = -EINVAL;
a9144436 665 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 666 if (!policy)
a07530b4 667 goto no_policy;
5a01f2e8
VP
668
669 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 670 goto fail;
5a01f2e8 671
e08f5f5b
GS
672 if (fattr->store)
673 ret = fattr->store(policy, buf, count);
674 else
675 ret = -EIO;
676
5a01f2e8 677 unlock_policy_rwsem_write(policy->cpu);
a07530b4 678fail:
a9144436 679 cpufreq_cpu_put_sysfs(policy);
a07530b4 680no_policy:
1da177e4
LT
681 return ret;
682}
683
905d77cd 684static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 685{
905d77cd 686 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 687 pr_debug("last reference is dropped\n");
1da177e4
LT
688 complete(&policy->kobj_unregister);
689}
690
52cf25d0 691static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
692 .show = show,
693 .store = store,
694};
695
696static struct kobj_type ktype_cpufreq = {
697 .sysfs_ops = &sysfs_ops,
698 .default_attrs = default_attrs,
699 .release = cpufreq_sysfs_release,
700};
701
19d6f7ec 702/* symlink affected CPUs */
cf3289d0
AC
703static int cpufreq_add_dev_symlink(unsigned int cpu,
704 struct cpufreq_policy *policy)
19d6f7ec
DJ
705{
706 unsigned int j;
707 int ret = 0;
708
709 for_each_cpu(j, policy->cpus) {
710 struct cpufreq_policy *managed_policy;
8a25a2fd 711 struct device *cpu_dev;
19d6f7ec
DJ
712
713 if (j == cpu)
714 continue;
19d6f7ec 715
2d06d8c4 716 pr_debug("CPU %u already managed, adding link\n", j);
19d6f7ec 717 managed_policy = cpufreq_cpu_get(cpu);
8a25a2fd
KS
718 cpu_dev = get_cpu_device(j);
719 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec
DJ
720 "cpufreq");
721 if (ret) {
722 cpufreq_cpu_put(managed_policy);
723 return ret;
724 }
725 }
726 return ret;
727}
728
cf3289d0
AC
729static int cpufreq_add_dev_interface(unsigned int cpu,
730 struct cpufreq_policy *policy,
8a25a2fd 731 struct device *dev)
909a694e 732{
ecf7e461 733 struct cpufreq_policy new_policy;
909a694e
DJ
734 struct freq_attr **drv_attr;
735 unsigned long flags;
736 int ret = 0;
737 unsigned int j;
738
739 /* prepare interface data */
740 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 741 &dev->kobj, "cpufreq");
909a694e
DJ
742 if (ret)
743 return ret;
744
745 /* set up files for this cpu device */
746 drv_attr = cpufreq_driver->attr;
747 while ((drv_attr) && (*drv_attr)) {
748 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
749 if (ret)
750 goto err_out_kobj_put;
751 drv_attr++;
752 }
753 if (cpufreq_driver->get) {
754 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
755 if (ret)
756 goto err_out_kobj_put;
757 }
758 if (cpufreq_driver->target) {
759 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
760 if (ret)
761 goto err_out_kobj_put;
762 }
e2f74f35
TR
763 if (cpufreq_driver->bios_limit) {
764 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
765 if (ret)
766 goto err_out_kobj_put;
767 }
909a694e
DJ
768
769 spin_lock_irqsave(&cpufreq_driver_lock, flags);
770 for_each_cpu(j, policy->cpus) {
909a694e 771 per_cpu(cpufreq_cpu_data, j) = policy;
f1625066 772 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
909a694e
DJ
773 }
774 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
775
776 ret = cpufreq_add_dev_symlink(cpu, policy);
ecf7e461
DJ
777 if (ret)
778 goto err_out_kobj_put;
779
780 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
781 /* assure that the starting sequence is run in __cpufreq_set_policy */
782 policy->governor = NULL;
783
784 /* set default policy */
785 ret = __cpufreq_set_policy(policy, &new_policy);
786 policy->user_policy.policy = policy->policy;
787 policy->user_policy.governor = policy->governor;
788
789 if (ret) {
2d06d8c4 790 pr_debug("setting policy failed\n");
ecf7e461
DJ
791 if (cpufreq_driver->exit)
792 cpufreq_driver->exit(policy);
793 }
909a694e
DJ
794 return ret;
795
796err_out_kobj_put:
797 kobject_put(&policy->kobj);
798 wait_for_completion(&policy->kobj_unregister);
799 return ret;
800}
801
#ifdef CONFIG_HOTPLUG_CPU
/* Attach a hotplugged @cpu to the existing policy of @sibling: stop the
 * governor, add the CPU to the policy masks/per-cpu pointers under the
 * write rwsem and driver lock, restart the governor and create the
 * sysfs link.  The policy reference taken here is kept for the link. */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	__cpufreq_governor(policy, CPUFREQ_GOV_START);
	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
838
839/**
840 * cpufreq_add_dev - add a CPU device
841 *
32ee8c3e 842 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
843 *
844 * The Oracle says: try running cpufreq registration/unregistration concurrently
845 * with with cpu hotplugging and all hell will break loose. Tried to clean this
846 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 847 */
8a25a2fd 848static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 849{
fcf80582 850 unsigned int j, cpu = dev->id;
65922465 851 int ret = -ENOMEM;
1da177e4 852 struct cpufreq_policy *policy;
1da177e4 853 unsigned long flags;
90e41bac 854#ifdef CONFIG_HOTPLUG_CPU
fcf80582 855 struct cpufreq_governor *gov;
90e41bac
PB
856 int sibling;
857#endif
1da177e4 858
c32b6b8e
AR
859 if (cpu_is_offline(cpu))
860 return 0;
861
2d06d8c4 862 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
863
864#ifdef CONFIG_SMP
865 /* check whether a different CPU already registered this
866 * CPU because it is in the same boat. */
867 policy = cpufreq_cpu_get(cpu);
868 if (unlikely(policy)) {
8ff69732 869 cpufreq_cpu_put(policy);
1da177e4
LT
870 return 0;
871 }
fcf80582
VK
872
873#ifdef CONFIG_HOTPLUG_CPU
874 /* Check if this cpu was hot-unplugged earlier and has siblings */
2eaa3e2d 875 spin_lock_irqsave(&cpufreq_driver_lock, flags);
fcf80582
VK
876 for_each_online_cpu(sibling) {
877 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
2eaa3e2d
VK
878 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
879 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 880 return cpufreq_add_policy_cpu(cpu, sibling, dev);
2eaa3e2d 881 }
fcf80582 882 }
2eaa3e2d 883 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 884#endif
1da177e4
LT
885#endif
886
887 if (!try_module_get(cpufreq_driver->owner)) {
888 ret = -EINVAL;
889 goto module_out;
890 }
891
e98df50c 892 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 893 if (!policy)
1da177e4 894 goto nomem_out;
059019a3
DJ
895
896 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 897 goto err_free_policy;
059019a3
DJ
898
899 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 900 goto err_free_cpumask;
1da177e4
LT
901
902 policy->cpu = cpu;
65922465 903 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 904 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 905
5a01f2e8 906 /* Initially set CPU itself as the policy_cpu */
f1625066 907 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 908
1da177e4 909 init_completion(&policy->kobj_unregister);
65f27f38 910 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
911
912 /* call driver. From then on the cpufreq must be able
913 * to accept all calls to ->verify and ->setpolicy for this CPU
914 */
915 ret = cpufreq_driver->init(policy);
916 if (ret) {
2d06d8c4 917 pr_debug("initialization failed\n");
2eaa3e2d 918 goto err_set_policy_cpu;
1da177e4 919 }
643ae6e8 920
fcf80582
VK
921 /* related cpus should atleast have policy->cpus */
922 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
923
643ae6e8
VK
924 /*
925 * affected cpus must always be the one, which are online. We aren't
926 * managing offline cpus here.
927 */
928 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
929
187d9f4e
MC
930 policy->user_policy.min = policy->min;
931 policy->user_policy.max = policy->max;
1da177e4 932
a1531acd
TR
933 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
934 CPUFREQ_START, policy);
935
fcf80582
VK
936#ifdef CONFIG_HOTPLUG_CPU
937 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
938 if (gov) {
939 policy->governor = gov;
940 pr_debug("Restoring governor %s for cpu %d\n",
941 policy->governor->name, cpu);
4bfa042c 942 }
fcf80582 943#endif
1da177e4 944
8a25a2fd 945 ret = cpufreq_add_dev_interface(cpu, policy, dev);
19d6f7ec
DJ
946 if (ret)
947 goto err_out_unregister;
8ff69732 948
038c5b3e 949 kobject_uevent(&policy->kobj, KOBJ_ADD);
1da177e4 950 module_put(cpufreq_driver->owner);
2d06d8c4 951 pr_debug("initialization complete\n");
87c32271 952
1da177e4
LT
953 return 0;
954
1da177e4
LT
955err_out_unregister:
956 spin_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 957 for_each_cpu(j, policy->cpus)
7a6aedfa 958 per_cpu(cpufreq_cpu_data, j) = NULL;
1da177e4
LT
959 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
960
c10997f6 961 kobject_put(&policy->kobj);
1da177e4
LT
962 wait_for_completion(&policy->kobj_unregister);
963
2eaa3e2d
VK
964err_set_policy_cpu:
965 per_cpu(cpufreq_policy_cpu, cpu) = -1;
cad70a6a 966 free_cpumask_var(policy->related_cpus);
3f4a782b
MD
967err_free_cpumask:
968 free_cpumask_var(policy->cpus);
969err_free_policy:
1da177e4 970 kfree(policy);
1da177e4
LT
971nomem_out:
972 module_put(cpufreq_driver->owner);
c32b6b8e 973module_out:
1da177e4
LT
974 return ret;
975}
976
b8eed8af
VK
977static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
978{
979 int j;
980
981 policy->last_cpu = policy->cpu;
982 policy->cpu = cpu;
983
3361b7b1 984 for_each_cpu(j, policy->cpus)
b8eed8af 985 per_cpu(cpufreq_policy_cpu, j) = cpu;
b8eed8af
VK
986
987#ifdef CONFIG_CPU_FREQ_TABLE
988 cpufreq_frequency_table_update_policy_cpu(policy);
989#endif
990 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
991 CPUFREQ_UPDATE_POLICY_CPU, policy);
992}
1da177e4
LT
993
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	/* Detach the policy from this CPU under the driver lock. */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	/* Governor must be stopped before the topology below changes. */
	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so it can be restored if the CPU returns. */
	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
			CPUFREQ_NAME_LEN);
#endif

	/* Count the policy's users before dropping this CPU from it. */
	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);
	cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		/* Non-owner CPU: only its sysfs symlink needs to go. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			/* Roll back: rejoin this CPU to the policy. */
			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			/* NOTE(review): ret from sysfs_create_link() is
			 * overwritten by the unconditional -EINVAL return —
			 * confirm this best-effort rollback is intentional. */
			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
	cpufreq_cpu_put(data);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		/* Snapshot kobj pointers under the read lock before release. */
		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else if (cpufreq_driver->target) {
		/* Policy lives on for the siblings: restart its governor. */
		__cpufreq_governor(data, CPUFREQ_GOV_START);
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}
1101
1102
8a25a2fd 1103static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1104{
8a25a2fd 1105 unsigned int cpu = dev->id;
5a01f2e8 1106 int retval;
ec28297a
VP
1107
1108 if (cpu_is_offline(cpu))
1109 return 0;
1110
8a25a2fd 1111 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1112 return retval;
1113}
1114
1115
65f27f38 1116static void handle_update(struct work_struct *work)
1da177e4 1117{
65f27f38
DH
1118 struct cpufreq_policy *policy =
1119 container_of(work, struct cpufreq_policy, update);
1120 unsigned int cpu = policy->cpu;
2d06d8c4 1121 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1122 cpufreq_update_policy(cpu);
1123}
1124
1125/**
1126 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1127 * @cpu: cpu number
1128 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1129 * @new_freq: CPU frequency the CPU actually runs at
1130 *
29464f28
DJ
1131 * We adjust to current frequency first, and need to clean up later.
1132 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1133 */
e08f5f5b
GS
1134static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1135 unsigned int new_freq)
1da177e4
LT
1136{
1137 struct cpufreq_freqs freqs;
1138
2d06d8c4 1139 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1140 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1141
1142 freqs.cpu = cpu;
1143 freqs.old = old_freq;
1144 freqs.new = new_freq;
1145 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
1146 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
1147}
1148
1149
32ee8c3e 1150/**
4ab70df4 1151 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1152 * @cpu: CPU number
1153 *
1154 * This is the last known freq, without actually getting it from the driver.
1155 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1156 */
1157unsigned int cpufreq_quick_get(unsigned int cpu)
1158{
1159 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
e08f5f5b 1160 unsigned int ret_freq = 0;
95235ca2
VP
1161
1162 if (policy) {
e08f5f5b 1163 ret_freq = policy->cur;
95235ca2
VP
1164 cpufreq_cpu_put(policy);
1165 }
1166
4d34a67d 1167 return ret_freq;
95235ca2
VP
1168}
1169EXPORT_SYMBOL(cpufreq_quick_get);
1170
3d737108
JB
1171/**
1172 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1173 * @cpu: CPU number
1174 *
1175 * Just return the max possible frequency for a given CPU.
1176 */
1177unsigned int cpufreq_quick_get_max(unsigned int cpu)
1178{
1179 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1180 unsigned int ret_freq = 0;
1181
1182 if (policy) {
1183 ret_freq = policy->max;
1184 cpufreq_cpu_put(policy);
1185 }
1186
1187 return ret_freq;
1188}
1189EXPORT_SYMBOL(cpufreq_quick_get_max);
1190
95235ca2 1191
5a01f2e8 1192static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1193{
7a6aedfa 1194 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1195 unsigned int ret_freq = 0;
1da177e4 1196
1da177e4 1197 if (!cpufreq_driver->get)
4d34a67d 1198 return ret_freq;
1da177e4 1199
e08f5f5b 1200 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1201
e08f5f5b
GS
1202 if (ret_freq && policy->cur &&
1203 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1204 /* verify no discrepancy between actual and
1205 saved value exists */
1206 if (unlikely(ret_freq != policy->cur)) {
1207 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1208 schedule_work(&policy->update);
1209 }
1210 }
1211
4d34a67d 1212 return ret_freq;
5a01f2e8 1213}
1da177e4 1214
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (!policy)
		return 0;

	/* __cpufreq_get() must run under the policy read lock. */
	if (likely(!lock_policy_rwsem_read(cpu))) {
		ret_freq = __cpufreq_get(cpu);
		unlock_policy_rwsem_read(cpu);
	}

	cpufreq_cpu_put(policy);

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
1242
8a25a2fd
KS
/* Hooks cpufreq into the cpu subsystem: per-CPU add/remove callbacks. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1249
1da177e4 1250
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor. The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	/* The driver's suspend hook is optional; a failure is only logged. */
	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	/* Drop the reference taken by cpufreq_cpu_get() above. */
	cpufreq_cpu_put(cpu_policy);
	return ret;
}
1281
/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *     restored. It will verify that the current freq is in sync with
 *     what we believe it to be. This is a bit later than when it
 *     should be, but nonetheless it's better than calling
 *     cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU. The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	/* Defer the frequency re-sync to process context (see note above). */
	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
}
1323
e00e56df
RW
/* Boot-CPU suspend/resume handled via syscore (runs with one CPU online). */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1328
9d95046e
BP
1329/**
1330 * cpufreq_get_current_driver - return current driver's name
1331 *
1332 * Return the name string of the currently loaded cpufreq driver
1333 * or NULL, if none.
1334 */
1335const char *cpufreq_get_current_driver(void)
1336{
1337 if (cpufreq_driver)
1338 return cpufreq_driver->name;
1339
1340 return NULL;
1341}
1342EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1343
1344/*********************************************************************
1345 * NOTIFIER LISTS INTERFACE *
1346 *********************************************************************/
1347
1348/**
1349 * cpufreq_register_notifier - register a driver with cpufreq
1350 * @nb: notifier function to register
1351 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1352 *
32ee8c3e 1353 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1354 * are notified about clock rate changes (once before and once after
1355 * the transition), or a list of drivers that are notified about
1356 * changes in cpufreq policy.
1357 *
1358 * This function may sleep, and has the same return conditions as
e041c683 1359 * blocking_notifier_chain_register.
1da177e4
LT
1360 */
1361int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1362{
1363 int ret;
1364
d5aaffa9
DB
1365 if (cpufreq_disabled())
1366 return -EINVAL;
1367
74212ca4
CEB
1368 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1369
1da177e4
LT
1370 switch (list) {
1371 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1372 ret = srcu_notifier_chain_register(
e041c683 1373 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1374 break;
1375 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1376 ret = blocking_notifier_chain_register(
1377 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1378 break;
1379 default:
1380 ret = -EINVAL;
1381 }
1da177e4
LT
1382
1383 return ret;
1384}
1385EXPORT_SYMBOL(cpufreq_register_notifier);
1386
1387
1388/**
1389 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1390 * @nb: notifier block to be unregistered
1391 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1392 *
1393 * Remove a driver from the CPU frequency notifier list.
1394 *
1395 * This function may sleep, and has the same return conditions as
e041c683 1396 * blocking_notifier_chain_unregister.
1da177e4
LT
1397 */
1398int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1399{
1400 int ret;
1401
d5aaffa9
DB
1402 if (cpufreq_disabled())
1403 return -EINVAL;
1404
1da177e4
LT
1405 switch (list) {
1406 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1407 ret = srcu_notifier_chain_unregister(
e041c683 1408 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1409 break;
1410 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1411 ret = blocking_notifier_chain_unregister(
1412 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1413 break;
1414 default:
1415 ret = -EINVAL;
1416 }
1da177e4
LT
1417
1418 return ret;
1419}
1420EXPORT_SYMBOL(cpufreq_unregister_notifier);
1421
1422
1423/*********************************************************************
1424 * GOVERNORS *
1425 *********************************************************************/
1426
1427
1428int __cpufreq_driver_target(struct cpufreq_policy *policy,
1429 unsigned int target_freq,
1430 unsigned int relation)
1431{
1432 int retval = -EINVAL;
7249924e 1433 unsigned int old_target_freq = target_freq;
c32b6b8e 1434
a7b422cd
KRW
1435 if (cpufreq_disabled())
1436 return -ENODEV;
1437
7249924e
VK
1438 /* Make sure that target_freq is within supported range */
1439 if (target_freq > policy->max)
1440 target_freq = policy->max;
1441 if (target_freq < policy->min)
1442 target_freq = policy->min;
1443
1444 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1445 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1446
1447 if (target_freq == policy->cur)
1448 return 0;
1449
3361b7b1 1450 if (cpufreq_driver->target)
1da177e4 1451 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1452
1da177e4
LT
1453 return retval;
1454}
1455EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1456
1da177e4
LT
1457int cpufreq_driver_target(struct cpufreq_policy *policy,
1458 unsigned int target_freq,
1459 unsigned int relation)
1460{
f1829e4a 1461 int ret = -EINVAL;
1da177e4
LT
1462
1463 policy = cpufreq_cpu_get(policy->cpu);
1464 if (!policy)
f1829e4a 1465 goto no_policy;
1da177e4 1466
5a01f2e8 1467 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1468 goto fail;
1da177e4
LT
1469
1470 ret = __cpufreq_driver_target(policy, target_freq, relation);
1471
5a01f2e8 1472 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1473
f1829e4a 1474fail:
1da177e4 1475 cpufreq_cpu_put(policy);
f1829e4a 1476no_policy:
1da177e4
LT
1477 return ret;
1478}
1479EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1480
bf0b90e3 1481int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1482{
1483 int ret = 0;
1484
d5aaffa9
DB
1485 if (cpufreq_disabled())
1486 return ret;
1487
3361b7b1 1488 if (!cpufreq_driver->getavg)
0676f7f2
VK
1489 return 0;
1490
dfde5d62
VP
1491 policy = cpufreq_cpu_get(policy->cpu);
1492 if (!policy)
1493 return -EINVAL;
1494
0676f7f2 1495 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1496
dfde5d62
VP
1497 cpufreq_cpu_put(policy);
1498 return ret;
1499}
5a01f2e8 1500EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1501
/*
 * __cpufreq_governor - deliver @event (CPUFREQ_GOV_START/STOP/LIMITS) to
 * the governor attached to @policy, keeping the governor module's
 * reference count in step with the number of CPUs it governs.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Fall back to the performance governor (if built in) when the
	 * hardware transition latency exceeds what the governor tolerates. */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* Track how many policies currently run this governor. */
	if (event == CPUFREQ_GOV_START)
		policy->governor->initialized++;
	else if (event == CPUFREQ_GOV_STOP)
		policy->governor->initialized--;

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	/* i.e. only a *successful* GOV_START keeps the reference taken
	 * above; a successful GOV_STOP drops one extra reference. */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
1557
1558
1da177e4
LT
1559int cpufreq_register_governor(struct cpufreq_governor *governor)
1560{
3bcb09a3 1561 int err;
1da177e4
LT
1562
1563 if (!governor)
1564 return -EINVAL;
1565
a7b422cd
KRW
1566 if (cpufreq_disabled())
1567 return -ENODEV;
1568
3fc54d37 1569 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1570
b394058f 1571 governor->initialized = 0;
3bcb09a3
JF
1572 err = -EBUSY;
1573 if (__find_governor(governor->name) == NULL) {
1574 err = 0;
1575 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1576 }
1da177e4 1577
32ee8c3e 1578 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1579 return err;
1da177e4
LT
1580}
1581EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1582
1583
1584void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1585{
90e41bac
PB
1586#ifdef CONFIG_HOTPLUG_CPU
1587 int cpu;
1588#endif
1589
1da177e4
LT
1590 if (!governor)
1591 return;
1592
a7b422cd
KRW
1593 if (cpufreq_disabled())
1594 return;
1595
90e41bac
PB
1596#ifdef CONFIG_HOTPLUG_CPU
1597 for_each_present_cpu(cpu) {
1598 if (cpu_online(cpu))
1599 continue;
1600 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1601 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1602 }
1603#endif
1604
3fc54d37 1605 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1606 list_del(&governor->governor_list);
3fc54d37 1607 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1608 return;
1609}
1610EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1611
1612
1613
1614/*********************************************************************
1615 * POLICY INTERFACE *
1616 *********************************************************************/
1617
1618/**
1619 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1620 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1621 * is written
1da177e4
LT
1622 *
1623 * Reads the current cpufreq policy.
1624 */
1625int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1626{
1627 struct cpufreq_policy *cpu_policy;
1628 if (!policy)
1629 return -EINVAL;
1630
1631 cpu_policy = cpufreq_cpu_get(cpu);
1632 if (!cpu_policy)
1633 return -EINVAL;
1634
1da177e4 1635 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1636
1637 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1638 return 0;
1639}
1640EXPORT_SYMBOL(cpufreq_get_policy);
1641
1642
/*
 * __cpufreq_set_policy - apply a new policy to a CPU
 * data : current policy.
 * policy : policy to be set.
 *
 * Validates @policy against the driver and the policy notifier chain,
 * copies the resulting limits into @data and either forwards them to
 * the driver's ->setpolicy or (re)starts the configured governor.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	/* Reject ranges that do not overlap the currently active limits. */
	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1730
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Rebuild the request from the user-specified limits on a copy of
	 * the live policy. */
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
1785
dd184a01 1786static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1787 unsigned long action, void *hcpu)
1788{
1789 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1790 struct device *dev;
c32b6b8e 1791
8a25a2fd
KS
1792 dev = get_cpu_device(cpu);
1793 if (dev) {
c32b6b8e
AR
1794 switch (action) {
1795 case CPU_ONLINE:
8bb78442 1796 case CPU_ONLINE_FROZEN:
8a25a2fd 1797 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1798 break;
1799 case CPU_DOWN_PREPARE:
8bb78442 1800 case CPU_DOWN_PREPARE_FROZEN:
8a25a2fd 1801 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1802 break;
5a01f2e8 1803 case CPU_DOWN_FAILED:
8bb78442 1804 case CPU_DOWN_FAILED_FROZEN:
8a25a2fd 1805 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1806 break;
1807 }
1808 }
1809 return NOTIFY_OK;
1810}
1811
/* Hotplug notifier block; registered in cpufreq_register_driver(). */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
1815
1816/*********************************************************************
1817 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1818 *********************************************************************/
1819
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A driver must verify/init and either set policies or set targets. */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Claim the single global driver slot under the driver lock. */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* This triggers cpufreq_add_dev() for each online CPU. */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	/* Release the global driver slot again on failure. */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1891
1892
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* Only the driver that registered itself may unregister. */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Tears down the per-CPU interfaces and hotplug hook first. */
	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1920
1921static int __init cpufreq_core_init(void)
1922{
1923 int cpu;
1924
a7b422cd
KRW
1925 if (cpufreq_disabled())
1926 return -ENODEV;
1927
5a01f2e8 1928 for_each_possible_cpu(cpu) {
f1625066 1929 per_cpu(cpufreq_policy_cpu, cpu) = -1;
5a01f2e8
VP
1930 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1931 }
8aa84ad8 1932
8a25a2fd 1933 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
8aa84ad8 1934 BUG_ON(!cpufreq_global_kobject);
e00e56df 1935 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 1936
5a01f2e8
VP
1937 return 0;
1938}
5a01f2e8 1939core_initcall(cpufreq_core_init);
This page took 0.788693 seconds and 5 git commands to generate.