cpufreq: speedstep: remove unused speedstep_get_state
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
e00e56df 29#include <linux/syscore_ops.h>
5ff0a268 30#include <linux/tick.h>
6f4f2723
TR
31#include <trace/events/power.h>
32
1da177e4 33/**
cd878479 34 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
1c3d85dd 38static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 39static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
8414809c 40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
bb176f7d 41static DEFINE_RWLOCK(cpufreq_driver_lock);
6f1e4efd 42DEFINE_MUTEX(cpufreq_governor_lock);
c88a1f8b 43static LIST_HEAD(cpufreq_policy_list);
bb176f7d 44
084f3493
TR
45#ifdef CONFIG_HOTPLUG_CPU
46/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 47static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 48#endif
1da177e4 49
9c0ebcf7
VK
50static inline bool has_target(void)
51{
52 return cpufreq_driver->target_index || cpufreq_driver->target;
53}
54
6eed9404
VK
55/*
56 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
57 * sections
58 */
59static DECLARE_RWSEM(cpufreq_rwsem);
60
1da177e4 61/* internal prototypes */
29464f28
DJ
62static int __cpufreq_governor(struct cpufreq_policy *policy,
63 unsigned int event);
5a01f2e8 64static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 65static void handle_update(struct work_struct *work);
1da177e4
LT
66
67/**
32ee8c3e
DJ
68 * Two notifier lists: the "policy" list is involved in the
69 * validation process for a new CPU frequency policy; the
1da177e4
LT
70 * "transition" list for kernel code that needs to handle
71 * changes to devices when the CPU clock speed changes.
72 * The mutex locks both lists.
73 */
e041c683 74static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 75static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 76
74212ca4 77static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
78static int __init init_cpufreq_transition_notifier_list(void)
79{
80 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 81 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
82 return 0;
83}
b3438f82 84pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 85
a7b422cd 86static int off __read_mostly;
da584455 87static int cpufreq_disabled(void)
a7b422cd
KRW
88{
89 return off;
90}
91void disable_cpufreq(void)
92{
93 off = 1;
94}
1da177e4 95static LIST_HEAD(cpufreq_governor_list);
29464f28 96static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 97
4d5dcc42
VK
98bool have_governor_per_policy(void)
99{
0b981e70 100 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
4d5dcc42 101}
3f869d6d 102EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 103
944e9a03
VK
104struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
105{
106 if (have_governor_per_policy())
107 return &policy->kobj;
108 else
109 return cpufreq_global_kobject;
110}
111EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
112
72a4ce34
VK
113static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
114{
115 u64 idle_time;
116 u64 cur_wall_time;
117 u64 busy_time;
118
119 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
120
121 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
122 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
123 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
127
128 idle_time = cur_wall_time - busy_time;
129 if (wall)
130 *wall = cputime_to_usecs(cur_wall_time);
131
132 return cputime_to_usecs(idle_time);
133}
134
135u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
136{
137 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
138
139 if (idle_time == -1ULL)
140 return get_cpu_idle_time_jiffy(cpu, wall);
141 else if (!io_busy)
142 idle_time += get_cpu_iowait_time_us(cpu, wall);
143
144 return idle_time;
145}
146EXPORT_SYMBOL_GPL(get_cpu_idle_time);
147
70e9e778
VK
148/*
149 * This is a generic cpufreq init() routine which can be used by cpufreq
150 * drivers of SMP systems. It will do following:
151 * - validate & show freq table passed
152 * - set policies transition latency
153 * - policy->cpus with all possible CPUs
154 */
155int cpufreq_generic_init(struct cpufreq_policy *policy,
156 struct cpufreq_frequency_table *table,
157 unsigned int transition_latency)
158{
159 int ret;
160
161 ret = cpufreq_table_validate_and_show(policy, table);
162 if (ret) {
163 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
164 return ret;
165 }
166
167 policy->cpuinfo.transition_latency = transition_latency;
168
169 /*
170 * The driver only supports the SMP configuartion where all processors
171 * share the clock and voltage and clock.
172 */
173 cpumask_setall(policy->cpus);
174
175 return 0;
176}
177EXPORT_SYMBOL_GPL(cpufreq_generic_init);
178
6eed9404 179struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
1da177e4 180{
6eed9404 181 struct cpufreq_policy *policy = NULL;
1da177e4
LT
182 unsigned long flags;
183
6eed9404
VK
184 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
185 return NULL;
186
187 if (!down_read_trylock(&cpufreq_rwsem))
188 return NULL;
1da177e4
LT
189
190 /* get the cpufreq driver */
1c3d85dd 191 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 192
6eed9404
VK
193 if (cpufreq_driver) {
194 /* get the CPU */
195 policy = per_cpu(cpufreq_cpu_data, cpu);
196 if (policy)
197 kobject_get(&policy->kobj);
198 }
1da177e4 199
6eed9404 200 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 201
3a3e9e06 202 if (!policy)
6eed9404 203 up_read(&cpufreq_rwsem);
1da177e4 204
3a3e9e06 205 return policy;
a9144436 206}
1da177e4
LT
207EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
208
3a3e9e06 209void cpufreq_cpu_put(struct cpufreq_policy *policy)
1da177e4 210{
d5aaffa9
DB
211 if (cpufreq_disabled())
212 return;
213
6eed9404
VK
214 kobject_put(&policy->kobj);
215 up_read(&cpufreq_rwsem);
1da177e4
LT
216}
217EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
218
1da177e4
LT
219/*********************************************************************
220 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
221 *********************************************************************/
222
223/**
224 * adjust_jiffies - adjust the system "loops_per_jiffy"
225 *
226 * This function alters the system "loops_per_jiffy" for the clock
227 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 228 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
229 * per-CPU loops_per_jiffy value wherever possible.
230 */
231#ifndef CONFIG_SMP
232static unsigned long l_p_j_ref;
bb176f7d 233static unsigned int l_p_j_ref_freq;
1da177e4 234
858119e1 235static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
236{
237 if (ci->flags & CPUFREQ_CONST_LOOPS)
238 return;
239
240 if (!l_p_j_ref_freq) {
241 l_p_j_ref = loops_per_jiffy;
242 l_p_j_ref_freq = ci->old;
2d06d8c4 243 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 244 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 245 }
bb176f7d 246 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 247 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
248 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
249 ci->new);
2d06d8c4 250 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 251 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
252 }
253}
254#else
e08f5f5b
GS
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	/* On SMP the architecture maintains per-CPU loops_per_jiffy. */
}
1da177e4
LT
259#endif
260
0956df9c 261static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
b43a7ffb 262 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
263{
264 BUG_ON(irqs_disabled());
265
d5aaffa9
DB
266 if (cpufreq_disabled())
267 return;
268
1c3d85dd 269 freqs->flags = cpufreq_driver->flags;
2d06d8c4 270 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 271 state, freqs->new);
1da177e4 272
1da177e4 273 switch (state) {
e4472cb3 274
1da177e4 275 case CPUFREQ_PRECHANGE:
32ee8c3e 276 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
277 * which is not equal to what the cpufreq core thinks is
278 * "old frequency".
1da177e4 279 */
1c3d85dd 280 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
281 if ((policy) && (policy->cpu == freqs->cpu) &&
282 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 283 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
284 " %u, cpufreq assumed %u kHz.\n",
285 freqs->old, policy->cur);
286 freqs->old = policy->cur;
1da177e4
LT
287 }
288 }
b4dfdbb3 289 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 290 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
291 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
292 break;
e4472cb3 293
1da177e4
LT
294 case CPUFREQ_POSTCHANGE:
295 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 296 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 297 (unsigned long)freqs->cpu);
25e41933 298 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 299 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 300 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
301 if (likely(policy) && likely(policy->cpu == freqs->cpu))
302 policy->cur = freqs->new;
1da177e4
LT
303 break;
304 }
1da177e4 305}
bb176f7d 306
b43a7ffb
VK
307/**
308 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
309 * on frequency transition.
310 *
311 * This function calls the transition notifiers and the "adjust_jiffies"
312 * function. It is called twice on all CPU frequency changes that have
313 * external effects.
314 */
315void cpufreq_notify_transition(struct cpufreq_policy *policy,
316 struct cpufreq_freqs *freqs, unsigned int state)
317{
318 for_each_cpu(freqs->cpu, policy->cpus)
319 __cpufreq_notify_transition(policy, freqs, state);
320}
1da177e4
LT
321EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
322
f7ba3b41
VK
323/* Do post notifications when there are chances that transition has failed */
324void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
325 struct cpufreq_freqs *freqs, int transition_failed)
326{
327 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
328 if (!transition_failed)
329 return;
330
331 swap(freqs->old, freqs->new);
332 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
333 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
334}
335EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
336
1da177e4 337
1da177e4
LT
338/*********************************************************************
339 * SYSFS INTERFACE *
340 *********************************************************************/
341
3bcb09a3
JF
342static struct cpufreq_governor *__find_governor(const char *str_governor)
343{
344 struct cpufreq_governor *t;
345
346 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 347 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
348 return t;
349
350 return NULL;
351}
352
1da177e4
LT
353/**
354 * cpufreq_parse_governor - parse a governor string
355 */
905d77cd 356static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
357 struct cpufreq_governor **governor)
358{
3bcb09a3 359 int err = -EINVAL;
1c3d85dd
RW
360
361 if (!cpufreq_driver)
3bcb09a3
JF
362 goto out;
363
1c3d85dd 364 if (cpufreq_driver->setpolicy) {
1da177e4
LT
365 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
366 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 367 err = 0;
e08f5f5b
GS
368 } else if (!strnicmp(str_governor, "powersave",
369 CPUFREQ_NAME_LEN)) {
1da177e4 370 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 371 err = 0;
1da177e4 372 }
9c0ebcf7 373 } else if (has_target()) {
1da177e4 374 struct cpufreq_governor *t;
3bcb09a3 375
3fc54d37 376 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
377
378 t = __find_governor(str_governor);
379
ea714970 380 if (t == NULL) {
1a8e1463 381 int ret;
ea714970 382
1a8e1463
KC
383 mutex_unlock(&cpufreq_governor_mutex);
384 ret = request_module("cpufreq_%s", str_governor);
385 mutex_lock(&cpufreq_governor_mutex);
ea714970 386
1a8e1463
KC
387 if (ret == 0)
388 t = __find_governor(str_governor);
ea714970
JF
389 }
390
3bcb09a3
JF
391 if (t != NULL) {
392 *governor = t;
393 err = 0;
1da177e4 394 }
3bcb09a3 395
3fc54d37 396 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 397 }
29464f28 398out:
3bcb09a3 399 return err;
1da177e4 400}
1da177e4 401
1da177e4 402/**
e08f5f5b
GS
403 * cpufreq_per_cpu_attr_read() / show_##file_name() -
404 * print out cpufreq information
1da177e4
LT
405 *
406 * Write out information from cpufreq_driver->policy[cpu]; object must be
407 * "unsigned int".
408 */
409
32ee8c3e
DJ
410#define show_one(file_name, object) \
411static ssize_t show_##file_name \
905d77cd 412(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 413{ \
29464f28 414 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
415}
416
417show_one(cpuinfo_min_freq, cpuinfo.min_freq);
418show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 419show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
420show_one(scaling_min_freq, min);
421show_one(scaling_max_freq, max);
422show_one(scaling_cur_freq, cur);
423
037ce839 424static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 425 struct cpufreq_policy *new_policy);
7970e08b 426
1da177e4
LT
427/**
428 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
429 */
430#define store_one(file_name, object) \
431static ssize_t store_##file_name \
905d77cd 432(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 433{ \
5136fa56 434 int ret; \
1da177e4
LT
435 struct cpufreq_policy new_policy; \
436 \
437 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
438 if (ret) \
439 return -EINVAL; \
440 \
29464f28 441 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
442 if (ret != 1) \
443 return -EINVAL; \
444 \
037ce839 445 ret = cpufreq_set_policy(policy, &new_policy); \
7970e08b 446 policy->user_policy.object = policy->object; \
1da177e4
LT
447 \
448 return ret ? ret : count; \
449}
450
29464f28
DJ
451store_one(scaling_min_freq, min);
452store_one(scaling_max_freq, max);
1da177e4
LT
453
454/**
455 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
456 */
905d77cd
DJ
457static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
458 char *buf)
1da177e4 459{
5a01f2e8 460 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
461 if (!cur_freq)
462 return sprintf(buf, "<unknown>");
463 return sprintf(buf, "%u\n", cur_freq);
464}
465
1da177e4
LT
466/**
467 * show_scaling_governor - show the current policy for the specified CPU
468 */
905d77cd 469static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 470{
29464f28 471 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
472 return sprintf(buf, "powersave\n");
473 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
474 return sprintf(buf, "performance\n");
475 else if (policy->governor)
4b972f0b 476 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 477 policy->governor->name);
1da177e4
LT
478 return -EINVAL;
479}
480
1da177e4
LT
481/**
482 * store_scaling_governor - store policy for the specified CPU
483 */
905d77cd
DJ
484static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
485 const char *buf, size_t count)
1da177e4 486{
5136fa56 487 int ret;
1da177e4
LT
488 char str_governor[16];
489 struct cpufreq_policy new_policy;
490
491 ret = cpufreq_get_policy(&new_policy, policy->cpu);
492 if (ret)
493 return ret;
494
29464f28 495 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
496 if (ret != 1)
497 return -EINVAL;
498
e08f5f5b
GS
499 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
500 &new_policy.governor))
1da177e4
LT
501 return -EINVAL;
502
037ce839 503 ret = cpufreq_set_policy(policy, &new_policy);
7970e08b
TR
504
505 policy->user_policy.policy = policy->policy;
506 policy->user_policy.governor = policy->governor;
7970e08b 507
e08f5f5b
GS
508 if (ret)
509 return ret;
510 else
511 return count;
1da177e4
LT
512}
513
514/**
515 * show_scaling_driver - show the cpufreq driver currently loaded
516 */
905d77cd 517static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 518{
1c3d85dd 519 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
520}
521
522/**
523 * show_scaling_available_governors - show the available CPUfreq governors
524 */
905d77cd
DJ
525static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
526 char *buf)
1da177e4
LT
527{
528 ssize_t i = 0;
529 struct cpufreq_governor *t;
530
9c0ebcf7 531 if (!has_target()) {
1da177e4
LT
532 i += sprintf(buf, "performance powersave");
533 goto out;
534 }
535
536 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
537 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
538 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 539 goto out;
4b972f0b 540 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 541 }
7d5e350f 542out:
1da177e4
LT
543 i += sprintf(&buf[i], "\n");
544 return i;
545}
e8628dd0 546
f4fd3797 547ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
548{
549 ssize_t i = 0;
550 unsigned int cpu;
551
835481d9 552 for_each_cpu(cpu, mask) {
1da177e4
LT
553 if (i)
554 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
555 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
556 if (i >= (PAGE_SIZE - 5))
29464f28 557 break;
1da177e4
LT
558 }
559 i += sprintf(&buf[i], "\n");
560 return i;
561}
f4fd3797 562EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 563
e8628dd0
DW
564/**
565 * show_related_cpus - show the CPUs affected by each transition even if
566 * hw coordination is in use
567 */
568static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
569{
f4fd3797 570 return cpufreq_show_cpus(policy->related_cpus, buf);
e8628dd0
DW
571}
572
573/**
574 * show_affected_cpus - show the CPUs affected by each transition
575 */
576static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
577{
f4fd3797 578 return cpufreq_show_cpus(policy->cpus, buf);
e8628dd0
DW
579}
580
9e76988e 581static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 582 const char *buf, size_t count)
9e76988e
VP
583{
584 unsigned int freq = 0;
585 unsigned int ret;
586
879000f9 587 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
588 return -EINVAL;
589
590 ret = sscanf(buf, "%u", &freq);
591 if (ret != 1)
592 return -EINVAL;
593
594 policy->governor->store_setspeed(policy, freq);
595
596 return count;
597}
598
599static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
600{
879000f9 601 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
602 return sprintf(buf, "<unsupported>\n");
603
604 return policy->governor->show_setspeed(policy, buf);
605}
1da177e4 606
e2f74f35 607/**
8bf1ac72 608 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
609 */
610static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
611{
612 unsigned int limit;
613 int ret;
1c3d85dd
RW
614 if (cpufreq_driver->bios_limit) {
615 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
616 if (!ret)
617 return sprintf(buf, "%u\n", limit);
618 }
619 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
620}
621
6dad2a29
BP
622cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
623cpufreq_freq_attr_ro(cpuinfo_min_freq);
624cpufreq_freq_attr_ro(cpuinfo_max_freq);
625cpufreq_freq_attr_ro(cpuinfo_transition_latency);
626cpufreq_freq_attr_ro(scaling_available_governors);
627cpufreq_freq_attr_ro(scaling_driver);
628cpufreq_freq_attr_ro(scaling_cur_freq);
629cpufreq_freq_attr_ro(bios_limit);
630cpufreq_freq_attr_ro(related_cpus);
631cpufreq_freq_attr_ro(affected_cpus);
632cpufreq_freq_attr_rw(scaling_min_freq);
633cpufreq_freq_attr_rw(scaling_max_freq);
634cpufreq_freq_attr_rw(scaling_governor);
635cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 636
905d77cd 637static struct attribute *default_attrs[] = {
1da177e4
LT
638 &cpuinfo_min_freq.attr,
639 &cpuinfo_max_freq.attr,
ed129784 640 &cpuinfo_transition_latency.attr,
1da177e4
LT
641 &scaling_min_freq.attr,
642 &scaling_max_freq.attr,
643 &affected_cpus.attr,
e8628dd0 644 &related_cpus.attr,
1da177e4
LT
645 &scaling_governor.attr,
646 &scaling_driver.attr,
647 &scaling_available_governors.attr,
9e76988e 648 &scaling_setspeed.attr,
1da177e4
LT
649 NULL
650};
651
29464f28
DJ
652#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
653#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 654
29464f28 655static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 656{
905d77cd
DJ
657 struct cpufreq_policy *policy = to_policy(kobj);
658 struct freq_attr *fattr = to_attr(attr);
1b750e3b 659 ssize_t ret;
6eed9404
VK
660
661 if (!down_read_trylock(&cpufreq_rwsem))
1b750e3b 662 return -EINVAL;
5a01f2e8 663
ad7722da 664 down_read(&policy->rwsem);
5a01f2e8 665
e08f5f5b
GS
666 if (fattr->show)
667 ret = fattr->show(policy, buf);
668 else
669 ret = -EIO;
670
ad7722da 671 up_read(&policy->rwsem);
6eed9404 672 up_read(&cpufreq_rwsem);
1b750e3b 673
1da177e4
LT
674 return ret;
675}
676
905d77cd
DJ
677static ssize_t store(struct kobject *kobj, struct attribute *attr,
678 const char *buf, size_t count)
1da177e4 679{
905d77cd
DJ
680 struct cpufreq_policy *policy = to_policy(kobj);
681 struct freq_attr *fattr = to_attr(attr);
a07530b4 682 ssize_t ret = -EINVAL;
6eed9404 683
4f750c93
SB
684 get_online_cpus();
685
686 if (!cpu_online(policy->cpu))
687 goto unlock;
688
6eed9404 689 if (!down_read_trylock(&cpufreq_rwsem))
4f750c93 690 goto unlock;
5a01f2e8 691
ad7722da 692 down_write(&policy->rwsem);
5a01f2e8 693
e08f5f5b
GS
694 if (fattr->store)
695 ret = fattr->store(policy, buf, count);
696 else
697 ret = -EIO;
698
ad7722da 699 up_write(&policy->rwsem);
6eed9404 700
6eed9404 701 up_read(&cpufreq_rwsem);
4f750c93
SB
702unlock:
703 put_online_cpus();
704
1da177e4
LT
705 return ret;
706}
707
905d77cd 708static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 709{
905d77cd 710 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 711 pr_debug("last reference is dropped\n");
1da177e4
LT
712 complete(&policy->kobj_unregister);
713}
714
52cf25d0 715static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
716 .show = show,
717 .store = store,
718};
719
720static struct kobj_type ktype_cpufreq = {
721 .sysfs_ops = &sysfs_ops,
722 .default_attrs = default_attrs,
723 .release = cpufreq_sysfs_release,
724};
725
2361be23
VK
726struct kobject *cpufreq_global_kobject;
727EXPORT_SYMBOL(cpufreq_global_kobject);
728
729static int cpufreq_global_kobject_usage;
730
731int cpufreq_get_global_kobject(void)
732{
733 if (!cpufreq_global_kobject_usage++)
734 return kobject_add(cpufreq_global_kobject,
735 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
736
737 return 0;
738}
739EXPORT_SYMBOL(cpufreq_get_global_kobject);
740
741void cpufreq_put_global_kobject(void)
742{
743 if (!--cpufreq_global_kobject_usage)
744 kobject_del(cpufreq_global_kobject);
745}
746EXPORT_SYMBOL(cpufreq_put_global_kobject);
747
748int cpufreq_sysfs_create_file(const struct attribute *attr)
749{
750 int ret = cpufreq_get_global_kobject();
751
752 if (!ret) {
753 ret = sysfs_create_file(cpufreq_global_kobject, attr);
754 if (ret)
755 cpufreq_put_global_kobject();
756 }
757
758 return ret;
759}
760EXPORT_SYMBOL(cpufreq_sysfs_create_file);
761
762void cpufreq_sysfs_remove_file(const struct attribute *attr)
763{
764 sysfs_remove_file(cpufreq_global_kobject, attr);
765 cpufreq_put_global_kobject();
766}
767EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
768
19d6f7ec 769/* symlink affected CPUs */
308b60e7 770static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
19d6f7ec
DJ
771{
772 unsigned int j;
773 int ret = 0;
774
775 for_each_cpu(j, policy->cpus) {
8a25a2fd 776 struct device *cpu_dev;
19d6f7ec 777
308b60e7 778 if (j == policy->cpu)
19d6f7ec 779 continue;
19d6f7ec 780
e8fdde10 781 pr_debug("Adding link for CPU: %u\n", j);
8a25a2fd
KS
782 cpu_dev = get_cpu_device(j);
783 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec 784 "cpufreq");
71c3461e
RW
785 if (ret)
786 break;
19d6f7ec
DJ
787 }
788 return ret;
789}
790
308b60e7 791static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
8a25a2fd 792 struct device *dev)
909a694e
DJ
793{
794 struct freq_attr **drv_attr;
909a694e 795 int ret = 0;
909a694e
DJ
796
797 /* prepare interface data */
798 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 799 &dev->kobj, "cpufreq");
909a694e
DJ
800 if (ret)
801 return ret;
802
803 /* set up files for this cpu device */
1c3d85dd 804 drv_attr = cpufreq_driver->attr;
909a694e
DJ
805 while ((drv_attr) && (*drv_attr)) {
806 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
807 if (ret)
1c3d85dd 808 goto err_out_kobj_put;
909a694e
DJ
809 drv_attr++;
810 }
1c3d85dd 811 if (cpufreq_driver->get) {
909a694e
DJ
812 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
813 if (ret)
1c3d85dd 814 goto err_out_kobj_put;
909a694e 815 }
9c0ebcf7 816 if (has_target()) {
909a694e
DJ
817 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
818 if (ret)
1c3d85dd 819 goto err_out_kobj_put;
909a694e 820 }
1c3d85dd 821 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
822 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
823 if (ret)
1c3d85dd 824 goto err_out_kobj_put;
e2f74f35 825 }
909a694e 826
308b60e7 827 ret = cpufreq_add_dev_symlink(policy);
ecf7e461
DJ
828 if (ret)
829 goto err_out_kobj_put;
830
e18f1682
SB
831 return ret;
832
833err_out_kobj_put:
834 kobject_put(&policy->kobj);
835 wait_for_completion(&policy->kobj_unregister);
836 return ret;
837}
838
839static void cpufreq_init_policy(struct cpufreq_policy *policy)
840{
841 struct cpufreq_policy new_policy;
842 int ret = 0;
843
d5b73cd8 844 memcpy(&new_policy, policy, sizeof(*policy));
a27a9ab7
JB
845
846 /* Use the default policy if its valid. */
847 if (cpufreq_driver->setpolicy)
848 cpufreq_parse_governor(policy->governor->name,
849 &new_policy.policy, NULL);
850
037ce839 851 /* assure that the starting sequence is run in cpufreq_set_policy */
ecf7e461
DJ
852 policy->governor = NULL;
853
854 /* set default policy */
037ce839 855 ret = cpufreq_set_policy(policy, &new_policy);
ecf7e461 856 if (ret) {
2d06d8c4 857 pr_debug("setting policy failed\n");
1c3d85dd
RW
858 if (cpufreq_driver->exit)
859 cpufreq_driver->exit(policy);
ecf7e461 860 }
909a694e
DJ
861}
862
fcf80582 863#ifdef CONFIG_HOTPLUG_CPU
d8d3b471 864static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
42f921a6 865 unsigned int cpu, struct device *dev)
fcf80582 866{
9c0ebcf7 867 int ret = 0;
fcf80582
VK
868 unsigned long flags;
869
9c0ebcf7 870 if (has_target()) {
3de9bdeb
VK
871 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
872 if (ret) {
873 pr_err("%s: Failed to stop governor\n", __func__);
874 return ret;
875 }
876 }
fcf80582 877
ad7722da 878 down_write(&policy->rwsem);
2eaa3e2d 879
0d1857a1 880 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 881
fcf80582
VK
882 cpumask_set_cpu(cpu, policy->cpus);
883 per_cpu(cpufreq_cpu_data, cpu) = policy;
0d1857a1 884 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 885
ad7722da 886 up_write(&policy->rwsem);
2eaa3e2d 887
9c0ebcf7 888 if (has_target()) {
3de9bdeb
VK
889 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
890 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
891 pr_err("%s: Failed to start governor\n", __func__);
892 return ret;
893 }
820c6ca2 894 }
fcf80582 895
42f921a6 896 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
fcf80582
VK
897}
898#endif
1da177e4 899
8414809c
SB
900static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
901{
902 struct cpufreq_policy *policy;
903 unsigned long flags;
904
44871c9c 905 read_lock_irqsave(&cpufreq_driver_lock, flags);
8414809c
SB
906
907 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
908
44871c9c 909 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
8414809c
SB
910
911 return policy;
912}
913
e9698cc5
SB
914static struct cpufreq_policy *cpufreq_policy_alloc(void)
915{
916 struct cpufreq_policy *policy;
917
918 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
919 if (!policy)
920 return NULL;
921
922 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
923 goto err_free_policy;
924
925 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
926 goto err_free_cpumask;
927
c88a1f8b 928 INIT_LIST_HEAD(&policy->policy_list);
ad7722da 929 init_rwsem(&policy->rwsem);
930
e9698cc5
SB
931 return policy;
932
933err_free_cpumask:
934 free_cpumask_var(policy->cpus);
935err_free_policy:
936 kfree(policy);
937
938 return NULL;
939}
940
42f921a6
VK
941static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
942{
943 struct kobject *kobj;
944 struct completion *cmp;
945
946 down_read(&policy->rwsem);
947 kobj = &policy->kobj;
948 cmp = &policy->kobj_unregister;
949 up_read(&policy->rwsem);
950 kobject_put(kobj);
951
952 /*
953 * We need to make sure that the underlying kobj is
954 * actually not referenced anymore by anybody before we
955 * proceed with unloading.
956 */
957 pr_debug("waiting for dropping of refcount\n");
958 wait_for_completion(cmp);
959 pr_debug("wait complete\n");
960}
961
e9698cc5
SB
962static void cpufreq_policy_free(struct cpufreq_policy *policy)
963{
964 free_cpumask_var(policy->related_cpus);
965 free_cpumask_var(policy->cpus);
966 kfree(policy);
967}
968
0d66b91e
SB
969static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
970{
99ec899e 971 if (WARN_ON(cpu == policy->cpu))
cb38ed5c
SB
972 return;
973
ad7722da 974 down_write(&policy->rwsem);
8efd5765 975
0d66b91e
SB
976 policy->last_cpu = policy->cpu;
977 policy->cpu = cpu;
978
ad7722da 979 up_write(&policy->rwsem);
8efd5765 980
0d66b91e 981 cpufreq_frequency_table_update_policy_cpu(policy);
0d66b91e
SB
982 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
983 CPUFREQ_UPDATE_POLICY_CPU, policy);
984}
985
a82fab29
SB
/*
 * __cpufreq_add_dev - bring up cpufreq management for a CPU device.
 * @dev: the CPU device being added.
 * @sif: subsys interface (unused here, part of the subsys callback ABI).
 * @frozen: true for a light-weight re-init on resume, where a previously
 *          saved policy is restored instead of allocating a fresh one.
 *
 * Returns 0 on success (including the benign "nothing to do" cases) or a
 * negative errno on failure.
 */
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
			     bool frozen)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_policy *tpolicy;
	struct cpufreq_governor *gov;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	/* Bail out early if the cpufreq driver is being torn down. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			/* Sibling already manages this CPU: just link it. */
			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		frozen = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (frozen && cpu != policy->cpu)
		update_policy_cpu(policy, cpu);
	else
		policy->cpu = cpu;

	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	if (cpufreq_driver->get) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * frequency table present with cpufreq core. In such cases CPU might be
	 * unstable if it has to run on that frequency for long duration of time
	 * and so its better to set it to a frequency which is specified in
	 * freq-table. This also makes cpufreq stats inconsistent as
	 * cpufreq-stats would fail to register because current frequency of CPU
	 * isn't found in freq-table.
	 *
	 * Because we don't want this change to effect boot process badly, we go
	 * for the next freq which is >= policy->cur ('cur' must be set by now,
	 * otherwise we will end up setting freq to lowest of the table as 'cur'
	 * is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	/* On fresh init, snapshot driver limits as the user policy. */
	if (!frozen) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* Re-install the governor this CPU used before it was unplugged. */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	/* Publish the policy for all CPUs it manages. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!frozen) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	if (!frozen) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	up_read(&cpufreq_rwsem);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

err_get_freq:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	if (frozen) {
		/* Do not leave stale fallback data behind. */
		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
		cpufreq_policy_put_kobj(policy);
	}
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
1193
a82fab29
SB
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.  Thin wrapper that performs
 * a full (non-frozen) initialization via __cpufreq_add_dev().
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return __cpufreq_add_dev(dev, sif, false);
}
1207
3a3e9e06 1208static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
42f921a6 1209 unsigned int old_cpu)
f9ba680d
SB
1210{
1211 struct device *cpu_dev;
f9ba680d
SB
1212 int ret;
1213
1214 /* first sibling now owns the new sysfs dir */
9c8f1ee4 1215 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
a82fab29 1216
f9ba680d 1217 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
3a3e9e06 1218 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
f9ba680d
SB
1219 if (ret) {
1220 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1221
ad7722da 1222 down_write(&policy->rwsem);
3a3e9e06 1223 cpumask_set_cpu(old_cpu, policy->cpus);
ad7722da 1224 up_write(&policy->rwsem);
f9ba680d 1225
3a3e9e06 1226 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
f9ba680d
SB
1227 "cpufreq");
1228
1229 return -EINVAL;
1230 }
1231
1232 return cpu_dev->id;
1233}
1234
cedb70af
SB
/*
 * First half of CPU removal: stop the governor and, if the departing CPU
 * owns the policy's sysfs directory, nominate a sibling to take it over.
 * The actual teardown happens in __cpufreq_remove_dev_finish().
 *
 * @frozen: true for light-weight teardown (suspend path); the policy is
 *          stashed in cpufreq_cpu_data_fallback for later restore.
 */
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif,
					bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu, ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (frozen)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so it can be restored on re-plug. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
#endif

	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		/* Not the sysfs owner: just drop this CPU's symlink. */
		if (!frozen)
			sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Owner leaving but siblings remain: re-home the policy. */
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
		if (new_cpu >= 0) {
			update_policy_cpu(policy, new_cpu);

			if (!frozen) {
				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
					 __func__, new_cpu, cpu);
			}
		}
	}

	return 0;
}
1296
/*
 * Second half of CPU removal: drop the CPU from the policy and either free
 * the policy (last CPU) or restart the governor for the remaining CPUs.
 *
 * @frozen: light-weight teardown — keep the kobject and policy memory so a
 *          subsequent frozen __cpufreq_add_dev() can restore them; the
 *          driver ->exit() still runs (see comment below).
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		if (!frozen)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!frozen)
			cpufreq_policy_free(policy);
	} else {
		/* Other CPUs remain: restart the governor for them. */
		if (has_target()) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
					(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",
				       __func__);
				return ret;
			}
		}
	}

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	return 0;
}
1366
cedb70af 1367/**
27a862e9 1368 * cpufreq_remove_dev - remove a CPU device
cedb70af
SB
1369 *
1370 * Removes the cpufreq interface for a CPU device.
cedb70af 1371 */
8a25a2fd 1372static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1373{
8a25a2fd 1374 unsigned int cpu = dev->id;
27a862e9 1375 int ret;
ec28297a
VP
1376
1377 if (cpu_is_offline(cpu))
1378 return 0;
1379
27a862e9
VK
1380 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1381
1382 if (!ret)
1383 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1384
1385 return ret;
5a01f2e8
VP
1386}
1387
65f27f38 1388static void handle_update(struct work_struct *work)
1da177e4 1389{
65f27f38
DH
1390 struct cpufreq_policy *policy =
1391 container_of(work, struct cpufreq_policy, update);
1392 unsigned int cpu = policy->cpu;
2d06d8c4 1393 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1394 cpufreq_update_policy(cpu);
1395}
1396
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 *	in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Fire both transition phases so listeners (loops_per_jiffy scaling,
	 * stats, ...) re-sync their cached idea of the frequency.
	 */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
1427
32ee8c3e 1428/**
4ab70df4 1429 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1430 * @cpu: CPU number
1431 *
1432 * This is the last known freq, without actually getting it from the driver.
1433 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1434 */
1435unsigned int cpufreq_quick_get(unsigned int cpu)
1436{
9e21ba8b 1437 struct cpufreq_policy *policy;
e08f5f5b 1438 unsigned int ret_freq = 0;
95235ca2 1439
1c3d85dd
RW
1440 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1441 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1442
1443 policy = cpufreq_cpu_get(cpu);
95235ca2 1444 if (policy) {
e08f5f5b 1445 ret_freq = policy->cur;
95235ca2
VP
1446 cpufreq_cpu_put(policy);
1447 }
1448
4d34a67d 1449 return ret_freq;
95235ca2
VP
1450}
1451EXPORT_SYMBOL(cpufreq_quick_get);
1452
3d737108
JB
1453/**
1454 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1455 * @cpu: CPU number
1456 *
1457 * Just return the max possible frequency for a given CPU.
1458 */
1459unsigned int cpufreq_quick_get_max(unsigned int cpu)
1460{
1461 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1462 unsigned int ret_freq = 0;
1463
1464 if (policy) {
1465 ret_freq = policy->max;
1466 cpufreq_cpu_put(policy);
1467 }
1468
1469 return ret_freq;
1470}
1471EXPORT_SYMBOL(cpufreq_quick_get_max);
1472
/*
 * Read the current frequency of @cpu from the driver and, for drivers whose
 * loops are not frequency-invariant, reconcile any mismatch with the cached
 * policy->cur by notifying and scheduling a policy update.
 *
 * NOTE(review): callers appear to take policy->rwsem (see cpufreq_get());
 * this helper itself takes no locks — confirm before adding new callers.
 */
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			/* handle_update() will call cpufreq_update_policy() */
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1495
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	/*
	 * NOTE(review): -ENOENT is returned through an unsigned type here,
	 * so callers see a very large frequency value — confirm callers
	 * handle this (or treat it as "no cpufreq support").
	 */
	if (cpufreq_disabled() || !cpufreq_driver)
		return -ENOENT;

	BUG_ON(!policy);

	/* Driver is being unregistered: report 0 rather than block. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	down_read(&policy->rwsem);

	ret_freq = __cpufreq_get(cpu);

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
1525
8a25a2fd
KS
/*
 * Glue between the cpufreq core and the generic "cpu" device subsystem:
 * add_dev/remove_dev run as CPU devices appear on and leave the bus.
 */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1532
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 *
 * Invokes the driver's ->suspend() callback (if any) for the boot CPU's
 * policy.  Returns the callback's result, or 0 when there is nothing to do.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", policy->cpu);
	}

	/* Balance the reference taken by cpufreq_cpu_get() above. */
	cpufreq_cpu_put(policy);
	return ret;
}
1563
/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *     restored. It will verify that the current freq is in sync with
 *     what we believe it to be. This is a bit later than when it
 *     should be, but nonethteless it's better than calling
 *     cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU. The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", policy->cpu);
			/* Skip the policy re-check but still drop our ref. */
			goto fail;
		}
	}

	/* Defer the freq re-sync to process context (see header comment). */
	schedule_work(&policy->update);

fail:
	cpufreq_cpu_put(policy);
}
1605
e00e56df
RW
/*
 * Syscore hooks run on the last online (boot) CPU with interrupts disabled
 * during system suspend/resume.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
1610
9d95046e
BP
1611/**
1612 * cpufreq_get_current_driver - return current driver's name
1613 *
1614 * Return the name string of the currently loaded cpufreq driver
1615 * or NULL, if none.
1616 */
1617const char *cpufreq_get_current_driver(void)
1618{
1c3d85dd
RW
1619 if (cpufreq_driver)
1620 return cpufreq_driver->name;
1621
1622 return NULL;
9d95046e
BP
1623}
1624EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1625
1626/*********************************************************************
1627 * NOTIFIER LISTS INTERFACE *
1628 *********************************************************************/
1629
1630/**
1631 * cpufreq_register_notifier - register a driver with cpufreq
1632 * @nb: notifier function to register
1633 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1634 *
32ee8c3e 1635 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1636 * are notified about clock rate changes (once before and once after
1637 * the transition), or a list of drivers that are notified about
1638 * changes in cpufreq policy.
1639 *
1640 * This function may sleep, and has the same return conditions as
e041c683 1641 * blocking_notifier_chain_register.
1da177e4
LT
1642 */
1643int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1644{
1645 int ret;
1646
d5aaffa9
DB
1647 if (cpufreq_disabled())
1648 return -EINVAL;
1649
74212ca4
CEB
1650 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1651
1da177e4
LT
1652 switch (list) {
1653 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1654 ret = srcu_notifier_chain_register(
e041c683 1655 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1656 break;
1657 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1658 ret = blocking_notifier_chain_register(
1659 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1660 break;
1661 default:
1662 ret = -EINVAL;
1663 }
1da177e4
LT
1664
1665 return ret;
1666}
1667EXPORT_SYMBOL(cpufreq_register_notifier);
1668
1da177e4
LT
1669/**
1670 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1671 * @nb: notifier block to be unregistered
bb176f7d 1672 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1673 *
1674 * Remove a driver from the CPU frequency notifier list.
1675 *
1676 * This function may sleep, and has the same return conditions as
e041c683 1677 * blocking_notifier_chain_unregister.
1da177e4
LT
1678 */
1679int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1680{
1681 int ret;
1682
d5aaffa9
DB
1683 if (cpufreq_disabled())
1684 return -EINVAL;
1685
1da177e4
LT
1686 switch (list) {
1687 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1688 ret = srcu_notifier_chain_unregister(
e041c683 1689 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1690 break;
1691 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1692 ret = blocking_notifier_chain_unregister(
1693 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1694 break;
1695 default:
1696 ret = -EINVAL;
1697 }
1da177e4
LT
1698
1699 return ret;
1700}
1701EXPORT_SYMBOL(cpufreq_unregister_notifier);
1702
1703
1704/*********************************************************************
1705 * GOVERNORS *
1706 *********************************************************************/
1707
1da177e4
LT
/*
 * __cpufreq_driver_target - ask the driver to switch @policy to @target_freq.
 * @policy: policy to change; target is clamped to [policy->min, policy->max].
 * @target_freq: desired frequency in kHz.
 * @relation: CPUFREQ_RELATION_* rounding hint for table-based drivers.
 *
 * Dispatches to ->target() or, for table-driven drivers, resolves the table
 * index and wraps ->target_index() in PRE/POSTCHANGE notifications (unless
 * the driver notifies asynchronously itself).
 *
 * NOTE(review): callers appear to hold policy->rwsem (see
 * cpufreq_driver_target()) — confirm before adding lock-free callers.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		struct cpufreq_freqs freqs;
		bool notify;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		/* Already at the resolved frequency: nothing to do. */
		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		/* Async-notification drivers send PRE/POST themselves. */
		notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);

		if (notify) {
			freqs.old = policy->cur;
			freqs.new = freq_table[index].frequency;
			freqs.flags = 0;

			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
					__func__, policy->cpu, freqs.old,
					freqs.new);

			cpufreq_notify_transition(policy, &freqs,
					CPUFREQ_PRECHANGE);
		}

		retval = cpufreq_driver->target_index(policy, index);
		if (retval)
			pr_err("%s: Failed to change cpu frequency: %d\n",
					__func__, retval);

		/* POSTCHANGE also reverts state on failure (retval != 0). */
		if (notify)
			cpufreq_notify_post_transition(policy, &freqs, retval);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1790
1da177e4
LT
1791int cpufreq_driver_target(struct cpufreq_policy *policy,
1792 unsigned int target_freq,
1793 unsigned int relation)
1794{
f1829e4a 1795 int ret = -EINVAL;
1da177e4 1796
ad7722da 1797 down_write(&policy->rwsem);
1da177e4
LT
1798
1799 ret = __cpufreq_driver_target(policy, target_freq, relation);
1800
ad7722da 1801 up_write(&policy->rwsem);
1da177e4 1802
1da177e4
LT
1803 return ret;
1804}
1805EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1806
153d7f3f 1807/*
153d7f3f
AV
1808 * when "event" is CPUFREQ_GOV_LIMITS
1809 */
1da177e4 1810
e08f5f5b
GS
/*
 * __cpufreq_governor - deliver a governor event for @policy.
 * @event: CPUFREQ_GOV_* event (POLICY_INIT/EXIT, START, STOP, LIMITS).
 *
 * Falls back to the performance governor when the configured governor's
 * max_transition_latency is exceeded by the hardware, tracks the governor
 * module refcount across INIT/EXIT, and keeps policy->governor_enabled
 * consistent (rejecting double START, or STOP/LIMITS while stopped).
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module for the lifetime of the policy binding. */
	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);

	/* Reject events that don't match the current enabled state. */
	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	/* Drop the module ref on failed INIT or successful EXIT. */
	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
1886
1da177e4
LT
1887int cpufreq_register_governor(struct cpufreq_governor *governor)
1888{
3bcb09a3 1889 int err;
1da177e4
LT
1890
1891 if (!governor)
1892 return -EINVAL;
1893
a7b422cd
KRW
1894 if (cpufreq_disabled())
1895 return -ENODEV;
1896
3fc54d37 1897 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1898
b394058f 1899 governor->initialized = 0;
3bcb09a3
JF
1900 err = -EBUSY;
1901 if (__find_governor(governor->name) == NULL) {
1902 err = 0;
1903 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1904 }
1da177e4 1905
32ee8c3e 1906 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1907 return err;
1da177e4
LT
1908}
1909EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1910
1da177e4
LT
/*
 * Unregister @governor from the cpufreq core.  Also clears any saved
 * per-CPU governor name referencing it for currently-offline CPUs, so a
 * later CPU online cannot try to restore a governor that is gone.
 */
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	/* Only offline CPUs keep a saved governor name to scrub. */
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1938
1939
1da177e4
LT
1940/*********************************************************************
1941 * POLICY INTERFACE *
1942 *********************************************************************/
1943
1944/**
1945 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1946 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1947 * is written
1da177e4
LT
1948 *
1949 * Reads the current cpufreq policy.
1950 */
1951int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1952{
1953 struct cpufreq_policy *cpu_policy;
1954 if (!policy)
1955 return -EINVAL;
1956
1957 cpu_policy = cpufreq_cpu_get(cpu);
1958 if (!cpu_policy)
1959 return -EINVAL;
1960
d5b73cd8 1961 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
1962
1963 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1964 return 0;
1965}
1966EXPORT_SYMBOL(cpufreq_get_policy);
1967
/*
 * cpufreq_set_policy - apply a validated policy to a CPU
 * @policy: current policy, updated in place.
 * @new_policy: policy to be set; limits are verified/adjusted first.
 *
 * NOTE(review): callers appear to hold policy->rwsem for writing (see
 * cpufreq_update_policy); this function temporarily drops and re-takes
 * it around GOV_POLICY_EXIT — callers must tolerate that window.
 * Returns 0 on success or a negative errno.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy)
{
	/* failed stays 1 unless the new governor both inits AND starts */
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
		new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* Reject ranges that do not overlap the currently allowed range. */
	if (new_policy->min > policy->max || new_policy->max < policy->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one (notifiers may have adjusted it)
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		/* Driver manages frequency itself; just hand it the policy. */
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(new_policy);
	} else {
		if (new_policy->governor != policy->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = policy->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (policy->governor) {
				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
				/* rwsem dropped so POLICY_EXIT can take its own locks */
				up_write(&policy->rwsem);
				__cpufreq_governor(policy,
						CPUFREQ_GOV_POLICY_EXIT);
				down_write(&policy->rwsem);
			}

			/* start new governor */
			policy->governor = new_policy->governor;
			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					/* START failed: undo POLICY_INIT before rollback */
					up_write(&policy->rwsem);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_EXIT);
					down_write(&policy->rwsem);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							policy->governor->name);
				if (old_gov) {
					policy->governor = old_gov;
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
2074
1da177e4
LT
2075/**
2076 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2077 * @cpu: CPU which shall be re-evaluated
2078 *
25985edc 2079 * Useful for policy notifiers which have different necessities
1da177e4
LT
2080 * at different times.
2081 */
2082int cpufreq_update_policy(unsigned int cpu)
2083{
3a3e9e06
VK
2084 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2085 struct cpufreq_policy new_policy;
f1829e4a 2086 int ret;
1da177e4 2087
3a3e9e06 2088 if (!policy) {
f1829e4a
JL
2089 ret = -ENODEV;
2090 goto no_policy;
2091 }
1da177e4 2092
ad7722da 2093 down_write(&policy->rwsem);
1da177e4 2094
2d06d8c4 2095 pr_debug("updating policy for CPU %u\n", cpu);
d5b73cd8 2096 memcpy(&new_policy, policy, sizeof(*policy));
3a3e9e06
VK
2097 new_policy.min = policy->user_policy.min;
2098 new_policy.max = policy->user_policy.max;
2099 new_policy.policy = policy->user_policy.policy;
2100 new_policy.governor = policy->user_policy.governor;
1da177e4 2101
bb176f7d
VK
2102 /*
2103 * BIOS might change freq behind our back
2104 * -> ask driver for current freq and notify governors about a change
2105 */
1c3d85dd 2106 if (cpufreq_driver->get) {
3a3e9e06
VK
2107 new_policy.cur = cpufreq_driver->get(cpu);
2108 if (!policy->cur) {
2d06d8c4 2109 pr_debug("Driver did not initialize current freq");
3a3e9e06 2110 policy->cur = new_policy.cur;
a85f7bd3 2111 } else {
9c0ebcf7 2112 if (policy->cur != new_policy.cur && has_target())
3a3e9e06
VK
2113 cpufreq_out_of_sync(cpu, policy->cur,
2114 new_policy.cur);
a85f7bd3 2115 }
0961dd0d
TR
2116 }
2117
037ce839 2118 ret = cpufreq_set_policy(policy, &new_policy);
1da177e4 2119
ad7722da 2120 up_write(&policy->rwsem);
5a01f2e8 2121
3a3e9e06 2122 cpufreq_cpu_put(policy);
f1829e4a 2123no_policy:
1da177e4
LT
2124 return ret;
2125}
2126EXPORT_SYMBOL(cpufreq_update_policy);
2127
2760984f 2128static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2129 unsigned long action, void *hcpu)
2130{
2131 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2132 struct device *dev;
5302c3fb 2133 bool frozen = false;
c32b6b8e 2134
8a25a2fd
KS
2135 dev = get_cpu_device(cpu);
2136 if (dev) {
5302c3fb 2137
d4faadd5
RW
2138 if (action & CPU_TASKS_FROZEN)
2139 frozen = true;
2140
5302c3fb 2141 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2142 case CPU_ONLINE:
5302c3fb 2143 __cpufreq_add_dev(dev, NULL, frozen);
23d32899 2144 cpufreq_update_policy(cpu);
c32b6b8e 2145 break;
5302c3fb 2146
c32b6b8e 2147 case CPU_DOWN_PREPARE:
cedb70af 2148 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
1aee40ac
SB
2149 break;
2150
2151 case CPU_POST_DEAD:
cedb70af 2152 __cpufreq_remove_dev_finish(dev, NULL, frozen);
c32b6b8e 2153 break;
5302c3fb 2154
5a01f2e8 2155 case CPU_DOWN_FAILED:
5302c3fb 2156 __cpufreq_add_dev(dev, NULL, frozen);
c32b6b8e
AR
2157 break;
2158 }
2159 }
2160 return NOTIFY_OK;
2161}
2162
/* Hotplug notifier wiring cpufreq setup/teardown into CPU online/offline. */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
2166
2167/*********************************************************************
2168 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2169 *********************************************************************/
2170
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * A driver must provide verify() and init(), plus at least one
	 * control method: setpolicy, target_index or target.
	 */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Only one driver at a time; publish it under the writer lock. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* NOTE(review): presumably triggers per-CPU setup via the subsys
	 * interface (and thus driver ->init()) — confirm against
	 * cpufreq_interface's add_dev callback. */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	/* Roll back the driver pointer under the same lock that set it. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2243
1da177e4
LT
2244/**
2245 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2246 *
bb176f7d 2247 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2248 * the right to do so, i.e. if you have succeeded in initialising before!
2249 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2250 * currently not initialised.
2251 */
221dee28 2252int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2253{
2254 unsigned long flags;
2255
1c3d85dd 2256 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2257 return -EINVAL;
1da177e4 2258
2d06d8c4 2259 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2260
8a25a2fd 2261 subsys_interface_unregister(&cpufreq_interface);
65edc68c 2262 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2263
6eed9404 2264 down_write(&cpufreq_rwsem);
0d1857a1 2265 write_lock_irqsave(&cpufreq_driver_lock, flags);
6eed9404 2266
1c3d85dd 2267 cpufreq_driver = NULL;
6eed9404 2268
0d1857a1 2269 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
6eed9404 2270 up_write(&cpufreq_rwsem);
1da177e4
LT
2271
2272 return 0;
2273}
2274EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2275
/*
 * Core bring-up: runs at core_initcall time so drivers that register
 * later can rely on the global kobject and syscore ops being in place.
 */
static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	/* Global sysfs anchor for cpufreq; boot cannot proceed without it. */
	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 0.856167 seconds and 5 git commands to generate.