cpufreq: Drop cpufreq_disabled() check from cpufreq_cpu_{get|put}()
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
2f0aea93 29#include <linux/suspend.h>
90de2a4a 30#include <linux/syscore_ops.h>
5ff0a268 31#include <linux/tick.h>
6f4f2723
TR
32#include <trace/events/power.h>
33
1da177e4 34/**
cd878479 35 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
36 * level driver of CPUFreq support, and its spinlock. This lock
37 * also protects the cpufreq_cpu_data array.
38 */
1c3d85dd 39static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
8414809c 41static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
bb176f7d 42static DEFINE_RWLOCK(cpufreq_driver_lock);
6f1e4efd 43DEFINE_MUTEX(cpufreq_governor_lock);
c88a1f8b 44static LIST_HEAD(cpufreq_policy_list);
bb176f7d 45
084f3493 46/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 47static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
1da177e4 48
2f0aea93
VK
49/* Flag to suspend/resume CPUFreq governors */
50static bool cpufreq_suspended;
1da177e4 51
9c0ebcf7
VK
52static inline bool has_target(void)
53{
54 return cpufreq_driver->target_index || cpufreq_driver->target;
55}
56
6eed9404
VK
57/*
58 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
59 * sections
60 */
61static DECLARE_RWSEM(cpufreq_rwsem);
62
1da177e4 63/* internal prototypes */
29464f28
DJ
64static int __cpufreq_governor(struct cpufreq_policy *policy,
65 unsigned int event);
d92d50a4 66static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
65f27f38 67static void handle_update(struct work_struct *work);
1da177e4
LT
68
69/**
32ee8c3e
DJ
70 * Two notifier lists: the "policy" list is involved in the
71 * validation process for a new CPU frequency policy; the
1da177e4
LT
72 * "transition" list for kernel code that needs to handle
73 * changes to devices when the CPU clock speed changes.
74 * The mutex locks both lists.
75 */
e041c683 76static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 77static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 78
/* Set once the SRCU transition notifier head has been initialized; checked
 * before callers are allowed to register transition notifiers. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
/* pure_initcall: runs very early so the list is ready before any driver. */
pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 87
/* Non-zero once disable_cpufreq() has been called; read-mostly after boot. */
static int off __read_mostly;
/* Returns non-zero if cpufreq has been globally disabled. */
static int cpufreq_disabled(void)
{
	return off;
}
/* Globally disable cpufreq; there is no way to re-enable it afterwards. */
void disable_cpufreq(void)
{
	off = 1;
}
1da177e4 97static LIST_HEAD(cpufreq_governor_list);
29464f28 98static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 99
4d5dcc42
VK
100bool have_governor_per_policy(void)
101{
0b981e70 102 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
4d5dcc42 103}
3f869d6d 104EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 105
944e9a03
VK
106struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
107{
108 if (have_governor_per_policy())
109 return &policy->kobj;
110 else
111 return cpufreq_global_kobject;
112}
113EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
114
72a4ce34
VK
/*
 * get_cpu_idle_time_jiffy - derive idle time from jiffies-based cpustat.
 * Idle is computed as wall time minus all accounted busy categories
 * (user, system, irq, softirq, steal, nice). Optionally stores the wall
 * time (in usecs) through @wall. Returns idle time in usecs.
 */
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}
136
137u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
138{
139 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
140
141 if (idle_time == -1ULL)
142 return get_cpu_idle_time_jiffy(cpu, wall);
143 else if (!io_busy)
144 idle_time += get_cpu_iowait_time_us(cpu, wall);
145
146 return idle_time;
147}
148EXPORT_SYMBOL_GPL(get_cpu_idle_time);
149
70e9e778
VK
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do following:
 * - validate & show freq table passed
 * - set policies transition latency
 * - policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all
	 * processors share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
180
652ed95d
VK
/*
 * cpufreq_generic_get - generic ->get() callback for drivers using a clk.
 * Returns the clock rate of @cpu's policy in kHz, or 0 when no policy or
 * no valid clk is associated with the CPU (an error is logged).
 */
unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	/* clk_get_rate() is in Hz; cpufreq works in kHz. */
	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
194
e0b3165b
VK
/* Only for cpufreq core internal use: returns the policy for @cpu without
 * taking any reference or lock — callers must ensure it cannot go away. */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return per_cpu(cpufreq_cpu_data, cpu);
}
200
/*
 * cpufreq_cpu_get - get the policy of @cpu with references held.
 *
 * On success the policy's kobject reference is incremented and
 * cpufreq_rwsem is held for reading (preventing the driver module from
 * unloading); both are dropped by cpufreq_cpu_put(). Returns NULL when
 * @cpu is out of range, the rwsem cannot be taken, no driver is
 * registered, or the CPU has no policy.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		return NULL;

	/* trylock: returning NULL is preferable to blocking here. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* No policy: drop the rwsem again so put() is not required. */
	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
230
/*
 * cpufreq_cpu_put - release the references taken by cpufreq_cpu_get():
 * drops the policy kobject reference and releases cpufreq_rwsem.
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
237
1da177e4
LT
238/*********************************************************************
239 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
240 *********************************************************************/
241
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	/* Reference point captured on the first transition seen. */
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	/* CONST_LOOPS drivers keep loops_per_jiffy frequency-independent. */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	/* Only rescale after the change actually happened. */
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
1da177e4 273
/*
 * __cpufreq_notify_transition - notify one CPU's transition stage.
 *
 * For CPUFREQ_PRECHANGE: reconcile freqs->old with the core's view of the
 * current frequency, call the transition notifier chain, then pre-adjust
 * loops_per_jiffy. For CPUFREQ_POSTCHANGE: adjust loops_per_jiffy, trace,
 * notify, and commit the new frequency to policy->cur. Must be called
 * with interrupts enabled (notifiers may sleep).
 */
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		/* Commit only if this freqs entry matches the policy CPU. */
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
bb176f7d 318
b43a7ffb
VK
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects. Iterates over every CPU in the policy, rewriting
 * freqs->cpu for each notification.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
1da177e4 333
f7ba3b41 334/* Do post notifications when there are chances that transition has failed */
236a9800 335static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
f7ba3b41
VK
336 struct cpufreq_freqs *freqs, int transition_failed)
337{
338 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
339 if (!transition_failed)
340 return;
341
342 swap(freqs->old, freqs->new);
343 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
344 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
345}
f7ba3b41 346
12478cf0
SB
/*
 * cpufreq_freq_transition_begin - mark the start of a frequency transition.
 *
 * Waits until no other transition is ongoing on @policy, claims the
 * transition (under policy->transition_lock, re-checking after each
 * wakeup), records the owning task for deadlock detection, and sends the
 * PRECHANGE notifications. Paired with cpufreq_freq_transition_end().
 */
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	/* Someone else won the race between wakeup and lock: wait again. */
	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
380
/*
 * cpufreq_freq_transition_end - mark the end of a frequency transition.
 *
 * Sends the POSTCHANGE (and, on failure, rollback) notifications, clears
 * the transition-ongoing state set by cpufreq_freq_transition_begin() and
 * wakes up any waiters. Warns and bails out if no transition was begun.
 */
void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
395
1da177e4 396
1da177e4
LT
397/*********************************************************************
398 * SYSFS INTERFACE *
399 *********************************************************************/
8a5c74a1 400static ssize_t show_boost(struct kobject *kobj,
6f19efc0
LM
401 struct attribute *attr, char *buf)
402{
403 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
404}
405
/*
 * sysfs write for the global "boost" attribute. Accepts only "0" or "1";
 * forwards the request to cpufreq_boost_trigger_state() and reports
 * -EINVAL on parse or trigger failure.
 */
static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
/* Creates the read-write global attribute object named "boost". */
define_one_global_rw(boost);
1da177e4 427
/*
 * find_governor - look up a registered governor by (case-insensitive)
 * name. Returns NULL when not found. Callers serialize against list
 * changes via cpufreq_governor_mutex.
 */
static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
438
1da177e4
LT
/**
 * cpufreq_parse_governor - parse a governor string
 *
 * For setpolicy drivers, maps "performance"/"powersave" onto *policy.
 * Otherwise resolves @str_governor against the registered governor list,
 * attempting request_module("cpufreq_<name>") once if it is not yet
 * loaded, and stores the result in *governor. Returns 0 on success,
 * -EINVAL on failure or when no driver is registered.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* Drop the mutex: request_module() may sleep. */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4 487
1da177e4 488/**
e08f5f5b
GS
489 * cpufreq_per_cpu_attr_read() / show_##file_name() -
490 * print out cpufreq information
1da177e4
LT
491 *
492 * Write out information from cpufreq_driver->policy[cpu]; object must be
493 * "unsigned int".
494 */
495
32ee8c3e
DJ
496#define show_one(file_name, object) \
497static ssize_t show_##file_name \
905d77cd 498(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 499{ \
29464f28 500 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
501}
502
503show_one(cpuinfo_min_freq, cpuinfo.min_freq);
504show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 505show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
506show_one(scaling_min_freq, min);
507show_one(scaling_max_freq, max);
c034b02e 508
09347b29 509static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
c034b02e
DB
510{
511 ssize_t ret;
512
513 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
514 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
515 else
516 ret = sprintf(buf, "%u\n", policy->cur);
517 return ret;
518}
1da177e4 519
037ce839 520static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 521 struct cpufreq_policy *new_policy);
7970e08b 522
1da177e4
LT
523/**
524 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
525 */
526#define store_one(file_name, object) \
527static ssize_t store_##file_name \
905d77cd 528(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 529{ \
619c144c 530 int ret, temp; \
1da177e4
LT
531 struct cpufreq_policy new_policy; \
532 \
533 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
534 if (ret) \
535 return -EINVAL; \
536 \
29464f28 537 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
538 if (ret != 1) \
539 return -EINVAL; \
540 \
619c144c 541 temp = new_policy.object; \
037ce839 542 ret = cpufreq_set_policy(policy, &new_policy); \
619c144c
VH
543 if (!ret) \
544 policy->user_policy.object = temp; \
1da177e4
LT
545 \
546 return ret ? ret : count; \
547}
548
29464f28
DJ
549store_one(scaling_min_freq, min);
550store_one(scaling_max_freq, max);
1da177e4
LT
551
552/**
553 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
554 */
905d77cd
DJ
/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware,
 * via __cpufreq_get(); prints "<unknown>" when the driver reports 0.
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}
563
1da177e4
LT
564/**
565 * show_scaling_governor - show the current policy for the specified CPU
566 */
905d77cd 567static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 568{
29464f28 569 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
570 return sprintf(buf, "powersave\n");
571 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
572 return sprintf(buf, "performance\n");
573 else if (policy->governor)
4b972f0b 574 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 575 policy->governor->name);
1da177e4
LT
576 return -EINVAL;
577}
578
1da177e4
LT
579/**
580 * store_scaling_governor - store policy for the specified CPU
581 */
905d77cd
DJ
/*
 * store_scaling_governor - store policy for the specified CPU.
 *
 * Parses the governor name (max 15 chars), resolves it through
 * cpufreq_parse_governor(), applies it with cpufreq_set_policy(), and
 * records the result in user_policy. Returns @count on success or a
 * negative errno.
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	/* Remember the user's choice even if set_policy failed. */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
611
612/**
613 * show_scaling_driver - show the cpufreq driver currently loaded
614 */
/*
 * show_scaling_driver - show the cpufreq driver currently loaded.
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
619
620/**
621 * show_scaling_available_governors - show the available CPUfreq governors
622 */
905d77cd
DJ
/*
 * show_scaling_available_governors - show the available CPUfreq governors.
 * setpolicy drivers expose only the two fixed policies; otherwise the
 * registered governor list is printed, bounded to one page.
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		/* Stop before a name + separator could overflow the page. */
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
e8628dd0 644
/*
 * cpufreq_show_cpus - format a cpumask as a space-separated decimal CPU
 * list into @buf (at most one page, newline-terminated). Returns the
 * number of bytes written.
 */
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		/* Leave room for the trailing newline and NUL. */
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 661
e8628dd0
DW
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}
670
/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
678
9e76988e 679static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 680 const char *buf, size_t count)
9e76988e
VP
681{
682 unsigned int freq = 0;
683 unsigned int ret;
684
879000f9 685 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
686 return -EINVAL;
687
688 ret = sscanf(buf, "%u", &freq);
689 if (ret != 1)
690 return -EINVAL;
691
692 policy->governor->store_setspeed(policy, freq);
693
694 return count;
695}
696
/*
 * show_scaling_setspeed - delegate to the governor's ->show_setspeed()
 * hook, or report "<unsupported>" when the governor has none.
 */
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
1da177e4 704
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation.
 * Falls back to cpuinfo.max_freq when the driver has no ->bios_limit()
 * callback or it fails.
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
719
6dad2a29
BP
720cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
721cpufreq_freq_attr_ro(cpuinfo_min_freq);
722cpufreq_freq_attr_ro(cpuinfo_max_freq);
723cpufreq_freq_attr_ro(cpuinfo_transition_latency);
724cpufreq_freq_attr_ro(scaling_available_governors);
725cpufreq_freq_attr_ro(scaling_driver);
726cpufreq_freq_attr_ro(scaling_cur_freq);
727cpufreq_freq_attr_ro(bios_limit);
728cpufreq_freq_attr_ro(related_cpus);
729cpufreq_freq_attr_ro(affected_cpus);
730cpufreq_freq_attr_rw(scaling_min_freq);
731cpufreq_freq_attr_rw(scaling_max_freq);
732cpufreq_freq_attr_rw(scaling_governor);
733cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 734
905d77cd 735static struct attribute *default_attrs[] = {
1da177e4
LT
736 &cpuinfo_min_freq.attr,
737 &cpuinfo_max_freq.attr,
ed129784 738 &cpuinfo_transition_latency.attr,
1da177e4
LT
739 &scaling_min_freq.attr,
740 &scaling_max_freq.attr,
741 &affected_cpus.attr,
e8628dd0 742 &related_cpus.attr,
1da177e4
LT
743 &scaling_governor.attr,
744 &scaling_driver.attr,
745 &scaling_available_governors.attr,
9e76988e 746 &scaling_setspeed.attr,
1da177e4
LT
747 NULL
748};
749
29464f28
DJ
750#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
751#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 752
/*
 * show - common kobject read dispatcher for per-policy attributes.
 * Takes cpufreq_rwsem (trylock) and the policy rwsem for reading, then
 * forwards to the attribute's ->show(). -EIO if the attribute is
 * write-only, -EINVAL if the rwsem cannot be acquired.
 */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}
774
905d77cd
DJ
/*
 * store - common kobject write dispatcher for per-policy attributes.
 * Pins CPU hotplug (get_online_cpus), rejects writes for offline policy
 * CPUs, takes cpufreq_rwsem (trylock) and the policy rwsem for writing,
 * then forwards to the attribute's ->store(). -EIO if the attribute is
 * read-only; -EINVAL on the other failure paths.
 */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
805
/*
 * cpufreq_sysfs_release - kobject release callback: signals the waiter
 * in the policy teardown path that the last reference is gone.
 */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}
812
52cf25d0 813static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
814 .show = show,
815 .store = store,
816};
817
818static struct kobj_type ktype_cpufreq = {
819 .sysfs_ops = &sysfs_ops,
820 .default_attrs = default_attrs,
821 .release = cpufreq_sysfs_release,
822};
823
2361be23
VK
824struct kobject *cpufreq_global_kobject;
825EXPORT_SYMBOL(cpufreq_global_kobject);
826
827static int cpufreq_global_kobject_usage;
828
/*
 * cpufreq_get_global_kobject - take a usage reference on the global
 * cpufreq kobject, adding it under the cpu subsystem on first use.
 * Returns kobject_add()'s result on first use, 0 otherwise.
 */
int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);
838
/*
 * cpufreq_put_global_kobject - drop a usage reference; removes the global
 * kobject from sysfs when the count reaches zero.
 */
void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);
845
/*
 * cpufreq_sysfs_create_file - create @attr under the global cpufreq
 * kobject, holding one usage reference for the file's lifetime. The
 * reference is dropped again if file creation fails.
 */
int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);
859
/*
 * cpufreq_sysfs_remove_file - remove @attr and release the usage
 * reference taken by cpufreq_sysfs_create_file().
 */
void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
866
/* symlink affected CPUs: create a "cpufreq" link from every secondary CPU
 * device in the policy back to the policy kobject. Stops at the first
 * failure and returns its error code (0 on success). */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		/* The policy CPU owns the real directory, not a link. */
		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}
888
/*
 * cpufreq_add_dev_interface - populate the policy's sysfs directory:
 * driver-specific attributes, cpuinfo_cur_freq (only if the driver can
 * read the hardware), scaling_cur_freq, bios_limit (only if supported),
 * and finally the per-CPU symlinks. Returns 0 or the first error.
 */
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}
921
/*
 * cpufreq_init_policy - apply the initial policy: restore the governor
 * used before hotplug (per-cpu cpufreq_cpu_governor) or fall back to the
 * default, then commit via cpufreq_set_policy(). On failure the driver's
 * ->exit() is invoked to undo ->init().
 */
static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if its valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}
952
/*
 * cpufreq_add_policy_cpu - attach hotplugged @cpu to an existing @policy.
 *
 * Stops the governor, adds the CPU to policy->cpus and publishes the
 * policy in per-cpu cpufreq_cpu_data (under the policy rwsem and driver
 * lock), restarts the governor, and finally creates the CPU's "cpufreq"
 * sysfs symlink. Returns 0 on success or a negative errno.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;
	unsigned long flags;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
1da177e4 990
8414809c
SB
991static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
992{
993 struct cpufreq_policy *policy;
994 unsigned long flags;
995
44871c9c 996 read_lock_irqsave(&cpufreq_driver_lock, flags);
8414809c
SB
997
998 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
999
44871c9c 1000 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
8414809c 1001
09712f55
GU
1002 if (policy)
1003 policy->governor = NULL;
6e2c89d1 1004
8414809c
SB
1005 return policy;
1006}
1007
e9698cc5
SB
1008static struct cpufreq_policy *cpufreq_policy_alloc(void)
1009{
1010 struct cpufreq_policy *policy;
1011
1012 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1013 if (!policy)
1014 return NULL;
1015
1016 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1017 goto err_free_policy;
1018
1019 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1020 goto err_free_cpumask;
1021
c88a1f8b 1022 INIT_LIST_HEAD(&policy->policy_list);
ad7722da 1023 init_rwsem(&policy->rwsem);
12478cf0
SB
1024 spin_lock_init(&policy->transition_lock);
1025 init_waitqueue_head(&policy->transition_wait);
818c5712
VK
1026 init_completion(&policy->kobj_unregister);
1027 INIT_WORK(&policy->update, handle_update);
ad7722da 1028
e9698cc5
SB
1029 return policy;
1030
1031err_free_cpumask:
1032 free_cpumask_var(policy->cpus);
1033err_free_policy:
1034 kfree(policy);
1035
1036 return NULL;
1037}
1038
42f921a6
VK
/*
 * cpufreq_policy_put_kobj - drop the policy kobject and wait for release.
 *
 * Notifies CPUFREQ_REMOVE_POLICY listeners, drops what should be the
 * last reference on policy->kobj and blocks until the kobject release
 * callback completes kobj_unregister, so no sysfs user can still reach
 * the policy afterwards.
 */
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_REMOVE_POLICY, policy);

	/* Snapshot the pointers under the rwsem before dropping the ref. */
	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
1062
e9698cc5
SB
1063static void cpufreq_policy_free(struct cpufreq_policy *policy)
1064{
1065 free_cpumask_var(policy->related_cpus);
1066 free_cpumask_var(policy->cpus);
1067 kfree(policy);
1068}
1069
1bfb425b
VK
/*
 * update_policy_cpu - make @cpu the new owner of @policy.
 * @policy: policy whose managing CPU is changing
 * @cpu: the CPU taking over
 * @cpu_dev: device of @cpu, the new sysfs parent for the policy kobject
 *
 * Moves the policy kobject under the new CPU's device and then updates
 * policy->cpu under the rwsem.  Returns 0 on success; on kobject_move()
 * failure policy->cpu is left unchanged.
 */
static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
			     struct device *cpu_dev)
{
	int ret;

	/* Nothing to do if this CPU already owns the policy. */
	if (WARN_ON(cpu == policy->cpu))
		return 0;

	/* Move kobject to the new policy->cpu */
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
		return ret;
	}

	down_write(&policy->rwsem);
	policy->cpu = cpu;
	up_write(&policy->rwsem);

	return 0;
}
1091
/*
 * __cpufreq_add_dev - create (or revive) the cpufreq policy for a CPU.
 * @dev: CPU device being added
 * @sif: subsys interface (unused here)
 *
 * Entry point for both CPU hotplug and system resume.  If a sibling CPU
 * already owns a policy, this CPU simply joins it.  Otherwise a policy
 * is allocated (or, when resuming, restored from the per-CPU fallback
 * slot), the driver's ->init() is invoked, sysfs is wired up, and the
 * governor is started via cpufreq_init_policy().  Returns 0 on success
 * or a negative error code after unwinding partial setup.
 */
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
	bool recover_policy = cpufreq_suspended;

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(policy))
		return 0;

	/* Bail out (pretend success) if the driver is being torn down. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, policy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			ret = cpufreq_add_policy_cpu(policy, cpu, dev);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		recover_policy = false;
		policy = cpufreq_policy_alloc();
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (recover_policy && cpu != policy->cpu)
		WARN_ON(update_policy_cpu(policy, cpu, dev));
	else
		policy->cpu = cpu;

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	down_write(&policy->rwsem);

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!recover_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		/* prepare interface data */
		ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
					   &dev->kobj, "cpufreq");
		if (ret) {
			pr_err("%s: failed to init policy->kobj: %d\n",
			       __func__, ret);
			goto err_init_policy_kobj;
		}
	}

	/* Publish the policy for every CPU it manages. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * frequency table present with cpufreq core. In such cases CPU might be
	 * unstable if it has to run on that frequency for long duration of time
	 * and so its better to set it to a frequency which is specified in
	 * freq-table. This also makes cpufreq stats inconsistent as
	 * cpufreq-stats would fail to register because current frequency of CPU
	 * isn't found in freq-table.
	 *
	 * Because we don't want this change to effect boot process badly, we go
	 * for the next freq which is >= policy->cur ('cur' must be set by now,
	 * otherwise we will end up setting freq to lowest of the table as 'cur'
	 * is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	if (!recover_policy) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	if (!recover_policy) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}
	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	up_read(&cpufreq_rwsem);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

	/* Error unwinding: undo the steps above in reverse order. */
err_out_unregister:
err_get_freq:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!recover_policy) {
		kobject_put(&policy->kobj);
		wait_for_completion(&policy->kobj_unregister);
	}
err_init_policy_kobj:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	if (recover_policy) {
		/* Do not leave stale fallback data behind. */
		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
		cpufreq_policy_put_kobj(policy);
	}
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
1302
a82fab29
SB
1303/**
1304 * cpufreq_add_dev - add a CPU device
1305 *
1306 * Adds the cpufreq interface for a CPU device.
1307 *
1308 * The Oracle says: try running cpufreq registration/unregistration concurrently
1309 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1310 * mess up, but more thorough testing is needed. - Mathieu
1311 */
/* Thin subsys_interface hook; the real work happens in __cpufreq_add_dev(). */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return __cpufreq_add_dev(dev, sif);
}
1316
/*
 * __cpufreq_remove_dev_prepare - first stage of CPU removal.
 * @dev: CPU device going away
 * @sif: subsys interface (unused here)
 *
 * Stops the governor (remembering its name for a later re-add), and if
 * the departing CPU owns the policy, hands ownership to a sibling or
 * calls the driver's ->stop_cpu() when it was the last one.  During
 * suspend the policy pointer is parked in the per-CPU fallback slot so
 * resume can restore it.  Returns 0 on success or a negative errno.
 */
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (cpufreq_suspended)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}

		/* Remember the governor so a re-added CPU can restore it. */
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
	}

	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		/* Non-owner CPU: only its sysfs symlink must go. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Nominate new CPU */
		int new_cpu = cpumask_any_but(policy->cpus, cpu);
		struct device *cpu_dev = get_cpu_device(new_cpu);

		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
		if (ret) {
			/* Move failed: restore the symlink we just removed. */
			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					      "cpufreq"))
				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
				       __func__, cpu_dev->id);
			return ret;
		}

		if (!cpufreq_suspended)
			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				 __func__, new_cpu, cpu);
	} else if (cpufreq_driver->stop_cpu) {
		/* Last CPU of the policy: let the driver quiesce it. */
		cpufreq_driver->stop_cpu(policy);
	}

	return 0;
}
1383
/*
 * __cpufreq_remove_dev_finish - second stage of CPU removal.
 * @dev: CPU device going away
 * @sif: subsys interface (unused here)
 *
 * Clears the per-CPU policy pointer and removes the CPU from
 * policy->cpus.  If this was the last CPU, the governor is exited, the
 * driver's ->exit() is called and the policy is torn down (kept alive
 * during suspend for later restore); otherwise the governor is simply
 * restarted for the remaining CPUs.  Returns 0 or a negative errno.
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		if (!cpufreq_suspended)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		/* Keep the policy object around for restore during suspend. */
		if (!cpufreq_suspended)
			cpufreq_policy_free(policy);
	} else if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}
1452
cedb70af 1453/**
27a862e9 1454 * cpufreq_remove_dev - remove a CPU device
cedb70af
SB
1455 *
1456 * Removes the cpufreq interface for a CPU device.
cedb70af 1457 */
8a25a2fd 1458static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1459{
8a25a2fd 1460 unsigned int cpu = dev->id;
27a862e9 1461 int ret;
ec28297a
VP
1462
1463 if (cpu_is_offline(cpu))
1464 return 0;
1465
96bbbe4a 1466 ret = __cpufreq_remove_dev_prepare(dev, sif);
27a862e9
VK
1467
1468 if (!ret)
96bbbe4a 1469 ret = __cpufreq_remove_dev_finish(dev, sif);
27a862e9
VK
1470
1471 return ret;
5a01f2e8
VP
1472}
1473
65f27f38 1474static void handle_update(struct work_struct *work)
1da177e4 1475{
65f27f38
DH
1476 struct cpufreq_policy *policy =
1477 container_of(work, struct cpufreq_policy, update);
1478 unsigned int cpu = policy->cpu;
2d06d8c4 1479 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1480 cpufreq_update_policy(cpu);
1481}
1482
1483/**
bb176f7d
VK
1484 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1485 * in deep trouble.
a1e1dc41 1486 * @policy: policy managing CPUs
1da177e4
LT
1487 * @new_freq: CPU frequency the CPU actually runs at
1488 *
29464f28
DJ
1489 * We adjust to current frequency first, and need to clean up later.
1490 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1491 */
a1e1dc41 1492static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
e08f5f5b 1493 unsigned int new_freq)
1da177e4
LT
1494{
1495 struct cpufreq_freqs freqs;
b43a7ffb 1496
e837f9b5 1497 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
a1e1dc41 1498 policy->cur, new_freq);
1da177e4 1499
a1e1dc41 1500 freqs.old = policy->cur;
1da177e4 1501 freqs.new = new_freq;
b43a7ffb 1502
8fec051e
VK
1503 cpufreq_freq_transition_begin(policy, &freqs);
1504 cpufreq_freq_transition_end(policy, &freqs, 0);
1da177e4
LT
1505}
1506
32ee8c3e 1507/**
4ab70df4 1508 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1509 * @cpu: CPU number
1510 *
1511 * This is the last known freq, without actually getting it from the driver.
1512 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1513 */
1514unsigned int cpufreq_quick_get(unsigned int cpu)
1515{
9e21ba8b 1516 struct cpufreq_policy *policy;
e08f5f5b 1517 unsigned int ret_freq = 0;
95235ca2 1518
1c3d85dd
RW
1519 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1520 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1521
1522 policy = cpufreq_cpu_get(cpu);
95235ca2 1523 if (policy) {
e08f5f5b 1524 ret_freq = policy->cur;
95235ca2
VP
1525 cpufreq_cpu_put(policy);
1526 }
1527
4d34a67d 1528 return ret_freq;
95235ca2
VP
1529}
1530EXPORT_SYMBOL(cpufreq_quick_get);
1531
3d737108
JB
1532/**
1533 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1534 * @cpu: CPU number
1535 *
1536 * Just return the max possible frequency for a given CPU.
1537 */
1538unsigned int cpufreq_quick_get_max(unsigned int cpu)
1539{
1540 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1541 unsigned int ret_freq = 0;
1542
1543 if (policy) {
1544 ret_freq = policy->max;
1545 cpufreq_cpu_put(policy);
1546 }
1547
1548 return ret_freq;
1549}
1550EXPORT_SYMBOL(cpufreq_quick_get_max);
1551
/*
 * __cpufreq_get - read the current frequency from the driver.
 *
 * Caller holds policy->rwsem.  If the hardware value disagrees with the
 * cached policy->cur (and the driver does not declare constant loops,
 * i.e. CPUFREQ_CONST_LOOPS), the cached state is resynchronized and a
 * policy update is scheduled.  Returns 0 when the driver has no ->get().
 */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
1da177e4 1573
5a01f2e8
VP
1574/**
1575 * cpufreq_get - get the current CPU frequency (in kHz)
1576 * @cpu: CPU number
1577 *
1578 * Get the CPU current (static) CPU frequency
1579 */
1580unsigned int cpufreq_get(unsigned int cpu)
1581{
999976e0 1582 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
5a01f2e8 1583 unsigned int ret_freq = 0;
5a01f2e8 1584
999976e0
AP
1585 if (policy) {
1586 down_read(&policy->rwsem);
d92d50a4 1587 ret_freq = __cpufreq_get(policy);
999976e0 1588 up_read(&policy->rwsem);
5a01f2e8 1589
999976e0
AP
1590 cpufreq_cpu_put(policy);
1591 }
6eed9404 1592
4d34a67d 1593 return ret_freq;
1da177e4
LT
1594}
1595EXPORT_SYMBOL(cpufreq_get);
1596
8a25a2fd
KS
/* Hotplug hooks: wire policies up/down as CPU devices come and go. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1603
e28867ea
VK
1604/*
1605 * In case platform wants some specific frequency to be configured
1606 * during suspend..
1607 */
1608int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1609{
1610 int ret;
1611
1612 if (!policy->suspend_freq) {
1613 pr_err("%s: suspend_freq can't be zero\n", __func__);
1614 return -EINVAL;
1615 }
1616
1617 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1618 policy->suspend_freq);
1619
1620 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1621 CPUFREQ_RELATION_H);
1622 if (ret)
1623 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1624 __func__, policy->suspend_freq, ret);
1625
1626 return ret;
1627}
1628EXPORT_SYMBOL(cpufreq_generic_suspend);
1629
42d4dc3f 1630/**
2f0aea93 1631 * cpufreq_suspend() - Suspend CPUFreq governors
e00e56df 1632 *
2f0aea93
VK
1633 * Called during system wide Suspend/Hibernate cycles for suspending governors
1634 * as some platforms can't change frequency after this point in suspend cycle.
1635 * Because some of the devices (like: i2c, regulators, etc) they use for
1636 * changing frequency are suspended quickly after this point.
42d4dc3f 1637 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	/* Nothing to do when no driver is registered. */
	if (!cpufreq_driver)
		return;

	/* setpolicy drivers have no governor to stop; just set the flag. */
	if (!has_target())
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	/* Stop each policy's governor, then let the driver suspend it. */
	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
			pr_err("%s: Failed to stop governor for policy: %p\n",
				__func__, policy);
		else if (cpufreq_driver->suspend
		    && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	/* From here on, other cpufreq paths see the suspended state. */
	cpufreq_suspended = true;
}
1663
1da177e4 1664/**
2f0aea93 1665 * cpufreq_resume() - Resume CPUFreq governors
1da177e4 1666 *
2f0aea93
VK
1667 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1668 * are suspended with cpufreq_suspend().
1da177e4 1669 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;

	/* Nothing to do when no driver is registered. */
	if (!cpufreq_driver)
		return;

	cpufreq_suspended = false;

	/* setpolicy drivers had no governor stopped; nothing to restart. */
	if (!has_target())
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	/* Resume the driver first, then restart each policy's governor. */
	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
			pr_err("%s: Failed to start governor for policy: %p\n",
				__func__, policy);

		/*
		 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
		 * policy in list. It will verify that the current freq is in
		 * sync with what we believe it to be.
		 */
		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
			schedule_work(&policy->update);
	}
}
1da177e4 1702
9d95046e
BP
1703/**
1704 * cpufreq_get_current_driver - return current driver's name
1705 *
1706 * Return the name string of the currently loaded cpufreq driver
1707 * or NULL, if none.
1708 */
1709const char *cpufreq_get_current_driver(void)
1710{
1c3d85dd
RW
1711 if (cpufreq_driver)
1712 return cpufreq_driver->name;
1713
1714 return NULL;
9d95046e
BP
1715}
1716EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4 1717
51315cdf
TP
1718/**
1719 * cpufreq_get_driver_data - return current driver data
1720 *
1721 * Return the private data of the currently loaded cpufreq
1722 * driver, or NULL if no cpufreq driver is loaded.
1723 */
1724void *cpufreq_get_driver_data(void)
1725{
1726 if (cpufreq_driver)
1727 return cpufreq_driver->driver_data;
1728
1729 return NULL;
1730}
1731EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1732
1da177e4
LT
1733/*********************************************************************
1734 * NOTIFIER LISTS INTERFACE *
1735 *********************************************************************/
1736
1737/**
1738 * cpufreq_register_notifier - register a driver with cpufreq
1739 * @nb: notifier function to register
1740 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1741 *
32ee8c3e 1742 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1743 * are notified about clock rate changes (once before and once after
1744 * the transition), or a list of drivers that are notified about
1745 * changes in cpufreq policy.
1746 *
1747 * This function may sleep, and has the same return conditions as
e041c683 1748 * blocking_notifier_chain_register.
1da177e4
LT
1749 */
1750int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1751{
1752 int ret;
1753
d5aaffa9
DB
1754 if (cpufreq_disabled())
1755 return -EINVAL;
1756
74212ca4
CEB
1757 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1758
1da177e4
LT
1759 switch (list) {
1760 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1761 ret = srcu_notifier_chain_register(
e041c683 1762 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1763 break;
1764 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1765 ret = blocking_notifier_chain_register(
1766 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1767 break;
1768 default:
1769 ret = -EINVAL;
1770 }
1da177e4
LT
1771
1772 return ret;
1773}
1774EXPORT_SYMBOL(cpufreq_register_notifier);
1775
1da177e4
LT
1776/**
1777 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1778 * @nb: notifier block to be unregistered
bb176f7d 1779 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1780 *
1781 * Remove a driver from the CPU frequency notifier list.
1782 *
1783 * This function may sleep, and has the same return conditions as
e041c683 1784 * blocking_notifier_chain_unregister.
1da177e4
LT
1785 */
1786int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1787{
1788 int ret;
1789
d5aaffa9
DB
1790 if (cpufreq_disabled())
1791 return -EINVAL;
1792
1da177e4
LT
1793 switch (list) {
1794 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1795 ret = srcu_notifier_chain_unregister(
e041c683 1796 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1797 break;
1798 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1799 ret = blocking_notifier_chain_unregister(
1800 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1801 break;
1802 default:
1803 ret = -EINVAL;
1804 }
1da177e4
LT
1805
1806 return ret;
1807}
1808EXPORT_SYMBOL(cpufreq_unregister_notifier);
1809
1810
1811/*********************************************************************
1812 * GOVERNORS *
1813 *********************************************************************/
1814
1c03a2d0
VK
/*
 * __target_intermediate - switch to the driver's intermediate frequency.
 *
 * Must set freqs->new to the intermediate frequency.  Returns 0 (with
 * freqs->new == 0) when the driver says no intermediate step is needed
 * for this @index; otherwise performs a notified transition to the
 * intermediate frequency and returns the driver's result.
 */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
1840
8d65775d
VK
/*
 * __target_index - switch to freq_table[index] via the driver.
 *
 * Wraps the driver's ->target_index() with transition notifications
 * (unless the driver posts them asynchronously itself), including an
 * optional intermediate-frequency hop and a roll-back notification to
 * policy->restore_freq if the final switch fails after the hop.
 */
static int __target_index(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *freq_table, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	int retval = -EINVAL;
	bool notify;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = freq_table[index].frequency;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we have't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
1894
1da177e4
LT
/*
 * __cpufreq_driver_target - set a new frequency for @policy.
 * @policy: policy to change (caller holds policy->rwsem for writing)
 * @target_freq: requested frequency in kHz, clamped to policy min/max
 * @relation: CPUFREQ_RELATION_* rounding rule for table lookup
 *
 * Dispatches to the driver's ->target() or, for table-based drivers,
 * resolves the table index and calls __target_index().  Returns 0 on
 * success (including the no-op case target == current), -ENODEV when
 * cpufreq is disabled, or a negative driver/table error.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int retval = -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		/* Already there after table rounding - nothing to do. */
		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		retval = __target_index(policy, freq_table, index);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1957
1da177e4
LT
1958int cpufreq_driver_target(struct cpufreq_policy *policy,
1959 unsigned int target_freq,
1960 unsigned int relation)
1961{
f1829e4a 1962 int ret = -EINVAL;
1da177e4 1963
ad7722da 1964 down_write(&policy->rwsem);
1da177e4
LT
1965
1966 ret = __cpufreq_driver_target(policy, target_freq, relation);
1967
ad7722da 1968 up_write(&policy->rwsem);
1da177e4 1969
1da177e4
LT
1970 return ret;
1971}
1972EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1973
e08f5f5b
GS
/*
 * __cpufreq_governor - deliver a governor event for a policy
 * @policy: policy whose governor is driven
 * @event: CPUFREQ_GOV_* event (INIT/EXIT/START/STOP/LIMITS/POLICY_*)
 *
 * Serializes START/STOP transitions via cpufreq_governor_lock and keeps
 * policy->governor_enabled consistent with the outcome of the governor
 * callback.  Holds a module reference on the governor across its
 * POLICY_INIT..POLICY_EXIT lifetime.  Returns the governor callback's
 * result, 0 during suspend, or a negative error.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* HW too slow for this governor: fall back to performance (if built) */
	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module for the POLICY_INIT..POLICY_EXIT lifetime */
	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	/* Reject redundant START, or LIMITS/STOP on a stopped governor */
	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	/* Optimistically flip the enabled state before calling the governor */
	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	/* Drop module ref on failed INIT or successful EXIT */
	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
2056
1da177e4
LT
2057int cpufreq_register_governor(struct cpufreq_governor *governor)
2058{
3bcb09a3 2059 int err;
1da177e4
LT
2060
2061 if (!governor)
2062 return -EINVAL;
2063
a7b422cd
KRW
2064 if (cpufreq_disabled())
2065 return -ENODEV;
2066
3fc54d37 2067 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 2068
b394058f 2069 governor->initialized = 0;
3bcb09a3 2070 err = -EBUSY;
42f91fa1 2071 if (!find_governor(governor->name)) {
3bcb09a3
JF
2072 err = 0;
2073 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 2074 }
1da177e4 2075
32ee8c3e 2076 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 2077 return err;
1da177e4
LT
2078}
2079EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2080
1da177e4
LT
2081void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2082{
90e41bac 2083 int cpu;
90e41bac 2084
1da177e4
LT
2085 if (!governor)
2086 return;
2087
a7b422cd
KRW
2088 if (cpufreq_disabled())
2089 return;
2090
90e41bac
PB
2091 for_each_present_cpu(cpu) {
2092 if (cpu_online(cpu))
2093 continue;
2094 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2095 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2096 }
90e41bac 2097
3fc54d37 2098 mutex_lock(&cpufreq_governor_mutex);
1da177e4 2099 list_del(&governor->governor_list);
3fc54d37 2100 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
2101 return;
2102}
2103EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2104
2105
1da177e4
LT
2106/*********************************************************************
2107 * POLICY INTERFACE *
2108 *********************************************************************/
2109
2110/**
2111 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
2112 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2113 * is written
1da177e4
LT
2114 *
2115 * Reads the current cpufreq policy.
2116 */
2117int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2118{
2119 struct cpufreq_policy *cpu_policy;
2120 if (!policy)
2121 return -EINVAL;
2122
2123 cpu_policy = cpufreq_cpu_get(cpu);
2124 if (!cpu_policy)
2125 return -EINVAL;
2126
d5b73cd8 2127 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
2128
2129 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
2130 return 0;
2131}
2132EXPORT_SYMBOL(cpufreq_get_policy);
2133
/*
 * cpufreq_set_policy - apply a new policy (limits and/or governor)
 * policy : current policy.
 * new_policy: policy to be set.
 *
 * Verifies and clamps the new limits via the driver and policy notifiers,
 * then either forwards them to ->setpolicy() drivers or performs a governor
 * switch (STOP/EXIT old, INIT/START new, with rollback to the old governor
 * on failure).  Drops and reacquires policy->rwsem around POLICY_EXIT/INIT,
 * so the caller must hold policy->rwsem for writing.
 *
 * Returns 0 on success or a negative error code.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* New range must overlap the current one */
	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	/* ->setpolicy() drivers handle the range themselves; no governor */
	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	/* Same governor: just propagate the new limits */
	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		/* rwsem dropped around EXIT to avoid deadlock with governor */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			goto out;

		/* START failed: tear the new governor back down */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return -EINVAL;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
2229
1da177e4
LT
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 *
 * Rebuilds a policy from the stored user_policy limits, resynchronizes
 * policy->cur with the hardware when the driver can report it, and applies
 * the result via cpufreq_set_policy() under policy->rwsem.  Returns 0 on
 * success, -ENODEV if no policy exists for @cpu, or a negative error.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	/* Start from the current policy, then restore user-requested values */
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			/* Hardware freq drifted: let governors catch up */
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
2284
2760984f 2285static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2286 unsigned long action, void *hcpu)
2287{
2288 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2289 struct device *dev;
c32b6b8e 2290
8a25a2fd
KS
2291 dev = get_cpu_device(cpu);
2292 if (dev) {
5302c3fb 2293 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2294 case CPU_ONLINE:
96bbbe4a 2295 __cpufreq_add_dev(dev, NULL);
c32b6b8e 2296 break;
5302c3fb 2297
c32b6b8e 2298 case CPU_DOWN_PREPARE:
96bbbe4a 2299 __cpufreq_remove_dev_prepare(dev, NULL);
1aee40ac
SB
2300 break;
2301
2302 case CPU_POST_DEAD:
96bbbe4a 2303 __cpufreq_remove_dev_finish(dev, NULL);
c32b6b8e 2304 break;
5302c3fb 2305
5a01f2e8 2306 case CPU_DOWN_FAILED:
96bbbe4a 2307 __cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
2308 break;
2309 }
2310 }
2311 return NOTIFY_OK;
2312}
2313
/* Hotplug notifier wiring cpufreq_cpu_callback() into the CPU notifier chain */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4 2317
6f19efc0
LM
2318/*********************************************************************
2319 * BOOST *
2320 *********************************************************************/
2321static int cpufreq_boost_set_sw(int state)
2322{
2323 struct cpufreq_frequency_table *freq_table;
2324 struct cpufreq_policy *policy;
2325 int ret = -EINVAL;
2326
2327 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2328 freq_table = cpufreq_frequency_get_table(policy->cpu);
2329 if (freq_table) {
2330 ret = cpufreq_frequency_table_cpuinfo(policy,
2331 freq_table);
2332 if (ret) {
2333 pr_err("%s: Policy frequency update failed\n",
2334 __func__);
2335 break;
2336 }
2337 policy->user_policy.max = policy->max;
2338 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2339 }
2340 }
2341
2342 return ret;
2343}
2344
2345int cpufreq_boost_trigger_state(int state)
2346{
2347 unsigned long flags;
2348 int ret = 0;
2349
2350 if (cpufreq_driver->boost_enabled == state)
2351 return 0;
2352
2353 write_lock_irqsave(&cpufreq_driver_lock, flags);
2354 cpufreq_driver->boost_enabled = state;
2355 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2356
2357 ret = cpufreq_driver->set_boost(state);
2358 if (ret) {
2359 write_lock_irqsave(&cpufreq_driver_lock, flags);
2360 cpufreq_driver->boost_enabled = !state;
2361 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2362
e837f9b5
JP
2363 pr_err("%s: Cannot %s BOOST\n",
2364 __func__, state ? "enable" : "disable");
6f19efc0
LM
2365 }
2366
2367 return ret;
2368}
2369
2370int cpufreq_boost_supported(void)
2371{
2372 if (likely(cpufreq_driver))
2373 return cpufreq_driver->boost_supported;
2374
2375 return 0;
2376}
2377EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2378
/*
 * cpufreq_boost_enabled - current boost state of the registered driver
 *
 * NOTE(review): unlike cpufreq_boost_supported(), this dereferences
 * cpufreq_driver unconditionally -- callers must ensure a driver is
 * registered.
 */
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2384
1da177e4
LT
2385/*********************************************************************
2386 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2387 *********************************************************************/
2388
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values#
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 * Rejects inconsistent callback combinations (exactly one of ->setpolicy
 * or ->target/->target_index; ->get_intermediate paired with
 * ->target_intermediate), installs the driver pointer under
 * cpufreq_driver_lock, sets up optional boost support, registers the
 * subsys interface (which probes the CPUs) and the hotplug notifier.
 * All failures unwind via the goto labels below.
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Callback sanity: setpolicy XOR target-style, intermediate paired */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Only one driver may be registered at a time */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	/* Registers the driver with the device core; probes each CPU */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2472
1da177e4
LT
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver being unregistered (must match the registered one)
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 *
 * Tears down the subsys interface, boost sysfs file and hotplug notifier
 * first, then clears cpufreq_driver while holding both cpufreq_rwsem (write)
 * and cpufreq_driver_lock so no reader can observe a half-torn-down driver.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* Exclude all readers while the driver pointer is cleared */
	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8 2507
90de2a4a
DA
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 * (Registered from cpufreq_core_init(); only ->shutdown is provided.)
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};
2515
5a01f2e8
VP
/*
 * cpufreq_core_init - early init of the cpufreq core
 *
 * Creates the global cpufreq kobject (sysfs anchor) and registers the
 * shutdown syscore hook.  Runs at core_initcall time, before any driver
 * can register.  Returns 0, or -ENODEV when cpufreq is disabled.
 */
static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	/* Boot-time allocation failure here is unrecoverable */
	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 0.923862 seconds and 5 git commands to generate.