cpufreq: Initialize policy->kobj while allocating policy
drivers/cpufreq/cpufreq.c
/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

static bool suitable_policy(struct cpufreq_policy *policy, bool active)
{
	return active == !policy_is_inactive(policy);
}

/* Finds next active/inactive policy */
static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
					  bool active)
{
	do {
		policy = list_next_entry(policy, policy_list);

		/* No more policies in the list */
		if (&policy->policy_list == &cpufreq_policy_list)
			return NULL;
	} while (!suitable_policy(policy, active));

	return policy;
}

static struct cpufreq_policy *first_policy(bool active)
{
	struct cpufreq_policy *policy;

	/* No policies in the list */
	if (list_empty(&cpufreq_policy_list))
		return NULL;

	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
				  policy_list);

	if (!suitable_policy(policy, active))
		policy = next_policy(policy, active);

	return policy;
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)	\
	for (__policy = first_policy(__active);		\
	     __policy;					\
	     __policy = next_policy(__policy, __active))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)			\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

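/*
 * Illustrative sketch (not part of the original file): a core-internal
 * caller could walk every policy that still has online CPUs like this:
 *
 *	struct cpufreq_policy *policy;
 *
 *	for_each_active_policy(policy)
 *		pr_debug("CPU%u policy runs at %u kHz\n",
 *			 policy->cpu, policy->cur);
 */
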
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);
DEFINE_MUTEX(cpufreq_governor_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

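/*
 * Reading aid for the helpers above: when no precise idle-time source is
 * available, get_cpu_idle_time_jiffy() derives
 *	idle = wall - (user + system + irq + softirq + steal + nice)
 * from the per-CPU cpustat buckets, and both values are reported in
 * microseconds via cputime_to_usecs().
 */
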
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);

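/*
 * Sketch of a typical driver ->init() callback built on the helper above
 * (illustrative only; "my_freq_table" and the 100000 ns latency are made-up
 * values, not from this file):
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		return cpufreq_generic_init(policy, my_freq_table, 100000);
 *	}
 */
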
/* Only for cpufreq core internal use */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns policy for 'cpu', returns NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so would
 * require a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If the corresponding cpufreq_cpu_put() call isn't made, the policy won't be
 * freed as that depends on the kobj count.
 *
 * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
 * valid policy is found. This is done to make sure the driver doesn't get
 * unregistered while the policy is being used.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 *
 * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

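/*
 * Typical get/put pairing (illustrative sketch; use_policy() is a
 * hypothetical helper, not from this file):
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		use_policy(policy);
 *		cpufreq_cpu_put(policy);  // drops kobj ref + cpufreq_rwsem
 *	}
 */
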
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);

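/*
 * How a synchronous driver is expected to wrap a frequency change
 * (illustrative sketch; my_hw_set_freq() is a hypothetical helper):
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = my_hw_set_freq(target);
 *	cpufreq_freq_transition_end(policy, &freqs, ret != 0);
 */
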
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

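/*
 * With a boost-capable driver this attribute is expected to appear as a
 * global sysfs file, typically /sys/devices/system/cpu/cpufreq/boost
 * (assuming the default layout, since cpufreq_global_kobject is added
 * under cpu_subsys.dev_root below), accepting "0" or "1".
 */
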
static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}

static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

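/*
 * The two invocations above expand to store_scaling_min_freq() and
 * store_scaling_max_freq(). Note that the generated code only updates
 * policy->user_policy.* after cpufreq_set_policy() has accepted the new
 * value.
 */
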
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy))) {
		ret = -EBUSY;
		goto unlock_policy_rwsem;
	}

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

unlock_policy_rwsem:
	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};

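/*
 * Locking pattern used by show()/store() above: every access takes
 * cpufreq_rwsem so the driver can't be unregistered mid-access, plus the
 * per-policy rwsem; store() additionally pins CPU hotplug with
 * get_online_cpus() and rejects inactive policies with -EBUSY.
 */
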
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);

	if (!policy)
		return 0;

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return 0;

	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
{
	struct device *cpu_dev;

	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);

	cpu_dev = get_cpu_device(cpu);
	if (WARN_ON(!cpu_dev))
		return;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
}

/* Add/remove symlinks for all related CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
		if (j == policy->kobj_cpu)
			continue;

		ret = add_cpu_dev_symlink(policy, j);
		if (ret)
			break;
	}

	return ret;
}

static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;

	/* Some related CPUs might not be present (physically hotplugged) */
	for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
		if (j == policy->kobj_cpu)
			continue;

		remove_cpu_dev_symlink(policy, j);
	}
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return cpufreq_add_dev_symlink(policy);
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;
	int ret = 0;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov)
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	else
		gov = CPUFREQ_DEFAULT_GOVERNOR;

	new_policy.governor = gov;

	/* Use the default policy if it's valid. */
	if (cpufreq_driver->setpolicy)
		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);

	/* set default policy */
	ret = cpufreq_set_policy(policy, &new_policy);
	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpumask_set_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return 0;
}

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (likely(policy)) {
		/* Policy should be inactive here */
		WARN_ON(!policy_is_inactive(policy));
	}

	return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
				   "cpufreq");
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		goto err_free_rcpumask;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = dev->id;

	/* Set this once on allocation */
	policy->kobj_cpu = dev->id;

	return policy;

err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

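/*
 * As the commit title above says, policy->kobj is now initialized and
 * added in cpufreq_policy_alloc() itself. The error paths in
 * cpufreq_add_dev() can therefore always hand a fully initialized kobject
 * to cpufreq_policy_put_kobj() instead of special-casing a half-built
 * policy.
 */
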
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
{
	struct kobject *kobj;
	struct completion *cmp;

	if (notify)
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);

	down_write(&policy->rwsem);
	cpufreq_remove_dev_symlink(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	if (WARN_ON(cpu == policy->cpu))
		return;

	down_write(&policy->rwsem);
	policy->cpu = cpu;
	up_write(&policy->rwsem);
}

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with CPU hotplug and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
	bool recover_policy = !sif;

	pr_debug("adding CPU %u\n", cpu);

	/*
	 * Only possible if 'cpu' wasn't physically present earlier and we are
	 * here from subsys_interface add callback. A hotplug notifier will
	 * follow and we will handle it like logical CPU hotplug then. For now,
	 * just create the sysfs link.
	 */
	if (cpu_is_offline(cpu))
		return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);

	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy && !policy_is_inactive(policy)) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		ret = cpufreq_add_policy_cpu(policy, cpu, dev);
		up_read(&cpufreq_rwsem);
		return ret;
	}

	/*
	 * Restore the saved policy when doing light-weight init and fall back
	 * to the full init if that fails.
	 */
	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
	if (!policy) {
		recover_policy = false;
		policy = cpufreq_policy_alloc(dev);
		if (!policy)
			goto nomem_out;
	}

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (recover_policy && cpu != policy->cpu)
		update_policy_cpu(policy, cpu);

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	down_write(&policy->rwsem);

	/* related_cpus should at least include policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (!recover_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		for_each_cpu(j, policy->related_cpus)
			per_cpu(cpufreq_cpu_data, j) = policy;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases the
	 * CPU might be unstable if it has to run on that frequency for long
	 * durations, so it's better to set it to a frequency which is
	 * specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

	if (!recover_policy) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	cpufreq_init_policy(policy);

	if (!recover_policy) {
		policy->user_policy.policy = policy->policy;
		policy->user_policy.governor = policy->governor;
	}
	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	up_read(&cpufreq_rwsem);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
err_get_freq:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	cpufreq_policy_put_kobj(policy, recover_policy);
	cpufreq_policy_free(policy);

nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}

static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (has_target() && cpus == 1)
		strncpy(policy->last_governor, policy->governor->name,
			CPUFREQ_NAME_LEN);
	up_write(&policy->rwsem);

	if (cpu != policy->cpu)
		return 0;

	if (cpus > 1)
		/* Nominate new CPU */
		update_policy_cpu(policy, cpumask_any_but(policy->cpus, cpu));
	else if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	return 0;
}

static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* Not the last CPU of this policy; start the governor again? */
	if (!policy_is_inactive(policy)) {
		if (!has_target())
			return 0;

		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}

		return 0;
	}

	/* If cpu is last user of policy, free policy */
	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		if (ret) {
			pr_err("%s: Failed to exit governor\n", __func__);
			return ret;
		}
	}

	/* Free the policy kobjects only if the driver is getting removed. */
	if (sif)
		cpufreq_policy_put_kobj(policy, true);

	/*
	 * Perform the ->exit() even during light-weight tear-down,
	 * since this is a core component, and is essential for the
	 * subsequent light-weight ->init() to succeed.
	 */
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

	if (sif)
		cpufreq_policy_free(policy);

	return 0;
}

/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int ret;

	/*
	 * Only possible if 'cpu' is getting physically removed now. A hotplug
	 * notifier should have already been called and we just need to remove
	 * link or free policy here.
	 */
	if (cpu_is_offline(cpu)) {
		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
		struct cpumask mask;

		if (!policy)
			return 0;

		cpumask_copy(&mask, policy->related_cpus);
		cpumask_clear_cpu(cpu, &mask);

		/*
		 * Free policy only if all policy->related_cpus are removed
		 * physically.
		 */
		if (cpumask_intersects(&mask, cpu_present_mask)) {
			remove_cpu_dev_symlink(policy, cpu);
			return 0;
		}

		cpufreq_policy_put_kobj(policy, true);
		cpufreq_policy_free(policy);
		return 0;
	}

	ret = __cpufreq_remove_dev_prepare(dev, sif);

	if (!ret)
		ret = __cpufreq_remove_dev_finish(dev, sif);

	return ret;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
 *	in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be the same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	/* Updating inactive policies is invalid, so avoid doing that. */
	if (unlikely(policy_is_inactive(policy)))
		return ret_freq;

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

8a25a2fd
KS
1706static struct subsys_interface cpufreq_interface = {
1707 .name = "cpufreq",
1708 .subsys = &cpu_subsys,
1709 .add_dev = cpufreq_add_dev,
1710 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1711};
1712
/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_err("%s: suspend_freq can't be zero\n", __func__);
		return -EINVAL;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
				__func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);

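/*
 * Illustrative sketch (not part of the original file): a platform driver
 * opts into the helper above by filling policy->suspend_freq from its
 * ->init() callback and pointing ->suspend at cpufreq_generic_suspend().
 * All "example_*" names and the 800000 kHz value are hypothetical.
 */
static int example_suspend_aware_init(struct cpufreq_policy *policy)
{
	policy->suspend_freq = 800000;	/* kHz to run at across suspend */
	/* ... remaining policy setup (freq table, cpumask, latency) ... */
	return 0;
}

static struct cpufreq_driver example_suspend_aware_driver __maybe_unused = {
	.init		= example_suspend_aware_init,
	.suspend	= cpufreq_generic_suspend,
	/* ... ->verify and ->target_index/->target as usual ... */
};
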
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system-wide suspend/hibernate cycles to suspend governors,
 * as some platforms can't change the CPU frequency after this point in the
 * suspend cycle: some of the devices (like i2c controllers or regulators)
 * used for changing the frequency are themselves suspended soon afterwards.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target())
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
			pr_err("%s: Failed to stop governor for policy: %p\n",
				__func__, policy);
		else if (cpufreq_driver->suspend
		    && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system-wide suspend/hibernate cycles to resume governors
 * that were suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	cpufreq_suspended = false;

	if (!has_target())
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
			pr_err("%s: Failed to start governor for policy: %p\n",
				__func__, policy);
	}

	/*
	 * Schedule a call to cpufreq_update_policy() for the first online
	 * CPU, as that one won't be hotplugged out on suspend. It will
	 * verify that the current freq is in sync with what we believe
	 * it to be.
	 */
	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
	if (WARN_ON(!policy))
		return;

	schedule_work(&policy->update);
}

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);

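/*
 * Illustrative sketch (not part of the original file): code elsewhere in
 * the kernel can use the accessor above to check which cpufreq driver, if
 * any, is active. The function name is hypothetical.
 */
static bool __maybe_unused example_driver_is(const char *name)
{
	const char *cur = cpufreq_get_current_driver();

	return cur && !strcmp(cur, name);
}
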
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);

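/*
 * Illustrative sketch (not part of the original file): registering a
 * transition notifier. The callback is invoked once with CPUFREQ_PRECHANGE
 * and once with CPUFREQ_POSTCHANGE around every frequency switch. All
 * "example_*" names are hypothetical.
 */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: switched from %u kHz to %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb __maybe_unused = {
	.notifier_call = example_transition_cb,
};

/*
 * A client would then call:
 *	cpufreq_register_notifier(&example_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 * and cpufreq_unregister_notifier() with the same arguments on teardown.
 */
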
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}

static int __target_index(struct cpufreq_policy *policy,
			  struct cpufreq_frequency_table *freq_table, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	int retval = -EINVAL;
	bool notify;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = freq_table[index].frequency;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}

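/*
 * Illustrative sketch (not part of the original file): a driver whose PLL
 * must be parked on a safe bypass rate between switches would provide the
 * two callbacks below; the core then sequences the extra transition as
 * __target_index() shows above. The "example_*" names and the fixed
 * 500000 kHz bypass rate are hypothetical.
 */
static unsigned int __maybe_unused
example_get_intermediate(struct cpufreq_policy *policy, unsigned int index)
{
	/* returning 0 would tell the core no intermediate step is needed */
	return 500000;
}

static int __maybe_unused
example_target_intermediate(struct cpufreq_policy *policy, unsigned int index)
{
	/* re-parent the CPU clock to the bypass PLL here */
	return 0;
}
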
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int retval = -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call, as we are checking it again
	 * after finding the index. But it is left intentionally for cases
	 * where exactly the same frequency is requested again, so that we
	 * can save a few function calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		retval = __target_index(policy, freq_table, index);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);

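/*
 * Illustrative sketch (not part of the original file): how a caller would
 * request a frequency. CPUFREQ_RELATION_L selects the lowest frequency at
 * or above the target, CPUFREQ_RELATION_H the highest at or below it. The
 * function name is hypothetical.
 */
static int __maybe_unused example_request_freq(struct cpufreq_policy *policy,
					       unsigned int target_khz)
{
	/* takes policy->rwsem; use __cpufreq_driver_target() if already held */
	return cpufreq_driver_target(policy, target_khz, CPUFREQ_RELATION_L);
}
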
static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event)
{
	int ret;

	/*
	 * Must only be defined when the default governor is known to have
	 * latency restrictions, like e.g. conservative or ondemand.
	 * Kconfig already ensures that this is the case.
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * The governor might not be initialized here if an ACPI _PPC change
	 * notification happened, so check for it.
	 */
	if (!policy->governor)
		return -EINVAL;

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		}
	}

	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
	    ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}

int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);

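/*
 * Illustrative sketch (not part of the original file): a trivial governor,
 * modeled on the performance governor, and its registration. All
 * "example_*" names are hypothetical; ->governor() handles the
 * CPUFREQ_GOV_* events dispatched by __cpufreq_governor() above.
 */
static int example_gov_fn(struct cpufreq_policy *policy, unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* called with policy->rwsem held, so use the __ variant */
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		break;
	default:
		break;
	}
	return 0;
}

static struct cpufreq_governor example_governor __maybe_unused = {
	.name		= "example",
	.governor	= example_gov_fn,
	.owner		= THIS_MODULE,
};

/* A module would then call cpufreq_register_governor(&example_governor). */
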
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);


/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU whose policy is to be read
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);

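/*
 * Illustrative sketch (not part of the original file): a consumer (e.g. a
 * thermal driver) reading the current limits through the snapshot API. The
 * function name is hypothetical; note that struct cpufreq_policy is fairly
 * large, so real callers may prefer heap storage over the stack.
 */
static void __maybe_unused example_show_limits(unsigned int cpu)
{
	struct cpufreq_policy snapshot;

	if (cpufreq_get_policy(&snapshot, cpu))
		return;

	pr_debug("cpu%u: policy spans %u - %u kHz\n",
		 cpu, snapshot.min, snapshot.max);
}
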
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			goto out;

		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return -EINVAL;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;
	int ret;

	if (!policy)
		return -ENODEV;

	down_write(&policy->rwsem);

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;
	new_policy.policy = policy->user_policy.policy;
	new_policy.governor = policy->user_policy.governor;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		new_policy.cur = cpufreq_driver->get(cpu);
		if (WARN_ON(!new_policy.cur)) {
			ret = -EIO;
			goto unlock;
		}

		if (!policy->cur) {
			pr_debug("Driver did not initialize current freq\n");
			policy->cur = new_policy.cur;
		} else {
			if (policy->cur != new_policy.cur && has_target())
				cpufreq_out_of_sync(policy, new_policy.cur);
		}
	}

	ret = cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

static int cpufreq_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			cpufreq_add_dev(dev, NULL);
			break;

		case CPU_DOWN_PREPARE:
			__cpufreq_remove_dev_prepare(dev, NULL);
			break;

		case CPU_POST_DEAD:
			__cpufreq_remove_dev_finish(dev, NULL);
			break;

		case CPU_DOWN_FAILED:
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}

int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}

int cpufreq_boost_supported(void)
{
	if (likely(cpufreq_driver))
		return cpufreq_driver->boost_supported;

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_supported);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);

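/*
 * Illustrative sketch (not part of the original file): the minimal shape of
 * a ->target_index() driver that passes the validation above (verify + init
 * + exactly one of setpolicy/target/target_index). All "example_*" names
 * and the two frequencies are hypothetical; the generic table helpers are
 * the real cpufreq ones.
 */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 400000 },
	{ .frequency = 800000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_init(struct cpufreq_policy *policy)
{
	return cpufreq_table_validate_and_show(policy, example_freq_table);
}

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* program the clock source for example_freq_table[index].frequency */
	return 0;
}

static struct cpufreq_driver example_driver __maybe_unused = {
	.name		= "example",
	.flags		= CPUFREQ_STICKY,
	.verify		= cpufreq_generic_frequency_table_verify,
	.init		= example_init,
	.target_index	= example_target_index,
	.attr		= cpufreq_generic_attr,
};

/* A module would then call cpufreq_register_driver(&example_driver). */
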
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);