cpufreq: Remove cpufreq_generic_exit()
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
2f0aea93 29#include <linux/suspend.h>
5ff0a268 30#include <linux/tick.h>
6f4f2723
TR
31#include <trace/events/power.h>
32
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
/* Policy pointers preserved across suspend/hotplug so they can be restored */
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
/* Serializes governor state transitions; shared with governor code */
DEFINE_MUTEX(cpufreq_governor_lock);
/* List of all active policies in the system */
static LIST_HEAD(cpufreq_policy_list);

/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

/* True when the driver scales frequency itself (target callbacks) rather
 * than delegating policy selection to hardware via ->setpolicy. */
static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/*
 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
 * sections
 */
static DECLARE_RWSEM(cpufreq_rwsem);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

/* Guards against registering transition notifiers before the SRCU head
 * below has been initialized. */
static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	/* SRCU notifier heads need runtime initialization, hence the
	 * pure_initcall instead of a static initializer. */
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
/* Non-zero once disable_cpufreq() has been called; checked throughout
 * the core to turn cpufreq operations into no-ops. */
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
/* List of registered governors and the mutex protecting it */
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 98
4d5dcc42
VK
99bool have_governor_per_policy(void)
100{
0b981e70 101 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
4d5dcc42 102}
3f869d6d 103EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 104
944e9a03
VK
105struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
106{
107 if (have_governor_per_policy())
108 return &policy->kobj;
109 else
110 return cpufreq_global_kobject;
111}
112EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
113
72a4ce34
VK
114static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
115{
116 u64 idle_time;
117 u64 cur_wall_time;
118 u64 busy_time;
119
120 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
121
122 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
123 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
128
129 idle_time = cur_wall_time - busy_time;
130 if (wall)
131 *wall = cputime_to_usecs(cur_wall_time);
132
133 return cputime_to_usecs(idle_time);
134}
135
136u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
137{
138 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
139
140 if (idle_time == -1ULL)
141 return get_cpu_idle_time_jiffy(cpu, wall);
142 else if (!io_busy)
143 idle_time += get_cpu_iowait_time_us(cpu, wall);
144
145 return idle_time;
146}
147EXPORT_SYMBOL_GPL(get_cpu_idle_time);
148
70e9e778
VK
149/*
150 * This is a generic cpufreq init() routine which can be used by cpufreq
151 * drivers of SMP systems. It will do following:
152 * - validate & show freq table passed
153 * - set policies transition latency
154 * - policy->cpus with all possible CPUs
155 */
156int cpufreq_generic_init(struct cpufreq_policy *policy,
157 struct cpufreq_frequency_table *table,
158 unsigned int transition_latency)
159{
160 int ret;
161
162 ret = cpufreq_table_validate_and_show(policy, table);
163 if (ret) {
164 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
165 return ret;
166 }
167
168 policy->cpuinfo.transition_latency = transition_latency;
169
170 /*
171 * The driver only supports the SMP configuartion where all processors
172 * share the clock and voltage and clock.
173 */
174 cpumask_setall(policy->cpus);
175
176 return 0;
177}
178EXPORT_SYMBOL_GPL(cpufreq_generic_init);
179
652ed95d
VK
180unsigned int cpufreq_generic_get(unsigned int cpu)
181{
182 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
183
184 if (!policy || IS_ERR(policy->clk)) {
e837f9b5
JP
185 pr_err("%s: No %s associated to cpu: %d\n",
186 __func__, policy ? "clk" : "policy", cpu);
652ed95d
VK
187 return 0;
188 }
189
190 return clk_get_rate(policy->clk) / 1000;
191}
192EXPORT_SYMBOL_GPL(cpufreq_generic_get);
193
/* Only for cpufreq core internal use: policy lookup for @cpu without
 * taking any reference or lock — callers must ensure stability. */
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	return per_cpu(cpufreq_cpu_data, cpu);
}
199
/**
 * cpufreq_cpu_get - acquire a reference to @cpu's policy
 * @cpu: CPU number
 *
 * Returns the policy with its kobject reference taken and cpufreq_rwsem
 * read-held (pinning the driver module), or NULL.  Release both with
 * cpufreq_cpu_put().
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
		return NULL;

	if (!down_read_trylock(&cpufreq_rwsem))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = per_cpu(cpufreq_cpu_data, cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* On failure drop the rwsem here; on success cpufreq_cpu_put() does */
	if (!policy)
		up_read(&cpufreq_rwsem);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
229
/**
 * cpufreq_cpu_put - release a reference obtained with cpufreq_cpu_get()
 * @policy: policy returned by a successful cpufreq_cpu_get()
 *
 * Drops the kobject reference and the cpufreq_rwsem read lock.
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	if (cpufreq_disabled())
		return;

	kobject_put(&policy->kobj);
	up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
239
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
/* Reference loops_per_jiffy and the frequency it was sampled at */
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* Capture the reference pair lazily on the first transition */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
}
#else
/* SMP: per-CPU loops_per_jiffy is handled by the architecture */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
281
/*
 * Notify one CPU's worth of a frequency transition: runs the SRCU
 * transition notifier chain and adjust_jiffies() for PRE/POSTCHANGE,
 * and keeps policy->cur in sync on POSTCHANGE.  Must not be called
 * with interrupts disabled (notifiers may sleep).
 */
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				/* trust the core's view over the driver's */
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
bb176f7d 326
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	/* Notify once per CPU in the policy, with freqs->cpu updated */
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
342
/* Do post notifications when there are chances that transition has failed */
void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	/* Failed transition: announce a "change back" to the old frequency
	 * so listeners end up with a consistent view. */
	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
356
1da177e4 357
1da177e4
LT
358/*********************************************************************
359 * SYSFS INTERFACE *
360 *********************************************************************/
8a5c74a1 361static ssize_t show_boost(struct kobject *kobj,
6f19efc0
LM
362 struct attribute *attr, char *buf)
363{
364 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
365}
366
367static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
368 const char *buf, size_t count)
369{
370 int ret, enable;
371
372 ret = sscanf(buf, "%d", &enable);
373 if (ret != 1 || enable < 0 || enable > 1)
374 return -EINVAL;
375
376 if (cpufreq_boost_trigger_state(enable)) {
e837f9b5
JP
377 pr_err("%s: Cannot %s BOOST!\n",
378 __func__, enable ? "enable" : "disable");
6f19efc0
LM
379 return -EINVAL;
380 }
381
e837f9b5
JP
382 pr_debug("%s: cpufreq BOOST %s\n",
383 __func__, enable ? "enabled" : "disabled");
6f19efc0
LM
384
385 return count;
386}
387define_one_global_rw(boost);
1da177e4 388
3bcb09a3
JF
389static struct cpufreq_governor *__find_governor(const char *str_governor)
390{
391 struct cpufreq_governor *t;
392
393 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 394 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
395 return t;
396
397 return NULL;
398}
399
/**
 * cpufreq_parse_governor - parse a governor string
 * @str_governor: name supplied by userspace
 * @policy: filled with a CPUFREQ_POLICY_* constant for setpolicy drivers
 * @governor: filled with the governor for drivers with target callbacks
 *
 * For target-style drivers this also tries to modprobe the governor
 * module if it is not registered yet.  Returns 0 on success, -EINVAL
 * otherwise.
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (has_target()) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			/* Drop the mutex while loading the module: the
			 * governor registers itself and needs the lock. */
			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
1da177e4 448
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

/* Forward declaration: defined later in this file */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);
1da177e4
LT
/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 *
 * Parses one "unsigned int" from @buf into a copy of the current policy
 * and applies it through cpufreq_set_policy(); user_policy records the
 * value actually accepted by the core.
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret;							\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = cpufreq_set_policy(policy, &new_policy);			\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
1da177e4
LT
500
501/**
502 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
503 */
905d77cd
DJ
504static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
505 char *buf)
1da177e4 506{
5a01f2e8 507 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
508 if (!cur_freq)
509 return sprintf(buf, "<unknown>");
510 return sprintf(buf, "%u\n", cur_freq);
511}
512
1da177e4
LT
513/**
514 * show_scaling_governor - show the current policy for the specified CPU
515 */
905d77cd 516static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 517{
29464f28 518 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
519 return sprintf(buf, "powersave\n");
520 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
521 return sprintf(buf, "performance\n");
522 else if (policy->governor)
4b972f0b 523 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 524 policy->governor->name);
1da177e4
LT
525 return -EINVAL;
526}
527
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	/* Bounded read: str_governor holds at most 15 chars + NUL */
	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);

	/* NOTE(review): user_policy is updated from the resulting policy
	 * even when cpufreq_set_policy() failed — confirm intentional. */
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
560
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}
568
569/**
570 * show_scaling_available_governors - show the available CPUfreq governors
571 */
905d77cd
DJ
572static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
573 char *buf)
1da177e4
LT
574{
575 ssize_t i = 0;
576 struct cpufreq_governor *t;
577
9c0ebcf7 578 if (!has_target()) {
1da177e4
LT
579 i += sprintf(buf, "performance powersave");
580 goto out;
581 }
582
583 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
584 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
585 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 586 goto out;
4b972f0b 587 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 588 }
7d5e350f 589out:
1da177e4
LT
590 i += sprintf(&buf[i], "\n");
591 return i;
592}
e8628dd0 593
f4fd3797 594ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
595{
596 ssize_t i = 0;
597 unsigned int cpu;
598
835481d9 599 for_each_cpu(cpu, mask) {
1da177e4
LT
600 if (i)
601 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
602 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
603 if (i >= (PAGE_SIZE - 5))
29464f28 604 break;
1da177e4
LT
605 }
606 i += sprintf(&buf[i], "\n");
607 return i;
608}
f4fd3797 609EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 610
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}
9e76988e 628static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 629 const char *buf, size_t count)
9e76988e
VP
630{
631 unsigned int freq = 0;
632 unsigned int ret;
633
879000f9 634 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
635 return -EINVAL;
636
637 ret = sscanf(buf, "%u", &freq);
638 if (ret != 1)
639 return -EINVAL;
640
641 policy->governor->store_setspeed(policy, freq);
642
643 return count;
644}
645
646static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
647{
879000f9 648 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
649 return sprintf(buf, "<unsupported>\n");
650
651 return policy->governor->show_setspeed(policy, buf);
652}
1da177e4 653
e2f74f35 654/**
8bf1ac72 655 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
656 */
657static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
658{
659 unsigned int limit;
660 int ret;
1c3d85dd
RW
661 if (cpufreq_driver->bios_limit) {
662 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
663 if (!ret)
664 return sprintf(buf, "%u\n", limit);
665 }
666 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
667}
668
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

/* Attributes created for every policy kobject.  Conditional ones
 * (cpuinfo_cur_freq, scaling_cur_freq, bios_limit) and per-driver
 * attributes are added separately in cpufreq_add_dev_interface(). */
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
698
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

/* Generic sysfs read: dispatch to the attribute's ->show() with the
 * policy read-locked and the driver module pinned via cpufreq_rwsem. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	/* Driver may be unregistering; fail rather than block */
	if (!down_read_trylock(&cpufreq_rwsem))
		return -EINVAL;

	down_read(&policy->rwsem);

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	up_read(&policy->rwsem);
	up_read(&cpufreq_rwsem);

	return ret;
}
723
/* Generic sysfs write: dispatch to the attribute's ->store() with the
 * policy write-locked.  get_online_cpus() keeps the CPU from being
 * hot-removed while the write runs. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
			const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (!cpu_online(policy->cpu))
		goto unlock;

	/* Driver may be unregistering; fail rather than block */
	if (!down_read_trylock(&cpufreq_rwsem))
		goto unlock;

	down_write(&policy->rwsem);

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	up_write(&policy->rwsem);

	up_read(&cpufreq_rwsem);
unlock:
	put_online_cpus();

	return ret;
}
754
/* kobject release callback: signals whoever is waiting in
 * cpufreq_policy_put_kobj() that the last reference is gone. */
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

/* kobject type for per-policy sysfs directories */
static struct kobj_type ktype_cpufreq = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
	.release = cpufreq_sysfs_release,
};
772
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* Usage count for /sys/devices/system/cpu/cpufreq: the kobject is added
 * on the first get and deleted on the last put.
 * NOTE(review): the counter itself is not protected by any lock here —
 * verify callers serialize get/put. */
static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
	if (!cpufreq_global_kobject_usage++)
		return kobject_add(cpufreq_global_kobject,
				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
	if (!--cpufreq_global_kobject_usage)
		kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

/* Create a file under the global cpufreq kobject, holding a usage
 * reference that is dropped again if creation fails. */
int cpufreq_sysfs_create_file(const struct attribute *attr)
{
	int ret = cpufreq_get_global_kobject();

	if (!ret) {
		ret = sysfs_create_file(cpufreq_global_kobject, attr);
		if (ret)
			cpufreq_put_global_kobject();
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
	sysfs_remove_file(cpufreq_global_kobject, attr);
	cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
815
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	/* Link every CPU sharing the policy (except the owner, which gets
	 * the real directory) to the policy's kobject.
	 * NOTE(review): links created before a failure are not removed
	 * here — presumably the caller's teardown handles that; verify. */
	for_each_cpu(j, policy->cpus) {
		struct device *cpu_dev;

		if (j == policy->cpu)
			continue;

		pr_debug("Adding link for CPU: %u\n", j);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			break;
	}
	return ret;
}
837
/* Create the policy's sysfs directory under @dev and populate it with
 * driver attributes, conditional core attributes and per-CPU symlinks.
 * On any failure the kobject is released and its teardown awaited. */
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	/* cpuinfo_cur_freq only makes sense if the driver can read HW */
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (has_target()) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	ret = cpufreq_add_dev_symlink(policy);
	if (ret)
		goto err_out_kobj_put;

	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
885
886static void cpufreq_init_policy(struct cpufreq_policy *policy)
887{
6e2c89d1 888 struct cpufreq_governor *gov = NULL;
e18f1682
SB
889 struct cpufreq_policy new_policy;
890 int ret = 0;
891
d5b73cd8 892 memcpy(&new_policy, policy, sizeof(*policy));
a27a9ab7 893
6e2c89d1 894 /* Update governor of new_policy to the governor used before hotplug */
895 gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
896 if (gov)
897 pr_debug("Restoring governor %s for cpu %d\n",
898 policy->governor->name, policy->cpu);
899 else
900 gov = CPUFREQ_DEFAULT_GOVERNOR;
901
902 new_policy.governor = gov;
903
a27a9ab7
JB
904 /* Use the default policy if its valid. */
905 if (cpufreq_driver->setpolicy)
6e2c89d1 906 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
ecf7e461
DJ
907
908 /* set default policy */
037ce839 909 ret = cpufreq_set_policy(policy, &new_policy);
ecf7e461 910 if (ret) {
2d06d8c4 911 pr_debug("setting policy failed\n");
1c3d85dd
RW
912 if (cpufreq_driver->exit)
913 cpufreq_driver->exit(policy);
ecf7e461 914 }
909a694e
DJ
915}
916
#ifdef CONFIG_HOTPLUG_CPU
/* Attach an onlining @cpu to an already-existing @policy (CPUs sharing
 * a clock).  The governor is stopped while policy->cpus is updated and
 * restarted afterwards; finally the per-CPU sysfs symlink is created. */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev)
{
	int ret = 0;
	unsigned long flags;

	if (has_target()) {
		/* Governor must not run while the CPU mask changes */
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	down_write(&policy->rwsem);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	up_write(&policy->rwsem);

	if (has_target()) {
		if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
		    (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif
1da177e4 953
8414809c
SB
954static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
955{
956 struct cpufreq_policy *policy;
957 unsigned long flags;
958
44871c9c 959 read_lock_irqsave(&cpufreq_driver_lock, flags);
8414809c
SB
960
961 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
962
44871c9c 963 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
8414809c 964
6e2c89d1 965 policy->governor = NULL;
966
8414809c
SB
967 return policy;
968}
969
/* Allocate and minimally initialize a policy object: zeroed struct, both
 * cpumasks, list head and rwsem.  Returns NULL on allocation failure,
 * with partial allocations rolled back. */
static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
	struct cpufreq_policy *policy;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);

	return policy;

err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
996
/*
 * cpufreq_policy_put_kobj - drop the sysfs kobject of @policy and wait
 * until its last reference is gone.
 *
 * Notifies CPUFREQ_REMOVE_POLICY listeners, then releases the kobject.
 * The kobj/completion pointers are sampled under a read-locked rwsem;
 * kobject_put() itself is called outside the lock because the release
 * callback may need the write side.  The completion is signalled by the
 * kobject release path, so waiting on it guarantees no sysfs user still
 * holds a reference when we return.
 */
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_REMOVE_POLICY, policy);

	down_read(&policy->rwsem);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_read(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
1020
/*
 * cpufreq_policy_free - release everything cpufreq_policy_alloc() set up:
 * both cpumasks and the policy structure itself.  The caller must ensure
 * no further users exist (see cpufreq_policy_put_kobj()).
 */
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
1027
/*
 * update_policy_cpu - make @cpu the CPU that owns @policy's sysfs directory.
 *
 * Records the previous owner in policy->last_cpu (consumers of the
 * CPUFREQ_UPDATE_POLICY_CPU notification rely on it) and switches
 * policy->cpu under the write-locked rwsem.  The notifier runs after the
 * lock is dropped since listeners may take the rwsem themselves.
 * Calling this with cpu == policy->cpu is a caller bug, hence the WARN_ON.
 */
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	if (WARN_ON(cpu == policy->cpu))
		return;

	down_write(&policy->rwsem);

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	up_write(&policy->rwsem);

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}
1043
a82fab29
SB
1044static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
1045 bool frozen)
1da177e4 1046{
fcf80582 1047 unsigned int j, cpu = dev->id;
65922465 1048 int ret = -ENOMEM;
1da177e4 1049 struct cpufreq_policy *policy;
1da177e4 1050 unsigned long flags;
90e41bac 1051#ifdef CONFIG_HOTPLUG_CPU
1b274294 1052 struct cpufreq_policy *tpolicy;
90e41bac 1053#endif
1da177e4 1054
c32b6b8e
AR
1055 if (cpu_is_offline(cpu))
1056 return 0;
1057
2d06d8c4 1058 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
1059
1060#ifdef CONFIG_SMP
1061 /* check whether a different CPU already registered this
1062 * CPU because it is in the same boat. */
1063 policy = cpufreq_cpu_get(cpu);
1064 if (unlikely(policy)) {
8ff69732 1065 cpufreq_cpu_put(policy);
1da177e4
LT
1066 return 0;
1067 }
5025d628 1068#endif
fcf80582 1069
6eed9404
VK
1070 if (!down_read_trylock(&cpufreq_rwsem))
1071 return 0;
1072
fcf80582
VK
1073#ifdef CONFIG_HOTPLUG_CPU
1074 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 1075 read_lock_irqsave(&cpufreq_driver_lock, flags);
1b274294
VK
1076 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1077 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
0d1857a1 1078 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
42f921a6 1079 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
6eed9404
VK
1080 up_read(&cpufreq_rwsem);
1081 return ret;
2eaa3e2d 1082 }
fcf80582 1083 }
0d1857a1 1084 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1085#endif
1086
72368d12
RW
1087 /*
1088 * Restore the saved policy when doing light-weight init and fall back
1089 * to the full init if that fails.
1090 */
1091 policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
1092 if (!policy) {
1093 frozen = false;
8414809c 1094 policy = cpufreq_policy_alloc();
72368d12
RW
1095 if (!policy)
1096 goto nomem_out;
1097 }
0d66b91e
SB
1098
1099 /*
1100 * In the resume path, since we restore a saved policy, the assignment
1101 * to policy->cpu is like an update of the existing policy, rather than
1102 * the creation of a brand new one. So we need to perform this update
1103 * by invoking update_policy_cpu().
1104 */
1105 if (frozen && cpu != policy->cpu)
1106 update_policy_cpu(policy, cpu);
1107 else
1108 policy->cpu = cpu;
1109
835481d9 1110 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 1111
1da177e4 1112 init_completion(&policy->kobj_unregister);
65f27f38 1113 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
1114
1115 /* call driver. From then on the cpufreq must be able
1116 * to accept all calls to ->verify and ->setpolicy for this CPU
1117 */
1c3d85dd 1118 ret = cpufreq_driver->init(policy);
1da177e4 1119 if (ret) {
2d06d8c4 1120 pr_debug("initialization failed\n");
2eaa3e2d 1121 goto err_set_policy_cpu;
1da177e4 1122 }
643ae6e8 1123
5a7e56a5
VK
1124 /* related cpus should atleast have policy->cpus */
1125 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1126
1127 /*
1128 * affected cpus must always be the one, which are online. We aren't
1129 * managing offline cpus here.
1130 */
1131 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1132
1133 if (!frozen) {
1134 policy->user_policy.min = policy->min;
1135 policy->user_policy.max = policy->max;
1136 }
1137
4e97b631 1138 down_write(&policy->rwsem);
652ed95d
VK
1139 write_lock_irqsave(&cpufreq_driver_lock, flags);
1140 for_each_cpu(j, policy->cpus)
1141 per_cpu(cpufreq_cpu_data, j) = policy;
1142 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1143
da60ce9f
VK
1144 if (cpufreq_driver->get) {
1145 policy->cur = cpufreq_driver->get(policy->cpu);
1146 if (!policy->cur) {
1147 pr_err("%s: ->get() failed\n", __func__);
1148 goto err_get_freq;
1149 }
1150 }
1151
d3916691
VK
1152 /*
1153 * Sometimes boot loaders set CPU frequency to a value outside of
1154 * frequency table present with cpufreq core. In such cases CPU might be
1155 * unstable if it has to run on that frequency for long duration of time
1156 * and so its better to set it to a frequency which is specified in
1157 * freq-table. This also makes cpufreq stats inconsistent as
1158 * cpufreq-stats would fail to register because current frequency of CPU
1159 * isn't found in freq-table.
1160 *
1161 * Because we don't want this change to effect boot process badly, we go
1162 * for the next freq which is >= policy->cur ('cur' must be set by now,
1163 * otherwise we will end up setting freq to lowest of the table as 'cur'
1164 * is initialized to zero).
1165 *
1166 * We are passing target-freq as "policy->cur - 1" otherwise
1167 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1168 * equal to target-freq.
1169 */
1170 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1171 && has_target()) {
1172 /* Are we running at unknown frequency ? */
1173 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1174 if (ret == -EINVAL) {
1175 /* Warn user and fix it */
1176 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1177 __func__, policy->cpu, policy->cur);
1178 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1179 CPUFREQ_RELATION_L);
1180
1181 /*
1182 * Reaching here after boot in a few seconds may not
1183 * mean that system will remain stable at "unknown"
1184 * frequency for longer duration. Hence, a BUG_ON().
1185 */
1186 BUG_ON(ret);
1187 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1188 __func__, policy->cpu, policy->cur);
1189 }
1190 }
1191
a1531acd
TR
1192 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1193 CPUFREQ_START, policy);
1194
a82fab29 1195 if (!frozen) {
308b60e7 1196 ret = cpufreq_add_dev_interface(policy, dev);
a82fab29
SB
1197 if (ret)
1198 goto err_out_unregister;
fcd7af91
VK
1199 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1200 CPUFREQ_CREATE_POLICY, policy);
a82fab29 1201 }
8ff69732 1202
9515f4d6
VK
1203 write_lock_irqsave(&cpufreq_driver_lock, flags);
1204 list_add(&policy->policy_list, &cpufreq_policy_list);
1205 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1206
e18f1682
SB
1207 cpufreq_init_policy(policy);
1208
08fd8c1c
VK
1209 if (!frozen) {
1210 policy->user_policy.policy = policy->policy;
1211 policy->user_policy.governor = policy->governor;
1212 }
4e97b631 1213 up_write(&policy->rwsem);
08fd8c1c 1214
038c5b3e 1215 kobject_uevent(&policy->kobj, KOBJ_ADD);
6eed9404
VK
1216 up_read(&cpufreq_rwsem);
1217
2d06d8c4 1218 pr_debug("initialization complete\n");
87c32271 1219
1da177e4
LT
1220 return 0;
1221
1da177e4 1222err_out_unregister:
652ed95d 1223err_get_freq:
0d1857a1 1224 write_lock_irqsave(&cpufreq_driver_lock, flags);
474deff7 1225 for_each_cpu(j, policy->cpus)
7a6aedfa 1226 per_cpu(cpufreq_cpu_data, j) = NULL;
0d1857a1 1227 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1228
da60ce9f
VK
1229 if (cpufreq_driver->exit)
1230 cpufreq_driver->exit(policy);
2eaa3e2d 1231err_set_policy_cpu:
72368d12
RW
1232 if (frozen) {
1233 /* Do not leave stale fallback data behind. */
1234 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
42f921a6 1235 cpufreq_policy_put_kobj(policy);
72368d12 1236 }
e9698cc5 1237 cpufreq_policy_free(policy);
42f921a6 1238
1da177e4 1239nomem_out:
6eed9404
VK
1240 up_read(&cpufreq_rwsem);
1241
1da177e4
LT
1242 return ret;
1243}
1244
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.  Thin wrapper around
 * __cpufreq_add_dev() with frozen == false (full, non-resume init).
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return __cpufreq_add_dev(dev, sif, false);
}
1258
/*
 * cpufreq_nominate_new_policy_cpu - move @policy's sysfs directory away
 * from @old_cpu to any remaining sibling.
 *
 * Picks an arbitrary other CPU from policy->cpus and moves the policy
 * kobject under its device.  On kobject_move() failure, @old_cpu is put
 * back into the mask and a plain symlink is recreated as best-effort
 * recovery, and -EINVAL is returned.
 *
 * Returns the id of the new owner CPU on success, negative error on
 * failure (note: cpu ids are >= 0, so the caller's ">= 0" check is safe).
 */
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
					   unsigned int old_cpu)
{
	struct device *cpu_dev;
	int ret;

	/* first sibling now owns the new sysfs dir */
	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);

		down_write(&policy->rwsem);
		cpumask_set_cpu(old_cpu, policy->cpus);
		up_write(&policy->rwsem);

		/* best-effort: restore the symlink we removed above */
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");

		return -EINVAL;
	}

	return cpu_dev->id;
}
1285
/*
 * __cpufreq_remove_dev_prepare - first half of CPU removal.
 *
 * Stops the governor, remembers its name for a later re-plug (non-setpolicy
 * drivers only), and sorts out sysfs ownership: a non-owner CPU just loses
 * its symlink, while the owning CPU hands the policy directory over to a
 * sibling via cpufreq_nominate_new_policy_cpu().
 *
 * When @frozen (suspend path), the policy is additionally parked in the
 * per-cpu fallback slot so the resume path can restore it.
 *
 * The actual mask shrinking / freeing happens in
 * __cpufreq_remove_dev_finish().  Returns 0 or a negative error code.
 */
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif,
					bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu, ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (frozen)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (has_target()) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	/* Remember the governor so a re-plugged CPU gets it back. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);

	down_read(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);
	up_read(&policy->rwsem);

	if (cpu != policy->cpu) {
		/* Non-owner CPUs only carry a symlink to the policy dir. */
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Owner is leaving but siblings remain: move the dir. */
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
		if (new_cpu >= 0) {
			update_policy_cpu(policy, new_cpu);

			if (!frozen) {
				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
					 __func__, new_cpu, cpu);
			}
		}
	}

	return 0;
}
1344
/*
 * __cpufreq_remove_dev_finish - second half of CPU removal.
 *
 * Drops @cpu from the policy mask.  If it was the last CPU, the governor is
 * fully exited, the kobject is released (unless @frozen, which keeps it for
 * the resume path), the driver's ->exit() runs, and the policy is unlinked
 * from cpufreq_policy_list and freed.  Otherwise the governor is simply
 * restarted for the remaining CPUs.
 *
 * Must be called after __cpufreq_remove_dev_prepare() succeeded.
 * Returns 0 or a negative error code.
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	down_write(&policy->rwsem);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	up_write(&policy->rwsem);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (has_target()) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
				       __func__);
				return ret;
			}
		}

		/* Keep the kobject alive across suspend/resume. */
		if (!frozen)
			cpufreq_policy_put_kobj(policy);

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!frozen)
			cpufreq_policy_free(policy);
	} else {
		/* Siblings remain: bring the governor back up. */
		if (has_target()) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
					(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",
				       __func__);
				return ret;
			}
		}
	}

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	return 0;
}
1414
cedb70af 1415/**
27a862e9 1416 * cpufreq_remove_dev - remove a CPU device
cedb70af
SB
1417 *
1418 * Removes the cpufreq interface for a CPU device.
cedb70af 1419 */
8a25a2fd 1420static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1421{
8a25a2fd 1422 unsigned int cpu = dev->id;
27a862e9 1423 int ret;
ec28297a
VP
1424
1425 if (cpu_is_offline(cpu))
1426 return 0;
1427
27a862e9
VK
1428 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1429
1430 if (!ret)
1431 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1432
1433 return ret;
5a01f2e8
VP
1434}
1435
65f27f38 1436static void handle_update(struct work_struct *work)
1da177e4 1437{
65f27f38
DH
1438 struct cpufreq_policy *policy =
1439 container_of(work, struct cpufreq_policy, update);
1440 unsigned int cpu = policy->cpu;
2d06d8c4 1441 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1442 cpufreq_update_policy(cpu);
1443}
1444
1445/**
bb176f7d
VK
1446 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1447 * in deep trouble.
1da177e4
LT
1448 * @cpu: cpu number
1449 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1450 * @new_freq: CPU frequency the CPU actually runs at
1451 *
29464f28
DJ
1452 * We adjust to current frequency first, and need to clean up later.
1453 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1454 */
e08f5f5b
GS
1455static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1456 unsigned int new_freq)
1da177e4 1457{
b43a7ffb 1458 struct cpufreq_policy *policy;
1da177e4 1459 struct cpufreq_freqs freqs;
b43a7ffb
VK
1460 unsigned long flags;
1461
e837f9b5
JP
1462 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1463 old_freq, new_freq);
1da177e4 1464
1da177e4
LT
1465 freqs.old = old_freq;
1466 freqs.new = new_freq;
b43a7ffb
VK
1467
1468 read_lock_irqsave(&cpufreq_driver_lock, flags);
1469 policy = per_cpu(cpufreq_cpu_data, cpu);
1470 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1471
1472 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1473 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1474}
1475
32ee8c3e 1476/**
4ab70df4 1477 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1478 * @cpu: CPU number
1479 *
1480 * This is the last known freq, without actually getting it from the driver.
1481 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1482 */
1483unsigned int cpufreq_quick_get(unsigned int cpu)
1484{
9e21ba8b 1485 struct cpufreq_policy *policy;
e08f5f5b 1486 unsigned int ret_freq = 0;
95235ca2 1487
1c3d85dd
RW
1488 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1489 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1490
1491 policy = cpufreq_cpu_get(cpu);
95235ca2 1492 if (policy) {
e08f5f5b 1493 ret_freq = policy->cur;
95235ca2
VP
1494 cpufreq_cpu_put(policy);
1495 }
1496
4d34a67d 1497 return ret_freq;
95235ca2
VP
1498}
1499EXPORT_SYMBOL(cpufreq_quick_get);
1500
3d737108
JB
1501/**
1502 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1503 * @cpu: CPU number
1504 *
1505 * Just return the max possible frequency for a given CPU.
1506 */
1507unsigned int cpufreq_quick_get_max(unsigned int cpu)
1508{
1509 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1510 unsigned int ret_freq = 0;
1511
1512 if (policy) {
1513 ret_freq = policy->max;
1514 cpufreq_cpu_put(policy);
1515 }
1516
1517 return ret_freq;
1518}
1519EXPORT_SYMBOL(cpufreq_quick_get_max);
1520
5a01f2e8 1521static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1522{
7a6aedfa 1523 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1524 unsigned int ret_freq = 0;
5800043b 1525
1c3d85dd 1526 if (!cpufreq_driver->get)
4d34a67d 1527 return ret_freq;
1da177e4 1528
1c3d85dd 1529 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1530
e08f5f5b 1531 if (ret_freq && policy->cur &&
1c3d85dd 1532 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1533 /* verify no discrepancy between actual and
1534 saved value exists */
1535 if (unlikely(ret_freq != policy->cur)) {
1536 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1537 schedule_work(&policy->update);
1538 }
1539 }
1540
4d34a67d 1541 return ret_freq;
5a01f2e8 1542}
1da177e4 1543
5a01f2e8
VP
1544/**
1545 * cpufreq_get - get the current CPU frequency (in kHz)
1546 * @cpu: CPU number
1547 *
1548 * Get the CPU current (static) CPU frequency
1549 */
1550unsigned int cpufreq_get(unsigned int cpu)
1551{
999976e0 1552 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
5a01f2e8 1553 unsigned int ret_freq = 0;
5a01f2e8 1554
999976e0
AP
1555 if (policy) {
1556 down_read(&policy->rwsem);
1557 ret_freq = __cpufreq_get(cpu);
1558 up_read(&policy->rwsem);
5a01f2e8 1559
999976e0
AP
1560 cpufreq_cpu_put(policy);
1561 }
6eed9404 1562
4d34a67d 1563 return ret_freq;
1da177e4
LT
1564}
1565EXPORT_SYMBOL(cpufreq_get);
1566
/* Hooks cpufreq into the generic CPU subsystem for device add/remove. */
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
1573
e28867ea
VK
1574/*
1575 * In case platform wants some specific frequency to be configured
1576 * during suspend..
1577 */
1578int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1579{
1580 int ret;
1581
1582 if (!policy->suspend_freq) {
1583 pr_err("%s: suspend_freq can't be zero\n", __func__);
1584 return -EINVAL;
1585 }
1586
1587 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1588 policy->suspend_freq);
1589
1590 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1591 CPUFREQ_RELATION_H);
1592 if (ret)
1593 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1594 __func__, policy->suspend_freq, ret);
1595
1596 return ret;
1597}
1598EXPORT_SYMBOL(cpufreq_generic_suspend);
1599
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors
 * as some platforms can't change frequency after this point in suspend cycle.
 * Because some of the devices (like: i2c, regulators, etc) they use for
 * changing frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	/* setpolicy drivers have no governor to stop. */
	if (!has_target())
		return;

	pr_debug("%s: Suspending Governors\n", __func__);

	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
			pr_err("%s: Failed to stop governor for policy: %p\n",
				__func__, policy);
		else if (cpufreq_driver->suspend
		    && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

	/* From now on __cpufreq_governor() refuses to do anything. */
	cpufreq_suspended = true;
}
1632
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target())
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	/* Must be cleared first or __cpufreq_governor() would refuse to run. */
	cpufreq_suspended = false;

	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
		if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
			pr_err("%s: Failed to start governor for policy: %p\n",
				__func__, policy);
		else if (cpufreq_driver->resume
		    && cpufreq_driver->resume(policy))
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);

		/*
		 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
		 * policy in list. It will verify that the current freq is in
		 * sync with what we believe it to be.
		 */
		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
			schedule_work(&policy->update);
	}
}
1da177e4 1672
9d95046e
BP
1673/**
1674 * cpufreq_get_current_driver - return current driver's name
1675 *
1676 * Return the name string of the currently loaded cpufreq driver
1677 * or NULL, if none.
1678 */
1679const char *cpufreq_get_current_driver(void)
1680{
1c3d85dd
RW
1681 if (cpufreq_driver)
1682 return cpufreq_driver->name;
1683
1684 return NULL;
9d95046e
BP
1685}
1686EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1687
1688/*********************************************************************
1689 * NOTIFIER LISTS INTERFACE *
1690 *********************************************************************/
1691
1692/**
1693 * cpufreq_register_notifier - register a driver with cpufreq
1694 * @nb: notifier function to register
1695 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1696 *
32ee8c3e 1697 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1698 * are notified about clock rate changes (once before and once after
1699 * the transition), or a list of drivers that are notified about
1700 * changes in cpufreq policy.
1701 *
1702 * This function may sleep, and has the same return conditions as
e041c683 1703 * blocking_notifier_chain_register.
1da177e4
LT
1704 */
1705int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1706{
1707 int ret;
1708
d5aaffa9
DB
1709 if (cpufreq_disabled())
1710 return -EINVAL;
1711
74212ca4
CEB
1712 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1713
1da177e4
LT
1714 switch (list) {
1715 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1716 ret = srcu_notifier_chain_register(
e041c683 1717 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1718 break;
1719 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1720 ret = blocking_notifier_chain_register(
1721 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1722 break;
1723 default:
1724 ret = -EINVAL;
1725 }
1da177e4
LT
1726
1727 return ret;
1728}
1729EXPORT_SYMBOL(cpufreq_register_notifier);
1730
1da177e4
LT
1731/**
1732 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1733 * @nb: notifier block to be unregistered
bb176f7d 1734 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1735 *
1736 * Remove a driver from the CPU frequency notifier list.
1737 *
1738 * This function may sleep, and has the same return conditions as
e041c683 1739 * blocking_notifier_chain_unregister.
1da177e4
LT
1740 */
1741int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1742{
1743 int ret;
1744
d5aaffa9
DB
1745 if (cpufreq_disabled())
1746 return -EINVAL;
1747
1da177e4
LT
1748 switch (list) {
1749 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1750 ret = srcu_notifier_chain_unregister(
e041c683 1751 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1752 break;
1753 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1754 ret = blocking_notifier_chain_unregister(
1755 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1756 break;
1757 default:
1758 ret = -EINVAL;
1759 }
1da177e4
LT
1760
1761 return ret;
1762}
1763EXPORT_SYMBOL(cpufreq_unregister_notifier);
1764
1765
1766/*********************************************************************
1767 * GOVERNORS *
1768 *********************************************************************/
1769
/*
 * __cpufreq_driver_target - ask the driver to switch @policy to
 * @target_freq, honoring @relation (CPUFREQ_RELATION_L/H).
 *
 * Clamps the request into [policy->min, policy->max], then dispatches to
 * either the driver's ->target() (driver does its own table lookup and
 * notification) or ->target_index() (the core resolves the frequency
 * table index and issues PRE/POST transition notifications itself,
 * unless the driver is CPUFREQ_ASYNC_NOTIFICATION).
 *
 * Caller must hold policy->rwsem (see cpufreq_driver_target()).
 * Returns 0 on success or a negative error code.
 */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding index. But it is left intentionally for cases where
	 * exactly same freq is called again and so we can save on few function
	 * calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);
	else if (cpufreq_driver->target_index) {
		struct cpufreq_frequency_table *freq_table;
		struct cpufreq_freqs freqs;
		bool notify;
		int index;

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (unlikely(!freq_table)) {
			pr_err("%s: Unable to find freq_table\n", __func__);
			goto out;
		}

		retval = cpufreq_frequency_table_target(policy, freq_table,
				target_freq, relation, &index);
		if (unlikely(retval)) {
			pr_err("%s: Unable to find matching freq\n", __func__);
			goto out;
		}

		/* The table resolved to the frequency we already run at. */
		if (freq_table[index].frequency == policy->cur) {
			retval = 0;
			goto out;
		}

		/* Async-notification drivers send PRE/POST themselves. */
		notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);

		if (notify) {
			freqs.old = policy->cur;
			freqs.new = freq_table[index].frequency;
			freqs.flags = 0;

			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
				 __func__, policy->cpu, freqs.old, freqs.new);

			cpufreq_notify_transition(policy, &freqs,
					CPUFREQ_PRECHANGE);
		}

		retval = cpufreq_driver->target_index(policy, index);
		if (retval)
			pr_err("%s: Failed to change cpu frequency: %d\n",
			       __func__, retval);

		/* POST must follow PRE even on failure (retval is passed). */
		if (notify)
			cpufreq_notify_post_transition(policy, &freqs, retval);
	}

out:
	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1851
1da177e4
LT
1852int cpufreq_driver_target(struct cpufreq_policy *policy,
1853 unsigned int target_freq,
1854 unsigned int relation)
1855{
f1829e4a 1856 int ret = -EINVAL;
1da177e4 1857
ad7722da 1858 down_write(&policy->rwsem);
1da177e4
LT
1859
1860 ret = __cpufreq_driver_target(policy, target_freq, relation);
1861
ad7722da 1862 up_write(&policy->rwsem);
1da177e4 1863
1da177e4
LT
1864 return ret;
1865}
1866EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1867
153d7f3f 1868/*
153d7f3f
AV
1869 * when "event" is CPUFREQ_GOV_LIMITS
1870 */
1da177e4 1871
e08f5f5b
GS
1872static int __cpufreq_governor(struct cpufreq_policy *policy,
1873 unsigned int event)
1da177e4 1874{
cc993cab 1875 int ret;
6afde10c
TR
1876
1877 /* Only must be defined when default governor is known to have latency
1878 restrictions, like e.g. conservative or ondemand.
1879 That this is the case is already ensured in Kconfig
1880 */
1881#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1882 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1883#else
1884 struct cpufreq_governor *gov = NULL;
1885#endif
1c256245 1886
2f0aea93
VK
1887 /* Don't start any governor operations if we are entering suspend */
1888 if (cpufreq_suspended)
1889 return 0;
1890
1c256245
TR
1891 if (policy->governor->max_transition_latency &&
1892 policy->cpuinfo.transition_latency >
1893 policy->governor->max_transition_latency) {
6afde10c
TR
1894 if (!gov)
1895 return -EINVAL;
1896 else {
e837f9b5
JP
1897 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
1898 policy->governor->name, gov->name);
6afde10c
TR
1899 policy->governor = gov;
1900 }
1c256245 1901 }
1da177e4 1902
fe492f3f
VK
1903 if (event == CPUFREQ_GOV_POLICY_INIT)
1904 if (!try_module_get(policy->governor->owner))
1905 return -EINVAL;
1da177e4 1906
2d06d8c4 1907 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e837f9b5 1908 policy->cpu, event);
95731ebb
XC
1909
1910 mutex_lock(&cpufreq_governor_lock);
56d07db2 1911 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
f73d3933
VK
1912 || (!policy->governor_enabled
1913 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
95731ebb
XC
1914 mutex_unlock(&cpufreq_governor_lock);
1915 return -EBUSY;
1916 }
1917
1918 if (event == CPUFREQ_GOV_STOP)
1919 policy->governor_enabled = false;
1920 else if (event == CPUFREQ_GOV_START)
1921 policy->governor_enabled = true;
1922
1923 mutex_unlock(&cpufreq_governor_lock);
1924
1da177e4
LT
1925 ret = policy->governor->governor(policy, event);
1926
4d5dcc42
VK
1927 if (!ret) {
1928 if (event == CPUFREQ_GOV_POLICY_INIT)
1929 policy->governor->initialized++;
1930 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1931 policy->governor->initialized--;
95731ebb
XC
1932 } else {
1933 /* Restore original values */
1934 mutex_lock(&cpufreq_governor_lock);
1935 if (event == CPUFREQ_GOV_STOP)
1936 policy->governor_enabled = true;
1937 else if (event == CPUFREQ_GOV_START)
1938 policy->governor_enabled = false;
1939 mutex_unlock(&cpufreq_governor_lock);
4d5dcc42 1940 }
b394058f 1941
fe492f3f
VK
1942 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1943 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1da177e4
LT
1944 module_put(policy->governor->owner);
1945
1946 return ret;
1947}
1948
1da177e4
LT
1949int cpufreq_register_governor(struct cpufreq_governor *governor)
1950{
3bcb09a3 1951 int err;
1da177e4
LT
1952
1953 if (!governor)
1954 return -EINVAL;
1955
a7b422cd
KRW
1956 if (cpufreq_disabled())
1957 return -ENODEV;
1958
3fc54d37 1959 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1960
b394058f 1961 governor->initialized = 0;
3bcb09a3
JF
1962 err = -EBUSY;
1963 if (__find_governor(governor->name) == NULL) {
1964 err = 0;
1965 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1966 }
1da177e4 1967
32ee8c3e 1968 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1969 return err;
1da177e4
LT
1970}
1971EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1972
1da177e4
LT
1973void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1974{
90e41bac 1975 int cpu;
90e41bac 1976
1da177e4
LT
1977 if (!governor)
1978 return;
1979
a7b422cd
KRW
1980 if (cpufreq_disabled())
1981 return;
1982
90e41bac
PB
1983 for_each_present_cpu(cpu) {
1984 if (cpu_online(cpu))
1985 continue;
1986 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1987 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1988 }
90e41bac 1989
3fc54d37 1990 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1991 list_del(&governor->governor_list);
3fc54d37 1992 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1993 return;
1994}
1995EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1996
1997
1da177e4
LT
1998/*********************************************************************
1999 * POLICY INTERFACE *
2000 *********************************************************************/
2001
2002/**
2003 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
2004 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2005 * is written
1da177e4
LT
2006 *
2007 * Reads the current cpufreq policy.
2008 */
2009int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2010{
2011 struct cpufreq_policy *cpu_policy;
2012 if (!policy)
2013 return -EINVAL;
2014
2015 cpu_policy = cpufreq_cpu_get(cpu);
2016 if (!cpu_policy)
2017 return -EINVAL;
2018
d5b73cd8 2019 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
2020
2021 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
2022 return 0;
2023}
2024EXPORT_SYMBOL(cpufreq_get_policy);
2025
/*
 * cpufreq_set_policy - apply validated limits and governor to a policy
 * policy : current policy.
 * new_policy: policy to be set.
 *
 * Runs the verify/notify/verify sequence on @new_policy, commits the new
 * min/max, and switches governors if @new_policy names a different one.
 * Caller holds policy->rwsem for writing; it is dropped and re-taken
 * around the GOV_POLICY_EXIT calls below.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* Reject ranges that do not overlap the current limits at all. */
	if (new_policy->min > policy->max || new_policy->max < policy->min)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	/* setpolicy-style drivers manage frequency themselves; no governor. */
	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	/* Same governor as before: only the limits need refreshing. */
	if (new_policy->governor == policy->governor)
		goto out;

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		/* rwsem dropped: EXIT may take locks that nest outside it. */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy,CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
			goto out;

		/* START failed: tear the new governor back down. */
		up_write(&policy->rwsem);
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
		down_write(&policy->rwsem);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
	}

	return -EINVAL;

 out:
	pr_debug("governor: change or update limits\n");
	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
2121
1da177e4
LT
2122/**
2123 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2124 * @cpu: CPU which shall be re-evaluated
2125 *
25985edc 2126 * Useful for policy notifiers which have different necessities
1da177e4
LT
2127 * at different times.
2128 */
2129int cpufreq_update_policy(unsigned int cpu)
2130{
3a3e9e06
VK
2131 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2132 struct cpufreq_policy new_policy;
f1829e4a 2133 int ret;
1da177e4 2134
3a3e9e06 2135 if (!policy) {
f1829e4a
JL
2136 ret = -ENODEV;
2137 goto no_policy;
2138 }
1da177e4 2139
ad7722da 2140 down_write(&policy->rwsem);
1da177e4 2141
2d06d8c4 2142 pr_debug("updating policy for CPU %u\n", cpu);
d5b73cd8 2143 memcpy(&new_policy, policy, sizeof(*policy));
3a3e9e06
VK
2144 new_policy.min = policy->user_policy.min;
2145 new_policy.max = policy->user_policy.max;
2146 new_policy.policy = policy->user_policy.policy;
2147 new_policy.governor = policy->user_policy.governor;
1da177e4 2148
bb176f7d
VK
2149 /*
2150 * BIOS might change freq behind our back
2151 * -> ask driver for current freq and notify governors about a change
2152 */
1c3d85dd 2153 if (cpufreq_driver->get) {
3a3e9e06 2154 new_policy.cur = cpufreq_driver->get(cpu);
bd0fa9bb
VK
2155 if (WARN_ON(!new_policy.cur)) {
2156 ret = -EIO;
2157 goto no_policy;
2158 }
2159
3a3e9e06 2160 if (!policy->cur) {
e837f9b5 2161 pr_debug("Driver did not initialize current freq\n");
3a3e9e06 2162 policy->cur = new_policy.cur;
a85f7bd3 2163 } else {
9c0ebcf7 2164 if (policy->cur != new_policy.cur && has_target())
3a3e9e06
VK
2165 cpufreq_out_of_sync(cpu, policy->cur,
2166 new_policy.cur);
a85f7bd3 2167 }
0961dd0d
TR
2168 }
2169
037ce839 2170 ret = cpufreq_set_policy(policy, &new_policy);
1da177e4 2171
ad7722da 2172 up_write(&policy->rwsem);
5a01f2e8 2173
3a3e9e06 2174 cpufreq_cpu_put(policy);
f1829e4a 2175no_policy:
1da177e4
LT
2176 return ret;
2177}
2178EXPORT_SYMBOL(cpufreq_update_policy);
2179
/*
 * CPU hotplug notifier: create/remove the cpufreq policy alongside the CPU.
 * The _FROZEN variants (hotplug during suspend/resume) are detected and
 * forwarded to the add/remove helpers through the 'frozen' flag.
 */
static int cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;
	bool frozen = false;

	dev = get_cpu_device(cpu);
	if (dev) {

		if (action & CPU_TASKS_FROZEN)
			frozen = true;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_ONLINE:
			__cpufreq_add_dev(dev, NULL, frozen);
			break;

		case CPU_DOWN_PREPARE:
			/* First stage of teardown, before the CPU is gone. */
			__cpufreq_remove_dev_prepare(dev, NULL, frozen);
			break;

		case CPU_POST_DEAD:
			/* Second stage, after the CPU is fully offline. */
			__cpufreq_remove_dev_finish(dev, NULL, frozen);
			break;

		case CPU_DOWN_FAILED:
			/* Offlining was aborted - bring the device back. */
			__cpufreq_add_dev(dev, NULL, frozen);
			break;
		}
	}
	return NOTIFY_OK;
}
2213
/* Hotplug notifier block, registered in cpufreq_register_driver(). */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4 2217
6f19efc0
LM
/*********************************************************************
 *               BOOST						     *
 *********************************************************************/
/*
 * Software fallback for toggling boost: re-derive every policy's limits
 * from its frequency table so boost frequencies become (un)available,
 * then kick the governors with GOV_LIMITS.
 *
 * NOTE(review): 'ret' reflects only the most recently processed policy
 * that had a frequency table, and stays -EINVAL when none did - verify
 * that callers are fine with this aggregate error semantics.
 */
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (freq_table) {
			ret = cpufreq_frequency_table_cpuinfo(policy,
							freq_table);
			if (ret) {
				pr_err("%s: Policy frequency update failed\n",
				       __func__);
				break;
			}
			/* Track the (possibly raised) hardware maximum. */
			policy->user_policy.max = policy->max;
			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		}
	}

	return ret;
}
2244
2245int cpufreq_boost_trigger_state(int state)
2246{
2247 unsigned long flags;
2248 int ret = 0;
2249
2250 if (cpufreq_driver->boost_enabled == state)
2251 return 0;
2252
2253 write_lock_irqsave(&cpufreq_driver_lock, flags);
2254 cpufreq_driver->boost_enabled = state;
2255 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2256
2257 ret = cpufreq_driver->set_boost(state);
2258 if (ret) {
2259 write_lock_irqsave(&cpufreq_driver_lock, flags);
2260 cpufreq_driver->boost_enabled = !state;
2261 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2262
e837f9b5
JP
2263 pr_err("%s: Cannot %s BOOST\n",
2264 __func__, state ? "enable" : "disable");
6f19efc0
LM
2265 }
2266
2267 return ret;
2268}
2269
2270int cpufreq_boost_supported(void)
2271{
2272 if (likely(cpufreq_driver))
2273 return cpufreq_driver->boost_supported;
2274
2275 return 0;
2276}
2277EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2278
/*
 * cpufreq_boost_enabled - report whether boost is currently enabled.
 * NOTE(review): dereferences cpufreq_driver unconditionally and without
 * locking; callers must guarantee a driver is registered.
 */
int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2284
1da177e4
LT
2285/*********************************************************************
2286 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2287 *********************************************************************/
2288
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* A driver must verify, init, and provide exactly one control style:
	 * setpolicy, target_index, or the legacy target callback. */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* Claim the single global driver slot; -EEXIST if already taken. */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (cpufreq_boost_supported()) {
		/*
		 * Check if driver provides function to enable boost -
		 * if not, use cpufreq_boost_set_sw as default
		 */
		if (!cpufreq_driver->set_boost)
			cpufreq_driver->set_boost = cpufreq_boost_set_sw;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret) {
			pr_err("%s: cannot register global BOOST sysfs file\n",
			       __func__);
			goto err_null_driver;
		}
	}

	/* Triggers ->init() for every CPU device on the subsystem bus. */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
				 driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
	/* Unwind in reverse order of setup. */
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2380
1da177e4
LT
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 * @driver: the driver that was previously registered
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	/* Only the driver that registered itself may unregister. */
	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	if (cpufreq_boost_supported())
		cpufreq_sysfs_remove_file(&boost.attr);

	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	/* Clear the driver pointer with both the cpufreq_rwsem (as writer)
	 * and the driver spinlock held, in that order. */
	down_write(&cpufreq_rwsem);
	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	up_write(&cpufreq_rwsem);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2415
/* Core initialization: create the global kobject used as the anchor for
 * cpufreq-wide sysfs attributes elsewhere in the core. */
static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create();
	/* Boot-time allocation failure here is unrecoverable. */
	BUG_ON(!cpufreq_global_kobject);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 0.779596 seconds and 5 git commands to generate.