/*
 * Blame-annotated capture of drivers/cpufreq/cpufreq.c from the Linux
 * kernel tree (deliverable/linux.git), viewed at the commit
 * "cpufreq: unicore2: don't initialize part of policy set by core".
 */
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
bb176f7d 6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
1da177e4 7 *
c32b6b8e 8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 9 * Added handling for CPU hotplug
8ff69732
DJ
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 12 *
1da177e4
LT
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
1da177e4
LT
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
5ff0a268 20#include <linux/cpu.h>
1da177e4
LT
21#include <linux/cpufreq.h>
22#include <linux/delay.h>
1da177e4 23#include <linux/device.h>
5ff0a268
VK
24#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
3fc54d37 27#include <linux/mutex.h>
5ff0a268 28#include <linux/slab.h>
e00e56df 29#include <linux/syscore_ops.h>
5ff0a268 30#include <linux/tick.h>
6f4f2723
TR
31#include <trace/events/power.h>
32
1da177e4 33/**
cd878479 34 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
35 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
1c3d85dd 38static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 39static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
8414809c 40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
bb176f7d
VK
41static DEFINE_RWLOCK(cpufreq_driver_lock);
42static DEFINE_MUTEX(cpufreq_governor_lock);
c88a1f8b 43static LIST_HEAD(cpufreq_policy_list);
bb176f7d 44
084f3493
TR
45#ifdef CONFIG_HOTPLUG_CPU
46/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 47static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 48#endif
1da177e4 49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
5a01f2e8
VP
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
5a01f2e8
VP
67static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
68
69#define lock_policy_rwsem(mode, cpu) \
1b750e3b 70static void lock_policy_rwsem_##mode(int cpu) \
5a01f2e8 71{ \
474deff7
VK
72 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
73 BUG_ON(!policy); \
74 down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
5a01f2e8
VP
75}
76
77lock_policy_rwsem(read, cpu);
5a01f2e8 78lock_policy_rwsem(write, cpu);
5a01f2e8 79
fa1d8af4
VK
80#define unlock_policy_rwsem(mode, cpu) \
81static void unlock_policy_rwsem_##mode(int cpu) \
82{ \
474deff7
VK
83 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
84 BUG_ON(!policy); \
85 up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
5a01f2e8 86}
5a01f2e8 87
fa1d8af4
VK
88unlock_policy_rwsem(read, cpu);
89unlock_policy_rwsem(write, cpu);
5a01f2e8 90
6eed9404
VK
91/*
92 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
93 * sections
94 */
95static DECLARE_RWSEM(cpufreq_rwsem);
96
1da177e4 97/* internal prototypes */
29464f28
DJ
98static int __cpufreq_governor(struct cpufreq_policy *policy,
99 unsigned int event);
5a01f2e8 100static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 101static void handle_update(struct work_struct *work);
1da177e4
LT
102
103/**
32ee8c3e
DJ
104 * Two notifier lists: the "policy" list is involved in the
105 * validation process for a new CPU frequency policy; the
1da177e4
LT
106 * "transition" list for kernel code that needs to handle
107 * changes to devices when the CPU clock speed changes.
108 * The mutex locks both lists.
109 */
e041c683 110static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 111static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 112
74212ca4 113static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
114static int __init init_cpufreq_transition_notifier_list(void)
115{
116 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 117 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
118 return 0;
119}
b3438f82 120pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 121
a7b422cd 122static int off __read_mostly;
da584455 123static int cpufreq_disabled(void)
a7b422cd
KRW
124{
125 return off;
126}
127void disable_cpufreq(void)
128{
129 off = 1;
130}
1da177e4 131static LIST_HEAD(cpufreq_governor_list);
29464f28 132static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 133
4d5dcc42
VK
134bool have_governor_per_policy(void)
135{
0b981e70 136 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
4d5dcc42 137}
3f869d6d 138EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 139
944e9a03
VK
140struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
141{
142 if (have_governor_per_policy())
143 return &policy->kobj;
144 else
145 return cpufreq_global_kobject;
146}
147EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
148
72a4ce34
VK
149static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
150{
151 u64 idle_time;
152 u64 cur_wall_time;
153 u64 busy_time;
154
155 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
156
157 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
158 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
159 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
160 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
161 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
162 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
163
164 idle_time = cur_wall_time - busy_time;
165 if (wall)
166 *wall = cputime_to_usecs(cur_wall_time);
167
168 return cputime_to_usecs(idle_time);
169}
170
171u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
172{
173 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
174
175 if (idle_time == -1ULL)
176 return get_cpu_idle_time_jiffy(cpu, wall);
177 else if (!io_busy)
178 idle_time += get_cpu_iowait_time_us(cpu, wall);
179
180 return idle_time;
181}
182EXPORT_SYMBOL_GPL(get_cpu_idle_time);
183
6eed9404 184struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
1da177e4 185{
6eed9404 186 struct cpufreq_policy *policy = NULL;
1da177e4
LT
187 unsigned long flags;
188
6eed9404
VK
189 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
190 return NULL;
191
192 if (!down_read_trylock(&cpufreq_rwsem))
193 return NULL;
1da177e4
LT
194
195 /* get the cpufreq driver */
1c3d85dd 196 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 197
6eed9404
VK
198 if (cpufreq_driver) {
199 /* get the CPU */
200 policy = per_cpu(cpufreq_cpu_data, cpu);
201 if (policy)
202 kobject_get(&policy->kobj);
203 }
1da177e4 204
6eed9404 205 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 206
3a3e9e06 207 if (!policy)
6eed9404 208 up_read(&cpufreq_rwsem);
1da177e4 209
3a3e9e06 210 return policy;
a9144436 211}
1da177e4
LT
212EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
213
3a3e9e06 214void cpufreq_cpu_put(struct cpufreq_policy *policy)
1da177e4 215{
d5aaffa9
DB
216 if (cpufreq_disabled())
217 return;
218
6eed9404
VK
219 kobject_put(&policy->kobj);
220 up_read(&cpufreq_rwsem);
1da177e4
LT
221}
222EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
223
1da177e4
LT
224/*********************************************************************
225 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
226 *********************************************************************/
227
228/**
229 * adjust_jiffies - adjust the system "loops_per_jiffy"
230 *
231 * This function alters the system "loops_per_jiffy" for the clock
232 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 233 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
234 * per-CPU loops_per_jiffy value wherever possible.
235 */
236#ifndef CONFIG_SMP
237static unsigned long l_p_j_ref;
bb176f7d 238static unsigned int l_p_j_ref_freq;
1da177e4 239
858119e1 240static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
241{
242 if (ci->flags & CPUFREQ_CONST_LOOPS)
243 return;
244
245 if (!l_p_j_ref_freq) {
246 l_p_j_ref = loops_per_jiffy;
247 l_p_j_ref_freq = ci->old;
2d06d8c4 248 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 249 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 250 }
bb176f7d 251 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 252 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
253 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
254 ci->new);
2d06d8c4 255 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 256 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
257 }
258}
259#else
e08f5f5b
GS
260static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
261{
262 return;
263}
1da177e4
LT
264#endif
265
0956df9c 266static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
b43a7ffb 267 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
268{
269 BUG_ON(irqs_disabled());
270
d5aaffa9
DB
271 if (cpufreq_disabled())
272 return;
273
1c3d85dd 274 freqs->flags = cpufreq_driver->flags;
2d06d8c4 275 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 276 state, freqs->new);
1da177e4 277
1da177e4 278 switch (state) {
e4472cb3 279
1da177e4 280 case CPUFREQ_PRECHANGE:
32ee8c3e 281 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
282 * which is not equal to what the cpufreq core thinks is
283 * "old frequency".
1da177e4 284 */
1c3d85dd 285 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
286 if ((policy) && (policy->cpu == freqs->cpu) &&
287 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 288 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
289 " %u, cpufreq assumed %u kHz.\n",
290 freqs->old, policy->cur);
291 freqs->old = policy->cur;
1da177e4
LT
292 }
293 }
b4dfdbb3 294 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 295 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
296 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
297 break;
e4472cb3 298
1da177e4
LT
299 case CPUFREQ_POSTCHANGE:
300 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 301 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 302 (unsigned long)freqs->cpu);
25e41933 303 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 304 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 305 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
306 if (likely(policy) && likely(policy->cpu == freqs->cpu))
307 policy->cur = freqs->new;
1da177e4
LT
308 break;
309 }
1da177e4 310}
bb176f7d 311
b43a7ffb
VK
312/**
313 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
314 * on frequency transition.
315 *
316 * This function calls the transition notifiers and the "adjust_jiffies"
317 * function. It is called twice on all CPU frequency changes that have
318 * external effects.
319 */
320void cpufreq_notify_transition(struct cpufreq_policy *policy,
321 struct cpufreq_freqs *freqs, unsigned int state)
322{
323 for_each_cpu(freqs->cpu, policy->cpus)
324 __cpufreq_notify_transition(policy, freqs, state);
325}
1da177e4
LT
326EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
327
328
1da177e4
LT
329/*********************************************************************
330 * SYSFS INTERFACE *
331 *********************************************************************/
332
3bcb09a3
JF
333static struct cpufreq_governor *__find_governor(const char *str_governor)
334{
335 struct cpufreq_governor *t;
336
337 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 338 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
339 return t;
340
341 return NULL;
342}
343
1da177e4
LT
344/**
345 * cpufreq_parse_governor - parse a governor string
346 */
905d77cd 347static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
348 struct cpufreq_governor **governor)
349{
3bcb09a3 350 int err = -EINVAL;
1c3d85dd
RW
351
352 if (!cpufreq_driver)
3bcb09a3
JF
353 goto out;
354
1c3d85dd 355 if (cpufreq_driver->setpolicy) {
1da177e4
LT
356 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
357 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 358 err = 0;
e08f5f5b
GS
359 } else if (!strnicmp(str_governor, "powersave",
360 CPUFREQ_NAME_LEN)) {
1da177e4 361 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 362 err = 0;
1da177e4 363 }
1c3d85dd 364 } else if (cpufreq_driver->target) {
1da177e4 365 struct cpufreq_governor *t;
3bcb09a3 366
3fc54d37 367 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
368
369 t = __find_governor(str_governor);
370
ea714970 371 if (t == NULL) {
1a8e1463 372 int ret;
ea714970 373
1a8e1463
KC
374 mutex_unlock(&cpufreq_governor_mutex);
375 ret = request_module("cpufreq_%s", str_governor);
376 mutex_lock(&cpufreq_governor_mutex);
ea714970 377
1a8e1463
KC
378 if (ret == 0)
379 t = __find_governor(str_governor);
ea714970
JF
380 }
381
3bcb09a3
JF
382 if (t != NULL) {
383 *governor = t;
384 err = 0;
1da177e4 385 }
3bcb09a3 386
3fc54d37 387 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 388 }
29464f28 389out:
3bcb09a3 390 return err;
1da177e4 391}
1da177e4 392
1da177e4 393/**
e08f5f5b
GS
394 * cpufreq_per_cpu_attr_read() / show_##file_name() -
395 * print out cpufreq information
1da177e4
LT
396 *
397 * Write out information from cpufreq_driver->policy[cpu]; object must be
398 * "unsigned int".
399 */
400
32ee8c3e
DJ
401#define show_one(file_name, object) \
402static ssize_t show_##file_name \
905d77cd 403(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 404{ \
29464f28 405 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
406}
407
408show_one(cpuinfo_min_freq, cpuinfo.min_freq);
409show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 410show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
411show_one(scaling_min_freq, min);
412show_one(scaling_max_freq, max);
413show_one(scaling_cur_freq, cur);
414
037ce839 415static int cpufreq_set_policy(struct cpufreq_policy *policy,
3a3e9e06 416 struct cpufreq_policy *new_policy);
7970e08b 417
1da177e4
LT
418/**
419 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
420 */
421#define store_one(file_name, object) \
422static ssize_t store_##file_name \
905d77cd 423(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 424{ \
5136fa56 425 int ret; \
1da177e4
LT
426 struct cpufreq_policy new_policy; \
427 \
428 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
429 if (ret) \
430 return -EINVAL; \
431 \
29464f28 432 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
433 if (ret != 1) \
434 return -EINVAL; \
435 \
037ce839 436 ret = cpufreq_set_policy(policy, &new_policy); \
7970e08b 437 policy->user_policy.object = policy->object; \
1da177e4
LT
438 \
439 return ret ? ret : count; \
440}
441
29464f28
DJ
442store_one(scaling_min_freq, min);
443store_one(scaling_max_freq, max);
1da177e4
LT
444
445/**
446 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
447 */
905d77cd
DJ
448static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
449 char *buf)
1da177e4 450{
5a01f2e8 451 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
452 if (!cur_freq)
453 return sprintf(buf, "<unknown>");
454 return sprintf(buf, "%u\n", cur_freq);
455}
456
1da177e4
LT
457/**
458 * show_scaling_governor - show the current policy for the specified CPU
459 */
905d77cd 460static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 461{
29464f28 462 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
463 return sprintf(buf, "powersave\n");
464 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
465 return sprintf(buf, "performance\n");
466 else if (policy->governor)
4b972f0b 467 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 468 policy->governor->name);
1da177e4
LT
469 return -EINVAL;
470}
471
1da177e4
LT
472/**
473 * store_scaling_governor - store policy for the specified CPU
474 */
905d77cd
DJ
475static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
476 const char *buf, size_t count)
1da177e4 477{
5136fa56 478 int ret;
1da177e4
LT
479 char str_governor[16];
480 struct cpufreq_policy new_policy;
481
482 ret = cpufreq_get_policy(&new_policy, policy->cpu);
483 if (ret)
484 return ret;
485
29464f28 486 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
487 if (ret != 1)
488 return -EINVAL;
489
e08f5f5b
GS
490 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
491 &new_policy.governor))
1da177e4
LT
492 return -EINVAL;
493
037ce839 494 ret = cpufreq_set_policy(policy, &new_policy);
7970e08b
TR
495
496 policy->user_policy.policy = policy->policy;
497 policy->user_policy.governor = policy->governor;
7970e08b 498
e08f5f5b
GS
499 if (ret)
500 return ret;
501 else
502 return count;
1da177e4
LT
503}
504
505/**
506 * show_scaling_driver - show the cpufreq driver currently loaded
507 */
905d77cd 508static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 509{
1c3d85dd 510 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
511}
512
513/**
514 * show_scaling_available_governors - show the available CPUfreq governors
515 */
905d77cd
DJ
516static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
517 char *buf)
1da177e4
LT
518{
519 ssize_t i = 0;
520 struct cpufreq_governor *t;
521
1c3d85dd 522 if (!cpufreq_driver->target) {
1da177e4
LT
523 i += sprintf(buf, "performance powersave");
524 goto out;
525 }
526
527 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
528 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
529 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 530 goto out;
4b972f0b 531 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 532 }
7d5e350f 533out:
1da177e4
LT
534 i += sprintf(&buf[i], "\n");
535 return i;
536}
e8628dd0 537
f4fd3797 538ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
539{
540 ssize_t i = 0;
541 unsigned int cpu;
542
835481d9 543 for_each_cpu(cpu, mask) {
1da177e4
LT
544 if (i)
545 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
546 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
547 if (i >= (PAGE_SIZE - 5))
29464f28 548 break;
1da177e4
LT
549 }
550 i += sprintf(&buf[i], "\n");
551 return i;
552}
f4fd3797 553EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
1da177e4 554
e8628dd0
DW
555/**
556 * show_related_cpus - show the CPUs affected by each transition even if
557 * hw coordination is in use
558 */
559static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
560{
f4fd3797 561 return cpufreq_show_cpus(policy->related_cpus, buf);
e8628dd0
DW
562}
563
564/**
565 * show_affected_cpus - show the CPUs affected by each transition
566 */
567static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
568{
f4fd3797 569 return cpufreq_show_cpus(policy->cpus, buf);
e8628dd0
DW
570}
571
9e76988e 572static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 573 const char *buf, size_t count)
9e76988e
VP
574{
575 unsigned int freq = 0;
576 unsigned int ret;
577
879000f9 578 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
579 return -EINVAL;
580
581 ret = sscanf(buf, "%u", &freq);
582 if (ret != 1)
583 return -EINVAL;
584
585 policy->governor->store_setspeed(policy, freq);
586
587 return count;
588}
589
590static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
591{
879000f9 592 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
593 return sprintf(buf, "<unsupported>\n");
594
595 return policy->governor->show_setspeed(policy, buf);
596}
1da177e4 597
e2f74f35 598/**
8bf1ac72 599 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
600 */
601static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
602{
603 unsigned int limit;
604 int ret;
1c3d85dd
RW
605 if (cpufreq_driver->bios_limit) {
606 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
607 if (!ret)
608 return sprintf(buf, "%u\n", limit);
609 }
610 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
611}
612
6dad2a29
BP
613cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
614cpufreq_freq_attr_ro(cpuinfo_min_freq);
615cpufreq_freq_attr_ro(cpuinfo_max_freq);
616cpufreq_freq_attr_ro(cpuinfo_transition_latency);
617cpufreq_freq_attr_ro(scaling_available_governors);
618cpufreq_freq_attr_ro(scaling_driver);
619cpufreq_freq_attr_ro(scaling_cur_freq);
620cpufreq_freq_attr_ro(bios_limit);
621cpufreq_freq_attr_ro(related_cpus);
622cpufreq_freq_attr_ro(affected_cpus);
623cpufreq_freq_attr_rw(scaling_min_freq);
624cpufreq_freq_attr_rw(scaling_max_freq);
625cpufreq_freq_attr_rw(scaling_governor);
626cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 627
905d77cd 628static struct attribute *default_attrs[] = {
1da177e4
LT
629 &cpuinfo_min_freq.attr,
630 &cpuinfo_max_freq.attr,
ed129784 631 &cpuinfo_transition_latency.attr,
1da177e4
LT
632 &scaling_min_freq.attr,
633 &scaling_max_freq.attr,
634 &affected_cpus.attr,
e8628dd0 635 &related_cpus.attr,
1da177e4
LT
636 &scaling_governor.attr,
637 &scaling_driver.attr,
638 &scaling_available_governors.attr,
9e76988e 639 &scaling_setspeed.attr,
1da177e4
LT
640 NULL
641};
642
29464f28
DJ
643#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
644#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 645
29464f28 646static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 647{
905d77cd
DJ
648 struct cpufreq_policy *policy = to_policy(kobj);
649 struct freq_attr *fattr = to_attr(attr);
1b750e3b 650 ssize_t ret;
6eed9404
VK
651
652 if (!down_read_trylock(&cpufreq_rwsem))
1b750e3b 653 return -EINVAL;
5a01f2e8 654
1b750e3b 655 lock_policy_rwsem_read(policy->cpu);
5a01f2e8 656
e08f5f5b
GS
657 if (fattr->show)
658 ret = fattr->show(policy, buf);
659 else
660 ret = -EIO;
661
5a01f2e8 662 unlock_policy_rwsem_read(policy->cpu);
6eed9404 663 up_read(&cpufreq_rwsem);
1b750e3b 664
1da177e4
LT
665 return ret;
666}
667
905d77cd
DJ
668static ssize_t store(struct kobject *kobj, struct attribute *attr,
669 const char *buf, size_t count)
1da177e4 670{
905d77cd
DJ
671 struct cpufreq_policy *policy = to_policy(kobj);
672 struct freq_attr *fattr = to_attr(attr);
a07530b4 673 ssize_t ret = -EINVAL;
6eed9404 674
4f750c93
SB
675 get_online_cpus();
676
677 if (!cpu_online(policy->cpu))
678 goto unlock;
679
6eed9404 680 if (!down_read_trylock(&cpufreq_rwsem))
4f750c93 681 goto unlock;
5a01f2e8 682
1b750e3b 683 lock_policy_rwsem_write(policy->cpu);
5a01f2e8 684
e08f5f5b
GS
685 if (fattr->store)
686 ret = fattr->store(policy, buf, count);
687 else
688 ret = -EIO;
689
5a01f2e8 690 unlock_policy_rwsem_write(policy->cpu);
6eed9404 691
6eed9404 692 up_read(&cpufreq_rwsem);
4f750c93
SB
693unlock:
694 put_online_cpus();
695
1da177e4
LT
696 return ret;
697}
698
905d77cd 699static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 700{
905d77cd 701 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 702 pr_debug("last reference is dropped\n");
1da177e4
LT
703 complete(&policy->kobj_unregister);
704}
705
52cf25d0 706static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
707 .show = show,
708 .store = store,
709};
710
711static struct kobj_type ktype_cpufreq = {
712 .sysfs_ops = &sysfs_ops,
713 .default_attrs = default_attrs,
714 .release = cpufreq_sysfs_release,
715};
716
2361be23
VK
717struct kobject *cpufreq_global_kobject;
718EXPORT_SYMBOL(cpufreq_global_kobject);
719
720static int cpufreq_global_kobject_usage;
721
722int cpufreq_get_global_kobject(void)
723{
724 if (!cpufreq_global_kobject_usage++)
725 return kobject_add(cpufreq_global_kobject,
726 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
727
728 return 0;
729}
730EXPORT_SYMBOL(cpufreq_get_global_kobject);
731
732void cpufreq_put_global_kobject(void)
733{
734 if (!--cpufreq_global_kobject_usage)
735 kobject_del(cpufreq_global_kobject);
736}
737EXPORT_SYMBOL(cpufreq_put_global_kobject);
738
739int cpufreq_sysfs_create_file(const struct attribute *attr)
740{
741 int ret = cpufreq_get_global_kobject();
742
743 if (!ret) {
744 ret = sysfs_create_file(cpufreq_global_kobject, attr);
745 if (ret)
746 cpufreq_put_global_kobject();
747 }
748
749 return ret;
750}
751EXPORT_SYMBOL(cpufreq_sysfs_create_file);
752
753void cpufreq_sysfs_remove_file(const struct attribute *attr)
754{
755 sysfs_remove_file(cpufreq_global_kobject, attr);
756 cpufreq_put_global_kobject();
757}
758EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
759
19d6f7ec 760/* symlink affected CPUs */
308b60e7 761static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
19d6f7ec
DJ
762{
763 unsigned int j;
764 int ret = 0;
765
766 for_each_cpu(j, policy->cpus) {
8a25a2fd 767 struct device *cpu_dev;
19d6f7ec 768
308b60e7 769 if (j == policy->cpu)
19d6f7ec 770 continue;
19d6f7ec 771
e8fdde10 772 pr_debug("Adding link for CPU: %u\n", j);
8a25a2fd
KS
773 cpu_dev = get_cpu_device(j);
774 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec 775 "cpufreq");
71c3461e
RW
776 if (ret)
777 break;
19d6f7ec
DJ
778 }
779 return ret;
780}
781
308b60e7 782static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
8a25a2fd 783 struct device *dev)
909a694e
DJ
784{
785 struct freq_attr **drv_attr;
909a694e 786 int ret = 0;
909a694e
DJ
787
788 /* prepare interface data */
789 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 790 &dev->kobj, "cpufreq");
909a694e
DJ
791 if (ret)
792 return ret;
793
794 /* set up files for this cpu device */
1c3d85dd 795 drv_attr = cpufreq_driver->attr;
909a694e
DJ
796 while ((drv_attr) && (*drv_attr)) {
797 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
798 if (ret)
1c3d85dd 799 goto err_out_kobj_put;
909a694e
DJ
800 drv_attr++;
801 }
1c3d85dd 802 if (cpufreq_driver->get) {
909a694e
DJ
803 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
804 if (ret)
1c3d85dd 805 goto err_out_kobj_put;
909a694e 806 }
1c3d85dd 807 if (cpufreq_driver->target) {
909a694e
DJ
808 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
809 if (ret)
1c3d85dd 810 goto err_out_kobj_put;
909a694e 811 }
1c3d85dd 812 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
813 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
814 if (ret)
1c3d85dd 815 goto err_out_kobj_put;
e2f74f35 816 }
909a694e 817
308b60e7 818 ret = cpufreq_add_dev_symlink(policy);
ecf7e461
DJ
819 if (ret)
820 goto err_out_kobj_put;
821
e18f1682
SB
822 return ret;
823
824err_out_kobj_put:
825 kobject_put(&policy->kobj);
826 wait_for_completion(&policy->kobj_unregister);
827 return ret;
828}
829
830static void cpufreq_init_policy(struct cpufreq_policy *policy)
831{
832 struct cpufreq_policy new_policy;
833 int ret = 0;
834
d5b73cd8 835 memcpy(&new_policy, policy, sizeof(*policy));
037ce839 836 /* assure that the starting sequence is run in cpufreq_set_policy */
ecf7e461
DJ
837 policy->governor = NULL;
838
839 /* set default policy */
037ce839 840 ret = cpufreq_set_policy(policy, &new_policy);
ecf7e461
DJ
841 policy->user_policy.policy = policy->policy;
842 policy->user_policy.governor = policy->governor;
843
844 if (ret) {
2d06d8c4 845 pr_debug("setting policy failed\n");
1c3d85dd
RW
846 if (cpufreq_driver->exit)
847 cpufreq_driver->exit(policy);
ecf7e461 848 }
909a694e
DJ
849}
850
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach @cpu to an existing @policy: stop the governor, publish the
 * policy for the new CPU under the driver lock, then restart the
 * governor and (unless @frozen) create the sysfs link.
 */
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
				  unsigned int cpu, struct device *dev,
				  bool frozen)
{
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	if (has_target) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

	lock_policy_rwsem_write(policy->cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(policy->cpu);

	if (has_target) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
		if (!ret)
			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
		if (ret) {
			pr_err("%s: Failed to start governor\n", __func__);
			return ret;
		}
	}

	/* Don't touch sysfs links during light-weight init */
	if (!frozen)
		ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");

	return ret;
}
#endif
1da177e4 892
8414809c
SB
893static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
894{
895 struct cpufreq_policy *policy;
896 unsigned long flags;
897
44871c9c 898 read_lock_irqsave(&cpufreq_driver_lock, flags);
8414809c
SB
899
900 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
901
44871c9c 902 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
8414809c
SB
903
904 return policy;
905}
906
e9698cc5
SB
907static struct cpufreq_policy *cpufreq_policy_alloc(void)
908{
909 struct cpufreq_policy *policy;
910
911 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
912 if (!policy)
913 return NULL;
914
915 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
916 goto err_free_policy;
917
918 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
919 goto err_free_cpumask;
920
c88a1f8b 921 INIT_LIST_HEAD(&policy->policy_list);
e9698cc5
SB
922 return policy;
923
924err_free_cpumask:
925 free_cpumask_var(policy->cpus);
926err_free_policy:
927 kfree(policy);
928
929 return NULL;
930}
931
932static void cpufreq_policy_free(struct cpufreq_policy *policy)
933{
934 free_cpumask_var(policy->related_cpus);
935 free_cpumask_var(policy->cpus);
936 kfree(policy);
937}
938
0d66b91e
SB
939static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
940{
cb38ed5c
SB
941 if (cpu == policy->cpu)
942 return;
943
8efd5765
VK
944 /*
945 * Take direct locks as lock_policy_rwsem_write wouldn't work here.
946 * Also lock for last cpu is enough here as contention will happen only
947 * after policy->cpu is changed and after it is changed, other threads
948 * will try to acquire lock for new cpu. And policy is already updated
949 * by then.
950 */
951 down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
952
0d66b91e
SB
953 policy->last_cpu = policy->cpu;
954 policy->cpu = cpu;
955
8efd5765
VK
956 up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
957
0d66b91e
SB
958#ifdef CONFIG_CPU_FREQ_TABLE
959 cpufreq_frequency_table_update_policy_cpu(policy);
960#endif
961 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
962 CPUFREQ_UPDATE_POLICY_CPU, policy);
963}
964
a82fab29
SB
/*
 * __cpufreq_add_dev - bring a CPU under cpufreq management.
 * @dev: CPU device being added
 * @sif: subsystem interface (passed through by the subsys core)
 * @frozen: true for the light-weight re-init done on resume; the policy
 *	saved by __cpufreq_remove_dev_prepare() is restored instead of a
 *	fresh one being allocated, and sysfs entries are not recreated.
 *
 * Returns 0 on success or when there is nothing to do (CPU offline,
 * already managed, or no driver registered yet), -ENOMEM when no policy
 * could be obtained, or an error from the driver/interface setup.
 */
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
			     bool frozen)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_policy *tpolicy;
	struct cpufreq_governor *gov;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}
#endif

	/* Trylock: a failure means the driver is being torn down; bail out. */
	if (!down_read_trylock(&cpufreq_rwsem))
		return 0;

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			/* Join the sibling's existing policy instead. */
			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
			up_read(&cpufreq_rwsem);
			return ret;
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (frozen)
		/* Restore the saved policy when doing light-weight init */
		policy = cpufreq_policy_restore(cpu);
	else
		policy = cpufreq_policy_alloc();

	if (!policy)
		goto nomem_out;

	/*
	 * In the resume path, since we restore a saved policy, the assignment
	 * to policy->cpu is like an update of the existing policy, rather than
	 * the creation of a brand new one. So we need to perform this update
	 * by invoking update_policy_cpu().
	 */
	if (frozen && cpu != policy->cpu)
		update_policy_cpu(policy, cpu);
	else
		policy->cpu = cpu;

	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	if (cpufreq_driver->get) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			/* A get() returning 0 kHz is treated as failure. */
			pr_err("%s: ->get() failed\n", __func__);
			goto err_get_freq;
		}
	}

	/* related cpus should atleast have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	/* Seed the user-visible limits from what ->init() established. */
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	/* Re-install the governor this CPU used before it was unplugged. */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!frozen) {
		ret = cpufreq_add_dev_interface(policy, dev);
		if (ret)
			goto err_out_unregister;
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_add(&policy->policy_list, &cpufreq_policy_list);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_init_policy(policy);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	up_read(&cpufreq_rwsem);

	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

err_get_freq:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
err_set_policy_cpu:
	/*
	 * NOTE(review): in the frozen path this frees a policy that came
	 * from cpufreq_policy_restore(), not cpufreq_policy_alloc() —
	 * presumably intentional since re-init failed; confirm.
	 */
	cpufreq_policy_free(policy);
nomem_out:
	up_read(&cpufreq_rwsem);

	return ret;
}
1117
a82fab29
SB
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device (full, non-frozen init).
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose.
 * Tried to clean this mess up, but more thorough testing is needed.
 * - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	return __cpufreq_add_dev(dev, sif, false);
}
1131
/*
 * cpufreq_nominate_new_policy_cpu - hand the policy kobject to a sibling.
 * @policy: policy being vacated by @old_cpu
 * @old_cpu: CPU that currently owns policy->kobj
 * @frozen: light-weight tear-down; skip all sysfs manipulation
 *
 * Returns the id of the sibling CPU that now owns the policy directory,
 * or -EINVAL if the kobject could not be moved (in which case @old_cpu
 * is put back into policy->cpus and its sysfs link restored).
 */
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
					   unsigned int old_cpu, bool frozen)
{
	struct device *cpu_dev;
	int ret;

	/* first sibling now owns the new sysfs dir */
	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));

	/* Don't touch sysfs files during light-weight tear-down */
	if (frozen)
		return cpu_dev->id;

	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
	if (ret) {
		pr_err("%s: Failed to move kobj: %d", __func__, ret);

		/* Roll back: the old CPU keeps the policy. */
		lock_policy_rwsem_write(old_cpu);
		cpumask_set_cpu(old_cpu, policy->cpus);
		unlock_policy_rwsem_write(old_cpu);

		/* Best-effort restore of the sibling's link; the result is
		 * deliberately not checked — we fail with -EINVAL anyway. */
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");

		return -EINVAL;
	}

	return cpu_dev->id;
}
1162
cedb70af
SB
/*
 * __cpufreq_remove_dev_prepare - first phase of CPU removal.
 * @frozen: light-weight tear-down (suspend path): the policy is parked in
 *	cpufreq_cpu_data_fallback for later restore and sysfs is untouched.
 *
 * Stops the governor, remembers the governor name for a later re-add,
 * and, when the policy owner leaves a multi-cpu policy, nominates a new
 * owner CPU. The actual teardown happens in __cpufreq_remove_dev_finish().
 */
static int __cpufreq_remove_dev_prepare(struct device *dev,
					struct subsys_interface *sif,
					bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int new_cpu, ret;
	unsigned long flags;
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	policy = per_cpu(cpufreq_cpu_data, cpu);

	/* Save the policy somewhere when doing a light-weight tear-down */
	if (frozen)
		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (cpufreq_driver->target) {
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
		if (ret) {
			pr_err("%s: Failed to stop governor\n", __func__);
			return ret;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Remember the governor so a later hot-add can restore it. */
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			policy->governor->name, CPUFREQ_NAME_LEN);
#endif

	lock_policy_rwsem_read(cpu);
	cpus = cpumask_weight(policy->cpus);
	unlock_policy_rwsem_read(cpu);

	if (cpu != policy->cpu) {
		/* A non-owner CPU only has a symlink to remove. */
		if (!frozen)
			sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* Owner leaving a shared policy: pick a new owner. */
		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
		if (new_cpu >= 0) {
			update_policy_cpu(policy, new_cpu);

			if (!frozen) {
				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
					 __func__, new_cpu, cpu);
			}
		}
	}

	return 0;
}
1224
/*
 * __cpufreq_remove_dev_finish - second phase of CPU removal.
 *
 * Drops @dev's CPU from the policy mask. If it was the last user the
 * policy is dismantled: governor exits, the kobject is released (unless
 * @frozen), the driver's ->exit() runs, and the policy leaves the global
 * list. Otherwise the governor is simply restarted for the remaining CPUs.
 */
static int __cpufreq_remove_dev_finish(struct device *dev,
				       struct subsys_interface *sif,
				       bool frozen)
{
	unsigned int cpu = dev->id, cpus;
	int ret;
	unsigned long flags;
	struct cpufreq_policy *policy;
	struct kobject *kobj;
	struct completion *cmp;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	lock_policy_rwsem_write(cpu);
	cpus = cpumask_weight(policy->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, policy->cpus);
	unlock_policy_rwsem_write(cpu);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		if (cpufreq_driver->target) {
			ret = __cpufreq_governor(policy,
					CPUFREQ_GOV_POLICY_EXIT);
			if (ret) {
				pr_err("%s: Failed to exit governor\n",
						__func__);
				return ret;
			}
		}

		if (!frozen) {
			/* Snapshot under the lock, then drop the reference. */
			lock_policy_rwsem_read(cpu);
			kobj = &policy->kobj;
			cmp = &policy->kobj_unregister;
			unlock_policy_rwsem_read(cpu);
			kobject_put(kobj);

			/*
			 * We need to make sure that the underlying kobj is
			 * actually not referenced anymore by anybody before we
			 * proceed with unloading.
			 */
			pr_debug("waiting for dropping of refcount\n");
			wait_for_completion(cmp);
			pr_debug("wait complete\n");
		}

		/*
		 * Perform the ->exit() even during light-weight tear-down,
		 * since this is a core component, and is essential for the
		 * subsequent light-weight ->init() to succeed.
		 */
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);

		/* Remove policy from list of active policies */
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_del(&policy->policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		if (!frozen)
			cpufreq_policy_free(policy);
	} else {
		/* Other CPUs remain: restart the governor for them. */
		if (cpufreq_driver->target) {
			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
					(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
				pr_err("%s: Failed to start governor\n",
						__func__);
				return ret;
			}
		}
	}

	per_cpu(cpufreq_cpu_data, cpu) = NULL;
	return 0;
}
1310
cedb70af 1311/**
27a862e9 1312 * cpufreq_remove_dev - remove a CPU device
cedb70af
SB
1313 *
1314 * Removes the cpufreq interface for a CPU device.
cedb70af 1315 */
8a25a2fd 1316static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1317{
8a25a2fd 1318 unsigned int cpu = dev->id;
27a862e9 1319 int ret;
ec28297a
VP
1320
1321 if (cpu_is_offline(cpu))
1322 return 0;
1323
27a862e9
VK
1324 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1325
1326 if (!ret)
1327 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1328
1329 return ret;
5a01f2e8
VP
1330}
1331
65f27f38 1332static void handle_update(struct work_struct *work)
1da177e4 1333{
65f27f38
DH
1334 struct cpufreq_policy *policy =
1335 container_of(work, struct cpufreq_policy, update);
1336 unsigned int cpu = policy->cpu;
2d06d8c4 1337 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1338 cpufreq_update_policy(cpu);
1339}
1340
1341/**
bb176f7d
VK
1342 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1343 * in deep trouble.
1da177e4
LT
1344 * @cpu: cpu number
1345 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1346 * @new_freq: CPU frequency the CPU actually runs at
1347 *
29464f28
DJ
1348 * We adjust to current frequency first, and need to clean up later.
1349 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1350 */
e08f5f5b
GS
1351static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1352 unsigned int new_freq)
1da177e4 1353{
b43a7ffb 1354 struct cpufreq_policy *policy;
1da177e4 1355 struct cpufreq_freqs freqs;
b43a7ffb
VK
1356 unsigned long flags;
1357
2d06d8c4 1358 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1359 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1360
1da177e4
LT
1361 freqs.old = old_freq;
1362 freqs.new = new_freq;
b43a7ffb
VK
1363
1364 read_lock_irqsave(&cpufreq_driver_lock, flags);
1365 policy = per_cpu(cpufreq_cpu_data, cpu);
1366 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1367
1368 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1369 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1370}
1371
32ee8c3e 1372/**
4ab70df4 1373 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1374 * @cpu: CPU number
1375 *
1376 * This is the last known freq, without actually getting it from the driver.
1377 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1378 */
1379unsigned int cpufreq_quick_get(unsigned int cpu)
1380{
9e21ba8b 1381 struct cpufreq_policy *policy;
e08f5f5b 1382 unsigned int ret_freq = 0;
95235ca2 1383
1c3d85dd
RW
1384 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1385 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1386
1387 policy = cpufreq_cpu_get(cpu);
95235ca2 1388 if (policy) {
e08f5f5b 1389 ret_freq = policy->cur;
95235ca2
VP
1390 cpufreq_cpu_put(policy);
1391 }
1392
4d34a67d 1393 return ret_freq;
95235ca2
VP
1394}
1395EXPORT_SYMBOL(cpufreq_quick_get);
1396
3d737108
JB
1397/**
1398 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1399 * @cpu: CPU number
1400 *
1401 * Just return the max possible frequency for a given CPU.
1402 */
1403unsigned int cpufreq_quick_get_max(unsigned int cpu)
1404{
1405 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1406 unsigned int ret_freq = 0;
1407
1408 if (policy) {
1409 ret_freq = policy->max;
1410 cpufreq_cpu_put(policy);
1411 }
1412
1413 return ret_freq;
1414}
1415EXPORT_SYMBOL(cpufreq_quick_get_max);
1416
5a01f2e8 1417static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1418{
7a6aedfa 1419 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1420 unsigned int ret_freq = 0;
5800043b 1421
1c3d85dd 1422 if (!cpufreq_driver->get)
4d34a67d 1423 return ret_freq;
1da177e4 1424
1c3d85dd 1425 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1426
e08f5f5b 1427 if (ret_freq && policy->cur &&
1c3d85dd 1428 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1429 /* verify no discrepancy between actual and
1430 saved value exists */
1431 if (unlikely(ret_freq != policy->cur)) {
1432 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1433 schedule_work(&policy->update);
1434 }
1435 }
1436
4d34a67d 1437 return ret_freq;
5a01f2e8 1438}
1da177e4 1439
5a01f2e8
VP
1440/**
1441 * cpufreq_get - get the current CPU frequency (in kHz)
1442 * @cpu: CPU number
1443 *
1444 * Get the CPU current (static) CPU frequency
1445 */
1446unsigned int cpufreq_get(unsigned int cpu)
1447{
1448 unsigned int ret_freq = 0;
5a01f2e8 1449
26ca8694
VK
1450 if (cpufreq_disabled() || !cpufreq_driver)
1451 return -ENOENT;
1452
6eed9404
VK
1453 if (!down_read_trylock(&cpufreq_rwsem))
1454 return 0;
5a01f2e8 1455
1b750e3b 1456 lock_policy_rwsem_read(cpu);
5a01f2e8
VP
1457
1458 ret_freq = __cpufreq_get(cpu);
1459
1460 unlock_policy_rwsem_read(cpu);
6eed9404
VK
1461 up_read(&cpufreq_rwsem);
1462
4d34a67d 1463 return ret_freq;
1da177e4
LT
1464}
1465EXPORT_SYMBOL(cpufreq_get);
1466
8a25a2fd
KS
/* Glue into the CPU subsystem: invoked when CPU devices are added/removed. */
static struct subsys_interface cpufreq_interface = {
	.name = "cpufreq",
	.subsys = &cpu_subsys,
	.add_dev = cpufreq_add_dev,
	.remove_dev = cpufreq_remove_dev,
};
1473
42d4dc3f 1474/**
e00e56df
RW
1475 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1476 *
1477 * This function is only executed for the boot processor. The other CPUs
1478 * have been put offline by means of CPU hotplug.
42d4dc3f 1479 */
e00e56df 1480static int cpufreq_bp_suspend(void)
42d4dc3f 1481{
e08f5f5b 1482 int ret = 0;
4bc5d341 1483
e00e56df 1484 int cpu = smp_processor_id();
3a3e9e06 1485 struct cpufreq_policy *policy;
42d4dc3f 1486
2d06d8c4 1487 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1488
e00e56df 1489 /* If there's no policy for the boot CPU, we have nothing to do. */
3a3e9e06
VK
1490 policy = cpufreq_cpu_get(cpu);
1491 if (!policy)
e00e56df 1492 return 0;
42d4dc3f 1493
1c3d85dd 1494 if (cpufreq_driver->suspend) {
3a3e9e06 1495 ret = cpufreq_driver->suspend(policy);
ce6c3997 1496 if (ret)
42d4dc3f 1497 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
3a3e9e06 1498 "step on CPU %u\n", policy->cpu);
42d4dc3f
BH
1499 }
1500
3a3e9e06 1501 cpufreq_cpu_put(policy);
c9060494 1502 return ret;
42d4dc3f
BH
1503}
1504
1da177e4 1505/**
e00e56df 1506 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1507 *
1508 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1509 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1510 * restored. It will verify that the current freq is in sync with
1511 * what we believe it to be. This is a bit later than when it
1512 * should be, but nonethteless it's better than calling
1513 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1514 *
1515 * This function is only executed for the boot CPU. The other CPUs have not
1516 * been turned on yet.
1da177e4 1517 */
e00e56df 1518static void cpufreq_bp_resume(void)
1da177e4 1519{
e08f5f5b 1520 int ret = 0;
4bc5d341 1521
e00e56df 1522 int cpu = smp_processor_id();
3a3e9e06 1523 struct cpufreq_policy *policy;
1da177e4 1524
2d06d8c4 1525 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1526
e00e56df 1527 /* If there's no policy for the boot CPU, we have nothing to do. */
3a3e9e06
VK
1528 policy = cpufreq_cpu_get(cpu);
1529 if (!policy)
e00e56df 1530 return;
1da177e4 1531
1c3d85dd 1532 if (cpufreq_driver->resume) {
3a3e9e06 1533 ret = cpufreq_driver->resume(policy);
1da177e4
LT
1534 if (ret) {
1535 printk(KERN_ERR "cpufreq: resume failed in ->resume "
3a3e9e06 1536 "step on CPU %u\n", policy->cpu);
c9060494 1537 goto fail;
1da177e4
LT
1538 }
1539 }
1540
3a3e9e06 1541 schedule_work(&policy->update);
ce6c3997 1542
c9060494 1543fail:
3a3e9e06 1544 cpufreq_cpu_put(policy);
1da177e4
LT
1545}
1546
e00e56df
RW
/* Boot-CPU suspend/resume hooks, run late in the system PM path after
 * the non-boot CPUs have been taken offline. */
static struct syscore_ops cpufreq_syscore_ops = {
	.suspend = cpufreq_bp_suspend,
	.resume = cpufreq_bp_resume,
};
1551
9d95046e
BP
1552/**
1553 * cpufreq_get_current_driver - return current driver's name
1554 *
1555 * Return the name string of the currently loaded cpufreq driver
1556 * or NULL, if none.
1557 */
1558const char *cpufreq_get_current_driver(void)
1559{
1c3d85dd
RW
1560 if (cpufreq_driver)
1561 return cpufreq_driver->name;
1562
1563 return NULL;
9d95046e
BP
1564}
1565EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1566
1567/*********************************************************************
1568 * NOTIFIER LISTS INTERFACE *
1569 *********************************************************************/
1570
1571/**
1572 * cpufreq_register_notifier - register a driver with cpufreq
1573 * @nb: notifier function to register
1574 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1575 *
32ee8c3e 1576 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1577 * are notified about clock rate changes (once before and once after
1578 * the transition), or a list of drivers that are notified about
1579 * changes in cpufreq policy.
1580 *
1581 * This function may sleep, and has the same return conditions as
e041c683 1582 * blocking_notifier_chain_register.
1da177e4
LT
1583 */
1584int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1585{
1586 int ret;
1587
d5aaffa9
DB
1588 if (cpufreq_disabled())
1589 return -EINVAL;
1590
74212ca4
CEB
1591 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1592
1da177e4
LT
1593 switch (list) {
1594 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1595 ret = srcu_notifier_chain_register(
e041c683 1596 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1597 break;
1598 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1599 ret = blocking_notifier_chain_register(
1600 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1601 break;
1602 default:
1603 ret = -EINVAL;
1604 }
1da177e4
LT
1605
1606 return ret;
1607}
1608EXPORT_SYMBOL(cpufreq_register_notifier);
1609
1da177e4
LT
1610/**
1611 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1612 * @nb: notifier block to be unregistered
bb176f7d 1613 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1da177e4
LT
1614 *
1615 * Remove a driver from the CPU frequency notifier list.
1616 *
1617 * This function may sleep, and has the same return conditions as
e041c683 1618 * blocking_notifier_chain_unregister.
1da177e4
LT
1619 */
1620int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1621{
1622 int ret;
1623
d5aaffa9
DB
1624 if (cpufreq_disabled())
1625 return -EINVAL;
1626
1da177e4
LT
1627 switch (list) {
1628 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1629 ret = srcu_notifier_chain_unregister(
e041c683 1630 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1631 break;
1632 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1633 ret = blocking_notifier_chain_unregister(
1634 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1635 break;
1636 default:
1637 ret = -EINVAL;
1638 }
1da177e4
LT
1639
1640 return ret;
1641}
1642EXPORT_SYMBOL(cpufreq_unregister_notifier);
1643
1644
1645/*********************************************************************
1646 * GOVERNORS *
1647 *********************************************************************/
1648
1da177e4
LT
1649int __cpufreq_driver_target(struct cpufreq_policy *policy,
1650 unsigned int target_freq,
1651 unsigned int relation)
1652{
1653 int retval = -EINVAL;
7249924e 1654 unsigned int old_target_freq = target_freq;
c32b6b8e 1655
a7b422cd
KRW
1656 if (cpufreq_disabled())
1657 return -ENODEV;
1658
7249924e
VK
1659 /* Make sure that target_freq is within supported range */
1660 if (target_freq > policy->max)
1661 target_freq = policy->max;
1662 if (target_freq < policy->min)
1663 target_freq = policy->min;
1664
1665 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1666 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1667
1668 if (target_freq == policy->cur)
1669 return 0;
1670
1c3d85dd
RW
1671 if (cpufreq_driver->target)
1672 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1673
1da177e4
LT
1674 return retval;
1675}
1676EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1677
1da177e4
LT
1678int cpufreq_driver_target(struct cpufreq_policy *policy,
1679 unsigned int target_freq,
1680 unsigned int relation)
1681{
f1829e4a 1682 int ret = -EINVAL;
1da177e4 1683
1b750e3b 1684 lock_policy_rwsem_write(policy->cpu);
1da177e4
LT
1685
1686 ret = __cpufreq_driver_target(policy, target_freq, relation);
1687
5a01f2e8 1688 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1689
1da177e4
LT
1690 return ret;
1691}
1692EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1693
/*
 * __cpufreq_governor - drive the governor state machine for @policy.
 * @event: CPUFREQ_GOV_POLICY_INIT/START/STOP/LIMITS/POLICY_EXIT.
 *
 * Falls back to the performance governor when the chosen governor cannot
 * cope with the hardware's transition latency, takes/drops the governor
 * module reference across POLICY_INIT/POLICY_EXIT, and tracks
 * policy->governor_enabled so START/STOP/LIMITS are rejected (-EBUSY)
 * when issued in the wrong state.
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event)
{
	int ret;

	/* Only must be defined when default governor is known to have latency
	   restrictions, like e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	/* Pin the governor module for the lifetime of the policy binding. */
	if (event == CPUFREQ_GOV_POLICY_INIT)
		if (!try_module_get(policy->governor->owner))
			return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);

	mutex_lock(&cpufreq_governor_lock);
	/* Reject START on a running governor and STOP/LIMITS on a stopped one. */
	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
	    || (!policy->governor_enabled
	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
		mutex_unlock(&cpufreq_governor_lock);
		return -EBUSY;
	}

	if (event == CPUFREQ_GOV_STOP)
		policy->governor_enabled = false;
	else if (event == CPUFREQ_GOV_START)
		policy->governor_enabled = true;

	mutex_unlock(&cpufreq_governor_lock);

	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	} else {
		/* Restore original values */
		mutex_lock(&cpufreq_governor_lock);
		if (event == CPUFREQ_GOV_STOP)
			policy->governor_enabled = true;
		else if (event == CPUFREQ_GOV_START)
			policy->governor_enabled = false;
		mutex_unlock(&cpufreq_governor_lock);
	}

	/* Drop the module ref on failed INIT or successful EXIT. */
	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
		module_put(policy->governor->owner);

	return ret;
}
1772}
1773
1da177e4
LT
1774int cpufreq_register_governor(struct cpufreq_governor *governor)
1775{
3bcb09a3 1776 int err;
1da177e4
LT
1777
1778 if (!governor)
1779 return -EINVAL;
1780
a7b422cd
KRW
1781 if (cpufreq_disabled())
1782 return -ENODEV;
1783
3fc54d37 1784 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1785
b394058f 1786 governor->initialized = 0;
3bcb09a3
JF
1787 err = -EBUSY;
1788 if (__find_governor(governor->name) == NULL) {
1789 err = 0;
1790 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1791 }
1da177e4 1792
32ee8c3e 1793 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1794 return err;
1da177e4
LT
1795}
1796EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1797
1da177e4
LT
1798void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1799{
90e41bac
PB
1800#ifdef CONFIG_HOTPLUG_CPU
1801 int cpu;
1802#endif
1803
1da177e4
LT
1804 if (!governor)
1805 return;
1806
a7b422cd
KRW
1807 if (cpufreq_disabled())
1808 return;
1809
90e41bac
PB
1810#ifdef CONFIG_HOTPLUG_CPU
1811 for_each_present_cpu(cpu) {
1812 if (cpu_online(cpu))
1813 continue;
1814 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1815 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1816 }
1817#endif
1818
3fc54d37 1819 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1820 list_del(&governor->governor_list);
3fc54d37 1821 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1822 return;
1823}
1824EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1825
1826
1da177e4
LT
1827/*********************************************************************
1828 * POLICY INTERFACE *
1829 *********************************************************************/
1830
1831/**
1832 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1833 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1834 * is written
1da177e4
LT
1835 *
1836 * Reads the current cpufreq policy.
1837 */
1838int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1839{
1840 struct cpufreq_policy *cpu_policy;
1841 if (!policy)
1842 return -EINVAL;
1843
1844 cpu_policy = cpufreq_cpu_get(cpu);
1845 if (!cpu_policy)
1846 return -EINVAL;
1847
d5b73cd8 1848 memcpy(policy, cpu_policy, sizeof(*policy));
1da177e4
LT
1849
1850 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1851 return 0;
1852}
1853EXPORT_SYMBOL(cpufreq_get_policy);
1854
/*
 * cpufreq_set_policy - apply @new_policy on top of @policy.
 * @policy: current policy.
 * @new_policy: policy to be set.
 *
 * Validates the new limits against the driver and policy notifiers,
 * copies min/max into @policy, and either calls the driver's
 * ->setpolicy() or performs a governor switch (stopping/exiting the old
 * governor, starting the new one, and rolling back to the old governor
 * on failure). Caller holds the policy write lock, which is dropped and
 * retaken around governor EXIT calls.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
		new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/* Reject ranges that don't overlap the currently valid one. */
	if (new_policy->min > policy->max || new_policy->max < policy->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
					policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(new_policy);
	} else {
		if (new_policy->governor != policy->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = policy->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (policy->governor) {
				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
				/* Drop the write lock: POLICY_EXIT may need it. */
				unlock_policy_rwsem_write(new_policy->cpu);
				__cpufreq_governor(policy,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(new_policy->cpu);
			}

			/* start new governor */
			policy->governor = new_policy->governor;
			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					/* START failed: unwind the INIT. */
					unlock_policy_rwsem_write(new_policy->cpu);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(new_policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							policy->governor->name);
				if (old_gov) {
					policy->governor = old_gov;
					__cpufreq_governor(policy,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(policy,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
1961
1da177e4
LT
1962/**
1963 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1964 * @cpu: CPU which shall be re-evaluated
1965 *
25985edc 1966 * Useful for policy notifiers which have different necessities
1da177e4
LT
1967 * at different times.
1968 */
1969int cpufreq_update_policy(unsigned int cpu)
1970{
3a3e9e06
VK
1971 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1972 struct cpufreq_policy new_policy;
f1829e4a 1973 int ret;
1da177e4 1974
3a3e9e06 1975 if (!policy) {
f1829e4a
JL
1976 ret = -ENODEV;
1977 goto no_policy;
1978 }
1da177e4 1979
1b750e3b 1980 lock_policy_rwsem_write(cpu);
1da177e4 1981
2d06d8c4 1982 pr_debug("updating policy for CPU %u\n", cpu);
d5b73cd8 1983 memcpy(&new_policy, policy, sizeof(*policy));
3a3e9e06
VK
1984 new_policy.min = policy->user_policy.min;
1985 new_policy.max = policy->user_policy.max;
1986 new_policy.policy = policy->user_policy.policy;
1987 new_policy.governor = policy->user_policy.governor;
1da177e4 1988
bb176f7d
VK
1989 /*
1990 * BIOS might change freq behind our back
1991 * -> ask driver for current freq and notify governors about a change
1992 */
1c3d85dd 1993 if (cpufreq_driver->get) {
3a3e9e06
VK
1994 new_policy.cur = cpufreq_driver->get(cpu);
1995 if (!policy->cur) {
2d06d8c4 1996 pr_debug("Driver did not initialize current freq");
3a3e9e06 1997 policy->cur = new_policy.cur;
a85f7bd3 1998 } else {
3a3e9e06
VK
1999 if (policy->cur != new_policy.cur && cpufreq_driver->target)
2000 cpufreq_out_of_sync(cpu, policy->cur,
2001 new_policy.cur);
a85f7bd3 2002 }
0961dd0d
TR
2003 }
2004
037ce839 2005 ret = cpufreq_set_policy(policy, &new_policy);
1da177e4 2006
5a01f2e8
VP
2007 unlock_policy_rwsem_write(cpu);
2008
3a3e9e06 2009 cpufreq_cpu_put(policy);
f1829e4a 2010no_policy:
1da177e4
LT
2011 return ret;
2012}
2013EXPORT_SYMBOL(cpufreq_update_policy);
2014
2760984f 2015static int cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
2016 unsigned long action, void *hcpu)
2017{
2018 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 2019 struct device *dev;
5302c3fb 2020 bool frozen = false;
c32b6b8e 2021
8a25a2fd
KS
2022 dev = get_cpu_device(cpu);
2023 if (dev) {
5302c3fb
SB
2024
2025 if (action & CPU_TASKS_FROZEN)
2026 frozen = true;
2027
2028 switch (action & ~CPU_TASKS_FROZEN) {
c32b6b8e 2029 case CPU_ONLINE:
5302c3fb 2030 __cpufreq_add_dev(dev, NULL, frozen);
23d32899 2031 cpufreq_update_policy(cpu);
c32b6b8e 2032 break;
5302c3fb 2033
c32b6b8e 2034 case CPU_DOWN_PREPARE:
cedb70af 2035 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
1aee40ac
SB
2036 break;
2037
2038 case CPU_POST_DEAD:
cedb70af 2039 __cpufreq_remove_dev_finish(dev, NULL, frozen);
c32b6b8e 2040 break;
5302c3fb 2041
5a01f2e8 2042 case CPU_DOWN_FAILED:
5302c3fb 2043 __cpufreq_add_dev(dev, NULL, frozen);
c32b6b8e
AR
2044 break;
2045 }
2046 }
2047 return NOTIFY_OK;
2048}
2049
/* Hotplug notifier block; registered in cpufreq_register_driver() */
static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
1da177e4
LT
2053
2054/*********************************************************************
2055 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2056 *********************************************************************/
2057
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/* a driver must provide ->verify and ->init, and at least one of
	 * ->setpolicy (driver manages frequency itself) or ->target */
	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	/* publish the driver under the writer lock; only one driver may be
	 * registered at a time */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EEXIST;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* triggers ->init() for every present CPU via the subsys interface */
	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	/* unwind: clear the global driver pointer under the writer lock */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1da177e4
LT
2130/**
2131 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2132 *
bb176f7d 2133 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
2134 * the right to do so, i.e. if you have succeeded in initialising before!
2135 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2136 * currently not initialised.
2137 */
221dee28 2138int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
2139{
2140 unsigned long flags;
2141
1c3d85dd 2142 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 2143 return -EINVAL;
1da177e4 2144
2d06d8c4 2145 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 2146
8a25a2fd 2147 subsys_interface_unregister(&cpufreq_interface);
65edc68c 2148 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 2149
6eed9404 2150 down_write(&cpufreq_rwsem);
0d1857a1 2151 write_lock_irqsave(&cpufreq_driver_lock, flags);
6eed9404 2152
1c3d85dd 2153 cpufreq_driver = NULL;
6eed9404 2154
0d1857a1 2155 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
6eed9404 2156 up_write(&cpufreq_rwsem);
1da177e4
LT
2157
2158 return 0;
2159}
2160EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
2161
2162static int __init cpufreq_core_init(void)
2163{
2164 int cpu;
2165
a7b422cd
KRW
2166 if (cpufreq_disabled())
2167 return -ENODEV;
2168
474deff7 2169 for_each_possible_cpu(cpu)
5a01f2e8 2170 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
8aa84ad8 2171
2361be23 2172 cpufreq_global_kobject = kobject_create();
8aa84ad8 2173 BUG_ON(!cpufreq_global_kobject);
e00e56df 2174 register_syscore_ops(&cpufreq_syscore_ops);
8aa84ad8 2175
5a01f2e8
VP
2176 return 0;
2177}
5a01f2e8 2178core_initcall(cpufreq_core_init);
This page took 0.846593 seconds and 5 git commands to generate.