cpufreq: governors: Move get_governor_parent_kobj() to cpufreq.c
[deliverable/linux.git] / drivers / cpufreq / cpufreq.c
CommitLineData
1da177e4
LT
1/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 *
c32b6b8e 7 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
32ee8c3e 8 * Added handling for CPU hotplug
8ff69732
DJ
9 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
10 * Fix handling for CPU hotplug -- affected CPUs
c32b6b8e 11 *
1da177e4
LT
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 *
16 */
17
db701151
VK
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
1da177e4
LT
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/notifier.h>
24#include <linux/cpufreq.h>
25#include <linux/delay.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/device.h>
29#include <linux/slab.h>
30#include <linux/cpu.h>
31#include <linux/completion.h>
3fc54d37 32#include <linux/mutex.h>
e00e56df 33#include <linux/syscore_ops.h>
1da177e4 34
6f4f2723
TR
35#include <trace/events/power.h>
36
1da177e4 37/**
cd878479 38 * The "cpufreq driver" - the arch- or hardware-dependent low
1da177e4
LT
39 * level driver of CPUFreq support, and its spinlock. This lock
40 * also protects the cpufreq_cpu_data array.
41 */
1c3d85dd 42static struct cpufreq_driver *cpufreq_driver;
7a6aedfa 43static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
084f3493
TR
44#ifdef CONFIG_HOTPLUG_CPU
45/* This one keeps track of the previously set governor of a removed CPU */
e77b89f1 46static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
084f3493 47#endif
0d1857a1 48static DEFINE_RWLOCK(cpufreq_driver_lock);
1da177e4 49
5a01f2e8
VP
50/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
5a01f2e8
VP
62 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
395913d0
MD
64 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
5a01f2e8 66 */
f1625066 67static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
5a01f2e8
VP
68static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69
70#define lock_policy_rwsem(mode, cpu) \
fa1d8af4 71static int lock_policy_rwsem_##mode(int cpu) \
5a01f2e8 72{ \
f1625066 73 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
5a01f2e8
VP
74 BUG_ON(policy_cpu == -1); \
75 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8
VP
76 \
77 return 0; \
78}
79
80lock_policy_rwsem(read, cpu);
5a01f2e8 81lock_policy_rwsem(write, cpu);
5a01f2e8 82
fa1d8af4
VK
83#define unlock_policy_rwsem(mode, cpu) \
84static void unlock_policy_rwsem_##mode(int cpu) \
85{ \
86 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
87 BUG_ON(policy_cpu == -1); \
88 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
5a01f2e8 89}
5a01f2e8 90
fa1d8af4
VK
91unlock_policy_rwsem(read, cpu);
92unlock_policy_rwsem(write, cpu);
5a01f2e8 93
1da177e4 94/* internal prototypes */
29464f28
DJ
95static int __cpufreq_governor(struct cpufreq_policy *policy,
96 unsigned int event);
5a01f2e8 97static unsigned int __cpufreq_get(unsigned int cpu);
65f27f38 98static void handle_update(struct work_struct *work);
1da177e4
LT
99
100/**
32ee8c3e
DJ
101 * Two notifier lists: the "policy" list is involved in the
102 * validation process for a new CPU frequency policy; the
1da177e4
LT
103 * "transition" list for kernel code that needs to handle
104 * changes to devices when the CPU clock speed changes.
105 * The mutex locks both lists.
106 */
e041c683 107static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
b4dfdbb3 108static struct srcu_notifier_head cpufreq_transition_notifier_list;
1da177e4 109
74212ca4 110static bool init_cpufreq_transition_notifier_list_called;
b4dfdbb3
AS
111static int __init init_cpufreq_transition_notifier_list(void)
112{
113 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
74212ca4 114 init_cpufreq_transition_notifier_list_called = true;
b4dfdbb3
AS
115 return 0;
116}
b3438f82 117pure_initcall(init_cpufreq_transition_notifier_list);
1da177e4 118
a7b422cd 119static int off __read_mostly;
da584455 120static int cpufreq_disabled(void)
a7b422cd
KRW
121{
122 return off;
123}
124void disable_cpufreq(void)
125{
126 off = 1;
127}
1da177e4 128static LIST_HEAD(cpufreq_governor_list);
29464f28 129static DEFINE_MUTEX(cpufreq_governor_mutex);
1da177e4 130
4d5dcc42
VK
131bool have_governor_per_policy(void)
132{
1c3d85dd 133 return cpufreq_driver->have_governor_per_policy;
4d5dcc42 134}
3f869d6d 135EXPORT_SYMBOL_GPL(have_governor_per_policy);
4d5dcc42 136
944e9a03
VK
137struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
138{
139 if (have_governor_per_policy())
140 return &policy->kobj;
141 else
142 return cpufreq_global_kobject;
143}
144EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
145
a9144436 146static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
1da177e4
LT
147{
148 struct cpufreq_policy *data;
149 unsigned long flags;
150
7a6aedfa 151 if (cpu >= nr_cpu_ids)
1da177e4
LT
152 goto err_out;
153
154 /* get the cpufreq driver */
1c3d85dd 155 read_lock_irqsave(&cpufreq_driver_lock, flags);
1da177e4 156
1c3d85dd 157 if (!cpufreq_driver)
1da177e4
LT
158 goto err_out_unlock;
159
1c3d85dd 160 if (!try_module_get(cpufreq_driver->owner))
1da177e4
LT
161 goto err_out_unlock;
162
163
164 /* get the CPU */
7a6aedfa 165 data = per_cpu(cpufreq_cpu_data, cpu);
1da177e4
LT
166
167 if (!data)
168 goto err_out_put_module;
169
a9144436 170 if (!sysfs && !kobject_get(&data->kobj))
1da177e4
LT
171 goto err_out_put_module;
172
0d1857a1 173 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
174 return data;
175
7d5e350f 176err_out_put_module:
1c3d85dd 177 module_put(cpufreq_driver->owner);
5800043b 178err_out_unlock:
1c3d85dd 179 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
7d5e350f 180err_out:
1da177e4
LT
181 return NULL;
182}
a9144436
SB
183
184struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
185{
d5aaffa9
DB
186 if (cpufreq_disabled())
187 return NULL;
188
a9144436
SB
189 return __cpufreq_cpu_get(cpu, false);
190}
1da177e4
LT
191EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
192
a9144436
SB
193static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
194{
195 return __cpufreq_cpu_get(cpu, true);
196}
197
198static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
199{
200 if (!sysfs)
201 kobject_put(&data->kobj);
1c3d85dd 202 module_put(cpufreq_driver->owner);
a9144436 203}
7d5e350f 204
1da177e4
LT
205void cpufreq_cpu_put(struct cpufreq_policy *data)
206{
d5aaffa9
DB
207 if (cpufreq_disabled())
208 return;
209
a9144436 210 __cpufreq_cpu_put(data, false);
1da177e4
LT
211}
212EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
213
a9144436
SB
214static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
215{
216 __cpufreq_cpu_put(data, true);
217}
1da177e4 218
1da177e4
LT
219/*********************************************************************
220 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
221 *********************************************************************/
222
223/**
224 * adjust_jiffies - adjust the system "loops_per_jiffy"
225 *
226 * This function alters the system "loops_per_jiffy" for the clock
227 * speed change. Note that loops_per_jiffy cannot be updated on SMP
32ee8c3e 228 * systems as each CPU might be scaled differently. So, use the arch
1da177e4
LT
229 * per-CPU loops_per_jiffy value wherever possible.
230 */
231#ifndef CONFIG_SMP
232static unsigned long l_p_j_ref;
233static unsigned int l_p_j_ref_freq;
234
858119e1 235static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
1da177e4
LT
236{
237 if (ci->flags & CPUFREQ_CONST_LOOPS)
238 return;
239
240 if (!l_p_j_ref_freq) {
241 l_p_j_ref = loops_per_jiffy;
242 l_p_j_ref_freq = ci->old;
2d06d8c4 243 pr_debug("saving %lu as reference value for loops_per_jiffy; "
e08f5f5b 244 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
1da177e4 245 }
d08de0c1 246 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
42d4dc3f 247 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
e08f5f5b
GS
248 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
249 ci->new);
2d06d8c4 250 pr_debug("scaling loops_per_jiffy to %lu "
e08f5f5b 251 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
1da177e4
LT
252 }
253}
254#else
e08f5f5b
GS
255static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
256{
257 return;
258}
1da177e4
LT
259#endif
260
261
b43a7ffb
VK
262void __cpufreq_notify_transition(struct cpufreq_policy *policy,
263 struct cpufreq_freqs *freqs, unsigned int state)
1da177e4
LT
264{
265 BUG_ON(irqs_disabled());
266
d5aaffa9
DB
267 if (cpufreq_disabled())
268 return;
269
1c3d85dd 270 freqs->flags = cpufreq_driver->flags;
2d06d8c4 271 pr_debug("notification %u of frequency transition to %u kHz\n",
e4472cb3 272 state, freqs->new);
1da177e4 273
1da177e4 274 switch (state) {
e4472cb3 275
1da177e4 276 case CPUFREQ_PRECHANGE:
32ee8c3e 277 /* detect if the driver reported a value as "old frequency"
e4472cb3
DJ
278 * which is not equal to what the cpufreq core thinks is
279 * "old frequency".
1da177e4 280 */
1c3d85dd 281 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e4472cb3
DJ
282 if ((policy) && (policy->cpu == freqs->cpu) &&
283 (policy->cur) && (policy->cur != freqs->old)) {
2d06d8c4 284 pr_debug("Warning: CPU frequency is"
e4472cb3
DJ
285 " %u, cpufreq assumed %u kHz.\n",
286 freqs->old, policy->cur);
287 freqs->old = policy->cur;
1da177e4
LT
288 }
289 }
b4dfdbb3 290 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 291 CPUFREQ_PRECHANGE, freqs);
1da177e4
LT
292 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
293 break;
e4472cb3 294
1da177e4
LT
295 case CPUFREQ_POSTCHANGE:
296 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
2d06d8c4 297 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
6f4f2723 298 (unsigned long)freqs->cpu);
25e41933 299 trace_cpu_frequency(freqs->new, freqs->cpu);
b4dfdbb3 300 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
e041c683 301 CPUFREQ_POSTCHANGE, freqs);
e4472cb3
DJ
302 if (likely(policy) && likely(policy->cpu == freqs->cpu))
303 policy->cur = freqs->new;
1da177e4
LT
304 break;
305 }
1da177e4 306}
b43a7ffb
VK
307/**
308 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
309 * on frequency transition.
310 *
311 * This function calls the transition notifiers and the "adjust_jiffies"
312 * function. It is called twice on all CPU frequency changes that have
313 * external effects.
314 */
315void cpufreq_notify_transition(struct cpufreq_policy *policy,
316 struct cpufreq_freqs *freqs, unsigned int state)
317{
318 for_each_cpu(freqs->cpu, policy->cpus)
319 __cpufreq_notify_transition(policy, freqs, state);
320}
1da177e4
LT
321EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
322
323
324
325/*********************************************************************
326 * SYSFS INTERFACE *
327 *********************************************************************/
328
3bcb09a3
JF
329static struct cpufreq_governor *__find_governor(const char *str_governor)
330{
331 struct cpufreq_governor *t;
332
333 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
29464f28 334 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
3bcb09a3
JF
335 return t;
336
337 return NULL;
338}
339
1da177e4
LT
340/**
341 * cpufreq_parse_governor - parse a governor string
342 */
905d77cd 343static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
1da177e4
LT
344 struct cpufreq_governor **governor)
345{
3bcb09a3 346 int err = -EINVAL;
1c3d85dd
RW
347
348 if (!cpufreq_driver)
3bcb09a3
JF
349 goto out;
350
1c3d85dd 351 if (cpufreq_driver->setpolicy) {
1da177e4
LT
352 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
353 *policy = CPUFREQ_POLICY_PERFORMANCE;
3bcb09a3 354 err = 0;
e08f5f5b
GS
355 } else if (!strnicmp(str_governor, "powersave",
356 CPUFREQ_NAME_LEN)) {
1da177e4 357 *policy = CPUFREQ_POLICY_POWERSAVE;
3bcb09a3 358 err = 0;
1da177e4 359 }
1c3d85dd 360 } else if (cpufreq_driver->target) {
1da177e4 361 struct cpufreq_governor *t;
3bcb09a3 362
3fc54d37 363 mutex_lock(&cpufreq_governor_mutex);
3bcb09a3
JF
364
365 t = __find_governor(str_governor);
366
ea714970 367 if (t == NULL) {
1a8e1463 368 int ret;
ea714970 369
1a8e1463
KC
370 mutex_unlock(&cpufreq_governor_mutex);
371 ret = request_module("cpufreq_%s", str_governor);
372 mutex_lock(&cpufreq_governor_mutex);
ea714970 373
1a8e1463
KC
374 if (ret == 0)
375 t = __find_governor(str_governor);
ea714970
JF
376 }
377
3bcb09a3
JF
378 if (t != NULL) {
379 *governor = t;
380 err = 0;
1da177e4 381 }
3bcb09a3 382
3fc54d37 383 mutex_unlock(&cpufreq_governor_mutex);
1da177e4 384 }
29464f28 385out:
3bcb09a3 386 return err;
1da177e4 387}
1da177e4
LT
388
389
1da177e4 390/**
e08f5f5b
GS
391 * cpufreq_per_cpu_attr_read() / show_##file_name() -
392 * print out cpufreq information
1da177e4
LT
393 *
394 * Write out information from cpufreq_driver->policy[cpu]; object must be
395 * "unsigned int".
396 */
397
32ee8c3e
DJ
398#define show_one(file_name, object) \
399static ssize_t show_##file_name \
905d77cd 400(struct cpufreq_policy *policy, char *buf) \
32ee8c3e 401{ \
29464f28 402 return sprintf(buf, "%u\n", policy->object); \
1da177e4
LT
403}
404
405show_one(cpuinfo_min_freq, cpuinfo.min_freq);
406show_one(cpuinfo_max_freq, cpuinfo.max_freq);
ed129784 407show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
1da177e4
LT
408show_one(scaling_min_freq, min);
409show_one(scaling_max_freq, max);
410show_one(scaling_cur_freq, cur);
411
e08f5f5b
GS
412static int __cpufreq_set_policy(struct cpufreq_policy *data,
413 struct cpufreq_policy *policy);
7970e08b 414
1da177e4
LT
415/**
416 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
417 */
418#define store_one(file_name, object) \
419static ssize_t store_##file_name \
905d77cd 420(struct cpufreq_policy *policy, const char *buf, size_t count) \
1da177e4 421{ \
f55c9c26 422 unsigned int ret; \
1da177e4
LT
423 struct cpufreq_policy new_policy; \
424 \
425 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
426 if (ret) \
427 return -EINVAL; \
428 \
29464f28 429 ret = sscanf(buf, "%u", &new_policy.object); \
1da177e4
LT
430 if (ret != 1) \
431 return -EINVAL; \
432 \
7970e08b
TR
433 ret = __cpufreq_set_policy(policy, &new_policy); \
434 policy->user_policy.object = policy->object; \
1da177e4
LT
435 \
436 return ret ? ret : count; \
437}
438
29464f28
DJ
439store_one(scaling_min_freq, min);
440store_one(scaling_max_freq, max);
1da177e4
LT
441
442/**
443 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
444 */
905d77cd
DJ
445static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
446 char *buf)
1da177e4 447{
5a01f2e8 448 unsigned int cur_freq = __cpufreq_get(policy->cpu);
1da177e4
LT
449 if (!cur_freq)
450 return sprintf(buf, "<unknown>");
451 return sprintf(buf, "%u\n", cur_freq);
452}
453
454
455/**
456 * show_scaling_governor - show the current policy for the specified CPU
457 */
905d77cd 458static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
1da177e4 459{
29464f28 460 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
1da177e4
LT
461 return sprintf(buf, "powersave\n");
462 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
463 return sprintf(buf, "performance\n");
464 else if (policy->governor)
4b972f0b 465 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
29464f28 466 policy->governor->name);
1da177e4
LT
467 return -EINVAL;
468}
469
470
471/**
472 * store_scaling_governor - store policy for the specified CPU
473 */
905d77cd
DJ
474static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
475 const char *buf, size_t count)
1da177e4 476{
f55c9c26 477 unsigned int ret;
1da177e4
LT
478 char str_governor[16];
479 struct cpufreq_policy new_policy;
480
481 ret = cpufreq_get_policy(&new_policy, policy->cpu);
482 if (ret)
483 return ret;
484
29464f28 485 ret = sscanf(buf, "%15s", str_governor);
1da177e4
LT
486 if (ret != 1)
487 return -EINVAL;
488
e08f5f5b
GS
489 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
490 &new_policy.governor))
1da177e4
LT
491 return -EINVAL;
492
7970e08b
TR
493 /* Do not use cpufreq_set_policy here or the user_policy.max
494 will be wrongly overridden */
7970e08b
TR
495 ret = __cpufreq_set_policy(policy, &new_policy);
496
497 policy->user_policy.policy = policy->policy;
498 policy->user_policy.governor = policy->governor;
7970e08b 499
e08f5f5b
GS
500 if (ret)
501 return ret;
502 else
503 return count;
1da177e4
LT
504}
505
506/**
507 * show_scaling_driver - show the cpufreq driver currently loaded
508 */
905d77cd 509static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
1da177e4 510{
1c3d85dd 511 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
1da177e4
LT
512}
513
514/**
515 * show_scaling_available_governors - show the available CPUfreq governors
516 */
905d77cd
DJ
517static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
518 char *buf)
1da177e4
LT
519{
520 ssize_t i = 0;
521 struct cpufreq_governor *t;
522
1c3d85dd 523 if (!cpufreq_driver->target) {
1da177e4
LT
524 i += sprintf(buf, "performance powersave");
525 goto out;
526 }
527
528 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
29464f28
DJ
529 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
530 - (CPUFREQ_NAME_LEN + 2)))
1da177e4 531 goto out;
4b972f0b 532 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
1da177e4 533 }
7d5e350f 534out:
1da177e4
LT
535 i += sprintf(&buf[i], "\n");
536 return i;
537}
e8628dd0 538
835481d9 539static ssize_t show_cpus(const struct cpumask *mask, char *buf)
1da177e4
LT
540{
541 ssize_t i = 0;
542 unsigned int cpu;
543
835481d9 544 for_each_cpu(cpu, mask) {
1da177e4
LT
545 if (i)
546 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
547 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
548 if (i >= (PAGE_SIZE - 5))
29464f28 549 break;
1da177e4
LT
550 }
551 i += sprintf(&buf[i], "\n");
552 return i;
553}
554
e8628dd0
DW
555/**
556 * show_related_cpus - show the CPUs affected by each transition even if
557 * hw coordination is in use
558 */
559static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
560{
e8628dd0
DW
561 return show_cpus(policy->related_cpus, buf);
562}
563
564/**
565 * show_affected_cpus - show the CPUs affected by each transition
566 */
567static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
568{
569 return show_cpus(policy->cpus, buf);
570}
571
9e76988e 572static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
905d77cd 573 const char *buf, size_t count)
9e76988e
VP
574{
575 unsigned int freq = 0;
576 unsigned int ret;
577
879000f9 578 if (!policy->governor || !policy->governor->store_setspeed)
9e76988e
VP
579 return -EINVAL;
580
581 ret = sscanf(buf, "%u", &freq);
582 if (ret != 1)
583 return -EINVAL;
584
585 policy->governor->store_setspeed(policy, freq);
586
587 return count;
588}
589
590static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
591{
879000f9 592 if (!policy->governor || !policy->governor->show_setspeed)
9e76988e
VP
593 return sprintf(buf, "<unsupported>\n");
594
595 return policy->governor->show_setspeed(policy, buf);
596}
1da177e4 597
e2f74f35 598/**
8bf1ac72 599 * show_bios_limit - show the current cpufreq HW/BIOS limitation
e2f74f35
TR
600 */
601static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
602{
603 unsigned int limit;
604 int ret;
1c3d85dd
RW
605 if (cpufreq_driver->bios_limit) {
606 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
e2f74f35
TR
607 if (!ret)
608 return sprintf(buf, "%u\n", limit);
609 }
610 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
611}
612
6dad2a29
BP
613cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
614cpufreq_freq_attr_ro(cpuinfo_min_freq);
615cpufreq_freq_attr_ro(cpuinfo_max_freq);
616cpufreq_freq_attr_ro(cpuinfo_transition_latency);
617cpufreq_freq_attr_ro(scaling_available_governors);
618cpufreq_freq_attr_ro(scaling_driver);
619cpufreq_freq_attr_ro(scaling_cur_freq);
620cpufreq_freq_attr_ro(bios_limit);
621cpufreq_freq_attr_ro(related_cpus);
622cpufreq_freq_attr_ro(affected_cpus);
623cpufreq_freq_attr_rw(scaling_min_freq);
624cpufreq_freq_attr_rw(scaling_max_freq);
625cpufreq_freq_attr_rw(scaling_governor);
626cpufreq_freq_attr_rw(scaling_setspeed);
1da177e4 627
905d77cd 628static struct attribute *default_attrs[] = {
1da177e4
LT
629 &cpuinfo_min_freq.attr,
630 &cpuinfo_max_freq.attr,
ed129784 631 &cpuinfo_transition_latency.attr,
1da177e4
LT
632 &scaling_min_freq.attr,
633 &scaling_max_freq.attr,
634 &affected_cpus.attr,
e8628dd0 635 &related_cpus.attr,
1da177e4
LT
636 &scaling_governor.attr,
637 &scaling_driver.attr,
638 &scaling_available_governors.attr,
9e76988e 639 &scaling_setspeed.attr,
1da177e4
LT
640 NULL
641};
642
8aa84ad8
TR
/* Global /sys/devices/system/cpu/cpufreq anchor for non-per-policy data. */
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

/* container_of helpers for the sysfs show/store dispatchers below. */
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)
1da177e4 648
29464f28 649static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
1da177e4 650{
905d77cd
DJ
651 struct cpufreq_policy *policy = to_policy(kobj);
652 struct freq_attr *fattr = to_attr(attr);
0db4a8a9 653 ssize_t ret = -EINVAL;
a9144436 654 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 655 if (!policy)
0db4a8a9 656 goto no_policy;
5a01f2e8
VP
657
658 if (lock_policy_rwsem_read(policy->cpu) < 0)
0db4a8a9 659 goto fail;
5a01f2e8 660
e08f5f5b
GS
661 if (fattr->show)
662 ret = fattr->show(policy, buf);
663 else
664 ret = -EIO;
665
5a01f2e8 666 unlock_policy_rwsem_read(policy->cpu);
0db4a8a9 667fail:
a9144436 668 cpufreq_cpu_put_sysfs(policy);
0db4a8a9 669no_policy:
1da177e4
LT
670 return ret;
671}
672
905d77cd
DJ
673static ssize_t store(struct kobject *kobj, struct attribute *attr,
674 const char *buf, size_t count)
1da177e4 675{
905d77cd
DJ
676 struct cpufreq_policy *policy = to_policy(kobj);
677 struct freq_attr *fattr = to_attr(attr);
a07530b4 678 ssize_t ret = -EINVAL;
a9144436 679 policy = cpufreq_cpu_get_sysfs(policy->cpu);
1da177e4 680 if (!policy)
a07530b4 681 goto no_policy;
5a01f2e8
VP
682
683 if (lock_policy_rwsem_write(policy->cpu) < 0)
a07530b4 684 goto fail;
5a01f2e8 685
e08f5f5b
GS
686 if (fattr->store)
687 ret = fattr->store(policy, buf, count);
688 else
689 ret = -EIO;
690
5a01f2e8 691 unlock_policy_rwsem_write(policy->cpu);
a07530b4 692fail:
a9144436 693 cpufreq_cpu_put_sysfs(policy);
a07530b4 694no_policy:
1da177e4
LT
695 return ret;
696}
697
905d77cd 698static void cpufreq_sysfs_release(struct kobject *kobj)
1da177e4 699{
905d77cd 700 struct cpufreq_policy *policy = to_policy(kobj);
2d06d8c4 701 pr_debug("last reference is dropped\n");
1da177e4
LT
702 complete(&policy->kobj_unregister);
703}
704
52cf25d0 705static const struct sysfs_ops sysfs_ops = {
1da177e4
LT
706 .show = show,
707 .store = store,
708};
709
710static struct kobj_type ktype_cpufreq = {
711 .sysfs_ops = &sysfs_ops,
712 .default_attrs = default_attrs,
713 .release = cpufreq_sysfs_release,
714};
715
19d6f7ec 716/* symlink affected CPUs */
cf3289d0
AC
717static int cpufreq_add_dev_symlink(unsigned int cpu,
718 struct cpufreq_policy *policy)
19d6f7ec
DJ
719{
720 unsigned int j;
721 int ret = 0;
722
723 for_each_cpu(j, policy->cpus) {
724 struct cpufreq_policy *managed_policy;
8a25a2fd 725 struct device *cpu_dev;
19d6f7ec
DJ
726
727 if (j == cpu)
728 continue;
19d6f7ec 729
2d06d8c4 730 pr_debug("CPU %u already managed, adding link\n", j);
19d6f7ec 731 managed_policy = cpufreq_cpu_get(cpu);
8a25a2fd
KS
732 cpu_dev = get_cpu_device(j);
733 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
19d6f7ec
DJ
734 "cpufreq");
735 if (ret) {
736 cpufreq_cpu_put(managed_policy);
737 return ret;
738 }
739 }
740 return ret;
741}
742
cf3289d0
AC
743static int cpufreq_add_dev_interface(unsigned int cpu,
744 struct cpufreq_policy *policy,
8a25a2fd 745 struct device *dev)
909a694e 746{
ecf7e461 747 struct cpufreq_policy new_policy;
909a694e
DJ
748 struct freq_attr **drv_attr;
749 unsigned long flags;
750 int ret = 0;
751 unsigned int j;
752
753 /* prepare interface data */
754 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
8a25a2fd 755 &dev->kobj, "cpufreq");
909a694e
DJ
756 if (ret)
757 return ret;
758
759 /* set up files for this cpu device */
1c3d85dd 760 drv_attr = cpufreq_driver->attr;
909a694e
DJ
761 while ((drv_attr) && (*drv_attr)) {
762 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
763 if (ret)
1c3d85dd 764 goto err_out_kobj_put;
909a694e
DJ
765 drv_attr++;
766 }
1c3d85dd 767 if (cpufreq_driver->get) {
909a694e
DJ
768 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
769 if (ret)
1c3d85dd 770 goto err_out_kobj_put;
909a694e 771 }
1c3d85dd 772 if (cpufreq_driver->target) {
909a694e
DJ
773 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
774 if (ret)
1c3d85dd 775 goto err_out_kobj_put;
909a694e 776 }
1c3d85dd 777 if (cpufreq_driver->bios_limit) {
e2f74f35
TR
778 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
779 if (ret)
1c3d85dd 780 goto err_out_kobj_put;
e2f74f35 781 }
909a694e 782
0d1857a1 783 write_lock_irqsave(&cpufreq_driver_lock, flags);
909a694e 784 for_each_cpu(j, policy->cpus) {
909a694e 785 per_cpu(cpufreq_cpu_data, j) = policy;
f1625066 786 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
909a694e 787 }
0d1857a1 788 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
909a694e
DJ
789
790 ret = cpufreq_add_dev_symlink(cpu, policy);
ecf7e461
DJ
791 if (ret)
792 goto err_out_kobj_put;
793
794 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
795 /* assure that the starting sequence is run in __cpufreq_set_policy */
796 policy->governor = NULL;
797
798 /* set default policy */
799 ret = __cpufreq_set_policy(policy, &new_policy);
800 policy->user_policy.policy = policy->policy;
801 policy->user_policy.governor = policy->governor;
802
803 if (ret) {
2d06d8c4 804 pr_debug("setting policy failed\n");
1c3d85dd
RW
805 if (cpufreq_driver->exit)
806 cpufreq_driver->exit(policy);
ecf7e461 807 }
909a694e
DJ
808 return ret;
809
810err_out_kobj_put:
811 kobject_put(&policy->kobj);
812 wait_for_completion(&policy->kobj_unregister);
813 return ret;
814}
815
fcf80582
VK
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Attach a hotplugged-in CPU to an existing sibling's policy: stop the
 * governor, publish the CPU in the shared tables under the rwsem and
 * driver lock, restart the governor, and link the CPU's sysfs dir to
 * the shared policy kobject.
 */
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
1da177e4
LT
855
856/**
857 * cpufreq_add_dev - add a CPU device
858 *
32ee8c3e 859 * Adds the cpufreq interface for a CPU device.
3f4a782b
MD
860 *
861 * The Oracle says: try running cpufreq registration/unregistration concurrently
862 * with with cpu hotplugging and all hell will break loose. Tried to clean this
863 * mess up, but more thorough testing is needed. - Mathieu
1da177e4 864 */
8a25a2fd 865static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 866{
fcf80582 867 unsigned int j, cpu = dev->id;
65922465 868 int ret = -ENOMEM;
1da177e4 869 struct cpufreq_policy *policy;
1da177e4 870 unsigned long flags;
90e41bac 871#ifdef CONFIG_HOTPLUG_CPU
fcf80582 872 struct cpufreq_governor *gov;
90e41bac
PB
873 int sibling;
874#endif
1da177e4 875
c32b6b8e
AR
876 if (cpu_is_offline(cpu))
877 return 0;
878
2d06d8c4 879 pr_debug("adding CPU %u\n", cpu);
1da177e4
LT
880
881#ifdef CONFIG_SMP
882 /* check whether a different CPU already registered this
883 * CPU because it is in the same boat. */
884 policy = cpufreq_cpu_get(cpu);
885 if (unlikely(policy)) {
8ff69732 886 cpufreq_cpu_put(policy);
1da177e4
LT
887 return 0;
888 }
fcf80582
VK
889
890#ifdef CONFIG_HOTPLUG_CPU
891 /* Check if this cpu was hot-unplugged earlier and has siblings */
0d1857a1 892 read_lock_irqsave(&cpufreq_driver_lock, flags);
fcf80582
VK
893 for_each_online_cpu(sibling) {
894 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
2eaa3e2d 895 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
0d1857a1 896 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 897 return cpufreq_add_policy_cpu(cpu, sibling, dev);
2eaa3e2d 898 }
fcf80582 899 }
0d1857a1 900 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
fcf80582 901#endif
1da177e4
LT
902#endif
903
1c3d85dd 904 if (!try_module_get(cpufreq_driver->owner)) {
1da177e4
LT
905 ret = -EINVAL;
906 goto module_out;
907 }
908
e98df50c 909 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
059019a3 910 if (!policy)
1da177e4 911 goto nomem_out;
059019a3
DJ
912
913 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
3f4a782b 914 goto err_free_policy;
059019a3
DJ
915
916 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
3f4a782b 917 goto err_free_cpumask;
1da177e4
LT
918
919 policy->cpu = cpu;
65922465 920 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
835481d9 921 cpumask_copy(policy->cpus, cpumask_of(cpu));
1da177e4 922
5a01f2e8 923 /* Initially set CPU itself as the policy_cpu */
f1625066 924 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
5a01f2e8 925
1da177e4 926 init_completion(&policy->kobj_unregister);
65f27f38 927 INIT_WORK(&policy->update, handle_update);
1da177e4
LT
928
929 /* call driver. From then on the cpufreq must be able
930 * to accept all calls to ->verify and ->setpolicy for this CPU
931 */
1c3d85dd 932 ret = cpufreq_driver->init(policy);
1da177e4 933 if (ret) {
2d06d8c4 934 pr_debug("initialization failed\n");
2eaa3e2d 935 goto err_set_policy_cpu;
1da177e4 936 }
643ae6e8 937
fcf80582
VK
938 /* related cpus should atleast have policy->cpus */
939 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
940
643ae6e8
VK
941 /*
942 * affected cpus must always be the one, which are online. We aren't
943 * managing offline cpus here.
944 */
945 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
946
187d9f4e
MC
947 policy->user_policy.min = policy->min;
948 policy->user_policy.max = policy->max;
1da177e4 949
a1531acd
TR
950 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
951 CPUFREQ_START, policy);
952
fcf80582
VK
953#ifdef CONFIG_HOTPLUG_CPU
954 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
955 if (gov) {
956 policy->governor = gov;
957 pr_debug("Restoring governor %s for cpu %d\n",
958 policy->governor->name, cpu);
4bfa042c 959 }
fcf80582 960#endif
1da177e4 961
8a25a2fd 962 ret = cpufreq_add_dev_interface(cpu, policy, dev);
19d6f7ec
DJ
963 if (ret)
964 goto err_out_unregister;
8ff69732 965
038c5b3e 966 kobject_uevent(&policy->kobj, KOBJ_ADD);
1c3d85dd 967 module_put(cpufreq_driver->owner);
2d06d8c4 968 pr_debug("initialization complete\n");
87c32271 969
1da177e4
LT
970 return 0;
971
1da177e4 972err_out_unregister:
0d1857a1 973 write_lock_irqsave(&cpufreq_driver_lock, flags);
835481d9 974 for_each_cpu(j, policy->cpus)
7a6aedfa 975 per_cpu(cpufreq_cpu_data, j) = NULL;
0d1857a1 976 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 977
c10997f6 978 kobject_put(&policy->kobj);
1da177e4
LT
979 wait_for_completion(&policy->kobj_unregister);
980
2eaa3e2d
VK
981err_set_policy_cpu:
982 per_cpu(cpufreq_policy_cpu, cpu) = -1;
cad70a6a 983 free_cpumask_var(policy->related_cpus);
3f4a782b
MD
984err_free_cpumask:
985 free_cpumask_var(policy->cpus);
986err_free_policy:
1da177e4 987 kfree(policy);
1da177e4 988nomem_out:
1c3d85dd 989 module_put(cpufreq_driver->owner);
c32b6b8e 990module_out:
1da177e4
LT
991 return ret;
992}
993
b8eed8af
VK
994static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
995{
996 int j;
997
998 policy->last_cpu = policy->cpu;
999 policy->cpu = cpu;
1000
3361b7b1 1001 for_each_cpu(j, policy->cpus)
b8eed8af 1002 per_cpu(cpufreq_policy_cpu, j) = cpu;
b8eed8af
VK
1003
1004#ifdef CONFIG_CPU_FREQ_TABLE
1005 cpufreq_frequency_table_update_policy_cpu(policy);
1006#endif
1007 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1008 CPUFREQ_UPDATE_POLICY_CPU, policy);
1009}
1da177e4
LT
1010
1011/**
5a01f2e8 1012 * __cpufreq_remove_dev - remove a CPU device
1da177e4
LT
1013 *
1014 * Removes the cpufreq interface for a CPU device.
5a01f2e8
VP
1015 * Caller should already have policy_rwsem in write mode for this CPU.
1016 * This routine frees the rwsem before returning.
1da177e4 1017 */
8a25a2fd 1018static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1da177e4 1019{
b8eed8af 1020 unsigned int cpu = dev->id, ret, cpus;
1da177e4
LT
1021 unsigned long flags;
1022 struct cpufreq_policy *data;
499bca9b
AW
1023 struct kobject *kobj;
1024 struct completion *cmp;
8a25a2fd 1025 struct device *cpu_dev;
1da177e4 1026
b8eed8af 1027 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1da177e4 1028
0d1857a1 1029 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1030
7a6aedfa 1031 data = per_cpu(cpufreq_cpu_data, cpu);
2eaa3e2d
VK
1032 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1033
0d1857a1 1034 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1035
1036 if (!data) {
b8eed8af 1037 pr_debug("%s: No cpu_data found\n", __func__);
1da177e4
LT
1038 return -EINVAL;
1039 }
1da177e4 1040
1c3d85dd 1041 if (cpufreq_driver->target)
f6a7409c 1042 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1da177e4 1043
084f3493 1044#ifdef CONFIG_HOTPLUG_CPU
1c3d85dd 1045 if (!cpufreq_driver->setpolicy)
fa69e33f
DB
1046 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1047 data->governor->name, CPUFREQ_NAME_LEN);
1da177e4
LT
1048#endif
1049
2eaa3e2d 1050 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1051 cpus = cpumask_weight(data->cpus);
e4969eba
VK
1052
1053 if (cpus > 1)
1054 cpumask_clear_cpu(cpu, data->cpus);
2eaa3e2d 1055 unlock_policy_rwsem_write(cpu);
084f3493 1056
73bf0fc2
VK
1057 if (cpu != data->cpu) {
1058 sysfs_remove_link(&dev->kobj, "cpufreq");
1059 } else if (cpus > 1) {
b8eed8af
VK
1060 /* first sibling now owns the new sysfs dir */
1061 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1062 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1063 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1064 if (ret) {
1065 pr_err("%s: Failed to move kobj: %d", __func__, ret);
084f3493 1066
2eaa3e2d 1067 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1068 cpumask_set_cpu(cpu, data->cpus);
1da177e4 1069
0d1857a1 1070 write_lock_irqsave(&cpufreq_driver_lock, flags);
2eaa3e2d 1071 per_cpu(cpufreq_cpu_data, cpu) = data;
0d1857a1 1072 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1073
499bca9b 1074 unlock_policy_rwsem_write(cpu);
1da177e4 1075
2eaa3e2d
VK
1076 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1077 "cpufreq");
b8eed8af 1078 return -EINVAL;
1da177e4 1079 }
5a01f2e8 1080
2eaa3e2d 1081 WARN_ON(lock_policy_rwsem_write(cpu));
b8eed8af 1082 update_policy_cpu(data, cpu_dev->id);
2eaa3e2d 1083 unlock_policy_rwsem_write(cpu);
b8eed8af
VK
1084 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1085 __func__, cpu_dev->id, cpu);
1da177e4 1086 }
1da177e4 1087
d96038e0
VK
1088 if ((cpus == 1) && (cpufreq_driver->target))
1089 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1090
b8eed8af
VK
1091 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1092 cpufreq_cpu_put(data);
1da177e4 1093
b8eed8af
VK
1094 /* If cpu is last user of policy, free policy */
1095 if (cpus == 1) {
2eaa3e2d 1096 lock_policy_rwsem_read(cpu);
b8eed8af
VK
1097 kobj = &data->kobj;
1098 cmp = &data->kobj_unregister;
2eaa3e2d 1099 unlock_policy_rwsem_read(cpu);
b8eed8af 1100 kobject_put(kobj);
7d26e2d5 1101
b8eed8af
VK
1102 /* we need to make sure that the underlying kobj is actually
1103 * not referenced anymore by anybody before we proceed with
1104 * unloading.
1105 */
1106 pr_debug("waiting for dropping of refcount\n");
1107 wait_for_completion(cmp);
1108 pr_debug("wait complete\n");
7d26e2d5 1109
1c3d85dd
RW
1110 if (cpufreq_driver->exit)
1111 cpufreq_driver->exit(data);
27ecddc2 1112
b8eed8af
VK
1113 free_cpumask_var(data->related_cpus);
1114 free_cpumask_var(data->cpus);
1115 kfree(data);
1c3d85dd 1116 } else if (cpufreq_driver->target) {
b8eed8af
VK
1117 __cpufreq_governor(data, CPUFREQ_GOV_START);
1118 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
27ecddc2 1119 }
1da177e4 1120
2eaa3e2d 1121 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1da177e4
LT
1122 return 0;
1123}
1124
1125
8a25a2fd 1126static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
5a01f2e8 1127{
8a25a2fd 1128 unsigned int cpu = dev->id;
5a01f2e8 1129 int retval;
ec28297a
VP
1130
1131 if (cpu_is_offline(cpu))
1132 return 0;
1133
8a25a2fd 1134 retval = __cpufreq_remove_dev(dev, sif);
5a01f2e8
VP
1135 return retval;
1136}
1137
1138
65f27f38 1139static void handle_update(struct work_struct *work)
1da177e4 1140{
65f27f38
DH
1141 struct cpufreq_policy *policy =
1142 container_of(work, struct cpufreq_policy, update);
1143 unsigned int cpu = policy->cpu;
2d06d8c4 1144 pr_debug("handle_update for cpu %u called\n", cpu);
1da177e4
LT
1145 cpufreq_update_policy(cpu);
1146}
1147
1148/**
1149 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1150 * @cpu: cpu number
1151 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1152 * @new_freq: CPU frequency the CPU actually runs at
1153 *
29464f28
DJ
1154 * We adjust to current frequency first, and need to clean up later.
1155 * So either call to cpufreq_update_policy() or schedule handle_update()).
1da177e4 1156 */
e08f5f5b
GS
1157static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1158 unsigned int new_freq)
1da177e4 1159{
b43a7ffb 1160 struct cpufreq_policy *policy;
1da177e4 1161 struct cpufreq_freqs freqs;
b43a7ffb
VK
1162 unsigned long flags;
1163
1da177e4 1164
2d06d8c4 1165 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1da177e4
LT
1166 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1167
1da177e4
LT
1168 freqs.old = old_freq;
1169 freqs.new = new_freq;
b43a7ffb
VK
1170
1171 read_lock_irqsave(&cpufreq_driver_lock, flags);
1172 policy = per_cpu(cpufreq_cpu_data, cpu);
1173 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1174
1175 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1176 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1da177e4
LT
1177}
1178
1179
32ee8c3e 1180/**
4ab70df4 1181 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
95235ca2
VP
1182 * @cpu: CPU number
1183 *
1184 * This is the last known freq, without actually getting it from the driver.
1185 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1186 */
1187unsigned int cpufreq_quick_get(unsigned int cpu)
1188{
9e21ba8b 1189 struct cpufreq_policy *policy;
e08f5f5b 1190 unsigned int ret_freq = 0;
95235ca2 1191
1c3d85dd
RW
1192 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1193 return cpufreq_driver->get(cpu);
9e21ba8b
DB
1194
1195 policy = cpufreq_cpu_get(cpu);
95235ca2 1196 if (policy) {
e08f5f5b 1197 ret_freq = policy->cur;
95235ca2
VP
1198 cpufreq_cpu_put(policy);
1199 }
1200
4d34a67d 1201 return ret_freq;
95235ca2
VP
1202}
1203EXPORT_SYMBOL(cpufreq_quick_get);
1204
3d737108
JB
1205/**
1206 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1207 * @cpu: CPU number
1208 *
1209 * Just return the max possible frequency for a given CPU.
1210 */
1211unsigned int cpufreq_quick_get_max(unsigned int cpu)
1212{
1213 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1214 unsigned int ret_freq = 0;
1215
1216 if (policy) {
1217 ret_freq = policy->max;
1218 cpufreq_cpu_put(policy);
1219 }
1220
1221 return ret_freq;
1222}
1223EXPORT_SYMBOL(cpufreq_quick_get_max);
1224
95235ca2 1225
5a01f2e8 1226static unsigned int __cpufreq_get(unsigned int cpu)
1da177e4 1227{
7a6aedfa 1228 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
e08f5f5b 1229 unsigned int ret_freq = 0;
5800043b 1230
1c3d85dd 1231 if (!cpufreq_driver->get)
4d34a67d 1232 return ret_freq;
1da177e4 1233
1c3d85dd 1234 ret_freq = cpufreq_driver->get(cpu);
1da177e4 1235
e08f5f5b 1236 if (ret_freq && policy->cur &&
1c3d85dd 1237 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
e08f5f5b
GS
1238 /* verify no discrepancy between actual and
1239 saved value exists */
1240 if (unlikely(ret_freq != policy->cur)) {
1241 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1da177e4
LT
1242 schedule_work(&policy->update);
1243 }
1244 }
1245
4d34a67d 1246 return ret_freq;
5a01f2e8 1247}
1da177e4 1248
5a01f2e8
VP
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return freq;
}
EXPORT_SYMBOL(cpufreq_get);
1276
8a25a2fd
KS
1277static struct subsys_interface cpufreq_interface = {
1278 .name = "cpufreq",
1279 .subsys = &cpu_subsys,
1280 .add_dev = cpufreq_add_dev,
1281 .remove_dev = cpufreq_remove_dev,
e00e56df
RW
1282};
1283
1da177e4 1284
42d4dc3f 1285/**
e00e56df
RW
1286 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1287 *
1288 * This function is only executed for the boot processor. The other CPUs
1289 * have been put offline by means of CPU hotplug.
42d4dc3f 1290 */
e00e56df 1291static int cpufreq_bp_suspend(void)
42d4dc3f 1292{
e08f5f5b 1293 int ret = 0;
4bc5d341 1294
e00e56df 1295 int cpu = smp_processor_id();
42d4dc3f
BH
1296 struct cpufreq_policy *cpu_policy;
1297
2d06d8c4 1298 pr_debug("suspending cpu %u\n", cpu);
42d4dc3f 1299
e00e56df 1300 /* If there's no policy for the boot CPU, we have nothing to do. */
42d4dc3f
BH
1301 cpu_policy = cpufreq_cpu_get(cpu);
1302 if (!cpu_policy)
e00e56df 1303 return 0;
42d4dc3f 1304
1c3d85dd
RW
1305 if (cpufreq_driver->suspend) {
1306 ret = cpufreq_driver->suspend(cpu_policy);
ce6c3997 1307 if (ret)
42d4dc3f
BH
1308 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1309 "step on CPU %u\n", cpu_policy->cpu);
42d4dc3f
BH
1310 }
1311
42d4dc3f 1312 cpufreq_cpu_put(cpu_policy);
c9060494 1313 return ret;
42d4dc3f
BH
1314}
1315
1da177e4 1316/**
e00e56df 1317 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1da177e4
LT
1318 *
1319 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
ce6c3997
DB
1320 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1321 * restored. It will verify that the current freq is in sync with
1322 * what we believe it to be. This is a bit later than when it
1323 * should be, but nonethteless it's better than calling
1324 * cpufreq_driver->get() here which might re-enable interrupts...
e00e56df
RW
1325 *
1326 * This function is only executed for the boot CPU. The other CPUs have not
1327 * been turned on yet.
1da177e4 1328 */
e00e56df 1329static void cpufreq_bp_resume(void)
1da177e4 1330{
e08f5f5b 1331 int ret = 0;
4bc5d341 1332
e00e56df 1333 int cpu = smp_processor_id();
1da177e4
LT
1334 struct cpufreq_policy *cpu_policy;
1335
2d06d8c4 1336 pr_debug("resuming cpu %u\n", cpu);
1da177e4 1337
e00e56df 1338 /* If there's no policy for the boot CPU, we have nothing to do. */
1da177e4
LT
1339 cpu_policy = cpufreq_cpu_get(cpu);
1340 if (!cpu_policy)
e00e56df 1341 return;
1da177e4 1342
1c3d85dd
RW
1343 if (cpufreq_driver->resume) {
1344 ret = cpufreq_driver->resume(cpu_policy);
1da177e4
LT
1345 if (ret) {
1346 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1347 "step on CPU %u\n", cpu_policy->cpu);
c9060494 1348 goto fail;
1da177e4
LT
1349 }
1350 }
1351
1da177e4 1352 schedule_work(&cpu_policy->update);
ce6c3997 1353
c9060494 1354fail:
1da177e4 1355 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1356}
1357
e00e56df
RW
1358static struct syscore_ops cpufreq_syscore_ops = {
1359 .suspend = cpufreq_bp_suspend,
1360 .resume = cpufreq_bp_resume,
1da177e4
LT
1361};
1362
9d95046e
BP
1363/**
1364 * cpufreq_get_current_driver - return current driver's name
1365 *
1366 * Return the name string of the currently loaded cpufreq driver
1367 * or NULL, if none.
1368 */
1369const char *cpufreq_get_current_driver(void)
1370{
1c3d85dd
RW
1371 if (cpufreq_driver)
1372 return cpufreq_driver->name;
1373
1374 return NULL;
9d95046e
BP
1375}
1376EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1da177e4
LT
1377
1378/*********************************************************************
1379 * NOTIFIER LISTS INTERFACE *
1380 *********************************************************************/
1381
1382/**
1383 * cpufreq_register_notifier - register a driver with cpufreq
1384 * @nb: notifier function to register
1385 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1386 *
32ee8c3e 1387 * Add a driver to one of two lists: either a list of drivers that
1da177e4
LT
1388 * are notified about clock rate changes (once before and once after
1389 * the transition), or a list of drivers that are notified about
1390 * changes in cpufreq policy.
1391 *
1392 * This function may sleep, and has the same return conditions as
e041c683 1393 * blocking_notifier_chain_register.
1da177e4
LT
1394 */
1395int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1396{
1397 int ret;
1398
d5aaffa9
DB
1399 if (cpufreq_disabled())
1400 return -EINVAL;
1401
74212ca4
CEB
1402 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1403
1da177e4
LT
1404 switch (list) {
1405 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1406 ret = srcu_notifier_chain_register(
e041c683 1407 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1408 break;
1409 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1410 ret = blocking_notifier_chain_register(
1411 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1412 break;
1413 default:
1414 ret = -EINVAL;
1415 }
1da177e4
LT
1416
1417 return ret;
1418}
1419EXPORT_SYMBOL(cpufreq_register_notifier);
1420
1421
1422/**
1423 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1424 * @nb: notifier block to be unregistered
1425 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1426 *
1427 * Remove a driver from the CPU frequency notifier list.
1428 *
1429 * This function may sleep, and has the same return conditions as
e041c683 1430 * blocking_notifier_chain_unregister.
1da177e4
LT
1431 */
1432int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1433{
1434 int ret;
1435
d5aaffa9
DB
1436 if (cpufreq_disabled())
1437 return -EINVAL;
1438
1da177e4
LT
1439 switch (list) {
1440 case CPUFREQ_TRANSITION_NOTIFIER:
b4dfdbb3 1441 ret = srcu_notifier_chain_unregister(
e041c683 1442 &cpufreq_transition_notifier_list, nb);
1da177e4
LT
1443 break;
1444 case CPUFREQ_POLICY_NOTIFIER:
e041c683
AS
1445 ret = blocking_notifier_chain_unregister(
1446 &cpufreq_policy_notifier_list, nb);
1da177e4
LT
1447 break;
1448 default:
1449 ret = -EINVAL;
1450 }
1da177e4
LT
1451
1452 return ret;
1453}
1454EXPORT_SYMBOL(cpufreq_unregister_notifier);
1455
1456
1457/*********************************************************************
1458 * GOVERNORS *
1459 *********************************************************************/
1460
1461
1462int __cpufreq_driver_target(struct cpufreq_policy *policy,
1463 unsigned int target_freq,
1464 unsigned int relation)
1465{
1466 int retval = -EINVAL;
7249924e 1467 unsigned int old_target_freq = target_freq;
c32b6b8e 1468
a7b422cd
KRW
1469 if (cpufreq_disabled())
1470 return -ENODEV;
1471
7249924e
VK
1472 /* Make sure that target_freq is within supported range */
1473 if (target_freq > policy->max)
1474 target_freq = policy->max;
1475 if (target_freq < policy->min)
1476 target_freq = policy->min;
1477
1478 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1479 policy->cpu, target_freq, relation, old_target_freq);
5a1c0228
VK
1480
1481 if (target_freq == policy->cur)
1482 return 0;
1483
1c3d85dd
RW
1484 if (cpufreq_driver->target)
1485 retval = cpufreq_driver->target(policy, target_freq, relation);
90d45d17 1486
1da177e4
LT
1487 return retval;
1488}
1489EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1490
1da177e4
LT
1491int cpufreq_driver_target(struct cpufreq_policy *policy,
1492 unsigned int target_freq,
1493 unsigned int relation)
1494{
f1829e4a 1495 int ret = -EINVAL;
1da177e4
LT
1496
1497 policy = cpufreq_cpu_get(policy->cpu);
1498 if (!policy)
f1829e4a 1499 goto no_policy;
1da177e4 1500
5a01f2e8 1501 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
f1829e4a 1502 goto fail;
1da177e4
LT
1503
1504 ret = __cpufreq_driver_target(policy, target_freq, relation);
1505
5a01f2e8 1506 unlock_policy_rwsem_write(policy->cpu);
1da177e4 1507
f1829e4a 1508fail:
1da177e4 1509 cpufreq_cpu_put(policy);
f1829e4a 1510no_policy:
1da177e4
LT
1511 return ret;
1512}
1513EXPORT_SYMBOL_GPL(cpufreq_driver_target);
1514
bf0b90e3 1515int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
dfde5d62
VP
1516{
1517 int ret = 0;
1518
d5aaffa9
DB
1519 if (cpufreq_disabled())
1520 return ret;
1521
1c3d85dd 1522 if (!cpufreq_driver->getavg)
0676f7f2
VK
1523 return 0;
1524
dfde5d62
VP
1525 policy = cpufreq_cpu_get(policy->cpu);
1526 if (!policy)
1527 return -EINVAL;
1528
1c3d85dd 1529 ret = cpufreq_driver->getavg(policy, cpu);
dfde5d62 1530
dfde5d62
VP
1531 cpufreq_cpu_put(policy);
1532 return ret;
1533}
5a01f2e8 1534EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
dfde5d62 1535
153d7f3f 1536/*
153d7f3f
AV
1537 * when "event" is CPUFREQ_GOV_LIMITS
1538 */
1da177e4 1539
e08f5f5b
GS
1540static int __cpufreq_governor(struct cpufreq_policy *policy,
1541 unsigned int event)
1da177e4 1542{
cc993cab 1543 int ret;
6afde10c
TR
1544
1545 /* Only must be defined when default governor is known to have latency
1546 restrictions, like e.g. conservative or ondemand.
1547 That this is the case is already ensured in Kconfig
1548 */
1549#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1550 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1551#else
1552 struct cpufreq_governor *gov = NULL;
1553#endif
1c256245
TR
1554
1555 if (policy->governor->max_transition_latency &&
1556 policy->cpuinfo.transition_latency >
1557 policy->governor->max_transition_latency) {
6afde10c
TR
1558 if (!gov)
1559 return -EINVAL;
1560 else {
1561 printk(KERN_WARNING "%s governor failed, too long"
1562 " transition latency of HW, fallback"
1563 " to %s governor\n",
1564 policy->governor->name,
1565 gov->name);
1566 policy->governor = gov;
1567 }
1c256245 1568 }
1da177e4
LT
1569
1570 if (!try_module_get(policy->governor->owner))
1571 return -EINVAL;
1572
2d06d8c4 1573 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
e08f5f5b 1574 policy->cpu, event);
1da177e4
LT
1575 ret = policy->governor->governor(policy, event);
1576
4d5dcc42
VK
1577 if (!ret) {
1578 if (event == CPUFREQ_GOV_POLICY_INIT)
1579 policy->governor->initialized++;
1580 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1581 policy->governor->initialized--;
1582 }
b394058f 1583
e08f5f5b
GS
1584 /* we keep one module reference alive for
1585 each CPU governed by this CPU */
1da177e4
LT
1586 if ((event != CPUFREQ_GOV_START) || ret)
1587 module_put(policy->governor->owner);
1588 if ((event == CPUFREQ_GOV_STOP) && !ret)
1589 module_put(policy->governor->owner);
1590
1591 return ret;
1592}
1593
1594
1da177e4
LT
1595int cpufreq_register_governor(struct cpufreq_governor *governor)
1596{
3bcb09a3 1597 int err;
1da177e4
LT
1598
1599 if (!governor)
1600 return -EINVAL;
1601
a7b422cd
KRW
1602 if (cpufreq_disabled())
1603 return -ENODEV;
1604
3fc54d37 1605 mutex_lock(&cpufreq_governor_mutex);
32ee8c3e 1606
b394058f 1607 governor->initialized = 0;
3bcb09a3
JF
1608 err = -EBUSY;
1609 if (__find_governor(governor->name) == NULL) {
1610 err = 0;
1611 list_add(&governor->governor_list, &cpufreq_governor_list);
1da177e4 1612 }
1da177e4 1613
32ee8c3e 1614 mutex_unlock(&cpufreq_governor_mutex);
3bcb09a3 1615 return err;
1da177e4
LT
1616}
1617EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1618
1619
1620void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1621{
90e41bac
PB
1622#ifdef CONFIG_HOTPLUG_CPU
1623 int cpu;
1624#endif
1625
1da177e4
LT
1626 if (!governor)
1627 return;
1628
a7b422cd
KRW
1629 if (cpufreq_disabled())
1630 return;
1631
90e41bac
PB
1632#ifdef CONFIG_HOTPLUG_CPU
1633 for_each_present_cpu(cpu) {
1634 if (cpu_online(cpu))
1635 continue;
1636 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1637 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1638 }
1639#endif
1640
3fc54d37 1641 mutex_lock(&cpufreq_governor_mutex);
1da177e4 1642 list_del(&governor->governor_list);
3fc54d37 1643 mutex_unlock(&cpufreq_governor_mutex);
1da177e4
LT
1644 return;
1645}
1646EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1647
1648
1649
1650/*********************************************************************
1651 * POLICY INTERFACE *
1652 *********************************************************************/
1653
1654/**
1655 * cpufreq_get_policy - get the current cpufreq_policy
29464f28
DJ
1656 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1657 * is written
1da177e4
LT
1658 *
1659 * Reads the current cpufreq policy.
1660 */
1661int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1662{
1663 struct cpufreq_policy *cpu_policy;
1664 if (!policy)
1665 return -EINVAL;
1666
1667 cpu_policy = cpufreq_cpu_get(cpu);
1668 if (!cpu_policy)
1669 return -EINVAL;
1670
1da177e4 1671 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1da177e4
LT
1672
1673 cpufreq_cpu_put(cpu_policy);
1da177e4
LT
1674 return 0;
1675}
1676EXPORT_SYMBOL(cpufreq_get_policy);
1677
1678
153d7f3f 1679/*
e08f5f5b
GS
1680 * data : current policy.
1681 * policy : policy to be set.
153d7f3f 1682 */
e08f5f5b
GS
1683static int __cpufreq_set_policy(struct cpufreq_policy *data,
1684 struct cpufreq_policy *policy)
1da177e4 1685{
7bd353a9 1686 int ret = 0, failed = 1;
1da177e4 1687
2d06d8c4 1688 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1da177e4
LT
1689 policy->min, policy->max);
1690
e08f5f5b
GS
1691 memcpy(&policy->cpuinfo, &data->cpuinfo,
1692 sizeof(struct cpufreq_cpuinfo));
1da177e4 1693
53391fa2 1694 if (policy->min > data->max || policy->max < data->min) {
9c9a43ed
MD
1695 ret = -EINVAL;
1696 goto error_out;
1697 }
1698
1da177e4 1699 /* verify the cpu speed can be set within this limit */
1c3d85dd 1700 ret = cpufreq_driver->verify(policy);
1da177e4
LT
1701 if (ret)
1702 goto error_out;
1703
1da177e4 1704 /* adjust if necessary - all reasons */
e041c683
AS
1705 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1706 CPUFREQ_ADJUST, policy);
1da177e4
LT
1707
1708 /* adjust if necessary - hardware incompatibility*/
e041c683
AS
1709 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1710 CPUFREQ_INCOMPATIBLE, policy);
1da177e4
LT
1711
1712 /* verify the cpu speed can be set within this limit,
1713 which might be different to the first one */
1c3d85dd 1714 ret = cpufreq_driver->verify(policy);
e041c683 1715 if (ret)
1da177e4 1716 goto error_out;
1da177e4
LT
1717
1718 /* notification of the new policy */
e041c683
AS
1719 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1720 CPUFREQ_NOTIFY, policy);
1da177e4 1721
7d5e350f
DJ
1722 data->min = policy->min;
1723 data->max = policy->max;
1da177e4 1724
2d06d8c4 1725 pr_debug("new min and max freqs are %u - %u kHz\n",
e08f5f5b 1726 data->min, data->max);
1da177e4 1727
1c3d85dd 1728 if (cpufreq_driver->setpolicy) {
1da177e4 1729 data->policy = policy->policy;
2d06d8c4 1730 pr_debug("setting range\n");
1c3d85dd 1731 ret = cpufreq_driver->setpolicy(policy);
1da177e4
LT
1732 } else {
1733 if (policy->governor != data->governor) {
1734 /* save old, working values */
1735 struct cpufreq_governor *old_gov = data->governor;
1736
2d06d8c4 1737 pr_debug("governor switch\n");
1da177e4
LT
1738
1739 /* end old governor */
7bd353a9 1740 if (data->governor) {
1da177e4 1741 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
955ef483 1742 unlock_policy_rwsem_write(policy->cpu);
7bd353a9
VK
1743 __cpufreq_governor(data,
1744 CPUFREQ_GOV_POLICY_EXIT);
955ef483 1745 lock_policy_rwsem_write(policy->cpu);
7bd353a9 1746 }
1da177e4
LT
1747
1748 /* start new governor */
1749 data->governor = policy->governor;
7bd353a9 1750 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
955ef483 1751 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
7bd353a9 1752 failed = 0;
955ef483
VK
1753 } else {
1754 unlock_policy_rwsem_write(policy->cpu);
7bd353a9
VK
1755 __cpufreq_governor(data,
1756 CPUFREQ_GOV_POLICY_EXIT);
955ef483
VK
1757 lock_policy_rwsem_write(policy->cpu);
1758 }
7bd353a9
VK
1759 }
1760
1761 if (failed) {
1da177e4 1762 /* new governor failed, so re-start old one */
2d06d8c4 1763 pr_debug("starting governor %s failed\n",
e08f5f5b 1764 data->governor->name);
1da177e4
LT
1765 if (old_gov) {
1766 data->governor = old_gov;
7bd353a9
VK
1767 __cpufreq_governor(data,
1768 CPUFREQ_GOV_POLICY_INIT);
e08f5f5b
GS
1769 __cpufreq_governor(data,
1770 CPUFREQ_GOV_START);
1da177e4
LT
1771 }
1772 ret = -EINVAL;
1773 goto error_out;
1774 }
1775 /* might be a policy change, too, so fall through */
1776 }
2d06d8c4 1777 pr_debug("governor: change or update limits\n");
1da177e4
LT
1778 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1779 }
1780
7d5e350f 1781error_out:
1da177e4
LT
1782 return ret;
1783}
1784
1da177e4
LT
1785/**
1786 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1787 * @cpu: CPU which shall be re-evaluated
1788 *
25985edc 1789 * Useful for policy notifiers which have different necessities
1da177e4
LT
1790 * at different times.
1791 */
1792int cpufreq_update_policy(unsigned int cpu)
1793{
1794 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1795 struct cpufreq_policy policy;
f1829e4a 1796 int ret;
1da177e4 1797
f1829e4a
JL
1798 if (!data) {
1799 ret = -ENODEV;
1800 goto no_policy;
1801 }
1da177e4 1802
f1829e4a
JL
1803 if (unlikely(lock_policy_rwsem_write(cpu))) {
1804 ret = -EINVAL;
1805 goto fail;
1806 }
1da177e4 1807
2d06d8c4 1808 pr_debug("updating policy for CPU %u\n", cpu);
7d5e350f 1809 memcpy(&policy, data, sizeof(struct cpufreq_policy));
1da177e4
LT
1810 policy.min = data->user_policy.min;
1811 policy.max = data->user_policy.max;
1812 policy.policy = data->user_policy.policy;
1813 policy.governor = data->user_policy.governor;
1814
0961dd0d
TR
1815 /* BIOS might change freq behind our back
1816 -> ask driver for current freq and notify governors about a change */
1c3d85dd
RW
1817 if (cpufreq_driver->get) {
1818 policy.cur = cpufreq_driver->get(cpu);
a85f7bd3 1819 if (!data->cur) {
2d06d8c4 1820 pr_debug("Driver did not initialize current freq");
a85f7bd3
TR
1821 data->cur = policy.cur;
1822 } else {
1c3d85dd 1823 if (data->cur != policy.cur && cpufreq_driver->target)
e08f5f5b
GS
1824 cpufreq_out_of_sync(cpu, data->cur,
1825 policy.cur);
a85f7bd3 1826 }
0961dd0d
TR
1827 }
1828
1da177e4
LT
1829 ret = __cpufreq_set_policy(data, &policy);
1830
5a01f2e8
VP
1831 unlock_policy_rwsem_write(cpu);
1832
f1829e4a 1833fail:
1da177e4 1834 cpufreq_cpu_put(data);
f1829e4a 1835no_policy:
1da177e4
LT
1836 return ret;
1837}
1838EXPORT_SYMBOL(cpufreq_update_policy);
1839
dd184a01 1840static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
c32b6b8e
AR
1841 unsigned long action, void *hcpu)
1842{
1843 unsigned int cpu = (unsigned long)hcpu;
8a25a2fd 1844 struct device *dev;
c32b6b8e 1845
8a25a2fd
KS
1846 dev = get_cpu_device(cpu);
1847 if (dev) {
c32b6b8e
AR
1848 switch (action) {
1849 case CPU_ONLINE:
8a25a2fd 1850 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1851 break;
1852 case CPU_DOWN_PREPARE:
a66b2e50 1853 case CPU_UP_CANCELED_FROZEN:
8a25a2fd 1854 __cpufreq_remove_dev(dev, NULL);
c32b6b8e 1855 break;
5a01f2e8 1856 case CPU_DOWN_FAILED:
8a25a2fd 1857 cpufreq_add_dev(dev, NULL);
c32b6b8e
AR
1858 break;
1859 }
1860 }
1861 return NOTIFY_OK;
1862}
1863
9c36f746 1864static struct notifier_block __refdata cpufreq_cpu_notifier = {
c32b6b8e
AR
1865 .notifier_call = cpufreq_cpu_callback,
1866};
1da177e4
LT
1867
1868/*********************************************************************
1869 * REGISTER / UNREGISTER CPUFREQ DRIVER *
1870 *********************************************************************/
1871
1872/**
1873 * cpufreq_register_driver - register a CPU Frequency driver
1874 * @driver_data: A struct cpufreq_driver containing the values#
1875 * submitted by the CPU Frequency driver.
1876 *
32ee8c3e 1877 * Registers a CPU Frequency driver to this core code. This code
1da177e4 1878 * returns zero on success, -EBUSY when another driver got here first
32ee8c3e 1879 * (and isn't unregistered in the meantime).
1da177e4
LT
1880 *
1881 */
221dee28 1882int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1da177e4
LT
1883{
1884 unsigned long flags;
1885 int ret;
1886
a7b422cd
KRW
1887 if (cpufreq_disabled())
1888 return -ENODEV;
1889
1da177e4
LT
1890 if (!driver_data || !driver_data->verify || !driver_data->init ||
1891 ((!driver_data->setpolicy) && (!driver_data->target)))
1892 return -EINVAL;
1893
2d06d8c4 1894 pr_debug("trying to register driver %s\n", driver_data->name);
1da177e4
LT
1895
1896 if (driver_data->setpolicy)
1897 driver_data->flags |= CPUFREQ_CONST_LOOPS;
1898
0d1857a1 1899 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 1900 if (cpufreq_driver) {
0d1857a1 1901 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1902 return -EBUSY;
1903 }
1c3d85dd 1904 cpufreq_driver = driver_data;
0d1857a1 1905 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4 1906
8a25a2fd 1907 ret = subsys_interface_register(&cpufreq_interface);
8f5bc2ab
JS
1908 if (ret)
1909 goto err_null_driver;
1da177e4 1910
1c3d85dd 1911 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1da177e4
LT
1912 int i;
1913 ret = -ENODEV;
1914
1915 /* check for at least one working CPU */
7a6aedfa
MT
1916 for (i = 0; i < nr_cpu_ids; i++)
1917 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
1da177e4 1918 ret = 0;
7a6aedfa
MT
1919 break;
1920 }
1da177e4
LT
1921
1922 /* if all ->init() calls failed, unregister */
1923 if (ret) {
2d06d8c4 1924 pr_debug("no CPU initialized for driver %s\n",
e08f5f5b 1925 driver_data->name);
8a25a2fd 1926 goto err_if_unreg;
1da177e4
LT
1927 }
1928 }
1929
8f5bc2ab 1930 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2d06d8c4 1931 pr_debug("driver %s up and running\n", driver_data->name);
1da177e4 1932
8f5bc2ab 1933 return 0;
8a25a2fd
KS
1934err_if_unreg:
1935 subsys_interface_unregister(&cpufreq_interface);
8f5bc2ab 1936err_null_driver:
0d1857a1 1937 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 1938 cpufreq_driver = NULL;
0d1857a1 1939 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
4d34a67d 1940 return ret;
1da177e4
LT
1941}
1942EXPORT_SYMBOL_GPL(cpufreq_register_driver);
1943
1944
1945/**
1946 * cpufreq_unregister_driver - unregister the current CPUFreq driver
1947 *
32ee8c3e 1948 * Unregister the current CPUFreq driver. Only call this if you have
1da177e4
LT
1949 * the right to do so, i.e. if you have succeeded in initialising before!
1950 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
1951 * currently not initialised.
1952 */
221dee28 1953int cpufreq_unregister_driver(struct cpufreq_driver *driver)
1da177e4
LT
1954{
1955 unsigned long flags;
1956
1c3d85dd 1957 if (!cpufreq_driver || (driver != cpufreq_driver))
1da177e4 1958 return -EINVAL;
1da177e4 1959
2d06d8c4 1960 pr_debug("unregistering driver %s\n", driver->name);
1da177e4 1961
8a25a2fd 1962 subsys_interface_unregister(&cpufreq_interface);
65edc68c 1963 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
1da177e4 1964
0d1857a1 1965 write_lock_irqsave(&cpufreq_driver_lock, flags);
1c3d85dd 1966 cpufreq_driver = NULL;
0d1857a1 1967 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1da177e4
LT
1968
1969 return 0;
1970}
1971EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
5a01f2e8
VP
1972
/*
 * cpufreq_core_init - early boot initialisation of the cpufreq core.
 *
 * Runs at core_initcall time (before any cpufreq driver can register):
 * resets the per-CPU policy bookkeeping, creates the global "cpufreq"
 * sysfs kobject and hooks up the syscore suspend/resume ops.
 * Returns 0 on success, -ENODEV when cpufreq is disabled on the
 * command line.
 */
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	/* No policy owns any CPU yet; -1 marks "unmanaged". */
	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	/* /sys/devices/system/cpu/cpufreq — parent for governor attributes. */
	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);	/* boot cannot proceed without it */
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);
This page took 0.827645 seconds and 5 git commands to generate.